/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/trim_map.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
#if defined(__amd64__)
static int zio_use_uma = 1;
#else
static int zio_use_uma = 0;
#endif
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
    "Use uma(9) for ZIO allocations");
static int zio_exclude_metadata = 0;
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN,
    &zio_exclude_metadata, 0,
    "Exclude metadata buffers from dumps as well");

zio_trim_stats_t zio_trim_stats = {
	{ "bytes",		KSTAT_DATA_UINT64,
	    "Number of bytes successfully TRIMmed" },
	{ "success",		KSTAT_DATA_UINT64,
	    "Number of successful TRIM requests" },
	{ "unsupported",	KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed because TRIM is not supported" },
	{ "failed",		KSTAT_DATA_UINT64,
	    "Number of TRIM requests that failed for reasons other than not supported" },
};

static kstat_t *zio_trim_ksp;

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN,
    &zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass");
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN,
    &zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass");
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN,
    &zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass");
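
/*
 * For example, with the defaults above: a free issued during pass 1 of
 * spa_sync() may be executed immediately by zio_free() (subject to the
 * other conditions checked there), while one issued in pass 2 or later
 * is appended to the deferred bplist; compression is disabled from
 * pass 5 onward; and from pass 2 onward a same-size overwrite of a
 * block born in this txg reuses the existing bp rather than allocating
 * a new one (see zio_write_bp_init()).
 */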

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

void
zio_init(void)
{
	size_t c;
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	if (!zio_use_uma)
		goto out;

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache
	 * for each quarter-power of 2. For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

#ifdef illumos
#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
#endif /* illumos */
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, PAGESIZE)) {
			align = PAGESIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL,
			    cflags | KMC_NOTOUCH | KMC_NODEBUG);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}
out:

	zio_inject_init();

	zio_trim_ksp = kstat_create("zfs", 0, "zio_trim", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof(zio_trim_stats) / sizeof(kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zio_trim_ksp != NULL) {
		zio_trim_ksp->ks_data = &zio_trim_stats;
		kstat_install(zio_trim_ksp);
	}
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	if (zio_trim_ksp != NULL) {
		kstat_delete(zio_trim_ksp);
		zio_trim_ksp = NULL;
	}
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
	int flags = zio_exclude_metadata ? KM_NODEBUG : 0;

	ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP|flags));
}
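
/*
 * The size passed to zio_buf_free() must match the size passed to
 * zio_buf_alloc(), since it selects the cache the buffer came from.
 * A typical caller therefore keeps the allocation size around, e.g.
 * (a sketch of the pattern used by the compression code below, not a
 * verbatim excerpt):
 *
 *	void *cbuf = zio_buf_alloc(lsize);
 *	...fill cbuf, or hand it to a transform...
 *	zio_buf_free(cbuf, lsize);
 */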

/*
 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
 * crashdump if the kernel panics. This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump, reducing the
 * amount of kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
	else
		return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_use_uma)
		kmem_cache_free(zio_data_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = SET_ERROR(EIO);
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 *	continue calling these functions until they return NULL.
 *	Otherwise, the next caller will pick up the list walk in
 *	some indeterminate state. (Otherwise every caller would
 *	have to pass in a cookie to keep the state represented by
 *	io_walk_link, which gets annoying.)
 */
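
/*
 * For example, a full walk looks like this (a sketch of the intended
 * calling convention, not a verbatim excerpt from any caller):
 *
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio)) != NULL)
 *		...visit pio...
 */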
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}
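
/*
 * Note how the interlock between a parent and its children works:
 * zio_wait_for_children() backs io_stage up by one (io_stage >>= 1)
 * and records which child counter the zio is stalled on, and
 * zio_notify_parent() restarts the parent with zio_execute() when
 * that counter drains to zero, which re-runs the stalled stage.
 */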

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_phys_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(type == ZIO_TYPE_FREE || size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}
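
/*
 * A common pattern is to gather a set of independent child I/Os under
 * a root zio and wait for them all at once, e.g. (a sketch, not a
 * verbatim excerpt from any caller; buf, size, flags and zb are
 * caller-supplied):
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for each bp of interest:
 *		zio_nowait(zio_read(rio, spa, bp, buf, size,
 *		    NULL, NULL, ZIO_PRIORITY_SYNC_READ, flags, zb));
 *	error = zio_wait(rio);
 */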
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *physdone, zio_done_func_t *done,
    void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP. But we may need the data to
	 * verify a dedup hit (if requested). In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	/*
	 * The check for EMBEDDED is a performance optimization. We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately. Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (zfs_trim_enabled || BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp,
		    BP_GET_PSIZE(bp), 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    uint64_t size, enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	if (zfs_trim_enabled)
		stage |= ZIO_STAGE_ISSUE_ASYNC | ZIO_STAGE_VDEV_IO_START |
		    ZIO_STAGE_VDEV_IO_ASSESS;
	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	else if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	flags |= ZIO_FLAG_DONT_QUEUE;

	zio = zio_create(pio, spa, txg, bp, NULL, size,
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}
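
/*
 * Note that when trim is enabled, zio_free_sync() above adds the vdev
 * I/O stages to what is otherwise an interlock-only pipeline, so that
 * the free reaches the vdev layer (where the trim map is maintained)
 * rather than being consumed entirely above it.
 */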

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "claiming in txg %llu", txg);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block. Claims are needed
	 * to support immediate writes in the intent log. The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed. Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, size, done, private,
		    ZIO_TYPE_IOCTL, priority, flags, vd, offset, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    offset, size, done, private, priority, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not. This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	/* Not all I/O types require the vdev I/O done stage, e.g. free. */
	if (!(pio->io_pipeline & ZIO_STAGE_VDEV_IO_DONE))
		pipeline &= ~ZIO_STAGE_VDEV_IO_DONE;

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 0, 0,
	    NULL, NULL, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

zio_t *
zio_trim(zio_t *zio, spa_t *spa, vdev_t *vd, uint64_t offset, uint64_t size)
{

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	return (zio_create(zio, spa, 0, NULL, NULL, size, NULL, NULL,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_TRIM, ZIO_FLAG_DONT_AGGREGATE |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PHYS_PIPELINE));
}
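
/*
 * Both zio_flush() and zio_trim() above are issued with
 * ZIO_FLAG_CANFAIL, ZIO_FLAG_DONT_PROPAGATE and ZIO_FLAG_DONT_RETRY:
 * a cache flush or trim that a device cannot honor is dropped rather
 * than retried, and its error is not propagated to the parent, so it
 * cannot fail the surrounding operation.
 */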

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, zio->io_data);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}
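
/*
 * Note that zio_read_bp_init() above only arranges for decompression:
 * the physical (compressed) bytes are read into cbuf, and the
 * zio_decompress transform it pushes actually runs later, when
 * zio_pop_transforms() unwinds the transform stack at done time.
 */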
But compression changes 1142 * the blocksize, which forces a reallocate, and makes 1143 * convergence take longer. Therefore, after the first 1144 * few passes, stop compressing to ensure convergence. 1145 */ 1146 pass = spa_sync_pass(spa); 1147 1148 ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1149 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1150 ASSERT(!BP_GET_DEDUP(bp)); 1151 1152 if (pass >= zfs_sync_pass_dont_compress) 1153 compress = ZIO_COMPRESS_OFF; 1154 1155 /* Make sure someone doesn't change their mind on overwrites */ 1156 ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp), 1157 spa_max_replication(spa)) == BP_GET_NDVAS(bp)); 1158 } 1159 1160 if (compress != ZIO_COMPRESS_OFF) { 1161 void *cbuf = zio_buf_alloc(lsize); 1162 psize = zio_compress_data(compress, zio->io_data, cbuf, lsize); 1163 if (psize == 0 || psize == lsize) { 1164 compress = ZIO_COMPRESS_OFF; 1165 zio_buf_free(cbuf, lsize); 1166 } else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE && 1167 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) && 1168 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) { 1169 encode_embedded_bp_compressed(bp, 1170 cbuf, compress, lsize, psize); 1171 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA); 1172 BP_SET_TYPE(bp, zio->io_prop.zp_type); 1173 BP_SET_LEVEL(bp, zio->io_prop.zp_level); 1174 zio_buf_free(cbuf, lsize); 1175 bp->blk_birth = zio->io_txg; 1176 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1177 ASSERT(spa_feature_is_active(spa, 1178 SPA_FEATURE_EMBEDDED_DATA)); 1179 return (ZIO_PIPELINE_CONTINUE); 1180 } else { 1181 /* 1182 * Round up compressed size to MINBLOCKSIZE and 1183 * zero the tail. 1184 */ 1185 size_t rounded = 1186 P2ROUNDUP(psize, (size_t)SPA_MINBLOCKSIZE); 1187 if (rounded > psize) { 1188 bzero((char *)cbuf + psize, rounded - psize); 1189 psize = rounded; 1190 } 1191 if (psize == lsize) { 1192 compress = ZIO_COMPRESS_OFF; 1193 zio_buf_free(cbuf, lsize); 1194 } else { 1195 zio_push_transform(zio, cbuf, 1196 psize, lsize, NULL); 1197 } 1198 } 1199 } 1200 1201 /* 1202 * The final pass of spa_sync() must be all rewrites, but the first 1203 * few passes offer a trade-off: allocating blocks defers convergence, 1204 * but newly allocated blocks are sequential, so they can be written 1205 * to disk faster. Therefore, we allow the first few passes of 1206 * spa_sync() to allocate new blocks, but force rewrites after that. 1207 * There should only be a handful of blocks after pass 1 in any case. 

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster. Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}
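
/*
 * Each pipeline stage is a distinct bit in io_pipeline (note the
 * ISP2() assertion in zio_execute() below), so advancing to the next
 * enabled stage is a matter of shifting io_stage left until it lands
 * on a bit that is set in io_pipeline, and zio_wait_for_children()
 * can back up one stage with a single right shift.
 */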

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	ASSERT(q == ZIO_TASKQ_ISSUE || q == ZIO_TASKQ_INTERRUPT);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time. It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
#if defined(illumos) || !defined(_KERNEL)
	ASSERT(zio->io_tqent.tqent_next == NULL);
#else
	ASSERT(zio->io_tqent.tqent_task.ta_pending == 0);
#endif
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
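
/*
 * zio_issue_async() returns ZIO_PIPELINE_STOP because once a zio has
 * been handed to a taskq, the dispatching thread must not touch it
 * again; the taskq thread re-enters zio_execute(), which picks up at
 * the next enabled stage.
 */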

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 * (1) the I/O completes
 * (2) the pipeline stalls waiting for dependent child I/Os
 * (3) the I/O issues, so we're waiting for an I/O completion interrupt
 * (4) the I/O is delegated by vdev-level caching or aggregation
 * (5) the I/O is deferred due to vdev-level queueing
 * (6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;
| 1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved. 24 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved. 25 */ 26 27#include <sys/zfs_context.h> 28#include <sys/fm/fs/zfs.h> 29#include <sys/spa.h> 30#include <sys/txg.h> 31#include <sys/spa_impl.h> 32#include <sys/vdev_impl.h> 33#include <sys/zio_impl.h> 34#include <sys/zio_compress.h> 35#include <sys/zio_checksum.h> 36#include <sys/dmu_objset.h> 37#include <sys/arc.h> 38#include <sys/ddt.h> 39#include <sys/trim_map.h> 40#include <sys/blkptr.h> 41#include <sys/zfeature.h> 42 43SYSCTL_DECL(_vfs_zfs); 44SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO"); 45#if defined(__amd64__) 46static int zio_use_uma = 1; 47#else 48static int zio_use_uma = 0; 49#endif 50SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0, 51 "Use uma(9) for ZIO allocations"); 52static int zio_exclude_metadata = 0; 53SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN, &zio_exclude_metadata, 0, 54 "Exclude metadata buffers from dumps as well"); 55 56zio_trim_stats_t zio_trim_stats = { 57 { "bytes", KSTAT_DATA_UINT64, 58 "Number of bytes successfully TRIMmed" }, 59 { "success", KSTAT_DATA_UINT64, 60 "Number of successful TRIM requests" }, 61 { "unsupported", KSTAT_DATA_UINT64, 62 "Number of TRIM requests that failed because TRIM is not supported" }, 63 { "failed", KSTAT_DATA_UINT64, 64 "Number of TRIM requests that failed for reasons other than not supported" }, 65}; 66 67static kstat_t *zio_trim_ksp; 68 69/* 70 * ========================================================================== 71 * I/O type descriptions 72 * ========================================================================== 73 */ 74const char *zio_type_name[ZIO_TYPES] = { 75 "zio_null", "zio_read", "zio_write", "zio_free", "zio_claim", 76 "zio_ioctl" 77}; 78 79/* 80 * ========================================================================== 81 * I/O kmem caches 82 * ========================================================================== 83 */ 84kmem_cache_t *zio_cache; 85kmem_cache_t *zio_link_cache; 86kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 87kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 88 89#ifdef _KERNEL 90extern vmem_t *zio_alloc_arena; 91#endif 92 93/* 94 * The following actions directly effect the spa's sync-to-convergence logic. 95 * The values below define the sync pass when we start performing the action. 96 * Care should be taken when changing these values as they directly impact 97 * spa_sync() performance. 
Tuning these values may introduce subtle performance 98 * pathologies and should only be done in the context of performance analysis. 99 * These tunables will eventually be removed and replaced with #defines once 100 * enough analysis has been done to determine optimal values. 101 * 102 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that 103 * regular blocks are not deferred. 104 */ 105int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */ 106SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN, 107 &zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass"); 108int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */ 109SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN, 110 &zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass"); 111int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */ 112SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN, 113 &zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass"); 114 115/* 116 * An allocating zio is one that either currently has the DVA allocate 117 * stage set or will have it later in its lifetime. 118 */ 119#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE) 120 121boolean_t zio_requeue_io_start_cut_in_line = B_TRUE; 122 123#ifdef ZFS_DEBUG 124int zio_buf_debug_limit = 16384; 125#else 126int zio_buf_debug_limit = 0; 127#endif 128 129void 130zio_init(void) 131{ 132 size_t c; 133 zio_cache = kmem_cache_create("zio_cache", 134 sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 135 zio_link_cache = kmem_cache_create("zio_link_cache", 136 sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 137 if (!zio_use_uma) 138 goto out; 139 140 /* 141 * For small buffers, we want a cache for each multiple of 142 * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache 143 * for each quarter-power of 2. For large buffers, we want 144 * a cache for each multiple of PAGESIZE. 145 */ 146 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 147 size_t size = (c + 1) << SPA_MINBLOCKSHIFT; 148 size_t p2 = size; 149 size_t align = 0; 150 size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0; 151 152 while (p2 & (p2 - 1)) 153 p2 &= p2 - 1; 154 155#ifdef illumos 156#ifndef _KERNEL 157 /* 158 * If we are using watchpoints, put each buffer on its own page, 159 * to eliminate the performance overhead of trapping to the 160 * kernel when modifying a non-watched buffer that shares the 161 * page with a watched buffer. 162 */ 163 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE)) 164 continue; 165#endif 166#endif /* illumos */ 167 if (size <= 4 * SPA_MINBLOCKSIZE) { 168 align = SPA_MINBLOCKSIZE; 169 } else if (IS_P2ALIGNED(size, PAGESIZE)) { 170 align = PAGESIZE; 171 } else if (IS_P2ALIGNED(size, p2 >> 2)) { 172 align = p2 >> 2; 173 } 174 175 if (align != 0) { 176 char name[36]; 177 (void) sprintf(name, "zio_buf_%lu", (ulong_t)size); 178 zio_buf_cache[c] = kmem_cache_create(name, size, 179 align, NULL, NULL, NULL, NULL, NULL, cflags); 180 181 /* 182 * Since zio_data bufs do not appear in crash dumps, we 183 * pass KMC_NOTOUCH so that no allocator metadata is 184 * stored with the buffers. 
185 */ 186 (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size); 187 zio_data_buf_cache[c] = kmem_cache_create(name, size, 188 align, NULL, NULL, NULL, NULL, NULL, 189 cflags | KMC_NOTOUCH | KMC_NODEBUG); 190 } 191 } 192 193 while (--c != 0) { 194 ASSERT(zio_buf_cache[c] != NULL); 195 if (zio_buf_cache[c - 1] == NULL) 196 zio_buf_cache[c - 1] = zio_buf_cache[c]; 197 198 ASSERT(zio_data_buf_cache[c] != NULL); 199 if (zio_data_buf_cache[c - 1] == NULL) 200 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c]; 201 } 202out: 203 204 zio_inject_init(); 205 206 zio_trim_ksp = kstat_create("zfs", 0, "zio_trim", "misc", 207 KSTAT_TYPE_NAMED, 208 sizeof(zio_trim_stats) / sizeof(kstat_named_t), 209 KSTAT_FLAG_VIRTUAL); 210 211 if (zio_trim_ksp != NULL) { 212 zio_trim_ksp->ks_data = &zio_trim_stats; 213 kstat_install(zio_trim_ksp); 214 } 215} 216 217void 218zio_fini(void) 219{ 220 size_t c; 221 kmem_cache_t *last_cache = NULL; 222 kmem_cache_t *last_data_cache = NULL; 223 224 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 225 if (zio_buf_cache[c] != last_cache) { 226 last_cache = zio_buf_cache[c]; 227 kmem_cache_destroy(zio_buf_cache[c]); 228 } 229 zio_buf_cache[c] = NULL; 230 231 if (zio_data_buf_cache[c] != last_data_cache) { 232 last_data_cache = zio_data_buf_cache[c]; 233 kmem_cache_destroy(zio_data_buf_cache[c]); 234 } 235 zio_data_buf_cache[c] = NULL; 236 } 237 238 kmem_cache_destroy(zio_link_cache); 239 kmem_cache_destroy(zio_cache); 240 241 zio_inject_fini(); 242 243 if (zio_trim_ksp != NULL) { 244 kstat_delete(zio_trim_ksp); 245 zio_trim_ksp = NULL; 246 } 247} 248 249/* 250 * ========================================================================== 251 * Allocate and free I/O buffers 252 * ========================================================================== 253 */ 254 255/* 256 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a 257 * crashdump if the kernel panics, so use it judiciously. Obviously, it's 258 * useful to inspect ZFS metadata, but if possible, we should avoid keeping 259 * excess / transient data in-core during a crashdump. 260 */ 261void * 262zio_buf_alloc(size_t size) 263{ 264 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 265 int flags = zio_exclude_metadata ? KM_NODEBUG : 0; 266 267 ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 268 269 if (zio_use_uma) 270 return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE)); 271 else 272 return (kmem_alloc(size, KM_SLEEP|flags)); 273} 274 275/* 276 * Use zio_data_buf_alloc to allocate data. The data will not appear in a 277 * crashdump if the kernel panics. This exists so that we will limit the amount 278 * of ZFS data that shows up in a kernel crashdump. 
(Thus reducing the amount 279 * of kernel heap dumped to disk when the kernel panics) 280 */ 281void * 282zio_data_buf_alloc(size_t size) 283{ 284 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 285 286 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 287 288 if (zio_use_uma) 289 return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE)); 290 else 291 return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG)); 292} 293 294void 295zio_buf_free(void *buf, size_t size) 296{ 297 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 298 299 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 300 301 if (zio_use_uma) 302 kmem_cache_free(zio_buf_cache[c], buf); 303 else 304 kmem_free(buf, size); 305} 306 307void 308zio_data_buf_free(void *buf, size_t size) 309{ 310 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 311 312 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 313 314 if (zio_use_uma) 315 kmem_cache_free(zio_data_buf_cache[c], buf); 316 else 317 kmem_free(buf, size); 318} 319 320/* 321 * ========================================================================== 322 * Push and pop I/O transform buffers 323 * ========================================================================== 324 */ 325static void 326zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize, 327 zio_transform_func_t *transform) 328{ 329 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP); 330 331 zt->zt_orig_data = zio->io_data; 332 zt->zt_orig_size = zio->io_size; 333 zt->zt_bufsize = bufsize; 334 zt->zt_transform = transform; 335 336 zt->zt_next = zio->io_transform_stack; 337 zio->io_transform_stack = zt; 338 339 zio->io_data = data; 340 zio->io_size = size; 341} 342 343static void 344zio_pop_transforms(zio_t *zio) 345{ 346 zio_transform_t *zt; 347 348 while ((zt = zio->io_transform_stack) != NULL) { 349 if (zt->zt_transform != NULL) 350 zt->zt_transform(zio, 351 zt->zt_orig_data, zt->zt_orig_size); 352 353 if (zt->zt_bufsize != 0) 354 zio_buf_free(zio->io_data, zt->zt_bufsize); 355 356 zio->io_data = zt->zt_orig_data; 357 zio->io_size = zt->zt_orig_size; 358 zio->io_transform_stack = zt->zt_next; 359 360 kmem_free(zt, sizeof (zio_transform_t)); 361 } 362} 363 364/* 365 * ========================================================================== 366 * I/O transform callbacks for subblocks and decompression 367 * ========================================================================== 368 */ 369static void 370zio_subblock(zio_t *zio, void *data, uint64_t size) 371{ 372 ASSERT(zio->io_size > size); 373 374 if (zio->io_type == ZIO_TYPE_READ) 375 bcopy(zio->io_data, data, size); 376} 377 378static void 379zio_decompress(zio_t *zio, void *data, uint64_t size) 380{ 381 if (zio->io_error == 0 && 382 zio_decompress_data(BP_GET_COMPRESS(zio->io_bp), 383 zio->io_data, data, zio->io_size, size) != 0) 384 zio->io_error = SET_ERROR(EIO); 385} 386 387/* 388 * ========================================================================== 389 * I/O parent/child relationships and pipeline interlocks 390 * ========================================================================== 391 */ 392/* 393 * NOTE - Callers to zio_walk_parents() and zio_walk_children must 394 * continue calling these functions until they return NULL. 395 * Otherwise, the next caller will pick up the list walk in 396 * some indeterminate state. (Otherwise every caller would 397 * have to pass in a cookie to keep the state represented by 398 * io_walk_link, which gets annoying.) 
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_phys_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(type == ZIO_TYPE_FREE || size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *physdone, zio_done_func_t *done,
    void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
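	 * (An embedded bp carries its payload inside the block pointer
	 * itself and owns no allocated DVAs on disk, so dropping it here
	 * is the entire free operation.)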
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (zfs_trim_enabled || BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp,
		    BP_GET_PSIZE(bp), 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    uint64_t size, enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	if (zfs_trim_enabled)
		stage |= ZIO_STAGE_ISSUE_ASYNC | ZIO_STAGE_VDEV_IO_START |
		    ZIO_STAGE_VDEV_IO_ASSESS;
	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	else if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	flags |= ZIO_FLAG_DONT_QUEUE;

	zio = zio_create(pio, spa, txg, bp, NULL, size,
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "claiming in txg %llu", txg);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
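	 *
	 * For example, a caller interested only in that verification
	 * would issue (sketch):
	 *
	 *	(void) zio_wait(zio_claim(NULL, spa, 0, bp, NULL, NULL, 0));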
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, size, done, private,
		    ZIO_TYPE_IOCTL, priority, flags, vd, offset, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    offset, size, done, private, priority, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	/* Not all IO types require vdev io done stage e.g. free */
	if (!(pio->io_pipeline & ZIO_STAGE_VDEV_IO_DONE))
		pipeline &= ~ZIO_STAGE_VDEV_IO_DONE;

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 0, 0,
	    NULL, NULL, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

zio_t *
zio_trim(zio_t *zio, spa_t *spa, vdev_t *vd, uint64_t offset, uint64_t size)
{

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	return (zio_create(zio, spa, 0, NULL, NULL, size, NULL, NULL,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_TRIM, ZIO_FLAG_DONT_AGGREGATE |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PHYS_PIPELINE));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, zio->io_data);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup ||
		    zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round up compressed size to MINBLOCKSIZE and
			 * zero the tail.
			 */
			size_t rounded =
			    P2ROUNDUP(psize, (size_t)SPA_MINBLOCKSIZE);
			if (rounded > psize) {
				bzero((char *)cbuf + psize, rounded - psize);
				psize = rounded;
			}
			if (psize == lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
			} else {
				zio_push_transform(zio, cbuf,
				    psize, lsize, NULL);
			}
		}
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
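	 *
	 * Concretely, the policy below: once spa_sync_pass(spa) reaches
	 * zfs_sync_pass_rewrite, a same-size overwrite of a block born in
	 * this txg keeps its bp and takes the ZIO_REWRITE_PIPELINE;
	 * otherwise the bp is zeroed and a fresh allocation proceeds
	 * through the ZIO_WRITE_PIPELINE.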
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	ASSERT(q == ZIO_TASKQ_ISSUE || q == ZIO_TASKQ_INTERRUPT);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
#if defined(illumos) || !defined(_KERNEL)
	ASSERT(zio->io_tqent.tqent_next == NULL);
#else
	ASSERT(zio->io_tqent.tqent_task.ta_pending == 0);
#endif
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure they complete prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root, zio);
	}

	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on him.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
		zio_execute(pio);
}

void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
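	 * The suspend godfather accumulated every failed i/o as a child,
	 * so the zio_wait() below returns the worst error among the
	 * retried i/os (in the zio_worst_error() sense), or 0 if they
	 * all now succeed.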
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = B_FALSE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}

/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it.  This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part?  Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write.  Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated.  This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact).  If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */

static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}

zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	zio_t *zio;

	if (gn != NULL) {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here.  The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    data, BP_GET_PSIZE(bp));
		}
		/*
		 * If we are here to damage data for testing purposes,
		 * leave the GBH alone so that we can detect the damage.
		 */
		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}

/* ARGSUSED */
zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
	    BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp),
	    ZIO_GANG_CHILD_FLAGS(pio)));
}

/* ARGSUSED */
zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		ASSERT(gn->gn_child[g] == NULL);

	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
	kmem_free(gn, sizeof (*gn));
	*gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	if (gn == NULL)
		return;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		zio_gang_tree_free(&gn->gn_child[g]);

	zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);

	ASSERT(gio->io_gang_leader == gio);
	ASSERT(BP_IS_GANG(bp));

	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
	zio_t *gio = zio->io_gang_leader;
	zio_gang_node_t *gn = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	ASSERT(gio == zio_unique_parent(zio));
	ASSERT(zio->io_child_count == 0);

	if (zio->io_error)
		return;

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);

	ASSERT(zio->io_data == gn->gn_gbh);
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
		if (!BP_IS_GANG(gbp))
			continue;
		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
	}
}

static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
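	 *
	 * As the loop below recurses across the members, 'data' advances
	 * by each member's psize, so the leaves map contiguously onto the
	 * caller's buffer; the trailing ASSERT3P verifies that the walk
	 * consumed exactly gio->io_size bytes.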
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
			data = (char *)data + BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree && gio->io_data != NULL)
		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

	if (zio != pio)
		zio_nowait(zio);
}

static int
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	zio_t *gio = zio->io_gang_leader;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}

static int
zio_write_gang_block(zio_t *pio)
{
	spa_t *spa = pio->io_spa;
	blkptr_t *bp = pio->io_bp;
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;
	zio_gang_node_t *gn, **gnpp;
	zio_gbh_phys_t *gbh;
	uint64_t txg = pio->io_txg;
	uint64_t resid = pio->io_size;
	uint64_t lsize;
	int copies = gio->io_prop.zp_copies;
	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
	zio_prop_t zp;
	int error;

	error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE,
	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp,
	    METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
	if (error) {
		pio->io_error = error;
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (pio == gio) {
		gnpp = &gio->io_gang_tree;
	} else {
		gnpp = pio->io_private;
		ASSERT(pio->io_ready == zio_write_gang_member_ready);
	}

	gn = zio_gang_node_alloc(gnpp);
	gbh = gn->gn_gbh;
	bzero(gbh, SPA_GANGBLOCKSIZE);

	/*
	 * Create the gang header.
	 */
	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
	    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

	/*
	 * Create and nowait the gang children.
	 */
	for (int g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

		zp.zp_checksum = gio->io_prop.zp_checksum;
		zp.zp_compress = ZIO_COMPRESS_OFF;
		zp.zp_type = DMU_OT_NONE;
		zp.zp_level = 0;
		zp.zp_copies = gio->io_prop.zp_copies;
		zp.zp_dedup = B_FALSE;
		zp.zp_dedup_verify = B_FALSE;
		zp.zp_nopwrite = B_FALSE;

		zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
		    (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
		    zio_write_gang_member_ready, NULL, NULL, &gn->gn_child[g],
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark));
	}

	/*
	 * Set pio's pipeline to just wait for zio to finish.
	 */
	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	zio_nowait(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * The zio_nop_write stage in the pipeline determines if allocating
 * a new bp is necessary.  By leveraging a cryptographically secure checksum,
 * such as SHA256, we can compare the checksums of the new data and the old
 * to determine if allocating a new block is required.  The nopwrite
 * feature can handle writes in either syncing or open context (i.e. zil
 * writes) and as a result is mutually exclusive with dedup.
 */
static int
zio_nop_write(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	zio_prop_t *zp = &zio->io_prop;

	ASSERT(BP_GET_LEVEL(bp) == 0);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(zp->zp_nopwrite);
	ASSERT(!zp->zp_dedup);
	ASSERT(zio->io_bp_override == NULL);
	ASSERT(IO_IS_ALLOCATING(zio));

	/*
	 * Check to see if the original bp and the new bp have matching
	 * characteristics (i.e. same checksum, compression algorithms, etc).
	 * If they don't then just continue with the pipeline which will
	 * allocate a new bp.
	 */
	if (BP_IS_HOLE(bp_orig) ||
	    !zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup ||
	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
		return (ZIO_PIPELINE_CONTINUE);

	/*
	 * If the checksums match then reset the pipeline so that we
	 * avoid allocating a new bp and issuing any I/O.
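	 * (On a match, *bp is reset to *bp_orig below, so the caller keeps
	 * the existing on-disk block, and ZIO_FLAG_NOPWRITE records that
	 * no new allocation or device write took place.)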
	 */
	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup);
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
		    sizeof (uint64_t)) == 0);

		*bp = *bp_orig;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		zio->io_flags |= ZIO_FLAG_NOPWRITE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Dedup
 * ==========================================================================
 */
static void
zio_ddt_child_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp;
	zio_t *pio = zio_unique_parent(zio);

	mutex_enter(&pio->io_lock);
	ddp = ddt_phys_select(dde, bp);
	if (zio->io_error == 0)
		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
	if (zio->io_error == 0 && dde->dde_repair_data == NULL)
		dde->dde_repair_data = zio->io_data;
	else
		zio_buf_free(zio->io_data, zio->io_size);
	mutex_exit(&pio->io_lock);
}

static int
zio_ddt_read_start(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
		ddt_phys_t *ddp = dde->dde_phys;
		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
		blkptr_t blk;

		ASSERT(zio->io_vsd == NULL);
		zio->io_vsd = dde;

		if (ddp_self == NULL)
			return (ZIO_PIPELINE_CONTINUE);

		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
				continue;
			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
			    &blk);
			zio_nowait(zio_read(zio, zio->io_spa, &blk,
			    zio_buf_alloc(zio->io_size), zio->io_size,
			    zio_ddt_child_read_done, dde, zio->io_priority,
			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
			    &zio->io_bookmark));
		}
		return (ZIO_PIPELINE_CONTINUE);
	}

	zio_nowait(zio_read(zio, zio->io_spa, bp,
	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_ddt_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = zio->io_vsd;
		if (ddt == NULL) {
			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
			return (ZIO_PIPELINE_CONTINUE);
		}
		if (dde == NULL) {
			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
			return (ZIO_PIPELINE_STOP);
		}
		if (dde->dde_repair_data != NULL) {
			bcopy(dde->dde_repair_data, zio->io_data, zio->io_size);
			zio->io_child_error[ZIO_CHILD_DDT] = 0;
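			/*
			 * An undamaged duplicate satisfied this read, so
			 * from the caller's perspective the DDT child
			 * error never happened.
			 */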
2134 } 2135 ddt_repair_done(ddt, dde); 2136 zio->io_vsd = NULL; 2137 } 2138 2139 ASSERT(zio->io_vsd == NULL); 2140 2141 return (ZIO_PIPELINE_CONTINUE); 2142} 2143 2144static boolean_t 2145zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2146{ 2147 spa_t *spa = zio->io_spa; 2148 2149 /* 2150 * Note: we compare the original data, not the transformed data, 2151 * because when zio->io_bp is an override bp, we will not have 2152 * pushed the I/O transforms. That's an important optimization 2153 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2154 */ 2155 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2156 zio_t *lio = dde->dde_lead_zio[p]; 2157 2158 if (lio != NULL) { 2159 return (lio->io_orig_size != zio->io_orig_size || 2160 bcmp(zio->io_orig_data, lio->io_orig_data, 2161 zio->io_orig_size) != 0); 2162 } 2163 } 2164 2165 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2166 ddt_phys_t *ddp = &dde->dde_phys[p]; 2167 2168 if (ddp->ddp_phys_birth != 0) { 2169 arc_buf_t *abuf = NULL; 2170 uint32_t aflags = ARC_WAIT; 2171 blkptr_t blk = *zio->io_bp; 2172 int error; 2173 2174 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2175 2176 ddt_exit(ddt); 2177 2178 error = arc_read(NULL, spa, &blk, 2179 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2180 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2181 &aflags, &zio->io_bookmark); 2182 2183 if (error == 0) { 2184 if (arc_buf_size(abuf) != zio->io_orig_size || 2185 bcmp(abuf->b_data, zio->io_orig_data, 2186 zio->io_orig_size) != 0) 2187 error = SET_ERROR(EEXIST); 2188 VERIFY(arc_buf_remove_ref(abuf, &abuf)); 2189 } 2190 2191 ddt_enter(ddt); 2192 return (error != 0); 2193 } 2194 } 2195 2196 return (B_FALSE); 2197} 2198 2199static void 2200zio_ddt_child_write_ready(zio_t *zio) 2201{ 2202 int p = zio->io_prop.zp_copies; 2203 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2204 ddt_entry_t *dde = zio->io_private; 2205 ddt_phys_t *ddp = &dde->dde_phys[p]; 2206 zio_t *pio; 2207 2208 if (zio->io_error) 2209 return; 2210 2211 ddt_enter(ddt); 2212 2213 ASSERT(dde->dde_lead_zio[p] == zio); 2214 2215 ddt_phys_fill(ddp, zio->io_bp); 2216 2217 while ((pio = zio_walk_parents(zio)) != NULL) 2218 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2219 2220 ddt_exit(ddt); 2221} 2222 2223static void 2224zio_ddt_child_write_done(zio_t *zio) 2225{ 2226 int p = zio->io_prop.zp_copies; 2227 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2228 ddt_entry_t *dde = zio->io_private; 2229 ddt_phys_t *ddp = &dde->dde_phys[p]; 2230 2231 ddt_enter(ddt); 2232 2233 ASSERT(ddp->ddp_refcnt == 0); 2234 ASSERT(dde->dde_lead_zio[p] == zio); 2235 dde->dde_lead_zio[p] = NULL; 2236 2237 if (zio->io_error == 0) { 2238 while (zio_walk_parents(zio) != NULL) 2239 ddt_phys_addref(ddp); 2240 } else { 2241 ddt_phys_clear(ddp); 2242 } 2243 2244 ddt_exit(ddt); 2245} 2246 2247static void 2248zio_ddt_ditto_write_done(zio_t *zio) 2249{ 2250 int p = DDT_PHYS_DITTO; 2251 zio_prop_t *zp = &zio->io_prop; 2252 blkptr_t *bp = zio->io_bp; 2253 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2254 ddt_entry_t *dde = zio->io_private; 2255 ddt_phys_t *ddp = &dde->dde_phys[p]; 2256 ddt_key_t *ddk = &dde->dde_key; 2257 2258 ddt_enter(ddt); 2259 2260 ASSERT(ddp->ddp_refcnt == 0); 2261 ASSERT(dde->dde_lead_zio[p] == zio); 2262 dde->dde_lead_zio[p] = NULL; 2263 2264 if (zio->io_error == 0) { 2265 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2266 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2267 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2268 if 
(ddp->ddp_phys_birth != 0) 2269 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2270 ddt_phys_fill(ddp, bp); 2271 } 2272 2273 ddt_exit(ddt); 2274} 2275 2276static int 2277zio_ddt_write(zio_t *zio) 2278{ 2279 spa_t *spa = zio->io_spa; 2280 blkptr_t *bp = zio->io_bp; 2281 uint64_t txg = zio->io_txg; 2282 zio_prop_t *zp = &zio->io_prop; 2283 int p = zp->zp_copies; 2284 int ditto_copies; 2285 zio_t *cio = NULL; 2286 zio_t *dio = NULL; 2287 ddt_t *ddt = ddt_select(spa, bp); 2288 ddt_entry_t *dde; 2289 ddt_phys_t *ddp; 2290 2291 ASSERT(BP_GET_DEDUP(bp)); 2292 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2293 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2294 2295 ddt_enter(ddt); 2296 dde = ddt_lookup(ddt, bp, B_TRUE); 2297 ddp = &dde->dde_phys[p]; 2298 2299 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2300 /* 2301 * If we're using a weak checksum, upgrade to a strong checksum 2302 * and try again. If we're already using a strong checksum, 2303 * we can't resolve it, so just convert to an ordinary write. 2304 * (And automatically e-mail a paper to Nature?) 2305 */ 2306 if (!zio_checksum_table[zp->zp_checksum].ci_dedup) { 2307 zp->zp_checksum = spa_dedup_checksum(spa); 2308 zio_pop_transforms(zio); 2309 zio->io_stage = ZIO_STAGE_OPEN; 2310 BP_ZERO(bp); 2311 } else { 2312 zp->zp_dedup = B_FALSE; 2313 } 2314 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2315 ddt_exit(ddt); 2316 return (ZIO_PIPELINE_CONTINUE); 2317 } 2318 2319 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2320 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2321 2322 if (ditto_copies > ddt_ditto_copies_present(dde) && 2323 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2324 zio_prop_t czp = *zp; 2325 2326 czp.zp_copies = ditto_copies; 2327 2328 /* 2329 * If we arrived here with an override bp, we won't have run 2330 * the transform stack, so we won't have the data we need to 2331 * generate a child i/o. So, toss the override bp and restart. 2332 * This is safe, because using the override bp is just an 2333 * optimization; and it's rare, so the cost doesn't matter. 
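 * (dmu_sync() is the usual source of override bps here; its data was
 * written in open context and never went through this zio's transform
 * stack.)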
2334 */ 2335 if (zio->io_bp_override) { 2336 zio_pop_transforms(zio); 2337 zio->io_stage = ZIO_STAGE_OPEN; 2338 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2339 zio->io_bp_override = NULL; 2340 BP_ZERO(bp); 2341 ddt_exit(ddt); 2342 return (ZIO_PIPELINE_CONTINUE); 2343 } 2344 2345 dio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2346 zio->io_orig_size, &czp, NULL, NULL, 2347 zio_ddt_ditto_write_done, dde, zio->io_priority, 2348 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2349 2350 zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL); 2351 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2352 } 2353 2354 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2355 if (ddp->ddp_phys_birth != 0) 2356 ddt_bp_fill(ddp, bp, txg); 2357 if (dde->dde_lead_zio[p] != NULL) 2358 zio_add_child(zio, dde->dde_lead_zio[p]); 2359 else 2360 ddt_phys_addref(ddp); 2361 } else if (zio->io_bp_override) { 2362 ASSERT(bp->blk_birth == txg); 2363 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2364 ddt_phys_fill(ddp, bp); 2365 ddt_phys_addref(ddp); 2366 } else { 2367 cio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2368 zio->io_orig_size, zp, zio_ddt_child_write_ready, NULL, 2369 zio_ddt_child_write_done, dde, zio->io_priority, 2370 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2371 2372 zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL); 2373 dde->dde_lead_zio[p] = cio; 2374 } 2375 2376 ddt_exit(ddt); 2377 2378 if (cio) 2379 zio_nowait(cio); 2380 if (dio) 2381 zio_nowait(dio); 2382 2383 return (ZIO_PIPELINE_CONTINUE); 2384} 2385 2386ddt_entry_t *freedde; /* for debugging */ 2387 2388static int 2389zio_ddt_free(zio_t *zio) 2390{ 2391 spa_t *spa = zio->io_spa; 2392 blkptr_t *bp = zio->io_bp; 2393 ddt_t *ddt = ddt_select(spa, bp); 2394 ddt_entry_t *dde; 2395 ddt_phys_t *ddp; 2396 2397 ASSERT(BP_GET_DEDUP(bp)); 2398 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2399 2400 ddt_enter(ddt); 2401 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2402 ddp = ddt_phys_select(dde, bp); 2403 ddt_phys_decref(ddp); 2404 ddt_exit(ddt); 2405 2406 return (ZIO_PIPELINE_CONTINUE); 2407} 2408 2409/* 2410 * ========================================================================== 2411 * Allocate and free blocks 2412 * ========================================================================== 2413 */ 2414static int 2415zio_dva_allocate(zio_t *zio) 2416{ 2417 spa_t *spa = zio->io_spa; 2418 metaslab_class_t *mc = spa_normal_class(spa); 2419 blkptr_t *bp = zio->io_bp; 2420 int error; 2421 int flags = 0; 2422 2423 if (zio->io_gang_leader == NULL) { 2424 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2425 zio->io_gang_leader = zio; 2426 } 2427 2428 ASSERT(BP_IS_HOLE(bp)); 2429 ASSERT0(BP_GET_NDVAS(bp)); 2430 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2431 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2432 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2433 2434 /* 2435 * The dump device does not support gang blocks so allocation on 2436 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid 2437 * the "fast" gang feature. 2438 */ 2439 flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0; 2440 flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ? 
2441 METASLAB_GANG_CHILD : 0; 2442 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2443 zio->io_prop.zp_copies, zio->io_txg, NULL, flags); 2444 2445 if (error) { 2446 spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 2447 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2448 error); 2449 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2450 return (zio_write_gang_block(zio)); 2451 zio->io_error = error; 2452 } 2453 2454 return (ZIO_PIPELINE_CONTINUE); 2455} 2456 2457static int 2458zio_dva_free(zio_t *zio) 2459{ 2460 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 2461 2462 return (ZIO_PIPELINE_CONTINUE); 2463} 2464 2465static int 2466zio_dva_claim(zio_t *zio) 2467{ 2468 int error; 2469 2470 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 2471 if (error) 2472 zio->io_error = error; 2473 2474 return (ZIO_PIPELINE_CONTINUE); 2475} 2476 2477/* 2478 * Undo an allocation. This is used by zio_done() when an I/O fails 2479 * and we want to give back the block we just allocated. 2480 * This handles both normal blocks and gang blocks. 2481 */ 2482static void 2483zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 2484{ 2485 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2486 ASSERT(zio->io_bp_override == NULL); 2487 2488 if (!BP_IS_HOLE(bp)) 2489 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 2490 2491 if (gn != NULL) { 2492 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2493 zio_dva_unallocate(zio, gn->gn_child[g], 2494 &gn->gn_gbh->zg_blkptr[g]); 2495 } 2496 } 2497} 2498 2499/* 2500 * Try to allocate an intent log block. Return 0 on success, errno on failure. 2501 */ 2502int 2503zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp, 2504 uint64_t size, boolean_t use_slog) 2505{ 2506 int error = 1; 2507 2508 ASSERT(txg > spa_syncing_txg(spa)); 2509 2510 /* 2511 * ZIL blocks are always contiguous (i.e. not gang blocks) so we 2512 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang" 2513 * when allocating them. 2514 */ 2515 if (use_slog) { 2516 error = metaslab_alloc(spa, spa_log_class(spa), size, 2517 new_bp, 1, txg, old_bp, 2518 METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID); 2519 } 2520 2521 if (error) { 2522 error = metaslab_alloc(spa, spa_normal_class(spa), size, 2523 new_bp, 1, txg, old_bp, 2524 METASLAB_HINTBP_AVOID); 2525 } 2526 2527 if (error == 0) { 2528 BP_SET_LSIZE(new_bp, size); 2529 BP_SET_PSIZE(new_bp, size); 2530 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 2531 BP_SET_CHECKSUM(new_bp, 2532 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 2533 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 2534 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 2535 BP_SET_LEVEL(new_bp, 0); 2536 BP_SET_DEDUP(new_bp, 0); 2537 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 2538 } 2539 2540 return (error); 2541} 2542 2543/* 2544 * Free an intent log block. 
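 * ZIL blocks are never ganged (see zio_alloc_zil() above), which the
 * ASSERT below re-checks before the bp is handed to zio_free().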
2545 */ 2546void 2547zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) 2548{ 2549 ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); 2550 ASSERT(!BP_IS_GANG(bp)); 2551 2552 zio_free(spa, txg, bp); 2553} 2554 2555/* 2556 * ========================================================================== 2557 * Read, write and delete to physical devices 2558 * ========================================================================== 2559 */ 2560static int 2561zio_vdev_io_start(zio_t *zio) 2562{ 2563 vdev_t *vd = zio->io_vd; 2564 uint64_t align; 2565 spa_t *spa = zio->io_spa; 2566 int ret; 2567 2568 ASSERT(zio->io_error == 0); 2569 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 2570 2571 if (vd == NULL) { 2572 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2573 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 2574 2575 /* 2576 * The mirror_ops handle multiple DVAs in a single BP. 2577 */ 2578 return (vdev_mirror_ops.vdev_op_io_start(zio)); 2579 } 2580 2581 if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_FREE && 2582 zio->io_priority == ZIO_PRIORITY_NOW) { 2583 trim_map_free(vd, zio->io_offset, zio->io_size, zio->io_txg); 2584 return (ZIO_PIPELINE_CONTINUE); 2585 } 2586 2587 /* 2588 * We keep track of time-sensitive I/Os so that the scan thread 2589 * can quickly react to certain workloads. In particular, we care 2590 * about non-scrubbing, top-level reads and writes with the following 2591 * characteristics: 2592 * - synchronous writes of user data to non-slog devices 2593 * - any reads of user data 2594 * When these conditions are met, adjust the timestamp of spa_last_io 2595 * which allows the scan thread to adjust its workload accordingly. 2596 */ 2597 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 2598 vd == vd->vdev_top && !vd->vdev_islog && 2599 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 2600 zio->io_txg != spa_syncing_txg(spa)) { 2601 uint64_t old = spa->spa_last_io; 2602 uint64_t new = ddi_get_lbolt64(); 2603 if (old != new) 2604 (void) atomic_cas_64(&spa->spa_last_io, old, new); 2605 } 2606 2607 align = 1ULL << vd->vdev_top->vdev_ashift; 2608 2609 if ((!(zio->io_flags & ZIO_FLAG_PHYSICAL) || 2610 (vd->vdev_top->vdev_physical_ashift > SPA_MINBLOCKSHIFT)) && 2611 P2PHASE(zio->io_size, align) != 0) { 2612 /* Transform logical writes to be a full physical block size. */ 2613 uint64_t asize = P2ROUNDUP(zio->io_size, align); 2614 char *abuf = NULL; 2615 if (zio->io_type == ZIO_TYPE_READ || 2616 zio->io_type == ZIO_TYPE_WRITE) 2617 abuf = zio_buf_alloc(asize); 2618 ASSERT(vd == vd->vdev_top); 2619 if (zio->io_type == ZIO_TYPE_WRITE) { 2620 bcopy(zio->io_data, abuf, zio->io_size); 2621 bzero(abuf + zio->io_size, asize - zio->io_size); 2622 } 2623 zio_push_transform(zio, abuf, asize, abuf ? asize : 0, 2624 zio_subblock); 2625 } 2626 2627 /* 2628 * If this is not a physical io, make sure that it is properly aligned 2629 * before proceeding. 2630 */ 2631 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 2632 ASSERT0(P2PHASE(zio->io_offset, align)); 2633 ASSERT0(P2PHASE(zio->io_size, align)); 2634 } else { 2635 /* 2636 * For physical writes, we allow 512b aligned writes and assume 2637 * the device will perform a read-modify-write as necessary. 
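 * (SPA_MINBLOCKSIZE is 512 bytes; that is what the two ASSERTs below
 * check against.)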
2638 */ 2639 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 2640 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 2641 } 2642 2643 VERIFY(zio->io_type == ZIO_TYPE_READ || spa_writeable(spa)); 2644 2645 /* 2646 * If this is a repair I/O, and there's no self-healing involved -- 2647 * that is, we're just resilvering what we expect to resilver -- 2648 * then don't do the I/O unless zio's txg is actually in vd's DTL. 2649 * This prevents spurious resilvering with nested replication. 2650 * For example, given a mirror of mirrors, (A+B)+(C+D), if only 2651 * A is out of date, we'll read from C+D, then use the data to 2652 * resilver A+B -- but we don't actually want to resilver B, just A. 2653 * The top-level mirror has no way to know this, so instead we just 2654 * discard unnecessary repairs as we work our way down the vdev tree. 2655 * The same logic applies to any form of nested replication: 2656 * ditto + mirror, RAID-Z + replacing, etc. This covers them all. 2657 */ 2658 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 2659 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 2660 zio->io_txg != 0 && /* not a delegated i/o */ 2661 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 2662 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2663 zio_vdev_io_bypass(zio); 2664 return (ZIO_PIPELINE_CONTINUE); 2665 } 2666 2667 if (vd->vdev_ops->vdev_op_leaf) { 2668 switch (zio->io_type) { 2669 case ZIO_TYPE_READ: 2670 if (vdev_cache_read(zio)) 2671 return (ZIO_PIPELINE_CONTINUE); 2672 /* FALLTHROUGH */ 2673 case ZIO_TYPE_WRITE: 2674 case ZIO_TYPE_FREE: 2675 if ((zio = vdev_queue_io(zio)) == NULL) 2676 return (ZIO_PIPELINE_STOP); 2677 2678 if (!vdev_accessible(vd, zio)) { 2679 zio->io_error = SET_ERROR(ENXIO); 2680 zio_interrupt(zio); 2681 return (ZIO_PIPELINE_STOP); 2682 } 2683 break; 2684 } 2685 /* 2686 * Note that we ignore repair writes for TRIM because they can 2687 * conflict with normal writes. This isn't an issue because, by 2688 * definition, we only repair blocks that aren't freed. 2689 */ 2690 if (zio->io_type == ZIO_TYPE_WRITE && 2691 !(zio->io_flags & ZIO_FLAG_IO_REPAIR) && 2692 !trim_map_write_start(zio)) 2693 return (ZIO_PIPELINE_STOP); 2694 } 2695 2696 ret = vd->vdev_ops->vdev_op_io_start(zio); 2697 ASSERT(ret == ZIO_PIPELINE_STOP); 2698 2699 return (ret); 2700} 2701 2702static int 2703zio_vdev_io_done(zio_t *zio) 2704{ 2705 vdev_t *vd = zio->io_vd; 2706 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 2707 boolean_t unexpected_error = B_FALSE; 2708 2709 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2710 return (ZIO_PIPELINE_STOP); 2711 2712 ASSERT(zio->io_type == ZIO_TYPE_READ || 2713 zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_FREE); 2714 2715 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 2716 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE || 2717 zio->io_type == ZIO_TYPE_FREE)) { 2718 2719 if (zio->io_type == ZIO_TYPE_WRITE && 2720 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) 2721 trim_map_write_done(zio); 2722 2723 vdev_queue_io_done(zio); 2724 2725 if (zio->io_type == ZIO_TYPE_WRITE) 2726 vdev_cache_write(zio); 2727 2728 if (zio_injection_enabled && zio->io_error == 0) 2729 zio->io_error = zio_handle_device_injection(vd, 2730 zio, EIO); 2731 2732 if (zio_injection_enabled && zio->io_error == 0) 2733 zio->io_error = zio_handle_label_injection(zio, EIO); 2734 2735 if (zio->io_error) { 2736 if (zio->io_error == ENOTSUP && 2737 zio->io_type == ZIO_TYPE_FREE) { 2738 /* Not all devices support TRIM. 
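				 * Leave the error for zio_vdev_io_assess(),
				 * which counts it under the "unsupported"
				 * TRIM kstat rather than treating it as a
				 * device fault.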
*/ 2739 } else if (!vdev_accessible(vd, zio)) { 2740 zio->io_error = SET_ERROR(ENXIO); 2741 } else { 2742 unexpected_error = B_TRUE; 2743 } 2744 } 2745 } 2746 2747 ops->vdev_op_io_done(zio); 2748 2749 if (unexpected_error) 2750 VERIFY(vdev_probe(vd, zio) == NULL); 2751 2752 return (ZIO_PIPELINE_CONTINUE); 2753} 2754 2755/* 2756 * For non-raidz ZIOs, we can just copy aside the bad data read from the 2757 * disk, and use that to finish the checksum ereport later. 2758 */ 2759static void 2760zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 2761 const void *good_buf) 2762{ 2763 /* no processing needed */ 2764 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 2765} 2766 2767/*ARGSUSED*/ 2768void 2769zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 2770{ 2771 void *buf = zio_buf_alloc(zio->io_size); 2772 2773 bcopy(zio->io_data, buf, zio->io_size); 2774 2775 zcr->zcr_cbinfo = zio->io_size; 2776 zcr->zcr_cbdata = buf; 2777 zcr->zcr_finish = zio_vsd_default_cksum_finish; 2778 zcr->zcr_free = zio_buf_free; 2779} 2780 2781static int 2782zio_vdev_io_assess(zio_t *zio) 2783{ 2784 vdev_t *vd = zio->io_vd; 2785 2786 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2787 return (ZIO_PIPELINE_STOP); 2788 2789 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2790 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 2791 2792 if (zio->io_vsd != NULL) { 2793 zio->io_vsd_ops->vsd_free(zio); 2794 zio->io_vsd = NULL; 2795 } 2796 2797 if (zio_injection_enabled && zio->io_error == 0) 2798 zio->io_error = zio_handle_fault_injection(zio, EIO); 2799 2800 if (zio->io_type == ZIO_TYPE_FREE && 2801 zio->io_priority != ZIO_PRIORITY_NOW) { 2802 switch (zio->io_error) { 2803 case 0: 2804 ZIO_TRIM_STAT_INCR(bytes, zio->io_size); 2805 ZIO_TRIM_STAT_BUMP(success); 2806 break; 2807 case EOPNOTSUPP: 2808 ZIO_TRIM_STAT_BUMP(unsupported); 2809 break; 2810 default: 2811 ZIO_TRIM_STAT_BUMP(failed); 2812 break; 2813 } 2814 } 2815 2816 /* 2817 * If the I/O failed, determine whether we should attempt to retry it. 2818 * 2819 * On retry, we cut in line in the issue queue, since we don't want 2820 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 2821 */ 2822 if (zio->io_error && vd == NULL && 2823 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 2824 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 2825 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 2826 zio->io_error = 0; 2827 zio->io_flags |= ZIO_FLAG_IO_RETRY | 2828 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 2829 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 2830 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 2831 zio_requeue_io_start_cut_in_line); 2832 return (ZIO_PIPELINE_STOP); 2833 } 2834 2835 /* 2836 * If we got an error on a leaf device, convert it to ENXIO 2837 * if the device is not accessible at all. 2838 */ 2839 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 2840 !vdev_accessible(vd, zio)) 2841 zio->io_error = SET_ERROR(ENXIO); 2842 2843 /* 2844 * If we can't write to an interior vdev (mirror or RAID-Z), 2845 * set vdev_cant_write so that we stop trying to allocate from it. 
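 * For example, if every child of a top-level mirror becomes
 * inaccessible, writes to that vdev return ENXIO and the metaslab
 * allocator should steer new allocations elsewhere.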
2846 */
2847 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
2848 vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
2849 vd->vdev_cant_write = B_TRUE;
2850 }
2851
2852 if (zio->io_error)
2853 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2854
2855 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
2856 zio->io_physdone != NULL) {
2857 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
2858 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
2859 zio->io_physdone(zio->io_logical);
2860 }
2861
2862 return (ZIO_PIPELINE_CONTINUE);
2863}
2864
2865void
2866zio_vdev_io_reissue(zio_t *zio)
2867{
2868 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
2869 ASSERT(zio->io_error == 0);
2870
2871 zio->io_stage >>= 1;
2872}
2873
2874void
2875zio_vdev_io_redone(zio_t *zio)
2876{
2877 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
2878
2879 zio->io_stage >>= 1;
2880}
2881
2882void
2883zio_vdev_io_bypass(zio_t *zio)
2884{
2885 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
2886 ASSERT(zio->io_error == 0);
2887
2888 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
2889 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
2890}
2891
2892/*
2893 * ==========================================================================
2894 * Generate and verify checksums
2895 * ==========================================================================
2896 */
2897static int
2898zio_checksum_generate(zio_t *zio)
2899{
2900 blkptr_t *bp = zio->io_bp;
2901 enum zio_checksum checksum;
2902
2903 if (bp == NULL) {
2904 /*
2905 * This is zio_write_phys().
2906 * We're either generating a label checksum, or none at all.
2907 */
2908 checksum = zio->io_prop.zp_checksum;
2909
2910 if (checksum == ZIO_CHECKSUM_OFF)
2911 return (ZIO_PIPELINE_CONTINUE);
2912
2913 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
2914 } else {
2915 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
2916 ASSERT(!IO_IS_ALLOCATING(zio));
2917 checksum = ZIO_CHECKSUM_GANG_HEADER;
2918 } else {
2919 checksum = BP_GET_CHECKSUM(bp);
2920 }
2921 }
2922
2923 zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);
2924
2925 return (ZIO_PIPELINE_CONTINUE);
2926}
2927
2928static int
2929zio_checksum_verify(zio_t *zio)
2930{
2931 zio_bad_cksum_t info;
2932 blkptr_t *bp = zio->io_bp;
2933 int error;
2934
2935 ASSERT(zio->io_vd != NULL);
2936
2937 if (bp == NULL) {
2938 /*
2939 * This is zio_read_phys().
2940 * We're either verifying a label checksum, or nothing at all.
2941 */
2942 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
2943 return (ZIO_PIPELINE_CONTINUE);
2944
2945 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
2946 }
2947
2948 if ((error = zio_checksum_error(zio, &info)) != 0) {
2949 zio->io_error = error;
2950 if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
2951 zfs_ereport_start_checksum(zio->io_spa,
2952 zio->io_vd, zio, zio->io_offset,
2953 zio->io_size, NULL, &info);
2954 }
2955 }
2956
2957 return (ZIO_PIPELINE_CONTINUE);
2958}
2959
2960/*
2961 * Called by RAID-Z to ensure we don't compute the checksum twice.
2962 */
2963void
2964zio_checksum_verified(zio_t *zio)
2965{
2966 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
2967}
2968
2969/*
2970 * ==========================================================================
2971 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
2972 * An error of 0 indicates success. ENXIO indicates whole-device failure,
2973 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
2974 * indicate errors that are specific to one I/O, and most likely permanent.
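 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and
 * zio_worst_error(ECKSUM, EIO) returns EIO.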
2975 * Any other error is presumed to be worse because we weren't expecting it. 2976 * ========================================================================== 2977 */ 2978int 2979zio_worst_error(int e1, int e2) 2980{ 2981 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 2982 int r1, r2; 2983 2984 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 2985 if (e1 == zio_error_rank[r1]) 2986 break; 2987 2988 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 2989 if (e2 == zio_error_rank[r2]) 2990 break; 2991 2992 return (r1 > r2 ? e1 : e2); 2993} 2994 2995/* 2996 * ========================================================================== 2997 * I/O completion 2998 * ========================================================================== 2999 */ 3000static int 3001zio_ready(zio_t *zio) 3002{ 3003 blkptr_t *bp = zio->io_bp; 3004 zio_t *pio, *pio_next; 3005 3006 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 3007 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) 3008 return (ZIO_PIPELINE_STOP); 3009 3010 if (zio->io_ready) { 3011 ASSERT(IO_IS_ALLOCATING(zio)); 3012 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 3013 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3014 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3015 3016 zio->io_ready(zio); 3017 } 3018 3019 if (bp != NULL && bp != &zio->io_bp_copy) 3020 zio->io_bp_copy = *bp; 3021 3022 if (zio->io_error) 3023 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3024 3025 mutex_enter(&zio->io_lock); 3026 zio->io_state[ZIO_WAIT_READY] = 1; 3027 pio = zio_walk_parents(zio); 3028 mutex_exit(&zio->io_lock); 3029 3030 /* 3031 * As we notify zio's parents, new parents could be added. 3032 * New parents go to the head of zio's io_parent_list, however, 3033 * so we will (correctly) not notify them. The remainder of zio's 3034 * io_parent_list, from 'pio_next' onward, cannot change because 3035 * all parents must wait for us to be done before they can be done. 3036 */ 3037 for (; pio != NULL; pio = pio_next) { 3038 pio_next = zio_walk_parents(zio); 3039 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 3040 } 3041 3042 if (zio->io_flags & ZIO_FLAG_NODATA) { 3043 if (BP_IS_GANG(bp)) { 3044 zio->io_flags &= ~ZIO_FLAG_NODATA; 3045 } else { 3046 ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE); 3047 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 3048 } 3049 } 3050 3051 if (zio_injection_enabled && 3052 zio->io_spa->spa_syncing_txg == zio->io_txg) 3053 zio_handle_ignored_writes(zio); 3054 3055 return (ZIO_PIPELINE_CONTINUE); 3056} 3057 3058static int 3059zio_done(zio_t *zio) 3060{ 3061 spa_t *spa = zio->io_spa; 3062 zio_t *lio = zio->io_logical; 3063 blkptr_t *bp = zio->io_bp; 3064 vdev_t *vd = zio->io_vd; 3065 uint64_t psize = zio->io_size; 3066 zio_t *pio, *pio_next; 3067 3068 /* 3069 * If our children haven't all completed, 3070 * wait for them and then repeat this pipeline stage. 
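 * (zio_wait_for_children() stalls this zio; the last child to
 * complete re-dispatches the stage via zio_notify_parent().)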
3071 */ 3072 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 3073 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 3074 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || 3075 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 3076 return (ZIO_PIPELINE_STOP); 3077 3078 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 3079 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 3080 ASSERT(zio->io_children[c][w] == 0); 3081 3082 if (bp != NULL && !BP_IS_EMBEDDED(bp)) { 3083 ASSERT(bp->blk_pad[0] == 0); 3084 ASSERT(bp->blk_pad[1] == 0); 3085 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 3086 (bp == zio_unique_parent(zio)->io_bp)); 3087 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 3088 zio->io_bp_override == NULL && 3089 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 3090 ASSERT(!BP_SHOULD_BYTESWAP(bp)); 3091 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 3092 ASSERT(BP_COUNT_GANG(bp) == 0 || 3093 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 3094 } 3095 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 3096 VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 3097 } 3098 3099 /* 3100 * If there were child vdev/gang/ddt errors, they apply to us now. 3101 */ 3102 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 3103 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 3104 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 3105 3106 /* 3107 * If the I/O on the transformed data was successful, generate any 3108 * checksum reports now while we still have the transformed data. 3109 */ 3110 if (zio->io_error == 0) { 3111 while (zio->io_cksum_report != NULL) { 3112 zio_cksum_report_t *zcr = zio->io_cksum_report; 3113 uint64_t align = zcr->zcr_align; 3114 uint64_t asize = P2ROUNDUP(psize, align); 3115 char *abuf = zio->io_data; 3116 3117 if (asize != psize) { 3118 abuf = zio_buf_alloc(asize); 3119 bcopy(zio->io_data, abuf, psize); 3120 bzero(abuf + psize, asize - psize); 3121 } 3122 3123 zio->io_cksum_report = zcr->zcr_next; 3124 zcr->zcr_next = NULL; 3125 zcr->zcr_finish(zcr, abuf); 3126 zfs_ereport_free_checksum(zcr); 3127 3128 if (asize != psize) 3129 zio_buf_free(abuf, asize); 3130 } 3131 } 3132 3133 zio_pop_transforms(zio); /* note: may set zio->io_error */ 3134 3135 vdev_stat_update(zio, psize); 3136 3137 if (zio->io_error) { 3138 /* 3139 * If this I/O is attached to a particular vdev, 3140 * generate an error message describing the I/O failure 3141 * at the block level. We ignore these errors if the 3142 * device is currently unavailable. 3143 */ 3144 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 3145 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 3146 3147 if ((zio->io_error == EIO || !(zio->io_flags & 3148 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 3149 zio == lio) { 3150 /* 3151 * For logical I/O requests, tell the SPA to log the 3152 * error and generate a logical data ereport. 3153 */ 3154 spa_log_error(spa, zio); 3155 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 3156 0, 0); 3157 } 3158 } 3159 3160 if (zio->io_error && zio == lio) { 3161 /* 3162 * Determine whether zio should be reexecuted. This will 3163 * propagate all the way to the root via zio_notify_parent(). 
3164 */ 3165 ASSERT(vd == NULL && bp != NULL); 3166 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3167 3168 if (IO_IS_ALLOCATING(zio) && 3169 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 3170 if (zio->io_error != ENOSPC) 3171 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 3172 else 3173 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3174 } 3175 3176 if ((zio->io_type == ZIO_TYPE_READ || 3177 zio->io_type == ZIO_TYPE_FREE) && 3178 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 3179 zio->io_error == ENXIO && 3180 spa_load_state(spa) == SPA_LOAD_NONE && 3181 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 3182 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3183 3184 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 3185 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3186 3187 /* 3188 * Here is a possibly good place to attempt to do 3189 * either combinatorial reconstruction or error correction 3190 * based on checksums. It also might be a good place 3191 * to send out preliminary ereports before we suspend 3192 * processing. 3193 */ 3194 } 3195 3196 /* 3197 * If there were logical child errors, they apply to us now. 3198 * We defer this until now to avoid conflating logical child 3199 * errors with errors that happened to the zio itself when 3200 * updating vdev stats and reporting FMA events above. 3201 */ 3202 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 3203 3204 if ((zio->io_error || zio->io_reexecute) && 3205 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 3206 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 3207 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 3208 3209 zio_gang_tree_free(&zio->io_gang_tree); 3210 3211 /* 3212 * Godfather I/Os should never suspend. 3213 */ 3214 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 3215 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 3216 zio->io_reexecute = 0; 3217 3218 if (zio->io_reexecute) { 3219 /* 3220 * This is a logical I/O that wants to reexecute. 3221 * 3222 * Reexecute is top-down. When an i/o fails, if it's not 3223 * the root, it simply notifies its parent and sticks around. 3224 * The parent, seeing that it still has children in zio_done(), 3225 * does the same. This percolates all the way up to the root. 3226 * The root i/o will reexecute or suspend the entire tree. 3227 * 3228 * This approach ensures that zio_reexecute() honors 3229 * all the original i/o dependency relationships, e.g. 3230 * parents not executing until children are ready. 3231 */ 3232 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3233 3234 zio->io_gang_leader = NULL; 3235 3236 mutex_enter(&zio->io_lock); 3237 zio->io_state[ZIO_WAIT_DONE] = 1; 3238 mutex_exit(&zio->io_lock); 3239 3240 /* 3241 * "The Godfather" I/O monitors its children but is 3242 * not a true parent to them. It will track them through 3243 * the pipeline but severs its ties whenever they get into 3244 * trouble (e.g. suspended). This allows "The Godfather" 3245 * I/O to return status without blocking. 3246 */ 3247 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 3248 zio_link_t *zl = zio->io_walk_link; 3249 pio_next = zio_walk_parents(zio); 3250 3251 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 3252 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 3253 zio_remove_child(pio, zio, zl); 3254 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3255 } 3256 } 3257 3258 if ((pio = zio_unique_parent(zio)) != NULL) { 3259 /* 3260 * We're not a root i/o, so there's nothing to do 3261 * but notify our parent. 
Don't propagate errors 3262 * upward since we haven't permanently failed yet. 3263 */ 3264 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 3265 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 3266 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3267 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 3268 /* 3269 * We'd fail again if we reexecuted now, so suspend 3270 * until conditions improve (e.g. device comes online). 3271 */ 3272 zio_suspend(spa, zio); 3273 } else { 3274 /* 3275 * Reexecution is potentially a huge amount of work. 3276 * Hand it off to the otherwise-unused claim taskq. 3277 */ 3278#if defined(illumos) || !defined(_KERNEL) 3279 ASSERT(zio->io_tqent.tqent_next == NULL); 3280#else 3281 ASSERT(zio->io_tqent.tqent_task.ta_pending == 0); 3282#endif 3283 spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM, 3284 ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio, 3285 0, &zio->io_tqent); 3286 } 3287 return (ZIO_PIPELINE_STOP); 3288 } 3289 3290 ASSERT(zio->io_child_count == 0); 3291 ASSERT(zio->io_reexecute == 0); 3292 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 3293 3294 /* 3295 * Report any checksum errors, since the I/O is complete. 3296 */ 3297 while (zio->io_cksum_report != NULL) { 3298 zio_cksum_report_t *zcr = zio->io_cksum_report; 3299 zio->io_cksum_report = zcr->zcr_next; 3300 zcr->zcr_next = NULL; 3301 zcr->zcr_finish(zcr, NULL); 3302 zfs_ereport_free_checksum(zcr); 3303 } 3304 3305 /* 3306 * It is the responsibility of the done callback to ensure that this 3307 * particular zio is no longer discoverable for adoption, and as 3308 * such, cannot acquire any new parents. 3309 */ 3310 if (zio->io_done) 3311 zio->io_done(zio); 3312 3313 mutex_enter(&zio->io_lock); 3314 zio->io_state[ZIO_WAIT_DONE] = 1; 3315 mutex_exit(&zio->io_lock); 3316 3317 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 3318 zio_link_t *zl = zio->io_walk_link; 3319 pio_next = zio_walk_parents(zio); 3320 zio_remove_child(pio, zio, zl); 3321 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3322 } 3323 3324 if (zio->io_waiter != NULL) { 3325 mutex_enter(&zio->io_lock); 3326 zio->io_executor = NULL; 3327 cv_broadcast(&zio->io_cv); 3328 mutex_exit(&zio->io_lock); 3329 } else { 3330 zio_destroy(zio); 3331 } 3332 3333 return (ZIO_PIPELINE_STOP); 3334} 3335 3336/* 3337 * ========================================================================== 3338 * I/O pipeline definition 3339 * ========================================================================== 3340 */ 3341static zio_pipe_stage_t *zio_pipeline[] = { 3342 NULL, 3343 zio_read_bp_init, 3344 zio_free_bp_init, 3345 zio_issue_async, 3346 zio_write_bp_init, 3347 zio_checksum_generate, 3348 zio_nop_write, 3349 zio_ddt_read_start, 3350 zio_ddt_read_done, 3351 zio_ddt_write, 3352 zio_ddt_free, 3353 zio_gang_assemble, 3354 zio_gang_issue, 3355 zio_dva_allocate, 3356 zio_dva_free, 3357 zio_dva_claim, 3358 zio_ready, 3359 zio_vdev_io_start, 3360 zio_vdev_io_done, 3361 zio_vdev_io_assess, 3362 zio_checksum_verify, 3363 zio_done 3364}; 3365 3366/* dnp is the dnode for zb1->zb_object */ 3367boolean_t 3368zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_phys_t *zb1, 3369 const zbookmark_phys_t *zb2) 3370{ 3371 uint64_t zb1nextL0, zb2thisobj; 3372 3373 ASSERT(zb1->zb_objset == zb2->zb_objset); 3374 ASSERT(zb2->zb_level == 0); 3375 3376 /* The objset_phys_t isn't before anything. 
*/ 3377 if (dnp == NULL) 3378 return (B_FALSE); 3379 3380 zb1nextL0 = (zb1->zb_blkid + 1) << 3381 ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)); 3382 3383 zb2thisobj = zb2->zb_object ? zb2->zb_object : 3384 zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT); 3385 3386 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 3387 uint64_t nextobj = zb1nextL0 * 3388 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT; 3389 return (nextobj <= zb2thisobj); 3390 } 3391 3392 if (zb1->zb_object < zb2thisobj) 3393 return (B_TRUE); 3394 if (zb1->zb_object > zb2thisobj) 3395 return (B_FALSE); 3396 if (zb2->zb_object == DMU_META_DNODE_OBJECT) 3397 return (B_FALSE); 3398 return (zb1nextL0 <= zb2->zb_blkid); 3399}
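/*
 * Illustrative only, not used by the pipeline: a worked example of the
 * zbookmark_is_before() arithmetic above. SPA_BLKPTRSHIFT is 7 (block
 * pointers are 128 bytes), so a dnode with dn_indblkshift == 14 (16K
 * indirect blocks -- an assumed, typical value) holds 2^(14 - 7) == 128
 * block pointers per indirect block. A level-1 bookmark with
 * zb_blkid == 3 then covers L0 blkids 384..511, giving
 *
 *	zb1nextL0 = (3 + 1) << (1 * (14 - 7)) = 512,
 *
 * so zbookmark_is_before() reports it as before any level-0 bookmark in
 * the same object with zb_blkid >= 512.
 */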
1461 }
1462
1463 zio_execute(zio);
1464}
1465
1466/*
1467 * ==========================================================================
1468 * Reexecute or suspend/resume failed I/O
1469 * ==========================================================================
1470 */
1471
1472static void
1473zio_reexecute(zio_t *pio)
1474{
1475 zio_t *cio, *cio_next;
1476
1477 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
1478 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
1479 ASSERT(pio->io_gang_leader == NULL);
1480 ASSERT(pio->io_gang_tree == NULL);
1481
1482 pio->io_flags = pio->io_orig_flags;
1483 pio->io_stage = pio->io_orig_stage;
1484 pio->io_pipeline = pio->io_orig_pipeline;
1485 pio->io_reexecute = 0;
1486 pio->io_flags |= ZIO_FLAG_REEXECUTED;
1487 pio->io_error = 0;
1488 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1489 pio->io_state[w] = 0;
1490 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
1491 pio->io_child_error[c] = 0;
1492
1493 if (IO_IS_ALLOCATING(pio))
1494 BP_ZERO(pio->io_bp);
1495
1496 /*
1497 * As we reexecute pio's children, new children could be created.
1498 * New children go to the head of pio's io_child_list, however,
1499 * so we will (correctly) not reexecute them. The key is that
1500 * the remainder of pio's io_child_list, from 'cio_next' onward,
1501 * cannot be affected by any side effects of reexecuting 'cio'.
1502 */
1503 for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
1504 cio_next = zio_walk_children(pio);
1505 mutex_enter(&pio->io_lock);
1506 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1507 pio->io_children[cio->io_child_type][w]++;
1508 mutex_exit(&pio->io_lock);
1509 zio_reexecute(cio);
1510 }
1511
1512 /*
1513 * Now that all children have been reexecuted, execute the parent.
1514 * We don't reexecute "The Godfather" I/O here as it's the
1515 * responsibility of the caller to wait on him.
1516 */
1517 if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
1518 zio_execute(pio);
1519}
1520
1521void
1522zio_suspend(spa_t *spa, zio_t *zio)
1523{
1524 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
1525 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
1526 "failure and the failure mode property for this pool "
1527 "is set to panic.", spa_name(spa));
1528
1529 zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);
1530
1531 mutex_enter(&spa->spa_suspend_lock);
1532
1533 if (spa->spa_suspend_zio_root == NULL)
1534 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
1535 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
1536 ZIO_FLAG_GODFATHER);
1537
1538 spa->spa_suspended = B_TRUE;
1539
1540 if (zio != NULL) {
1541 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
1542 ASSERT(zio != spa->spa_suspend_zio_root);
1543 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1544 ASSERT(zio_unique_parent(zio) == NULL);
1545 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
1546 zio_add_child(spa->spa_suspend_zio_root, zio);
1547 }
1548
1549 mutex_exit(&spa->spa_suspend_lock);
1550}
1551
1552int
1553zio_resume(spa_t *spa)
1554{
1555 zio_t *pio;
1556
1557 /*
1558 * Reexecute all previously suspended i/o.
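 * zio_suspend() made each of them a child of the godfather zio
 * spa_suspend_zio_root, so reexecuting that single root (and waiting
 * on it below) covers every suspended i/o.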
1559 */ 1560 mutex_enter(&spa->spa_suspend_lock); 1561 spa->spa_suspended = B_FALSE; 1562 cv_broadcast(&spa->spa_suspend_cv); 1563 pio = spa->spa_suspend_zio_root; 1564 spa->spa_suspend_zio_root = NULL; 1565 mutex_exit(&spa->spa_suspend_lock); 1566 1567 if (pio == NULL) 1568 return (0); 1569 1570 zio_reexecute(pio); 1571 return (zio_wait(pio)); 1572} 1573 1574void 1575zio_resume_wait(spa_t *spa) 1576{ 1577 mutex_enter(&spa->spa_suspend_lock); 1578 while (spa_suspended(spa)) 1579 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1580 mutex_exit(&spa->spa_suspend_lock); 1581} 1582 1583/* 1584 * ========================================================================== 1585 * Gang blocks. 1586 * 1587 * A gang block is a collection of small blocks that looks to the DMU 1588 * like one large block. When zio_dva_allocate() cannot find a block 1589 * of the requested size, due to either severe fragmentation or the pool 1590 * being nearly full, it calls zio_write_gang_block() to construct the 1591 * block from smaller fragments. 1592 * 1593 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1594 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1595 * an indirect block: it's an array of block pointers. It consumes 1596 * only one sector and hence is allocatable regardless of fragmentation. 1597 * The gang header's bps point to its gang members, which hold the data. 1598 * 1599 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1600 * as the verifier to ensure uniqueness of the SHA256 checksum. 1601 * Critically, the gang block bp's blk_cksum is the checksum of the data, 1602 * not the gang header. This ensures that data block signatures (needed for 1603 * deduplication) are independent of how the block is physically stored. 1604 * 1605 * Gang blocks can be nested: a gang member may itself be a gang block. 1606 * Thus every gang block is a tree in which root and all interior nodes are 1607 * gang headers, and the leaves are normal blocks that contain user data. 1608 * The root of the gang tree is called the gang leader. 1609 * 1610 * To perform any operation (read, rewrite, free, claim) on a gang block, 1611 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1612 * in the io_gang_tree field of the original logical i/o by recursively 1613 * reading the gang leader and all gang headers below it. This yields 1614 * an in-core tree containing the contents of every gang header and the 1615 * bps for every constituent of the gang block. 1616 * 1617 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1618 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1619 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1620 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1621 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1622 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1623 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1624 * of the gang header plus zio_checksum_compute() of the data to update the 1625 * gang header's blk_cksum as described above. 1626 * 1627 * The two-phase assemble/issue model solves the problem of partial failure -- 1628 * what if you'd freed part of a gang block but then couldn't read the 1629 * gang header for another part? 
Assembling the entire gang tree first 1630 * ensures that all the necessary gang header I/O has succeeded before 1631 * starting the actual work of free, claim, or write. Once the gang tree 1632 * is assembled, free and claim are in-memory operations that cannot fail. 1633 * 1634 * In the event that a gang write fails, zio_dva_unallocate() walks the 1635 * gang tree to immediately free (i.e. insert back into the space map) 1636 * everything we've allocated. This ensures that we don't get ENOSPC 1637 * errors during repeated suspend/resume cycles due to a flaky device. 1638 * 1639 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1640 * the gang tree, we won't modify the block, so we can safely defer the free 1641 * (knowing that the block is still intact). If we *can* assemble the gang 1642 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1643 * each constituent bp and we can allocate a new block on the next sync pass. 1644 * 1645 * In all cases, the gang tree allows complete recovery from partial failure. 1646 * ========================================================================== 1647 */ 1648 1649static zio_t * 1650zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1651{ 1652 if (gn != NULL) 1653 return (pio); 1654 1655 return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp), 1656 NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1657 &pio->io_bookmark)); 1658} 1659 1660zio_t * 1661zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1662{ 1663 zio_t *zio; 1664 1665 if (gn != NULL) { 1666 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1667 gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority, 1668 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1669 /* 1670 * As we rewrite each gang header, the pipeline will compute 1671 * a new gang block header checksum for it; but no one will 1672 * compute a new data checksum, so we do that here. The one 1673 * exception is the gang leader: the pipeline already computed 1674 * its data checksum because that stage precedes gang assembly. 1675 * (Presently, nothing actually uses interior data checksums; 1676 * this is just good hygiene.) 1677 */ 1678 if (gn != pio->io_gang_leader->io_gang_tree) { 1679 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1680 data, BP_GET_PSIZE(bp)); 1681 } 1682 /* 1683 * If we are here to damage data for testing purposes, 1684 * leave the GBH alone so that we can detect the damage. 1685 */ 1686 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 1687 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 1688 } else { 1689 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1690 data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority, 1691 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1692 } 1693 1694 return (zio); 1695} 1696 1697/* ARGSUSED */ 1698zio_t * 1699zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1700{ 1701 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 1702 BP_IS_GANG(bp) ? 
SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp), 1703 ZIO_GANG_CHILD_FLAGS(pio))); 1704} 1705 1706/* ARGSUSED */ 1707zio_t * 1708zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1709{ 1710 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1711 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1712} 1713 1714static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 1715 NULL, 1716 zio_read_gang, 1717 zio_rewrite_gang, 1718 zio_free_gang, 1719 zio_claim_gang, 1720 NULL 1721}; 1722 1723static void zio_gang_tree_assemble_done(zio_t *zio); 1724 1725static zio_gang_node_t * 1726zio_gang_node_alloc(zio_gang_node_t **gnpp) 1727{ 1728 zio_gang_node_t *gn; 1729 1730 ASSERT(*gnpp == NULL); 1731 1732 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 1733 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 1734 *gnpp = gn; 1735 1736 return (gn); 1737} 1738 1739static void 1740zio_gang_node_free(zio_gang_node_t **gnpp) 1741{ 1742 zio_gang_node_t *gn = *gnpp; 1743 1744 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1745 ASSERT(gn->gn_child[g] == NULL); 1746 1747 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1748 kmem_free(gn, sizeof (*gn)); 1749 *gnpp = NULL; 1750} 1751 1752static void 1753zio_gang_tree_free(zio_gang_node_t **gnpp) 1754{ 1755 zio_gang_node_t *gn = *gnpp; 1756 1757 if (gn == NULL) 1758 return; 1759 1760 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1761 zio_gang_tree_free(&gn->gn_child[g]); 1762 1763 zio_gang_node_free(gnpp); 1764} 1765 1766static void 1767zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 1768{ 1769 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 1770 1771 ASSERT(gio->io_gang_leader == gio); 1772 ASSERT(BP_IS_GANG(bp)); 1773 1774 zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh, 1775 SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn, 1776 gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 1777} 1778 1779static void 1780zio_gang_tree_assemble_done(zio_t *zio) 1781{ 1782 zio_t *gio = zio->io_gang_leader; 1783 zio_gang_node_t *gn = zio->io_private; 1784 blkptr_t *bp = zio->io_bp; 1785 1786 ASSERT(gio == zio_unique_parent(zio)); 1787 ASSERT(zio->io_child_count == 0); 1788 1789 if (zio->io_error) 1790 return; 1791 1792 if (BP_SHOULD_BYTESWAP(bp)) 1793 byteswap_uint64_array(zio->io_data, zio->io_size); 1794 1795 ASSERT(zio->io_data == gn->gn_gbh); 1796 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 1797 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 1798 1799 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1800 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1801 if (!BP_IS_GANG(gbp)) 1802 continue; 1803 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 1804 } 1805} 1806 1807static void 1808zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) 1809{ 1810 zio_t *gio = pio->io_gang_leader; 1811 zio_t *zio; 1812 1813 ASSERT(BP_IS_GANG(bp) == !!gn); 1814 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 1815 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 1816 1817 /* 1818 * If you're a gang header, your data is in gn->gn_gbh. 1819 * If you're a gang member, your data is in 'data' and gn == NULL. 
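 * Either way, the ASSERT above (BP_IS_GANG(bp) == !!gn) guarantees the
 * issue function below receives a gang node exactly when bp is a gang
 * header.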
1820 */ 1821 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data); 1822 1823 if (gn != NULL) { 1824 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 1825 1826 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1827 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1828 if (BP_IS_HOLE(gbp)) 1829 continue; 1830 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data); 1831 data = (char *)data + BP_GET_PSIZE(gbp); 1832 } 1833 } 1834 1835 if (gn == gio->io_gang_tree && gio->io_data != NULL) 1836 ASSERT3P((char *)gio->io_data + gio->io_size, ==, data); 1837 1838 if (zio != pio) 1839 zio_nowait(zio); 1840} 1841 1842static int 1843zio_gang_assemble(zio_t *zio) 1844{ 1845 blkptr_t *bp = zio->io_bp; 1846 1847 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 1848 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 1849 1850 zio->io_gang_leader = zio; 1851 1852 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 1853 1854 return (ZIO_PIPELINE_CONTINUE); 1855} 1856 1857static int 1858zio_gang_issue(zio_t *zio) 1859{ 1860 blkptr_t *bp = zio->io_bp; 1861 1862 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE)) 1863 return (ZIO_PIPELINE_STOP); 1864 1865 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 1866 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 1867 1868 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 1869 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data); 1870 else 1871 zio_gang_tree_free(&zio->io_gang_tree); 1872 1873 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1874 1875 return (ZIO_PIPELINE_CONTINUE); 1876} 1877 1878static void 1879zio_write_gang_member_ready(zio_t *zio) 1880{ 1881 zio_t *pio = zio_unique_parent(zio); 1882 zio_t *gio = zio->io_gang_leader; 1883 dva_t *cdva = zio->io_bp->blk_dva; 1884 dva_t *pdva = pio->io_bp->blk_dva; 1885 uint64_t asize; 1886 1887 if (BP_IS_HOLE(zio->io_bp)) 1888 return; 1889 1890 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 1891 1892 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 1893 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 1894 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 1895 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 1896 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 1897 1898 mutex_enter(&pio->io_lock); 1899 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 1900 ASSERT(DVA_GET_GANG(&pdva[d])); 1901 asize = DVA_GET_ASIZE(&pdva[d]); 1902 asize += DVA_GET_ASIZE(&cdva[d]); 1903 DVA_SET_ASIZE(&pdva[d], asize); 1904 } 1905 mutex_exit(&pio->io_lock); 1906} 1907 1908static int 1909zio_write_gang_block(zio_t *pio) 1910{ 1911 spa_t *spa = pio->io_spa; 1912 blkptr_t *bp = pio->io_bp; 1913 zio_t *gio = pio->io_gang_leader; 1914 zio_t *zio; 1915 zio_gang_node_t *gn, **gnpp; 1916 zio_gbh_phys_t *gbh; 1917 uint64_t txg = pio->io_txg; 1918 uint64_t resid = pio->io_size; 1919 uint64_t lsize; 1920 int copies = gio->io_prop.zp_copies; 1921 int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 1922 zio_prop_t zp; 1923 int error; 1924 1925 error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE, 1926 bp, gbh_copies, txg, pio == gio ? 
NULL : gio->io_bp, 1927 METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER); 1928 if (error) { 1929 pio->io_error = error; 1930 return (ZIO_PIPELINE_CONTINUE); 1931 } 1932 1933 if (pio == gio) { 1934 gnpp = &gio->io_gang_tree; 1935 } else { 1936 gnpp = pio->io_private; 1937 ASSERT(pio->io_ready == zio_write_gang_member_ready); 1938 } 1939 1940 gn = zio_gang_node_alloc(gnpp); 1941 gbh = gn->gn_gbh; 1942 bzero(gbh, SPA_GANGBLOCKSIZE); 1943 1944 /* 1945 * Create the gang header. 1946 */ 1947 zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL, 1948 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1949 1950 /* 1951 * Create and nowait the gang children. 1952 */ 1953 for (int g = 0; resid != 0; resid -= lsize, g++) { 1954 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 1955 SPA_MINBLOCKSIZE); 1956 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 1957 1958 zp.zp_checksum = gio->io_prop.zp_checksum; 1959 zp.zp_compress = ZIO_COMPRESS_OFF; 1960 zp.zp_type = DMU_OT_NONE; 1961 zp.zp_level = 0; 1962 zp.zp_copies = gio->io_prop.zp_copies; 1963 zp.zp_dedup = B_FALSE; 1964 zp.zp_dedup_verify = B_FALSE; 1965 zp.zp_nopwrite = B_FALSE; 1966 1967 zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 1968 (char *)pio->io_data + (pio->io_size - resid), lsize, &zp, 1969 zio_write_gang_member_ready, NULL, NULL, &gn->gn_child[g], 1970 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1971 &pio->io_bookmark)); 1972 } 1973 1974 /* 1975 * Set pio's pipeline to just wait for zio to finish. 1976 */ 1977 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1978 1979 zio_nowait(zio); 1980 1981 return (ZIO_PIPELINE_CONTINUE); 1982} 1983 1984/* 1985 * The zio_nop_write stage in the pipeline determines if allocating 1986 * a new bp is necessary. By leveraging a cryptographically secure checksum, 1987 * such as SHA256, we can compare the checksums of the new data and the old 1988 * to determine if allocating a new block is required. The nopwrite 1989 * feature can handle writes in either syncing or open context (i.e. zil 1990 * writes) and as a result is mutually exclusive with dedup. 1991 */ 1992static int 1993zio_nop_write(zio_t *zio) 1994{ 1995 blkptr_t *bp = zio->io_bp; 1996 blkptr_t *bp_orig = &zio->io_bp_orig; 1997 zio_prop_t *zp = &zio->io_prop; 1998 1999 ASSERT(BP_GET_LEVEL(bp) == 0); 2000 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2001 ASSERT(zp->zp_nopwrite); 2002 ASSERT(!zp->zp_dedup); 2003 ASSERT(zio->io_bp_override == NULL); 2004 ASSERT(IO_IS_ALLOCATING(zio)); 2005 2006 /* 2007 * Check to see if the original bp and the new bp have matching 2008 * characteristics (i.e. same checksum, compression algorithms, etc). 2009 * If they don't then just continue with the pipeline which will 2010 * allocate a new bp. 2011 */ 2012 if (BP_IS_HOLE(bp_orig) || 2013 !zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup || 2014 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 2015 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 2016 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 2017 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 2018 return (ZIO_PIPELINE_CONTINUE); 2019 2020 /* 2021 * If the checksums match then reset the pipeline so that we 2022 * avoid allocating a new bp and issuing any I/O. 
2023 */ 2024 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 2025 ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup); 2026 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 2027 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 2028 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 2029 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 2030 sizeof (uint64_t)) == 0); 2031 2032 *bp = *bp_orig; 2033 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2034 zio->io_flags |= ZIO_FLAG_NOPWRITE; 2035 } 2036 2037 return (ZIO_PIPELINE_CONTINUE); 2038} 2039 2040/* 2041 * ========================================================================== 2042 * Dedup 2043 * ========================================================================== 2044 */ 2045static void 2046zio_ddt_child_read_done(zio_t *zio) 2047{ 2048 blkptr_t *bp = zio->io_bp; 2049 ddt_entry_t *dde = zio->io_private; 2050 ddt_phys_t *ddp; 2051 zio_t *pio = zio_unique_parent(zio); 2052 2053 mutex_enter(&pio->io_lock); 2054 ddp = ddt_phys_select(dde, bp); 2055 if (zio->io_error == 0) 2056 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2057 if (zio->io_error == 0 && dde->dde_repair_data == NULL) 2058 dde->dde_repair_data = zio->io_data; 2059 else 2060 zio_buf_free(zio->io_data, zio->io_size); 2061 mutex_exit(&pio->io_lock); 2062} 2063 2064static int 2065zio_ddt_read_start(zio_t *zio) 2066{ 2067 blkptr_t *bp = zio->io_bp; 2068 2069 ASSERT(BP_GET_DEDUP(bp)); 2070 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2071 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2072 2073 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2074 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2075 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2076 ddt_phys_t *ddp = dde->dde_phys; 2077 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2078 blkptr_t blk; 2079 2080 ASSERT(zio->io_vsd == NULL); 2081 zio->io_vsd = dde; 2082 2083 if (ddp_self == NULL) 2084 return (ZIO_PIPELINE_CONTINUE); 2085 2086 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2087 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2088 continue; 2089 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2090 &blk); 2091 zio_nowait(zio_read(zio, zio->io_spa, &blk, 2092 zio_buf_alloc(zio->io_size), zio->io_size, 2093 zio_ddt_child_read_done, dde, zio->io_priority, 2094 ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE, 2095 &zio->io_bookmark)); 2096 } 2097 return (ZIO_PIPELINE_CONTINUE); 2098 } 2099 2100 zio_nowait(zio_read(zio, zio->io_spa, bp, 2101 zio->io_data, zio->io_size, NULL, NULL, zio->io_priority, 2102 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2103 2104 return (ZIO_PIPELINE_CONTINUE); 2105} 2106 2107static int 2108zio_ddt_read_done(zio_t *zio) 2109{ 2110 blkptr_t *bp = zio->io_bp; 2111 2112 if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE)) 2113 return (ZIO_PIPELINE_STOP); 2114 2115 ASSERT(BP_GET_DEDUP(bp)); 2116 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2117 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2118 2119 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2120 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2121 ddt_entry_t *dde = zio->io_vsd; 2122 if (ddt == NULL) { 2123 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2124 return (ZIO_PIPELINE_CONTINUE); 2125 } 2126 if (dde == NULL) { 2127 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2128 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2129 return (ZIO_PIPELINE_STOP); 2130 } 2131 if (dde->dde_repair_data != NULL) { 2132 bcopy(dde->dde_repair_data, zio->io_data, zio->io_size); 2133 zio->io_child_error[ZIO_CHILD_DDT] = 0; 
2134 } 2135 ddt_repair_done(ddt, dde); 2136 zio->io_vsd = NULL; 2137 } 2138 2139 ASSERT(zio->io_vsd == NULL); 2140 2141 return (ZIO_PIPELINE_CONTINUE); 2142} 2143 2144static boolean_t 2145zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2146{ 2147 spa_t *spa = zio->io_spa; 2148 2149 /* 2150 * Note: we compare the original data, not the transformed data, 2151 * because when zio->io_bp is an override bp, we will not have 2152 * pushed the I/O transforms. That's an important optimization 2153 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2154 */ 2155 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2156 zio_t *lio = dde->dde_lead_zio[p]; 2157 2158 if (lio != NULL) { 2159 return (lio->io_orig_size != zio->io_orig_size || 2160 bcmp(zio->io_orig_data, lio->io_orig_data, 2161 zio->io_orig_size) != 0); 2162 } 2163 } 2164 2165 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2166 ddt_phys_t *ddp = &dde->dde_phys[p]; 2167 2168 if (ddp->ddp_phys_birth != 0) { 2169 arc_buf_t *abuf = NULL; 2170 uint32_t aflags = ARC_WAIT; 2171 blkptr_t blk = *zio->io_bp; 2172 int error; 2173 2174 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2175 2176 ddt_exit(ddt); 2177 2178 error = arc_read(NULL, spa, &blk, 2179 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2180 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2181 &aflags, &zio->io_bookmark); 2182 2183 if (error == 0) { 2184 if (arc_buf_size(abuf) != zio->io_orig_size || 2185 bcmp(abuf->b_data, zio->io_orig_data, 2186 zio->io_orig_size) != 0) 2187 error = SET_ERROR(EEXIST); 2188 VERIFY(arc_buf_remove_ref(abuf, &abuf)); 2189 } 2190 2191 ddt_enter(ddt); 2192 return (error != 0); 2193 } 2194 } 2195 2196 return (B_FALSE); 2197} 2198 2199static void 2200zio_ddt_child_write_ready(zio_t *zio) 2201{ 2202 int p = zio->io_prop.zp_copies; 2203 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2204 ddt_entry_t *dde = zio->io_private; 2205 ddt_phys_t *ddp = &dde->dde_phys[p]; 2206 zio_t *pio; 2207 2208 if (zio->io_error) 2209 return; 2210 2211 ddt_enter(ddt); 2212 2213 ASSERT(dde->dde_lead_zio[p] == zio); 2214 2215 ddt_phys_fill(ddp, zio->io_bp); 2216 2217 while ((pio = zio_walk_parents(zio)) != NULL) 2218 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2219 2220 ddt_exit(ddt); 2221} 2222 2223static void 2224zio_ddt_child_write_done(zio_t *zio) 2225{ 2226 int p = zio->io_prop.zp_copies; 2227 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2228 ddt_entry_t *dde = zio->io_private; 2229 ddt_phys_t *ddp = &dde->dde_phys[p]; 2230 2231 ddt_enter(ddt); 2232 2233 ASSERT(ddp->ddp_refcnt == 0); 2234 ASSERT(dde->dde_lead_zio[p] == zio); 2235 dde->dde_lead_zio[p] = NULL; 2236 2237 if (zio->io_error == 0) { 2238 while (zio_walk_parents(zio) != NULL) 2239 ddt_phys_addref(ddp); 2240 } else { 2241 ddt_phys_clear(ddp); 2242 } 2243 2244 ddt_exit(ddt); 2245} 2246 2247static void 2248zio_ddt_ditto_write_done(zio_t *zio) 2249{ 2250 int p = DDT_PHYS_DITTO; 2251 zio_prop_t *zp = &zio->io_prop; 2252 blkptr_t *bp = zio->io_bp; 2253 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2254 ddt_entry_t *dde = zio->io_private; 2255 ddt_phys_t *ddp = &dde->dde_phys[p]; 2256 ddt_key_t *ddk = &dde->dde_key; 2257 2258 ddt_enter(ddt); 2259 2260 ASSERT(ddp->ddp_refcnt == 0); 2261 ASSERT(dde->dde_lead_zio[p] == zio); 2262 dde->dde_lead_zio[p] = NULL; 2263 2264 if (zio->io_error == 0) { 2265 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2266 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2267 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2268 if 
(ddp->ddp_phys_birth != 0) 2269 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2270 ddt_phys_fill(ddp, bp); 2271 } 2272 2273 ddt_exit(ddt); 2274} 2275 2276static int 2277zio_ddt_write(zio_t *zio) 2278{ 2279 spa_t *spa = zio->io_spa; 2280 blkptr_t *bp = zio->io_bp; 2281 uint64_t txg = zio->io_txg; 2282 zio_prop_t *zp = &zio->io_prop; 2283 int p = zp->zp_copies; 2284 int ditto_copies; 2285 zio_t *cio = NULL; 2286 zio_t *dio = NULL; 2287 ddt_t *ddt = ddt_select(spa, bp); 2288 ddt_entry_t *dde; 2289 ddt_phys_t *ddp; 2290 2291 ASSERT(BP_GET_DEDUP(bp)); 2292 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2293 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2294 2295 ddt_enter(ddt); 2296 dde = ddt_lookup(ddt, bp, B_TRUE); 2297 ddp = &dde->dde_phys[p]; 2298 2299 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2300 /* 2301 * If we're using a weak checksum, upgrade to a strong checksum 2302 * and try again. If we're already using a strong checksum, 2303 * we can't resolve it, so just convert to an ordinary write. 2304 * (And automatically e-mail a paper to Nature?) 2305 */ 2306 if (!zio_checksum_table[zp->zp_checksum].ci_dedup) { 2307 zp->zp_checksum = spa_dedup_checksum(spa); 2308 zio_pop_transforms(zio); 2309 zio->io_stage = ZIO_STAGE_OPEN; 2310 BP_ZERO(bp); 2311 } else { 2312 zp->zp_dedup = B_FALSE; 2313 } 2314 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2315 ddt_exit(ddt); 2316 return (ZIO_PIPELINE_CONTINUE); 2317 } 2318 2319 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2320 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2321 2322 if (ditto_copies > ddt_ditto_copies_present(dde) && 2323 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2324 zio_prop_t czp = *zp; 2325 2326 czp.zp_copies = ditto_copies; 2327 2328 /* 2329 * If we arrived here with an override bp, we won't have run 2330 * the transform stack, so we won't have the data we need to 2331 * generate a child i/o. So, toss the override bp and restart. 2332 * This is safe, because using the override bp is just an 2333 * optimization; and it's rare, so the cost doesn't matter. 
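 * Resetting io_stage to ZIO_STAGE_OPEN below re-runs the write
 * pipeline from the top, so the restarted write is transformed
 * normally and reaches this stage again without the override bp.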
2334 */ 2335 if (zio->io_bp_override) { 2336 zio_pop_transforms(zio); 2337 zio->io_stage = ZIO_STAGE_OPEN; 2338 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2339 zio->io_bp_override = NULL; 2340 BP_ZERO(bp); 2341 ddt_exit(ddt); 2342 return (ZIO_PIPELINE_CONTINUE); 2343 } 2344 2345 dio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2346 zio->io_orig_size, &czp, NULL, NULL, 2347 zio_ddt_ditto_write_done, dde, zio->io_priority, 2348 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2349 2350 zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL); 2351 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2352 } 2353 2354 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2355 if (ddp->ddp_phys_birth != 0) 2356 ddt_bp_fill(ddp, bp, txg); 2357 if (dde->dde_lead_zio[p] != NULL) 2358 zio_add_child(zio, dde->dde_lead_zio[p]); 2359 else 2360 ddt_phys_addref(ddp); 2361 } else if (zio->io_bp_override) { 2362 ASSERT(bp->blk_birth == txg); 2363 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2364 ddt_phys_fill(ddp, bp); 2365 ddt_phys_addref(ddp); 2366 } else { 2367 cio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2368 zio->io_orig_size, zp, zio_ddt_child_write_ready, NULL, 2369 zio_ddt_child_write_done, dde, zio->io_priority, 2370 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2371 2372 zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL); 2373 dde->dde_lead_zio[p] = cio; 2374 } 2375 2376 ddt_exit(ddt); 2377 2378 if (cio) 2379 zio_nowait(cio); 2380 if (dio) 2381 zio_nowait(dio); 2382 2383 return (ZIO_PIPELINE_CONTINUE); 2384} 2385 2386ddt_entry_t *freedde; /* for debugging */ 2387 2388static int 2389zio_ddt_free(zio_t *zio) 2390{ 2391 spa_t *spa = zio->io_spa; 2392 blkptr_t *bp = zio->io_bp; 2393 ddt_t *ddt = ddt_select(spa, bp); 2394 ddt_entry_t *dde; 2395 ddt_phys_t *ddp; 2396 2397 ASSERT(BP_GET_DEDUP(bp)); 2398 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2399 2400 ddt_enter(ddt); 2401 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2402 ddp = ddt_phys_select(dde, bp); 2403 ddt_phys_decref(ddp); 2404 ddt_exit(ddt); 2405 2406 return (ZIO_PIPELINE_CONTINUE); 2407} 2408 2409/* 2410 * ========================================================================== 2411 * Allocate and free blocks 2412 * ========================================================================== 2413 */ 2414static int 2415zio_dva_allocate(zio_t *zio) 2416{ 2417 spa_t *spa = zio->io_spa; 2418 metaslab_class_t *mc = spa_normal_class(spa); 2419 blkptr_t *bp = zio->io_bp; 2420 int error; 2421 int flags = 0; 2422 2423 if (zio->io_gang_leader == NULL) { 2424 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2425 zio->io_gang_leader = zio; 2426 } 2427 2428 ASSERT(BP_IS_HOLE(bp)); 2429 ASSERT0(BP_GET_NDVAS(bp)); 2430 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2431 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2432 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2433 2434 /* 2435 * The dump device does not support gang blocks so allocation on 2436 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid 2437 * the "fast" gang feature. 2438 */ 2439 flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0; 2440 flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ? 
2441 METASLAB_GANG_CHILD : 0; 2442 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2443 zio->io_prop.zp_copies, zio->io_txg, NULL, flags); 2444 2445 if (error) { 2446 spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 2447 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2448 error); 2449 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2450 return (zio_write_gang_block(zio)); 2451 zio->io_error = error; 2452 } 2453 2454 return (ZIO_PIPELINE_CONTINUE); 2455} 2456 2457static int 2458zio_dva_free(zio_t *zio) 2459{ 2460 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 2461 2462 return (ZIO_PIPELINE_CONTINUE); 2463} 2464 2465static int 2466zio_dva_claim(zio_t *zio) 2467{ 2468 int error; 2469 2470 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 2471 if (error) 2472 zio->io_error = error; 2473 2474 return (ZIO_PIPELINE_CONTINUE); 2475} 2476 2477/* 2478 * Undo an allocation. This is used by zio_done() when an I/O fails 2479 * and we want to give back the block we just allocated. 2480 * This handles both normal blocks and gang blocks. 2481 */ 2482static void 2483zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 2484{ 2485 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2486 ASSERT(zio->io_bp_override == NULL); 2487 2488 if (!BP_IS_HOLE(bp)) 2489 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 2490 2491 if (gn != NULL) { 2492 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2493 zio_dva_unallocate(zio, gn->gn_child[g], 2494 &gn->gn_gbh->zg_blkptr[g]); 2495 } 2496 } 2497} 2498 2499/* 2500 * Try to allocate an intent log block. Return 0 on success, errno on failure. 2501 */ 2502int 2503zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp, 2504 uint64_t size, boolean_t use_slog) 2505{ 2506 int error = 1; 2507 2508 ASSERT(txg > spa_syncing_txg(spa)); 2509 2510 /* 2511 * ZIL blocks are always contiguous (i.e. not gang blocks) so we 2512 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang" 2513 * when allocating them. 2514 */ 2515 if (use_slog) { 2516 error = metaslab_alloc(spa, spa_log_class(spa), size, 2517 new_bp, 1, txg, old_bp, 2518 METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID); 2519 } 2520 2521 if (error) { 2522 error = metaslab_alloc(spa, spa_normal_class(spa), size, 2523 new_bp, 1, txg, old_bp, 2524 METASLAB_HINTBP_AVOID); 2525 } 2526 2527 if (error == 0) { 2528 BP_SET_LSIZE(new_bp, size); 2529 BP_SET_PSIZE(new_bp, size); 2530 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 2531 BP_SET_CHECKSUM(new_bp, 2532 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 2533 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 2534 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 2535 BP_SET_LEVEL(new_bp, 0); 2536 BP_SET_DEDUP(new_bp, 0); 2537 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 2538 } 2539 2540 return (error); 2541} 2542 2543/* 2544 * Free an intent log block. 
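 * ZIL blocks are never deduplicated or ganged -- see zio_alloc_zil()
 * above and the ASSERTs below -- so an ordinary zio_free() suffices.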
2545 */ 2546void 2547zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) 2548{ 2549 ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); 2550 ASSERT(!BP_IS_GANG(bp)); 2551 2552 zio_free(spa, txg, bp); 2553} 2554 2555/* 2556 * ========================================================================== 2557 * Read, write and delete to physical devices 2558 * ========================================================================== 2559 */ 2560static int 2561zio_vdev_io_start(zio_t *zio) 2562{ 2563 vdev_t *vd = zio->io_vd; 2564 uint64_t align; 2565 spa_t *spa = zio->io_spa; 2566 int ret; 2567 2568 ASSERT(zio->io_error == 0); 2569 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 2570 2571 if (vd == NULL) { 2572 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2573 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 2574 2575 /* 2576 * The mirror_ops handle multiple DVAs in a single BP. 2577 */ 2578 return (vdev_mirror_ops.vdev_op_io_start(zio)); 2579 } 2580 2581 if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_FREE && 2582 zio->io_priority == ZIO_PRIORITY_NOW) { 2583 trim_map_free(vd, zio->io_offset, zio->io_size, zio->io_txg); 2584 return (ZIO_PIPELINE_CONTINUE); 2585 } 2586 2587 /* 2588 * We keep track of time-sensitive I/Os so that the scan thread 2589 * can quickly react to certain workloads. In particular, we care 2590 * about non-scrubbing, top-level reads and writes with the following 2591 * characteristics: 2592 * - synchronous writes of user data to non-slog devices 2593 * - any reads of user data 2594 * When these conditions are met, adjust the timestamp of spa_last_io 2595 * which allows the scan thread to adjust its workload accordingly. 2596 */ 2597 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 2598 vd == vd->vdev_top && !vd->vdev_islog && 2599 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 2600 zio->io_txg != spa_syncing_txg(spa)) { 2601 uint64_t old = spa->spa_last_io; 2602 uint64_t new = ddi_get_lbolt64(); 2603 if (old != new) 2604 (void) atomic_cas_64(&spa->spa_last_io, old, new); 2605 } 2606 2607 align = 1ULL << vd->vdev_top->vdev_ashift; 2608 2609 if ((!(zio->io_flags & ZIO_FLAG_PHYSICAL) || 2610 (vd->vdev_top->vdev_physical_ashift > SPA_MINBLOCKSHIFT)) && 2611 P2PHASE(zio->io_size, align) != 0) { 2612 /* Transform logical writes to be a full physical block size. */ 2613 uint64_t asize = P2ROUNDUP(zio->io_size, align); 2614 char *abuf = NULL; 2615 if (zio->io_type == ZIO_TYPE_READ || 2616 zio->io_type == ZIO_TYPE_WRITE) 2617 abuf = zio_buf_alloc(asize); 2618 ASSERT(vd == vd->vdev_top); 2619 if (zio->io_type == ZIO_TYPE_WRITE) { 2620 bcopy(zio->io_data, abuf, zio->io_size); 2621 bzero(abuf + zio->io_size, asize - zio->io_size); 2622 } 2623 zio_push_transform(zio, abuf, asize, abuf ? asize : 0, 2624 zio_subblock); 2625 } 2626 2627 /* 2628 * If this is not a physical io, make sure that it is properly aligned 2629 * before proceeding. 2630 */ 2631 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 2632 ASSERT0(P2PHASE(zio->io_offset, align)); 2633 ASSERT0(P2PHASE(zio->io_size, align)); 2634 } else { 2635 /* 2636 * For physical writes, we allow 512b aligned writes and assume 2637 * the device will perform a read-modify-write as necessary. 
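 * (A typical example is a 512-byte label I/O issued to a device whose
 * physical sector is larger; only SPA_MINBLOCKSIZE alignment is
 * asserted below.)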
2638 */ 2639 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 2640 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 2641 } 2642 2643 VERIFY(zio->io_type == ZIO_TYPE_READ || spa_writeable(spa)); 2644 2645 /* 2646 * If this is a repair I/O, and there's no self-healing involved -- 2647 * that is, we're just resilvering what we expect to resilver -- 2648 * then don't do the I/O unless zio's txg is actually in vd's DTL. 2649 * This prevents spurious resilvering with nested replication. 2650 * For example, given a mirror of mirrors, (A+B)+(C+D), if only 2651 * A is out of date, we'll read from C+D, then use the data to 2652 * resilver A+B -- but we don't actually want to resilver B, just A. 2653 * The top-level mirror has no way to know this, so instead we just 2654 * discard unnecessary repairs as we work our way down the vdev tree. 2655 * The same logic applies to any form of nested replication: 2656 * ditto + mirror, RAID-Z + replacing, etc. This covers them all. 2657 */ 2658 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 2659 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 2660 zio->io_txg != 0 && /* not a delegated i/o */ 2661 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 2662 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2663 zio_vdev_io_bypass(zio); 2664 return (ZIO_PIPELINE_CONTINUE); 2665 } 2666 2667 if (vd->vdev_ops->vdev_op_leaf) { 2668 switch (zio->io_type) { 2669 case ZIO_TYPE_READ: 2670 if (vdev_cache_read(zio)) 2671 return (ZIO_PIPELINE_CONTINUE); 2672 /* FALLTHROUGH */ 2673 case ZIO_TYPE_WRITE: 2674 case ZIO_TYPE_FREE: 2675 if ((zio = vdev_queue_io(zio)) == NULL) 2676 return (ZIO_PIPELINE_STOP); 2677 2678 if (!vdev_accessible(vd, zio)) { 2679 zio->io_error = SET_ERROR(ENXIO); 2680 zio_interrupt(zio); 2681 return (ZIO_PIPELINE_STOP); 2682 } 2683 break; 2684 } 2685 /* 2686 * Note that we ignore repair writes for TRIM because they can 2687 * conflict with normal writes. This isn't an issue because, by 2688 * definition, we only repair blocks that aren't freed. 2689 */ 2690 if (zio->io_type == ZIO_TYPE_WRITE && 2691 !(zio->io_flags & ZIO_FLAG_IO_REPAIR) && 2692 !trim_map_write_start(zio)) 2693 return (ZIO_PIPELINE_STOP); 2694 } 2695 2696 ret = vd->vdev_ops->vdev_op_io_start(zio); 2697 ASSERT(ret == ZIO_PIPELINE_STOP); 2698 2699 return (ret); 2700} 2701 2702static int 2703zio_vdev_io_done(zio_t *zio) 2704{ 2705 vdev_t *vd = zio->io_vd; 2706 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 2707 boolean_t unexpected_error = B_FALSE; 2708 2709 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2710 return (ZIO_PIPELINE_STOP); 2711 2712 ASSERT(zio->io_type == ZIO_TYPE_READ || 2713 zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_FREE); 2714 2715 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 2716 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE || 2717 zio->io_type == ZIO_TYPE_FREE)) { 2718 2719 if (zio->io_type == ZIO_TYPE_WRITE && 2720 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) 2721 trim_map_write_done(zio); 2722 2723 vdev_queue_io_done(zio); 2724 2725 if (zio->io_type == ZIO_TYPE_WRITE) 2726 vdev_cache_write(zio); 2727 2728 if (zio_injection_enabled && zio->io_error == 0) 2729 zio->io_error = zio_handle_device_injection(vd, 2730 zio, EIO); 2731 2732 if (zio_injection_enabled && zio->io_error == 0) 2733 zio->io_error = zio_handle_label_injection(zio, EIO); 2734 2735 if (zio->io_error) { 2736 if (zio->io_error == ENOTSUP && 2737 zio->io_type == ZIO_TYPE_FREE) { 2738 /* Not all devices support TRIM. 
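 * The error is left in place but is not treated as an
 * unexpected fault; zio_vdev_io_assess() accounts for it
 * under the "unsupported" TRIM kstat.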
*/ 2739 } else if (!vdev_accessible(vd, zio)) { 2740 zio->io_error = SET_ERROR(ENXIO); 2741 } else { 2742 unexpected_error = B_TRUE; 2743 } 2744 } 2745 } 2746 2747 ops->vdev_op_io_done(zio); 2748 2749 if (unexpected_error) 2750 VERIFY(vdev_probe(vd, zio) == NULL); 2751 2752 return (ZIO_PIPELINE_CONTINUE); 2753} 2754 2755/* 2756 * For non-raidz ZIOs, we can just copy aside the bad data read from the 2757 * disk, and use that to finish the checksum ereport later. 2758 */ 2759static void 2760zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 2761 const void *good_buf) 2762{ 2763 /* no processing needed */ 2764 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 2765} 2766 2767/*ARGSUSED*/ 2768void 2769zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 2770{ 2771 void *buf = zio_buf_alloc(zio->io_size); 2772 2773 bcopy(zio->io_data, buf, zio->io_size); 2774 2775 zcr->zcr_cbinfo = zio->io_size; 2776 zcr->zcr_cbdata = buf; 2777 zcr->zcr_finish = zio_vsd_default_cksum_finish; 2778 zcr->zcr_free = zio_buf_free; 2779} 2780 2781static int 2782zio_vdev_io_assess(zio_t *zio) 2783{ 2784 vdev_t *vd = zio->io_vd; 2785 2786 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2787 return (ZIO_PIPELINE_STOP); 2788 2789 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2790 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 2791 2792 if (zio->io_vsd != NULL) { 2793 zio->io_vsd_ops->vsd_free(zio); 2794 zio->io_vsd = NULL; 2795 } 2796 2797 if (zio_injection_enabled && zio->io_error == 0) 2798 zio->io_error = zio_handle_fault_injection(zio, EIO); 2799 2800 if (zio->io_type == ZIO_TYPE_FREE && 2801 zio->io_priority != ZIO_PRIORITY_NOW) { 2802 switch (zio->io_error) { 2803 case 0: 2804 ZIO_TRIM_STAT_INCR(bytes, zio->io_size); 2805 ZIO_TRIM_STAT_BUMP(success); 2806 break; 2807 case EOPNOTSUPP: 2808 ZIO_TRIM_STAT_BUMP(unsupported); 2809 break; 2810 default: 2811 ZIO_TRIM_STAT_BUMP(failed); 2812 break; 2813 } 2814 } 2815 2816 /* 2817 * If the I/O failed, determine whether we should attempt to retry it. 2818 * 2819 * On retry, we cut in line in the issue queue, since we don't want 2820 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 2821 */ 2822 if (zio->io_error && vd == NULL && 2823 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 2824 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 2825 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 2826 zio->io_error = 0; 2827 zio->io_flags |= ZIO_FLAG_IO_RETRY | 2828 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 2829 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 2830 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 2831 zio_requeue_io_start_cut_in_line); 2832 return (ZIO_PIPELINE_STOP); 2833 } 2834 2835 /* 2836 * If we got an error on a leaf device, convert it to ENXIO 2837 * if the device is not accessible at all. 2838 */ 2839 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 2840 !vdev_accessible(vd, zio)) 2841 zio->io_error = SET_ERROR(ENXIO); 2842 2843 /* 2844 * If we can't write to an interior vdev (mirror or RAID-Z), 2845 * set vdev_cant_write so that we stop trying to allocate from it. 
2846 */ 2847 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 2848 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 2849 vd->vdev_cant_write = B_TRUE; 2850 } 2851 2852 if (zio->io_error) 2853 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2854 2855 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 2856 zio->io_physdone != NULL) { 2857 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 2858 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 2859 zio->io_physdone(zio->io_logical); 2860 } 2861 2862 return (ZIO_PIPELINE_CONTINUE); 2863} 2864 2865void 2866zio_vdev_io_reissue(zio_t *zio) 2867{ 2868 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 2869 ASSERT(zio->io_error == 0); 2870 2871 zio->io_stage >>= 1; 2872} 2873 2874void 2875zio_vdev_io_redone(zio_t *zio) 2876{ 2877 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 2878 2879 zio->io_stage >>= 1; 2880} 2881 2882void 2883zio_vdev_io_bypass(zio_t *zio) 2884{ 2885 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 2886 ASSERT(zio->io_error == 0); 2887 2888 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 2889 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 2890} 2891 2892/* 2893 * ========================================================================== 2894 * Generate and verify checksums 2895 * ========================================================================== 2896 */ 2897static int 2898zio_checksum_generate(zio_t *zio) 2899{ 2900 blkptr_t *bp = zio->io_bp; 2901 enum zio_checksum checksum; 2902 2903 if (bp == NULL) { 2904 /* 2905 * This is zio_write_phys(). 2906 * We're either generating a label checksum, or none at all. 2907 */ 2908 checksum = zio->io_prop.zp_checksum; 2909 2910 if (checksum == ZIO_CHECKSUM_OFF) 2911 return (ZIO_PIPELINE_CONTINUE); 2912 2913 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 2914 } else { 2915 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 2916 ASSERT(!IO_IS_ALLOCATING(zio)); 2917 checksum = ZIO_CHECKSUM_GANG_HEADER; 2918 } else { 2919 checksum = BP_GET_CHECKSUM(bp); 2920 } 2921 } 2922 2923 zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); 2924 2925 return (ZIO_PIPELINE_CONTINUE); 2926} 2927 2928static int 2929zio_checksum_verify(zio_t *zio) 2930{ 2931 zio_bad_cksum_t info; 2932 blkptr_t *bp = zio->io_bp; 2933 int error; 2934 2935 ASSERT(zio->io_vd != NULL); 2936 2937 if (bp == NULL) { 2938 /* 2939 * This is zio_read_phys(). 2940 * We're either verifying a label checksum, or nothing at all. 2941 */ 2942 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 2943 return (ZIO_PIPELINE_CONTINUE); 2944 2945 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 2946 } 2947 2948 if ((error = zio_checksum_error(zio, &info)) != 0) { 2949 zio->io_error = error; 2950 if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2951 zfs_ereport_start_checksum(zio->io_spa, 2952 zio->io_vd, zio, zio->io_offset, 2953 zio->io_size, NULL, &info); 2954 } 2955 } 2956 2957 return (ZIO_PIPELINE_CONTINUE); 2958} 2959 2960/* 2961 * Called by RAID-Z to ensure we don't compute the checksum twice. 2962 */ 2963void 2964zio_checksum_verified(zio_t *zio) 2965{ 2966 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 2967} 2968 2969/* 2970 * ========================================================================== 2971 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 2972 * An error of 0 indicates success. ENXIO indicates whole-device failure, 2973 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 2974 * indicate errors that are specific to one I/O, and most likely permanent.
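 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, because a
 * likely-permanent per-I/O error outranks a possibly-transient
 * whole-device error.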
2975 * Any other error is presumed to be worse because we weren't expecting it. 2976 * ========================================================================== 2977 */ 2978int 2979zio_worst_error(int e1, int e2) 2980{ 2981 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 2982 int r1, r2; 2983 2984 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 2985 if (e1 == zio_error_rank[r1]) 2986 break; 2987 2988 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 2989 if (e2 == zio_error_rank[r2]) 2990 break; 2991 2992 return (r1 > r2 ? e1 : e2); 2993} 2994 2995/* 2996 * ========================================================================== 2997 * I/O completion 2998 * ========================================================================== 2999 */ 3000static int 3001zio_ready(zio_t *zio) 3002{ 3003 blkptr_t *bp = zio->io_bp; 3004 zio_t *pio, *pio_next; 3005 3006 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 3007 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) 3008 return (ZIO_PIPELINE_STOP); 3009 3010 if (zio->io_ready) { 3011 ASSERT(IO_IS_ALLOCATING(zio)); 3012 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 3013 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3014 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3015 3016 zio->io_ready(zio); 3017 } 3018 3019 if (bp != NULL && bp != &zio->io_bp_copy) 3020 zio->io_bp_copy = *bp; 3021 3022 if (zio->io_error) 3023 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3024 3025 mutex_enter(&zio->io_lock); 3026 zio->io_state[ZIO_WAIT_READY] = 1; 3027 pio = zio_walk_parents(zio); 3028 mutex_exit(&zio->io_lock); 3029 3030 /* 3031 * As we notify zio's parents, new parents could be added. 3032 * New parents go to the head of zio's io_parent_list, however, 3033 * so we will (correctly) not notify them. The remainder of zio's 3034 * io_parent_list, from 'pio_next' onward, cannot change because 3035 * all parents must wait for us to be done before they can be done. 3036 */ 3037 for (; pio != NULL; pio = pio_next) { 3038 pio_next = zio_walk_parents(zio); 3039 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 3040 } 3041 3042 if (zio->io_flags & ZIO_FLAG_NODATA) { 3043 if (BP_IS_GANG(bp)) { 3044 zio->io_flags &= ~ZIO_FLAG_NODATA; 3045 } else { 3046 ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE); 3047 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 3048 } 3049 } 3050 3051 if (zio_injection_enabled && 3052 zio->io_spa->spa_syncing_txg == zio->io_txg) 3053 zio_handle_ignored_writes(zio); 3054 3055 return (ZIO_PIPELINE_CONTINUE); 3056} 3057 3058static int 3059zio_done(zio_t *zio) 3060{ 3061 spa_t *spa = zio->io_spa; 3062 zio_t *lio = zio->io_logical; 3063 blkptr_t *bp = zio->io_bp; 3064 vdev_t *vd = zio->io_vd; 3065 uint64_t psize = zio->io_size; 3066 zio_t *pio, *pio_next; 3067 3068 /* 3069 * If our children haven't all completed, 3070 * wait for them and then repeat this pipeline stage. 
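 * Each zio_wait_for_children() call returns B_TRUE while children of
 * the given type are still outstanding; the last such child to
 * complete re-dispatches this stage.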
3071 */ 3072 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 3073 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 3074 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || 3075 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 3076 return (ZIO_PIPELINE_STOP); 3077 3078 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 3079 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 3080 ASSERT(zio->io_children[c][w] == 0); 3081 3082 if (bp != NULL && !BP_IS_EMBEDDED(bp)) { 3083 ASSERT(bp->blk_pad[0] == 0); 3084 ASSERT(bp->blk_pad[1] == 0); 3085 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 3086 (bp == zio_unique_parent(zio)->io_bp)); 3087 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 3088 zio->io_bp_override == NULL && 3089 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 3090 ASSERT(!BP_SHOULD_BYTESWAP(bp)); 3091 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 3092 ASSERT(BP_COUNT_GANG(bp) == 0 || 3093 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 3094 } 3095 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 3096 VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 3097 } 3098 3099 /* 3100 * If there were child vdev/gang/ddt errors, they apply to us now. 3101 */ 3102 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 3103 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 3104 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 3105 3106 /* 3107 * If the I/O on the transformed data was successful, generate any 3108 * checksum reports now while we still have the transformed data. 3109 */ 3110 if (zio->io_error == 0) { 3111 while (zio->io_cksum_report != NULL) { 3112 zio_cksum_report_t *zcr = zio->io_cksum_report; 3113 uint64_t align = zcr->zcr_align; 3114 uint64_t asize = P2ROUNDUP(psize, align); 3115 char *abuf = zio->io_data; 3116 3117 if (asize != psize) { 3118 abuf = zio_buf_alloc(asize); 3119 bcopy(zio->io_data, abuf, psize); 3120 bzero(abuf + psize, asize - psize); 3121 } 3122 3123 zio->io_cksum_report = zcr->zcr_next; 3124 zcr->zcr_next = NULL; 3125 zcr->zcr_finish(zcr, abuf); 3126 zfs_ereport_free_checksum(zcr); 3127 3128 if (asize != psize) 3129 zio_buf_free(abuf, asize); 3130 } 3131 } 3132 3133 zio_pop_transforms(zio); /* note: may set zio->io_error */ 3134 3135 vdev_stat_update(zio, psize); 3136 3137 if (zio->io_error) { 3138 /* 3139 * If this I/O is attached to a particular vdev, 3140 * generate an error message describing the I/O failure 3141 * at the block level. We ignore these errors if the 3142 * device is currently unavailable. 3143 */ 3144 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 3145 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 3146 3147 if ((zio->io_error == EIO || !(zio->io_flags & 3148 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 3149 zio == lio) { 3150 /* 3151 * For logical I/O requests, tell the SPA to log the 3152 * error and generate a logical data ereport. 3153 */ 3154 spa_log_error(spa, zio); 3155 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 3156 0, 0); 3157 } 3158 } 3159 3160 if (zio->io_error && zio == lio) { 3161 /* 3162 * Determine whether zio should be reexecuted. This will 3163 * propagate all the way to the root via zio_notify_parent(). 
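 * ZIO_REEXECUTE_NOW retries the I/O immediately, while
 * ZIO_REEXECUTE_SUSPEND parks it until the pool is resumed (see the
 * zio_suspend() call below).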
3164 */ 3165 ASSERT(vd == NULL && bp != NULL); 3166 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3167 3168 if (IO_IS_ALLOCATING(zio) && 3169 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 3170 if (zio->io_error != ENOSPC) 3171 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 3172 else 3173 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3174 } 3175 3176 if ((zio->io_type == ZIO_TYPE_READ || 3177 zio->io_type == ZIO_TYPE_FREE) && 3178 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 3179 zio->io_error == ENXIO && 3180 spa_load_state(spa) == SPA_LOAD_NONE && 3181 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 3182 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3183 3184 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 3185 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3186 3187 /* 3188 * Here is a possibly good place to attempt to do 3189 * either combinatorial reconstruction or error correction 3190 * based on checksums. It also might be a good place 3191 * to send out preliminary ereports before we suspend 3192 * processing. 3193 */ 3194 } 3195 3196 /* 3197 * If there were logical child errors, they apply to us now. 3198 * We defer this until now to avoid conflating logical child 3199 * errors with errors that happened to the zio itself when 3200 * updating vdev stats and reporting FMA events above. 3201 */ 3202 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 3203 3204 if ((zio->io_error || zio->io_reexecute) && 3205 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 3206 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 3207 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 3208 3209 zio_gang_tree_free(&zio->io_gang_tree); 3210 3211 /* 3212 * Godfather I/Os should never suspend. 3213 */ 3214 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 3215 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 3216 zio->io_reexecute = 0; 3217 3218 if (zio->io_reexecute) { 3219 /* 3220 * This is a logical I/O that wants to reexecute. 3221 * 3222 * Reexecute is top-down. When an i/o fails, if it's not 3223 * the root, it simply notifies its parent and sticks around. 3224 * The parent, seeing that it still has children in zio_done(), 3225 * does the same. This percolates all the way up to the root. 3226 * The root i/o will reexecute or suspend the entire tree. 3227 * 3228 * This approach ensures that zio_reexecute() honors 3229 * all the original i/o dependency relationships, e.g. 3230 * parents not executing until children are ready. 3231 */ 3232 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3233 3234 zio->io_gang_leader = NULL; 3235 3236 mutex_enter(&zio->io_lock); 3237 zio->io_state[ZIO_WAIT_DONE] = 1; 3238 mutex_exit(&zio->io_lock); 3239 3240 /* 3241 * "The Godfather" I/O monitors its children but is 3242 * not a true parent to them. It will track them through 3243 * the pipeline but severs its ties whenever they get into 3244 * trouble (e.g. suspended). This allows "The Godfather" 3245 * I/O to return status without blocking. 3246 */ 3247 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 3248 zio_link_t *zl = zio->io_walk_link; 3249 pio_next = zio_walk_parents(zio); 3250 3251 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 3252 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 3253 zio_remove_child(pio, zio, zl); 3254 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3255 } 3256 } 3257 3258 if ((pio = zio_unique_parent(zio)) != NULL) { 3259 /* 3260 * We're not a root i/o, so there's nothing to do 3261 * but notify our parent. 
Don't propagate errors 3262 * upward since we haven't permanently failed yet. 3263 */ 3264 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 3265 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 3266 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3267 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 3268 /* 3269 * We'd fail again if we reexecuted now, so suspend 3270 * until conditions improve (e.g. device comes online). 3271 */ 3272 zio_suspend(spa, zio); 3273 } else { 3274 /* 3275 * Reexecution is potentially a huge amount of work. 3276 * Hand it off to the otherwise-unused claim taskq. 3277 */ 3278#if defined(illumos) || !defined(_KERNEL) 3279 ASSERT(zio->io_tqent.tqent_next == NULL); 3280#else 3281 ASSERT(zio->io_tqent.tqent_task.ta_pending == 0); 3282#endif 3283 spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM, 3284 ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio, 3285 0, &zio->io_tqent); 3286 } 3287 return (ZIO_PIPELINE_STOP); 3288 } 3289 3290 ASSERT(zio->io_child_count == 0); 3291 ASSERT(zio->io_reexecute == 0); 3292 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 3293 3294 /* 3295 * Report any checksum errors, since the I/O is complete. 3296 */ 3297 while (zio->io_cksum_report != NULL) { 3298 zio_cksum_report_t *zcr = zio->io_cksum_report; 3299 zio->io_cksum_report = zcr->zcr_next; 3300 zcr->zcr_next = NULL; 3301 zcr->zcr_finish(zcr, NULL); 3302 zfs_ereport_free_checksum(zcr); 3303 } 3304 3305 /* 3306 * It is the responsibility of the done callback to ensure that this 3307 * particular zio is no longer discoverable for adoption, and as 3308 * such, cannot acquire any new parents. 3309 */ 3310 if (zio->io_done) 3311 zio->io_done(zio); 3312 3313 mutex_enter(&zio->io_lock); 3314 zio->io_state[ZIO_WAIT_DONE] = 1; 3315 mutex_exit(&zio->io_lock); 3316 3317 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 3318 zio_link_t *zl = zio->io_walk_link; 3319 pio_next = zio_walk_parents(zio); 3320 zio_remove_child(pio, zio, zl); 3321 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3322 } 3323 3324 if (zio->io_waiter != NULL) { 3325 mutex_enter(&zio->io_lock); 3326 zio->io_executor = NULL; 3327 cv_broadcast(&zio->io_cv); 3328 mutex_exit(&zio->io_lock); 3329 } else { 3330 zio_destroy(zio); 3331 } 3332 3333 return (ZIO_PIPELINE_STOP); 3334} 3335 3336/* 3337 * ========================================================================== 3338 * I/O pipeline definition 3339 * ========================================================================== 3340 */ 3341static zio_pipe_stage_t *zio_pipeline[] = { 3342 NULL, 3343 zio_read_bp_init, 3344 zio_free_bp_init, 3345 zio_issue_async, 3346 zio_write_bp_init, 3347 zio_checksum_generate, 3348 zio_nop_write, 3349 zio_ddt_read_start, 3350 zio_ddt_read_done, 3351 zio_ddt_write, 3352 zio_ddt_free, 3353 zio_gang_assemble, 3354 zio_gang_issue, 3355 zio_dva_allocate, 3356 zio_dva_free, 3357 zio_dva_claim, 3358 zio_ready, 3359 zio_vdev_io_start, 3360 zio_vdev_io_done, 3361 zio_vdev_io_assess, 3362 zio_checksum_verify, 3363 zio_done 3364}; 3365 3366/* dnp is the dnode for zb1->zb_object */ 3367boolean_t 3368zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_phys_t *zb1, 3369 const zbookmark_phys_t *zb2) 3370{ 3371 uint64_t zb1nextL0, zb2thisobj; 3372 3373 ASSERT(zb1->zb_objset == zb2->zb_objset); 3374 ASSERT(zb2->zb_level == 0); 3375 3376 /* The objset_phys_t isn't before anything. 
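 * (dnp is NULL when zb1 refers to the objset_phys_t itself rather
 * than to a block of some object.)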
*/ 3377 if (dnp == NULL) 3378 return (B_FALSE); 3379 3380 zb1nextL0 = (zb1->zb_blkid + 1) << 3381 ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)); 3382 3383 zb2thisobj = zb2->zb_object ? zb2->zb_object : 3384 zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT); 3385 3386 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 3387 uint64_t nextobj = zb1nextL0 * 3388 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT; 3389 return (nextobj <= zb2thisobj); 3390 } 3391 3392 if (zb1->zb_object < zb2thisobj) 3393 return (B_TRUE); 3394 if (zb1->zb_object > zb2thisobj) 3395 return (B_FALSE); 3396 if (zb2->zb_object == DMU_META_DNODE_OBJECT) 3397 return (B_FALSE); 3398 return (zb1nextL0 <= zb2->zb_blkid); 3399}
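
/*
 * Usage sketch (illustrative only): code that resumes an interrupted
 * traversal, such as a scrub, can use zbookmark_is_before() to decide
 * whether a block had already been visited.  The bookmark name below
 * is hypothetical:
 *
 *	if (zbookmark_is_before(dnp, zb, &saved_resume_bookmark))
 *		return;		(already visited before the interruption)
 */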