vdev_geom.c revision 185029
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * Virtual device vector for GEOM.
 */

struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

typedef struct vdev_geom_ctx {
	struct g_consumer *gc_consumer;
	int gc_state;
	struct bio_queue_head gc_queue;
	struct mtx gc_queue_mtx;
} vdev_geom_ctx_t;

static void
vdev_geom_release(vdev_t *vd)
{
	vdev_geom_ctx_t *ctx;

	ctx = vd->vdev_tsd;
	vd->vdev_tsd = NULL;

	mtx_lock(&ctx->gc_queue_mtx);
	ctx->gc_state = 1;
	wakeup_one(&ctx->gc_queue);
	while (ctx->gc_state != 2)
		msleep(&ctx->gc_state, &ctx->gc_queue_mtx, 0, "vgeom:w", 0);
	mtx_unlock(&ctx->gc_queue_mtx);
	mtx_destroy(&ctx->gc_queue_mtx);
	kmem_free(ctx, sizeof(*ctx));
}

static void
vdev_geom_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	vdev_t *vd;
	int error;

	g_topology_assert();

	vd = cp->private;
	gp = cp->geom;
	error = cp->provider->error;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	if (cp->acr + cp->acw + cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, error);
	}
	vdev_geom_release(vd);

	vd->vdev_remove_wanted = B_TRUE;
	spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}

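/*
 * Attach a consumer to the given provider.  The "zfs::vdev" geom and any
 * existing consumer for the provider are reused when possible; otherwise
 * new ones are created.  Returns a consumer opened for reading (and
 * writing, when requested) with exclusive access, or NULL on failure.
 */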
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, int write)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);
	/* Do we have geom already? No? Create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		cp = g_new_consumer(gp);
		if (g_attach(cp, pp) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		if (g_access(cp, 1, write, 1) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			if (g_attach(cp, pp) != 0) {
				g_destroy_consumer(cp);
				return (NULL);
			}
			if (g_access(cp, 1, write, 1) != 0) {
				g_detach(cp);
				g_destroy_consumer(cp);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			if (g_access(cp, 1, cp->acw > 0 ? 0 : write, 1) != 0)
				return (NULL);
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}
	return (cp);
}

static void
vdev_geom_detach(void *arg, int flag __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	gp = cp->geom;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}

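/*
 * Per-vdev worker thread.  It takes completed bios off the context queue
 * (filled in by the GEOM completion callback), propagates their status to
 * the corresponding ZIOs and exits when vdev_geom_release() requests a
 * shutdown via the gc_state handshake.
 */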
static void
vdev_geom_worker(void *arg)
{
	vdev_geom_ctx_t *ctx;
	zio_t *zio;
	struct bio *bp;

	ctx = arg;
	for (;;) {
		mtx_lock(&ctx->gc_queue_mtx);
		bp = bioq_takefirst(&ctx->gc_queue);
		if (bp == NULL) {
			if (ctx->gc_state == 1) {
				ctx->gc_state = 2;
				wakeup_one(&ctx->gc_state);
				mtx_unlock(&ctx->gc_queue_mtx);
				kproc_exit(0);
			}
			msleep(&ctx->gc_queue, &ctx->gc_queue_mtx,
			    PRIBIO | PDROP, "vgeom:io", 0);
			continue;
		}
		mtx_unlock(&ctx->gc_queue_mtx);
		zio = bp->bio_caller1;
		zio->io_error = bp->bio_error;
		if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == ENOTSUP) {
			vdev_t *vd;

			/*
			 * If we get ENOTSUP, we know that no future
			 * attempts will ever succeed.  In this case we
			 * set a persistent bit so that we don't bother
			 * with the ioctl in the future.
			 */
			vd = zio->io_vd;
			vd->vdev_nowritecache = B_TRUE;
		}
		g_destroy_bio(bp);
		zio_interrupt(zio);
	}
}

static char *
vdev_geom_get_id(struct g_consumer *cp)
{
	char *id;
	int len;

	g_topology_assert_not();
	len = DISK_IDENT_SIZE;
	id = kmem_zalloc(len, KM_SLEEP);
	if (g_io_getattr("GEOM::ident", cp, &len, id) != 0) {
		kmem_free(id, DISK_IDENT_SIZE);
		return (NULL);
	}
	return (id);
}

static uint64_t
nvlist_get_guid(nvlist_t *list)
{
	nvpair_t *elem = NULL;
	uint64_t value;

	while ((elem = nvlist_next_nvpair(list, elem)) != NULL) {
		if (nvpair_type(elem) == DATA_TYPE_UINT64 &&
		    strcmp(nvpair_name(elem), "guid") == 0) {
			VERIFY(nvpair_value_uint64(elem, &value) == 0);
			return (value);
		}
	}
	return (0);
}

static char *
nvlist_get_devid(nvlist_t *list, uint64_t guid)
{
	nvpair_t *elem = NULL;
	int progress;
	char *id;

	progress = 0;
	id = NULL;

	while ((elem = nvlist_next_nvpair(list, elem)) != NULL) {
		switch (nvpair_type(elem)) {
		case DATA_TYPE_STRING: {
			char *value;

			VERIFY(nvpair_value_string(elem, &value) == 0);
			if (strcmp(nvpair_name(elem), "type") == 0 &&
			    strcmp(value, "disk") == 0) {
				progress |= 0x01;
			} else if (strcmp(nvpair_name(elem), "devid") == 0) {
				progress |= 0x02;
				id = value;
			}
			break;
		}
		case DATA_TYPE_UINT64: {
			uint64_t value;

			VERIFY(nvpair_value_uint64(elem, &value) == 0);
			if (strcmp(nvpair_name(elem), "guid") == 0 &&
			    value == guid) {
				progress |= 0x04;
			}
			break;
		}
		case DATA_TYPE_NVLIST: {
			nvlist_t *value;
			char *lid;

			VERIFY(nvpair_value_nvlist(elem, &value) == 0);
			lid = nvlist_get_devid(value, guid);
			if (lid != NULL)
				return (lid);
			break;
		}
		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **value;
			u_int c, count;
			char *lid;

			VERIFY(nvpair_value_nvlist_array(elem, &value,
			    &count) == 0);

			for (c = 0; c < count; c++) {
				lid = nvlist_get_devid(value[c], guid);
				if (lid != NULL)
					return (lid);
			}
			break;
		}
		}
		if (progress == 0x07)
			break;
	}
	if (progress != 0x07)
		id = NULL;
	return (id);
}

static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset,
    off_t size)
{
	struct bio *bp;
	u_char *p;
	off_t off;
	int error;

	ASSERT((offset % cp->provider->sectorsize) == 0);
	ASSERT((size % cp->provider->sectorsize) == 0);

	bp = g_alloc_bio();
	off = offset;
	offset += size;
	p = data;
	error = 0;

	for (; off < offset; off += MAXPHYS, p += MAXPHYS, size -= MAXPHYS) {
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = cmd;
		bp->bio_done = NULL;
		bp->bio_offset = off;
		bp->bio_length = MIN(size, MAXPHYS);
		bp->bio_data = p;
		g_io_request(bp, cp);
		error = biowait(bp, "vdev_geom_io");
		if (error != 0)
			break;
	}

	g_destroy_bio(bp);
	return (error);
}

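/*
 * Read the vdev labels directly from the provider and extract the devid
 * from the label nvlist.  The returned string is a DISK_IDENT_SIZE buffer
 * that the caller must release with vdev_geom_free_id().
 */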
static char *
vdev_geom_read_id(struct g_consumer *cp)
{
	struct g_provider *pp;
	vdev_label_t *label;
	char *p, *buf;
	size_t buflen;
	uint64_t psize;
	off_t offset, size;
	char *id;
	int error, l, len;

	g_topology_assert_not();

	pp = cp->provider;

	psize = pp->mediasize;
	psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

	size = sizeof(*label) + pp->sectorsize -
	    ((sizeof(*label) - 1) % pp->sectorsize) - 1;

	id = NULL;
	label = kmem_alloc(size, KM_SLEEP);
	buflen = sizeof(label->vl_vdev_phys.vp_nvlist);

	for (l = 0; l < VDEV_LABELS && id == NULL; l++) {
		nvlist_t *config = NULL;
		uint64_t guid;

		offset = vdev_label_offset(psize, l, 0);
		if ((offset % pp->sectorsize) != 0)
			continue;

		error = vdev_geom_io(cp, BIO_READ, label, offset, size);
		if (error != 0)
			continue;
		buf = label->vl_vdev_phys.vp_nvlist;

		if (nvlist_unpack(buf, buflen, &config, 0) != 0)
			continue;

		guid = nvlist_get_guid(config);
		if (guid == 0) {
			nvlist_free(config);
			continue;
		}
		id = nvlist_get_devid(config, guid);
		if (id != NULL) {
			char *tmp;

			tmp = kmem_zalloc(DISK_IDENT_SIZE, KM_SLEEP);
			strlcpy(tmp, id, DISK_IDENT_SIZE);
			id = tmp;
		}

		nvlist_free(config);
	}

	kmem_free(label, size);
	if (id != NULL)
		ZFS_LOG(1, "ID of %s: %s", pp->name, id);
	return (id);
}

static void
vdev_geom_free_id(char *id)
{

	if (id != NULL)
		kmem_free(id, DISK_IDENT_SIZE);
}

struct vdev_geom_find {
	const char *id;
	int write;
	struct g_consumer *cp;
};

static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

static void
vdev_geom_attach_by_id_event(void *arg, int flags __unused)
{
	struct vdev_geom_find *ap;
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	char *id;

	g_topology_assert();

	ap = arg;

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should never be called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	/* First round tries to get the provider's ID without reading metadata. */
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				g_attach(zcp, pp);
				if (g_access(zcp, 1, 0, 0) != 0) {
					g_detach(zcp);
					continue;
				}
				g_topology_unlock();
				id = vdev_geom_get_id(zcp);
				g_topology_lock();
				g_access(zcp, -1, 0, 0);
				g_detach(zcp);
				if (id == NULL || strcmp(id, ap->id) != 0) {
					vdev_geom_free_id(id);
					continue;
				}
				vdev_geom_free_id(id);
				ap->cp = vdev_geom_attach(pp, ap->write);
				if (ap->cp == NULL) {
					printf("ZFS WARNING: Cannot open %s "
					    "for writing.\n", pp->name);
					continue;
				}
				goto end;
			}
		}
	}
	/* Second round looks for the ID by reading ZFS metadata. */
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				g_attach(zcp, pp);
				if (g_access(zcp, 1, 0, 0) != 0) {
					g_detach(zcp);
					continue;
				}
				g_topology_unlock();
				id = vdev_geom_read_id(zcp);
				g_topology_lock();
				g_access(zcp, -1, 0, 0);
				g_detach(zcp);
				if (id == NULL || strcmp(id, ap->id) != 0) {
					vdev_geom_free_id(id);
					continue;
				}
				vdev_geom_free_id(id);
				ap->cp = vdev_geom_attach(pp, ap->write);
				if (ap->cp == NULL) {
					printf("ZFS WARNING: Cannot open %s "
					    "for writing.\n", pp->name);
					continue;
				}
				goto end;
			}
		}
	}
	ap->cp = NULL;
end:
	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
}

static struct g_consumer *
vdev_geom_attach_by_id(const char *id, int write)
{
	struct vdev_geom_find *ap;
	struct g_consumer *cp;

	ap = kmem_zalloc(sizeof(*ap), KM_SLEEP);
	ap->id = id;
	ap->write = write;
	g_waitfor_event(vdev_geom_attach_by_id_event, ap, M_WAITOK, NULL);
	cp = ap->cp;
	kmem_free(ap, sizeof(*ap));
	return (cp);
}

static struct g_consumer *
vdev_geom_open_by_path_and_devid(vdev_t *vd)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	char *id;

	cp = NULL;
	g_topology_lock();
	pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		cp = vdev_geom_attach(pp, !!(spa_mode & FWRITE));
		if (cp != NULL && vd->vdev_devid != NULL) {
			g_topology_unlock();
			id = vdev_geom_get_id(cp);
			g_topology_lock();
			if (id == NULL || strcmp(id, vd->vdev_devid) != 0) {
				vdev_geom_detach(cp, 0);
				cp = NULL;
				ZFS_LOG(1, "ID mismatch for provider %s: "
				    "[%s]!=[%s].", vd->vdev_path,
				    vd->vdev_devid, id);
			} else
				ZFS_LOG(1, "ID match for provider %s.",
				    vd->vdev_path);
			vdev_geom_free_id(id);
		}
	}
	g_topology_unlock();

	return (cp);
}

static struct g_consumer *
vdev_geom_open_by_devid(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	/*
	 * We can't search by devid if it's missing.
	 */
	if (vd->vdev_devid == NULL)
		return (NULL);

	ZFS_LOG(1, "Searching by ID [%s].", vd->vdev_devid);
	cp = vdev_geom_attach_by_id(vd->vdev_devid, !!(spa_mode & FWRITE));
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by ID [%s] succeeded, provider %s.",
		    vd->vdev_devid, vd->vdev_path);
	} else
		ZFS_LOG(1, "Search by ID [%s] failed.", vd->vdev_devid);

	return (cp);
}

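/*
 * Open the vdev: try the stored path first (verifying the devid, if one is
 * known), then fall back to searching all providers for a matching devid.
 * On success, start the per-vdev worker thread and report the media size
 * and minimum transfer size (ashift) back to ZFS.
 */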
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	vdev_geom_ctx_t *ctx;
	struct g_provider *pp;
	struct g_consumer *cp;
	int owned;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	vd->vdev_tsd = NULL;

	if ((owned = mtx_owned(&Giant)))
		mtx_unlock(&Giant);
	cp = vdev_geom_open_by_path_and_devid(vd);
	if (cp == NULL) {
		/*
		 * The device at vd->vdev_path doesn't have the right devid.
		 * The disks might have merely moved around so try all other
		 * geom providers to find one with the right devid.
		 */
		cp = vdev_geom_open_by_devid(vd);
		if (cp == NULL) {
			ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
			vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
			if (owned)
				mtx_lock(&Giant);
			return (EACCES);
		}
	}
	if (owned)
		mtx_lock(&Giant);

	cp->private = vd;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP);
	bioq_init(&ctx->gc_queue);
	mtx_init(&ctx->gc_queue_mtx, "zfs:vdev:geom:queue", NULL, MTX_DEF);
	ctx->gc_consumer = cp;
	ctx->gc_state = 0;

	vd->vdev_tsd = ctx;
	pp = cp->provider;

	kproc_create(vdev_geom_worker, ctx, NULL, 0, 0, "vdev:worker %s",
	    pp->name);

	/*
	 * Determine the actual size of the device.
	 */
	*psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size.
	 */
	*ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	return (0);
}

static void
vdev_geom_close(vdev_t *vd)
{
	vdev_geom_ctx_t *ctx;
	struct g_consumer *cp;

	if ((ctx = vd->vdev_tsd) == NULL)
		return;
	if ((cp = ctx->gc_consumer) == NULL)
		return;
	vdev_geom_release(vd);
	g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
}

static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_geom_ctx_t *ctx;
	zio_t *zio;

	zio = bp->bio_caller1;
	ctx = zio->io_vd->vdev_tsd;

	if ((zio->io_error = bp->bio_error) == 0 && bp->bio_resid != 0)
		zio->io_error = EIO;

	mtx_lock(&ctx->gc_queue_mtx);
	bioq_insert_tail(&ctx->gc_queue, bp);
	wakeup_one(&ctx->gc_queue);
	mtx_unlock(&ctx->gc_queue_mtx);
}

static int
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	vdev_geom_ctx_t *ctx;
	struct g_consumer *cp;
	struct bio *bp;
	int error;

	cp = NULL;

	vd = zio->io_vd;
	ctx = vd->vdev_tsd;
	if (ctx != NULL)
		cp = ctx->gc_consumer;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {

		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (vd->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			goto sendreq;
		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}
sendreq:
	if (cp == NULL) {
		zio->io_error = ENXIO;
		return (ZIO_PIPELINE_CONTINUE);
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ?
		    BIO_READ : BIO_WRITE;
		bp->bio_data = zio->io_data;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_IOCTL:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	}
	bp->bio_done = vdev_geom_io_intr;

	g_io_request(bp, cp);

	return (ZIO_PIPELINE_STOP);
}

static void
vdev_geom_io_done(zio_t *zio)
{

	/*
	 * If the device returned ENXIO, we should verify whether the GEOM
	 * provider has been removed.  If it has, we trigger an asynchronous
	 * removal of the device.
	 */
	if (zio->io_error == ENXIO) {
		vdev_t *vd = zio->io_vd;
		vdev_geom_ctx_t *ctx;
		struct g_provider *pp = NULL;

		ctx = vd->vdev_tsd;
		if (ctx != NULL && ctx->gc_consumer != NULL)
			pp = ctx->gc_consumer->provider;

		if (pp == NULL || (pp->flags & G_PF_ORPHAN)) {
			vd->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

vdev_ops_t vdev_geom_ops = {
	vdev_geom_open,
	vdev_geom_close,
	vdev_default_asize,
	vdev_geom_io_start,
	vdev_geom_io_done,
	NULL,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};