zvol.c revision 324204
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * 24 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org> 25 * All rights reserved. 26 * 27 * Portions Copyright 2010 Robert Milkowski 28 * 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 30 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 31 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 32 * Copyright (c) 2014 Integros [integros.com] 33 */ 34 35/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */ 36 37/* 38 * ZFS volume emulation driver. 39 * 40 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes. 41 * Volumes are accessed through the symbolic links named: 42 * 43 * /dev/zvol/dsk/<pool_name>/<dataset_name> 44 * /dev/zvol/rdsk/<pool_name>/<dataset_name> 45 * 46 * These links are created by the /dev filesystem (sdev_zvolops.c). 47 * Volumes are persistent through reboot. No user command needs to be 48 * run before opening and using a device. 49 * 50 * FreeBSD notes. 51 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device 52 * in the system. 53 */ 54 55#include <sys/types.h> 56#include <sys/param.h> 57#include <sys/kernel.h> 58#include <sys/errno.h> 59#include <sys/uio.h> 60#include <sys/bio.h> 61#include <sys/buf.h> 62#include <sys/kmem.h> 63#include <sys/conf.h> 64#include <sys/cmn_err.h> 65#include <sys/stat.h> 66#include <sys/zap.h> 67#include <sys/spa.h> 68#include <sys/spa_impl.h> 69#include <sys/zio.h> 70#include <sys/disk.h> 71#include <sys/dmu_traverse.h> 72#include <sys/dnode.h> 73#include <sys/dsl_dataset.h> 74#include <sys/dsl_prop.h> 75#include <sys/dkio.h> 76#include <sys/byteorder.h> 77#include <sys/sunddi.h> 78#include <sys/dirent.h> 79#include <sys/policy.h> 80#include <sys/queue.h> 81#include <sys/fs/zfs.h> 82#include <sys/zfs_ioctl.h> 83#include <sys/zil.h> 84#include <sys/refcount.h> 85#include <sys/zfs_znode.h> 86#include <sys/zfs_rlock.h> 87#include <sys/vdev_impl.h> 88#include <sys/vdev_raidz.h> 89#include <sys/zvol.h> 90#include <sys/zil_impl.h> 91#include <sys/dbuf.h> 92#include <sys/dmu_tx.h> 93#include <sys/zfeature.h> 94#include <sys/zio_checksum.h> 95#include <sys/filio.h> 96 97#include <geom/geom.h> 98 99#include "zfs_namecheck.h" 100 101#ifndef illumos 102struct g_class zfs_zvol_class = { 103 .name = "ZFS::ZVOL", 104 .version = G_VERSION, 105}; 106 107DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol); 108 109#endif 110void *zfsdev_state; 111static char *zvol_tag = "zvol_tag"; 112 113#define ZVOL_DUMPSIZE "dumpsize" 114 115/* 116 * This lock protects the zfsdev_state structure from being modified 117 * while it's being used, e.g. 
an open that comes in before a create 118 * finishes. It also protects temporary opens of the dataset so that, 119 * e.g., an open doesn't get a spurious EBUSY. 120 */ 121#ifdef illumos 122kmutex_t zfsdev_state_lock; 123#else 124/* 125 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the 126 * spa_namespace_lock in the ZVOL code. 127 */ 128#define zfsdev_state_lock spa_namespace_lock 129#endif 130static uint32_t zvol_minors; 131 132#ifndef illumos 133SYSCTL_DECL(_vfs_zfs); 134SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME"); 135static int volmode = ZFS_VOLMODE_GEOM; 136TUNABLE_INT("vfs.zfs.vol.mode", &volmode); 137SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0, 138 "Expose as GEOM providers (1), device files (2) or neither"); 139 140#endif 141typedef struct zvol_extent { 142 list_node_t ze_node; 143 dva_t ze_dva; /* dva associated with this extent */ 144 uint64_t ze_nblks; /* number of blocks in extent */ 145} zvol_extent_t; 146 147/* 148 * The in-core state of each volume. 149 */ 150typedef struct zvol_state { 151#ifndef illumos 152 LIST_ENTRY(zvol_state) zv_links; 153#endif 154 char zv_name[MAXPATHLEN]; /* pool/dd name */ 155 uint64_t zv_volsize; /* amount of space we advertise */ 156 uint64_t zv_volblocksize; /* volume block size */ 157#ifdef illumos 158 minor_t zv_minor; /* minor number */ 159#else 160 struct cdev *zv_dev; /* non-GEOM device */ 161 struct g_provider *zv_provider; /* GEOM provider */ 162#endif 163 uint8_t zv_min_bs; /* minimum addressable block shift */ 164 uint8_t zv_flags; /* readonly, dumpified, etc. */ 165 objset_t *zv_objset; /* objset handle */ 166#ifdef illumos 167 uint32_t zv_open_count[OTYPCNT]; /* open counts */ 168#endif 169 uint32_t zv_total_opens; /* total open count */ 170 uint32_t zv_sync_cnt; /* synchronous open count */ 171 zilog_t *zv_zilog; /* ZIL handle */ 172 list_t zv_extents; /* List of extents for dump */ 173 znode_t zv_znode; /* for range locking */ 174 dmu_buf_t *zv_dbuf; /* bonus handle */ 175#ifndef illumos 176 int zv_state; 177 int zv_volmode; /* Provide GEOM or cdev */ 178 struct bio_queue_head zv_queue; 179 struct mtx zv_queue_mtx; /* zv_queue mutex */ 180#endif 181} zvol_state_t; 182 183#ifndef illumos 184static LIST_HEAD(, zvol_state) all_zvols; 185#endif 186/* 187 * zvol specific flags 188 */ 189#define ZVOL_RDONLY 0x1 190#define ZVOL_DUMPIFIED 0x2 191#define ZVOL_EXCL 0x4 192#define ZVOL_WCE 0x8 193 194/* 195 * zvol maximum transfer in one DMU tx. 196 */ 197int zvol_maxphys = DMU_MAX_ACCESS/2; 198 199/* 200 * Toggle unmap functionality. 
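 * On FreeBSD this is also exposed as the vfs.zfs.vol.unmap_enabled sysctl
 * and loader tunable declared just below.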
201 */ 202boolean_t zvol_unmap_enabled = B_TRUE; 203#ifndef illumos 204SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN, 205 &zvol_unmap_enabled, 0, 206 "Enable UNMAP functionality"); 207 208static d_open_t zvol_d_open; 209static d_close_t zvol_d_close; 210static d_read_t zvol_read; 211static d_write_t zvol_write; 212static d_ioctl_t zvol_d_ioctl; 213static d_strategy_t zvol_strategy; 214 215static struct cdevsw zvol_cdevsw = { 216 .d_version = D_VERSION, 217 .d_open = zvol_d_open, 218 .d_close = zvol_d_close, 219 .d_read = zvol_read, 220 .d_write = zvol_write, 221 .d_ioctl = zvol_d_ioctl, 222 .d_strategy = zvol_strategy, 223 .d_name = "zvol", 224 .d_flags = D_DISK | D_TRACKCLOSE, 225}; 226 227static void zvol_geom_run(zvol_state_t *zv); 228static void zvol_geom_destroy(zvol_state_t *zv); 229static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace); 230static void zvol_geom_start(struct bio *bp); 231static void zvol_geom_worker(void *arg); 232static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, 233 uint64_t len, boolean_t sync); 234#endif /* !illumos */ 235 236extern int zfs_set_prop_nvlist(const char *, zprop_source_t, 237 nvlist_t *, nvlist_t *); 238static int zvol_remove_zv(zvol_state_t *); 239static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio); 240static int zvol_dumpify(zvol_state_t *zv); 241static int zvol_dump_fini(zvol_state_t *zv); 242static int zvol_dump_init(zvol_state_t *zv, boolean_t resize); 243 244static void 245zvol_size_changed(zvol_state_t *zv, uint64_t volsize) 246{ 247#ifdef illumos 248 dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor); 249 250 zv->zv_volsize = volsize; 251 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 252 "Size", volsize) == DDI_SUCCESS); 253 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 254 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS); 255 256 /* Notify specfs to invalidate the cached size */ 257 spec_size_invalidate(dev, VBLK); 258 spec_size_invalidate(dev, VCHR); 259#else /* !illumos */ 260 zv->zv_volsize = volsize; 261 if (zv->zv_volmode == ZFS_VOLMODE_GEOM) { 262 struct g_provider *pp; 263 264 pp = zv->zv_provider; 265 if (pp == NULL) 266 return; 267 g_topology_lock(); 268 g_resize_provider(pp, zv->zv_volsize); 269 g_topology_unlock(); 270 } 271#endif /* illumos */ 272} 273 274int 275zvol_check_volsize(uint64_t volsize, uint64_t blocksize) 276{ 277 if (volsize == 0) 278 return (SET_ERROR(EINVAL)); 279 280 if (volsize % blocksize != 0) 281 return (SET_ERROR(EINVAL)); 282 283#ifdef _ILP32 284 if (volsize - 1 > SPEC_MAXOFFSET_T) 285 return (SET_ERROR(EOVERFLOW)); 286#endif 287 return (0); 288} 289 290int 291zvol_check_volblocksize(uint64_t volblocksize) 292{ 293 if (volblocksize < SPA_MINBLOCKSIZE || 294 volblocksize > SPA_OLD_MAXBLOCKSIZE || 295 !ISP2(volblocksize)) 296 return (SET_ERROR(EDOM)); 297 298 return (0); 299} 300 301int 302zvol_get_stats(objset_t *os, nvlist_t *nv) 303{ 304 int error; 305 dmu_object_info_t doi; 306 uint64_t val; 307 308 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val); 309 if (error) 310 return (error); 311 312 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val); 313 314 error = dmu_object_info(os, ZVOL_OBJ, &doi); 315 316 if (error == 0) { 317 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE, 318 doi.doi_data_block_size); 319 } 320 321 return (error); 322} 323 324static zvol_state_t * 325zvol_minor_lookup(const char *name) 326{ 327#ifdef illumos 328 minor_t minor; 329#endif 330 zvol_state_t *zv; 331 332 
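	/*
	 * Look the zvol up by dataset name.  The caller must hold
	 * zfsdev_state_lock, which on FreeBSD is #defined to the
	 * spa_namespace_lock (see above).
	 */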
ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 333 334#ifdef illumos 335 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) { 336 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL); 337 if (zv == NULL) 338 continue; 339#else 340 LIST_FOREACH(zv, &all_zvols, zv_links) { 341#endif 342 if (strcmp(zv->zv_name, name) == 0) 343 return (zv); 344 } 345 346 return (NULL); 347} 348 349/* extent mapping arg */ 350struct maparg { 351 zvol_state_t *ma_zv; 352 uint64_t ma_blks; 353}; 354 355/*ARGSUSED*/ 356static int 357zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 358 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 359{ 360 struct maparg *ma = arg; 361 zvol_extent_t *ze; 362 int bs = ma->ma_zv->zv_volblocksize; 363 364 if (bp == NULL || BP_IS_HOLE(bp) || 365 zb->zb_object != ZVOL_OBJ || zb->zb_level != 0) 366 return (0); 367 368 VERIFY(!BP_IS_EMBEDDED(bp)); 369 370 VERIFY3U(ma->ma_blks, ==, zb->zb_blkid); 371 ma->ma_blks++; 372 373 /* Abort immediately if we have encountered gang blocks */ 374 if (BP_IS_GANG(bp)) 375 return (SET_ERROR(EFRAGS)); 376 377 /* 378 * See if the block is at the end of the previous extent. 379 */ 380 ze = list_tail(&ma->ma_zv->zv_extents); 381 if (ze && 382 DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) && 383 DVA_GET_OFFSET(BP_IDENTITY(bp)) == 384 DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) { 385 ze->ze_nblks++; 386 return (0); 387 } 388 389 dprintf_bp(bp, "%s", "next blkptr:"); 390 391 /* start a new extent */ 392 ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP); 393 ze->ze_dva = bp->blk_dva[0]; /* structure assignment */ 394 ze->ze_nblks = 1; 395 list_insert_tail(&ma->ma_zv->zv_extents, ze); 396 return (0); 397} 398 399static void 400zvol_free_extents(zvol_state_t *zv) 401{ 402 zvol_extent_t *ze; 403 404 while (ze = list_head(&zv->zv_extents)) { 405 list_remove(&zv->zv_extents, ze); 406 kmem_free(ze, sizeof (zvol_extent_t)); 407 } 408} 409 410static int 411zvol_get_lbas(zvol_state_t *zv) 412{ 413 objset_t *os = zv->zv_objset; 414 struct maparg ma; 415 int err; 416 417 ma.ma_zv = zv; 418 ma.ma_blks = 0; 419 zvol_free_extents(zv); 420 421 /* commit any in-flight changes before traversing the dataset */ 422 txg_wait_synced(dmu_objset_pool(os), 0); 423 err = traverse_dataset(dmu_objset_ds(os), 0, 424 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma); 425 if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) { 426 zvol_free_extents(zv); 427 return (err ? err : EIO); 428 } 429 430 return (0); 431} 432 433/* ARGSUSED */ 434void 435zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 436{ 437 zfs_creat_t *zct = arg; 438 nvlist_t *nvprops = zct->zct_props; 439 int error; 440 uint64_t volblocksize, volsize; 441 442 VERIFY(nvlist_lookup_uint64(nvprops, 443 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0); 444 if (nvlist_lookup_uint64(nvprops, 445 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0) 446 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE); 447 448 /* 449 * These properties must be removed from the list so the generic 450 * property setting step won't apply to them. 
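 * Instead, volsize is recorded in the "size" entry of ZVOL_ZAP_OBJ and
 * volblocksize becomes the data block size of the ZVOL_OBJ object claimed
 * below.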
451 */ 452 VERIFY(nvlist_remove_all(nvprops, 453 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0); 454 (void) nvlist_remove_all(nvprops, 455 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE)); 456 457 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize, 458 DMU_OT_NONE, 0, tx); 459 ASSERT(error == 0); 460 461 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP, 462 DMU_OT_NONE, 0, tx); 463 ASSERT(error == 0); 464 465 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx); 466 ASSERT(error == 0); 467} 468 469/* 470 * Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we 471 * implement DKIOCFREE/free-long-range. 472 */ 473static int 474zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap) 475{ 476 uint64_t offset, length; 477 478 if (byteswap) 479 byteswap_uint64_array(lr, sizeof (*lr)); 480 481 offset = lr->lr_offset; 482 length = lr->lr_length; 483 484 return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length)); 485} 486 487/* 488 * Replay a TX_WRITE ZIL transaction that didn't get committed 489 * after a system failure 490 */ 491static int 492zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap) 493{ 494 objset_t *os = zv->zv_objset; 495 char *data = (char *)(lr + 1); /* data follows lr_write_t */ 496 uint64_t offset, length; 497 dmu_tx_t *tx; 498 int error; 499 500 if (byteswap) 501 byteswap_uint64_array(lr, sizeof (*lr)); 502 503 offset = lr->lr_offset; 504 length = lr->lr_length; 505 506 /* If it's a dmu_sync() block, write the whole block */ 507 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { 508 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr); 509 if (length < blocksize) { 510 offset -= offset % blocksize; 511 length = blocksize; 512 } 513 } 514 515 tx = dmu_tx_create(os); 516 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length); 517 error = dmu_tx_assign(tx, TXG_WAIT); 518 if (error) { 519 dmu_tx_abort(tx); 520 } else { 521 dmu_write(os, ZVOL_OBJ, offset, length, data, tx); 522 dmu_tx_commit(tx); 523 } 524 525 return (error); 526} 527 528/* ARGSUSED */ 529static int 530zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap) 531{ 532 return (SET_ERROR(ENOTSUP)); 533} 534 535/* 536 * Callback vectors for replaying records. 537 * Only TX_WRITE and TX_TRUNCATE are needed for zvol. 538 */ 539zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = { 540 zvol_replay_err, /* 0 no such transaction type */ 541 zvol_replay_err, /* TX_CREATE */ 542 zvol_replay_err, /* TX_MKDIR */ 543 zvol_replay_err, /* TX_MKXATTR */ 544 zvol_replay_err, /* TX_SYMLINK */ 545 zvol_replay_err, /* TX_REMOVE */ 546 zvol_replay_err, /* TX_RMDIR */ 547 zvol_replay_err, /* TX_LINK */ 548 zvol_replay_err, /* TX_RENAME */ 549 zvol_replay_write, /* TX_WRITE */ 550 zvol_replay_truncate, /* TX_TRUNCATE */ 551 zvol_replay_err, /* TX_SETATTR */ 552 zvol_replay_err, /* TX_ACL */ 553 zvol_replay_err, /* TX_CREATE_ACL */ 554 zvol_replay_err, /* TX_CREATE_ATTR */ 555 zvol_replay_err, /* TX_CREATE_ACL_ATTR */ 556 zvol_replay_err, /* TX_MKDIR_ACL */ 557 zvol_replay_err, /* TX_MKDIR_ATTR */ 558 zvol_replay_err, /* TX_MKDIR_ACL_ATTR */ 559 zvol_replay_err, /* TX_WRITE2 */ 560}; 561 562#ifdef illumos 563int 564zvol_name2minor(const char *name, minor_t *minor) 565{ 566 zvol_state_t *zv; 567 568 mutex_enter(&zfsdev_state_lock); 569 zv = zvol_minor_lookup(name); 570 if (minor && zv) 571 *minor = zv->zv_minor; 572 mutex_exit(&zfsdev_state_lock); 573 return (zv ? 
0 : -1); 574} 575#endif /* illumos */ 576 577/* 578 * Create a minor node (plus a whole lot more) for the specified volume. 579 */ 580int 581zvol_create_minor(const char *name) 582{ 583 zfs_soft_state_t *zs; 584 zvol_state_t *zv; 585 objset_t *os; 586 dmu_object_info_t doi; 587#ifdef illumos 588 minor_t minor = 0; 589 char chrbuf[30], blkbuf[30]; 590#else 591 struct g_provider *pp; 592 struct g_geom *gp; 593 uint64_t volsize, mode; 594#endif 595 int error; 596 597#ifndef illumos 598 ZFS_LOG(1, "Creating ZVOL %s...", name); 599#endif 600 601 mutex_enter(&zfsdev_state_lock); 602 603 if (zvol_minor_lookup(name) != NULL) { 604 mutex_exit(&zfsdev_state_lock); 605 return (SET_ERROR(EEXIST)); 606 } 607 608 /* lie and say we're read-only */ 609 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os); 610 611 if (error) { 612 mutex_exit(&zfsdev_state_lock); 613 return (error); 614 } 615 616#ifdef illumos 617 if ((minor = zfsdev_minor_alloc()) == 0) { 618 dmu_objset_disown(os, FTAG); 619 mutex_exit(&zfsdev_state_lock); 620 return (SET_ERROR(ENXIO)); 621 } 622 623 if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) { 624 dmu_objset_disown(os, FTAG); 625 mutex_exit(&zfsdev_state_lock); 626 return (SET_ERROR(EAGAIN)); 627 } 628 (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME, 629 (char *)name); 630 631 (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor); 632 633 if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR, 634 minor, DDI_PSEUDO, 0) == DDI_FAILURE) { 635 ddi_soft_state_free(zfsdev_state, minor); 636 dmu_objset_disown(os, FTAG); 637 mutex_exit(&zfsdev_state_lock); 638 return (SET_ERROR(EAGAIN)); 639 } 640 641 (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor); 642 643 if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK, 644 minor, DDI_PSEUDO, 0) == DDI_FAILURE) { 645 ddi_remove_minor_node(zfs_dip, chrbuf); 646 ddi_soft_state_free(zfsdev_state, minor); 647 dmu_objset_disown(os, FTAG); 648 mutex_exit(&zfsdev_state_lock); 649 return (SET_ERROR(EAGAIN)); 650 } 651 652 zs = ddi_get_soft_state(zfsdev_state, minor); 653 zs->zss_type = ZSST_ZVOL; 654 zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP); 655#else /* !illumos */ 656 657 zv = kmem_zalloc(sizeof(*zv), KM_SLEEP); 658 zv->zv_state = 0; 659 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize); 660 if (error) { 661 kmem_free(zv, sizeof(*zv)); 662 dmu_objset_disown(os, zvol_tag); 663 mutex_exit(&zfsdev_state_lock); 664 return (error); 665 } 666 error = dsl_prop_get_integer(name, 667 zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL); 668 if (error != 0 || mode == ZFS_VOLMODE_DEFAULT) 669 mode = volmode; 670 671 DROP_GIANT(); 672 zv->zv_volsize = volsize; 673 zv->zv_volmode = mode; 674 if (zv->zv_volmode == ZFS_VOLMODE_GEOM) { 675 g_topology_lock(); 676 gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name); 677 gp->start = zvol_geom_start; 678 gp->access = zvol_geom_access; 679 pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name); 680 pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND; 681 pp->sectorsize = DEV_BSIZE; 682 pp->mediasize = zv->zv_volsize; 683 pp->private = zv; 684 685 zv->zv_provider = pp; 686 bioq_init(&zv->zv_queue); 687 mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF); 688 } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) { 689 struct make_dev_args args; 690 691 make_dev_args_init(&args); 692 args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK; 693 args.mda_devsw = &zvol_cdevsw; 694 args.mda_cr = NULL; 695 args.mda_uid = UID_ROOT; 696 args.mda_gid = GID_OPERATOR; 697 
args.mda_mode = 0640; 698 args.mda_si_drv2 = zv; 699 error = make_dev_s(&args, &zv->zv_dev, 700 "%s/%s", ZVOL_DRIVER, name); 701 if (error != 0) { 702 kmem_free(zv, sizeof(*zv)); 703 dmu_objset_disown(os, FTAG); 704 mutex_exit(&zfsdev_state_lock); 705 return (error); 706 } 707 zv->zv_dev->si_iosize_max = MAXPHYS; 708 } 709 LIST_INSERT_HEAD(&all_zvols, zv, zv_links); 710#endif /* illumos */ 711 712 (void) strlcpy(zv->zv_name, name, MAXPATHLEN); 713 zv->zv_min_bs = DEV_BSHIFT; 714#ifdef illumos 715 zv->zv_minor = minor; 716#endif 717 zv->zv_objset = os; 718 if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os))) 719 zv->zv_flags |= ZVOL_RDONLY; 720 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL); 721 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare, 722 sizeof (rl_t), offsetof(rl_t, r_node)); 723 list_create(&zv->zv_extents, sizeof (zvol_extent_t), 724 offsetof(zvol_extent_t, ze_node)); 725 /* get and cache the blocksize */ 726 error = dmu_object_info(os, ZVOL_OBJ, &doi); 727 ASSERT(error == 0); 728 zv->zv_volblocksize = doi.doi_data_block_size; 729 730 if (spa_writeable(dmu_objset_spa(os))) { 731 if (zil_replay_disable) 732 zil_destroy(dmu_objset_zil(os), B_FALSE); 733 else 734 zil_replay(os, zv, zvol_replay_vector); 735 } 736 dmu_objset_disown(os, FTAG); 737 zv->zv_objset = NULL; 738 739 zvol_minors++; 740 741 mutex_exit(&zfsdev_state_lock); 742#ifndef illumos 743 if (zv->zv_volmode == ZFS_VOLMODE_GEOM) { 744 zvol_geom_run(zv); 745 g_topology_unlock(); 746 } 747 PICKUP_GIANT(); 748 749 ZFS_LOG(1, "ZVOL %s created.", name); 750#endif 751 752 return (0); 753} 754 755/* 756 * Remove minor node for the specified volume. 757 */ 758static int 759zvol_remove_zv(zvol_state_t *zv) 760{ 761#ifdef illumos 762 char nmbuf[20]; 763 minor_t minor = zv->zv_minor; 764#endif 765 766 ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 767 if (zv->zv_total_opens != 0) 768 return (SET_ERROR(EBUSY)); 769 770#ifdef illumos 771 (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor); 772 ddi_remove_minor_node(zfs_dip, nmbuf); 773 774 (void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor); 775 ddi_remove_minor_node(zfs_dip, nmbuf); 776#else 777 ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name); 778 779 LIST_REMOVE(zv, zv_links); 780 if (zv->zv_volmode == ZFS_VOLMODE_GEOM) { 781 g_topology_lock(); 782 zvol_geom_destroy(zv); 783 g_topology_unlock(); 784 } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) { 785 if (zv->zv_dev != NULL) 786 destroy_dev(zv->zv_dev); 787 } 788#endif 789 790 avl_destroy(&zv->zv_znode.z_range_avl); 791 mutex_destroy(&zv->zv_znode.z_range_lock); 792 793 kmem_free(zv, sizeof (zvol_state_t)); 794#ifdef illumos 795 ddi_soft_state_free(zfsdev_state, minor); 796#endif 797 zvol_minors--; 798 return (0); 799} 800 801int 802zvol_remove_minor(const char *name) 803{ 804 zvol_state_t *zv; 805 int rc; 806 807 mutex_enter(&zfsdev_state_lock); 808 if ((zv = zvol_minor_lookup(name)) == NULL) { 809 mutex_exit(&zfsdev_state_lock); 810 return (SET_ERROR(ENXIO)); 811 } 812 rc = zvol_remove_zv(zv); 813 mutex_exit(&zfsdev_state_lock); 814 return (rc); 815} 816 817int 818zvol_first_open(zvol_state_t *zv) 819{ 820 objset_t *os; 821 uint64_t volsize; 822 int error; 823 uint64_t readonly; 824 825 /* lie and say we're read-only */ 826 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE, 827 zvol_tag, &os); 828 if (error) 829 return (error); 830 831 zv->zv_objset = os; 832 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize); 833 if (error) { 834 ASSERT(error == 0); 835 
dmu_objset_disown(os, zvol_tag); 836 return (error); 837 } 838 839 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf); 840 if (error) { 841 dmu_objset_disown(os, zvol_tag); 842 return (error); 843 } 844 845 zvol_size_changed(zv, volsize); 846 zv->zv_zilog = zil_open(os, zvol_get_data); 847 848 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly, 849 NULL) == 0); 850 if (readonly || dmu_objset_is_snapshot(os) || 851 !spa_writeable(dmu_objset_spa(os))) 852 zv->zv_flags |= ZVOL_RDONLY; 853 else 854 zv->zv_flags &= ~ZVOL_RDONLY; 855 return (error); 856} 857 858void 859zvol_last_close(zvol_state_t *zv) 860{ 861 zil_close(zv->zv_zilog); 862 zv->zv_zilog = NULL; 863 864 dmu_buf_rele(zv->zv_dbuf, zvol_tag); 865 zv->zv_dbuf = NULL; 866 867 /* 868 * Evict cached data 869 */ 870 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) && 871 !(zv->zv_flags & ZVOL_RDONLY)) 872 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0); 873 dmu_objset_evict_dbufs(zv->zv_objset); 874 875 dmu_objset_disown(zv->zv_objset, zvol_tag); 876 zv->zv_objset = NULL; 877} 878 879#ifdef illumos 880int 881zvol_prealloc(zvol_state_t *zv) 882{ 883 objset_t *os = zv->zv_objset; 884 dmu_tx_t *tx; 885 uint64_t refd, avail, usedobjs, availobjs; 886 uint64_t resid = zv->zv_volsize; 887 uint64_t off = 0; 888 889 /* Check the space usage before attempting to allocate the space */ 890 dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs); 891 if (avail < zv->zv_volsize) 892 return (SET_ERROR(ENOSPC)); 893 894 /* Free old extents if they exist */ 895 zvol_free_extents(zv); 896 897 while (resid != 0) { 898 int error; 899 uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE); 900 901 tx = dmu_tx_create(os); 902 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes); 903 error = dmu_tx_assign(tx, TXG_WAIT); 904 if (error) { 905 dmu_tx_abort(tx); 906 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off); 907 return (error); 908 } 909 dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx); 910 dmu_tx_commit(tx); 911 off += bytes; 912 resid -= bytes; 913 } 914 txg_wait_synced(dmu_objset_pool(os), 0); 915 916 return (0); 917} 918#endif /* illumos */ 919 920static int 921zvol_update_volsize(objset_t *os, uint64_t volsize) 922{ 923 dmu_tx_t *tx; 924 int error; 925 926 ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 927 928 tx = dmu_tx_create(os); 929 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 930 dmu_tx_mark_netfree(tx); 931 error = dmu_tx_assign(tx, TXG_WAIT); 932 if (error) { 933 dmu_tx_abort(tx); 934 return (error); 935 } 936 937 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, 938 &volsize, tx); 939 dmu_tx_commit(tx); 940 941 if (error == 0) 942 error = dmu_free_long_range(os, 943 ZVOL_OBJ, volsize, DMU_OBJECT_END); 944 return (error); 945} 946 947void 948zvol_remove_minors(const char *name) 949{ 950#ifdef illumos 951 zvol_state_t *zv; 952 char *namebuf; 953 minor_t minor; 954 955 namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP); 956 (void) strncpy(namebuf, name, strlen(name)); 957 (void) strcat(namebuf, "/"); 958 mutex_enter(&zfsdev_state_lock); 959 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) { 960 961 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL); 962 if (zv == NULL) 963 continue; 964 if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0) 965 (void) zvol_remove_zv(zv); 966 } 967 kmem_free(namebuf, strlen(name) + 2); 968 969 mutex_exit(&zfsdev_state_lock); 970#else /* !illumos */ 971 zvol_state_t *zv, *tzv; 972 size_t namelen; 973 974 namelen = strlen(name); 975 976 DROP_GIANT(); 977 mutex_enter(&zfsdev_state_lock); 978 979 
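	/*
	 * Remove the named zvol itself as well as any children and snapshots
	 * (names that continue with '/' or '@').  Busy zvols are left in
	 * place: zvol_remove_zv() returns EBUSY for them and the error is
	 * discarded here.
	 */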
LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) { 980 if (strcmp(zv->zv_name, name) == 0 || 981 (strncmp(zv->zv_name, name, namelen) == 0 && 982 strlen(zv->zv_name) > namelen && (zv->zv_name[namelen] == '/' || 983 zv->zv_name[namelen] == '@'))) { 984 (void) zvol_remove_zv(zv); 985 } 986 } 987 988 mutex_exit(&zfsdev_state_lock); 989 PICKUP_GIANT(); 990#endif /* illumos */ 991} 992 993static int 994zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize) 995{ 996 uint64_t old_volsize = 0ULL; 997 int error = 0; 998 999 ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 1000 1001 /* 1002 * Reinitialize the dump area to the new size. If we 1003 * failed to resize the dump area then restore it back to 1004 * its original size. We must set the new volsize prior 1005 * to calling dumpvp_resize() to ensure that the devices' 1006 * size(9P) is not visible by the dump subsystem. 1007 */ 1008 old_volsize = zv->zv_volsize; 1009 zvol_size_changed(zv, volsize); 1010 1011#ifdef ZVOL_DUMP 1012 if (zv->zv_flags & ZVOL_DUMPIFIED) { 1013 if ((error = zvol_dumpify(zv)) != 0 || 1014 (error = dumpvp_resize()) != 0) { 1015 int dumpify_error; 1016 1017 (void) zvol_update_volsize(zv->zv_objset, old_volsize); 1018 zvol_size_changed(zv, old_volsize); 1019 dumpify_error = zvol_dumpify(zv); 1020 error = dumpify_error ? dumpify_error : error; 1021 } 1022 } 1023#endif /* ZVOL_DUMP */ 1024 1025#ifdef illumos 1026 /* 1027 * Generate a LUN expansion event. 1028 */ 1029 if (error == 0) { 1030 sysevent_id_t eid; 1031 nvlist_t *attr; 1032 char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 1033 1034 (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV, 1035 zv->zv_minor); 1036 1037 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1038 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 1039 1040 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 1041 ESC_DEV_DLE, attr, &eid, DDI_SLEEP); 1042 1043 nvlist_free(attr); 1044 kmem_free(physpath, MAXPATHLEN); 1045 } 1046#endif /* illumos */ 1047 return (error); 1048} 1049 1050int 1051zvol_set_volsize(const char *name, uint64_t volsize) 1052{ 1053 zvol_state_t *zv = NULL; 1054 objset_t *os; 1055 int error; 1056 dmu_object_info_t doi; 1057 uint64_t readonly; 1058 boolean_t owned = B_FALSE; 1059 1060 error = dsl_prop_get_integer(name, 1061 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL); 1062 if (error != 0) 1063 return (error); 1064 if (readonly) 1065 return (SET_ERROR(EROFS)); 1066 1067 mutex_enter(&zfsdev_state_lock); 1068 zv = zvol_minor_lookup(name); 1069 1070 if (zv == NULL || zv->zv_objset == NULL) { 1071 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, 1072 FTAG, &os)) != 0) { 1073 mutex_exit(&zfsdev_state_lock); 1074 return (error); 1075 } 1076 owned = B_TRUE; 1077 if (zv != NULL) 1078 zv->zv_objset = os; 1079 } else { 1080 os = zv->zv_objset; 1081 } 1082 1083 if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 || 1084 (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0) 1085 goto out; 1086 1087 error = zvol_update_volsize(os, volsize); 1088 1089 if (error == 0 && zv != NULL) 1090 error = zvol_update_live_volsize(zv, volsize); 1091out: 1092 if (owned) { 1093 dmu_objset_disown(os, FTAG); 1094 if (zv != NULL) 1095 zv->zv_objset = NULL; 1096 } 1097 mutex_exit(&zfsdev_state_lock); 1098 return (error); 1099} 1100 1101/*ARGSUSED*/ 1102#ifdef illumos 1103int 1104zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr) 1105#else 1106static int 1107zvol_open(struct g_provider *pp, int flag, int count) 1108#endif 1109{ 
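	/*
	 * Open accounting: the first open takes ownership of the objset via
	 * zvol_first_open() and the last close releases it in
	 * zvol_last_close(); zv_total_opens tracks how many opens are
	 * currently outstanding.
	 */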
1110 zvol_state_t *zv; 1111 int err = 0; 1112#ifdef illumos 1113 1114 mutex_enter(&zfsdev_state_lock); 1115 1116 zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL); 1117 if (zv == NULL) { 1118 mutex_exit(&zfsdev_state_lock); 1119 return (SET_ERROR(ENXIO)); 1120 } 1121 1122 if (zv->zv_total_opens == 0) 1123 err = zvol_first_open(zv); 1124 if (err) { 1125 mutex_exit(&zfsdev_state_lock); 1126 return (err); 1127 } 1128#else /* !illumos */ 1129 if (tsd_get(zfs_geom_probe_vdev_key) != NULL) { 1130 /* 1131 * if zfs_geom_probe_vdev_key is set, that means that zfs is 1132 * attempting to probe geom providers while looking for a 1133 * replacement for a missing VDEV. In this case, the 1134 * spa_namespace_lock will not be held, but it is still illegal 1135 * to use a zvol as a vdev. Deadlocks can result if another 1136 * thread has spa_namespace_lock 1137 */ 1138 return (EOPNOTSUPP); 1139 } 1140 1141 mutex_enter(&zfsdev_state_lock); 1142 1143 zv = pp->private; 1144 if (zv == NULL) { 1145 mutex_exit(&zfsdev_state_lock); 1146 return (SET_ERROR(ENXIO)); 1147 } 1148 1149 if (zv->zv_total_opens == 0) { 1150 err = zvol_first_open(zv); 1151 if (err) { 1152 mutex_exit(&zfsdev_state_lock); 1153 return (err); 1154 } 1155 pp->mediasize = zv->zv_volsize; 1156 pp->stripeoffset = 0; 1157 pp->stripesize = zv->zv_volblocksize; 1158 } 1159#endif /* illumos */ 1160 if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) { 1161 err = SET_ERROR(EROFS); 1162 goto out; 1163 } 1164 if (zv->zv_flags & ZVOL_EXCL) { 1165 err = SET_ERROR(EBUSY); 1166 goto out; 1167 } 1168#ifdef FEXCL 1169 if (flag & FEXCL) { 1170 if (zv->zv_total_opens != 0) { 1171 err = SET_ERROR(EBUSY); 1172 goto out; 1173 } 1174 zv->zv_flags |= ZVOL_EXCL; 1175 } 1176#endif 1177 1178#ifdef illumos 1179 if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) { 1180 zv->zv_open_count[otyp]++; 1181 zv->zv_total_opens++; 1182 } 1183 mutex_exit(&zfsdev_state_lock); 1184#else 1185 zv->zv_total_opens += count; 1186 mutex_exit(&zfsdev_state_lock); 1187#endif 1188 1189 return (err); 1190out: 1191 if (zv->zv_total_opens == 0) 1192 zvol_last_close(zv); 1193#ifdef illumos 1194 mutex_exit(&zfsdev_state_lock); 1195#else 1196 mutex_exit(&zfsdev_state_lock); 1197#endif 1198 return (err); 1199} 1200 1201/*ARGSUSED*/ 1202#ifdef illumos 1203int 1204zvol_close(dev_t dev, int flag, int otyp, cred_t *cr) 1205{ 1206 minor_t minor = getminor(dev); 1207 zvol_state_t *zv; 1208 int error = 0; 1209 1210 mutex_enter(&zfsdev_state_lock); 1211 1212 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL); 1213 if (zv == NULL) { 1214 mutex_exit(&zfsdev_state_lock); 1215#else /* !illumos */ 1216static int 1217zvol_close(struct g_provider *pp, int flag, int count) 1218{ 1219 zvol_state_t *zv; 1220 int error = 0; 1221 boolean_t locked = B_FALSE; 1222 1223 /* See comment in zvol_open(). */ 1224 if (!MUTEX_HELD(&zfsdev_state_lock)) { 1225 mutex_enter(&zfsdev_state_lock); 1226 locked = B_TRUE; 1227 } 1228 1229 zv = pp->private; 1230 if (zv == NULL) { 1231 if (locked) 1232 mutex_exit(&zfsdev_state_lock); 1233#endif /* illumos */ 1234 return (SET_ERROR(ENXIO)); 1235 } 1236 1237 if (zv->zv_flags & ZVOL_EXCL) { 1238 ASSERT(zv->zv_total_opens == 1); 1239 zv->zv_flags &= ~ZVOL_EXCL; 1240 } 1241 1242 /* 1243 * If the open count is zero, this is a spurious close. 1244 * That indicates a bug in the kernel / DDI framework. 1245 */ 1246#ifdef illumos 1247 ASSERT(zv->zv_open_count[otyp] != 0); 1248#endif 1249 ASSERT(zv->zv_total_opens != 0); 1250 1251 /* 1252 * You may get multiple opens, but only one close. 
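 * On FreeBSD a single close call may drop several references at once;
 * the 'count' argument is subtracted from zv_total_opens below.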
1253 */ 1254#ifdef illumos 1255 zv->zv_open_count[otyp]--; 1256 zv->zv_total_opens--; 1257#else 1258 zv->zv_total_opens -= count; 1259#endif 1260 1261 if (zv->zv_total_opens == 0) 1262 zvol_last_close(zv); 1263 1264#ifdef illumos 1265 mutex_exit(&zfsdev_state_lock); 1266#else 1267 if (locked) 1268 mutex_exit(&zfsdev_state_lock); 1269#endif 1270 return (error); 1271} 1272 1273static void 1274zvol_get_done(zgd_t *zgd, int error) 1275{ 1276 if (zgd->zgd_db) 1277 dmu_buf_rele(zgd->zgd_db, zgd); 1278 1279 zfs_range_unlock(zgd->zgd_rl); 1280 1281 if (error == 0 && zgd->zgd_bp) 1282 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 1283 1284 kmem_free(zgd, sizeof (zgd_t)); 1285} 1286 1287/* 1288 * Get data to generate a TX_WRITE intent log record. 1289 */ 1290static int 1291zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 1292{ 1293 zvol_state_t *zv = arg; 1294 objset_t *os = zv->zv_objset; 1295 uint64_t object = ZVOL_OBJ; 1296 uint64_t offset = lr->lr_offset; 1297 uint64_t size = lr->lr_length; /* length of user data */ 1298 blkptr_t *bp = &lr->lr_blkptr; 1299 dmu_buf_t *db; 1300 zgd_t *zgd; 1301 int error; 1302 1303 ASSERT(zio != NULL); 1304 ASSERT(size != 0); 1305 1306 zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP); 1307 zgd->zgd_zilog = zv->zv_zilog; 1308 1309 /* 1310 * Write records come in two flavors: immediate and indirect. 1311 * For small writes it's cheaper to store the data with the 1312 * log record (immediate); for large writes it's cheaper to 1313 * sync the data and get a pointer to it (indirect) so that 1314 * we don't have to write the data twice. 1315 */ 1316 if (buf != NULL) { /* immediate write */ 1317 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, 1318 RL_READER); 1319 error = dmu_read(os, object, offset, size, buf, 1320 DMU_READ_NO_PREFETCH); 1321 } else { /* indirect write */ 1322 /* 1323 * Have to lock the whole block to ensure when it's written out 1324 * and its checksum is being calculated that no one can change 1325 * the data. Contrarily to zfs_get_data we need not re-check 1326 * blocksize after we get the lock because it cannot be changed. 1327 */ 1328 size = zv->zv_volblocksize; 1329 offset = P2ALIGN(offset, size); 1330 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, 1331 RL_READER); 1332 error = dmu_buf_hold(os, object, offset, zgd, &db, 1333 DMU_READ_NO_PREFETCH); 1334 if (error == 0) { 1335 blkptr_t *obp = dmu_buf_get_blkptr(db); 1336 if (obp) { 1337 ASSERT(BP_IS_HOLE(bp)); 1338 *bp = *obp; 1339 } 1340 1341 zgd->zgd_db = db; 1342 zgd->zgd_bp = bp; 1343 1344 ASSERT(db->db_offset == offset); 1345 ASSERT(db->db_size == size); 1346 1347 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1348 zvol_get_done, zgd); 1349 1350 if (error == 0) 1351 return (0); 1352 } 1353 } 1354 1355 zvol_get_done(zgd, error); 1356 1357 return (error); 1358} 1359 1360/* 1361 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions. 1362 * 1363 * We store data in the log buffers if it's small enough. 1364 * Otherwise we will later flush the data out via dmu_sync(). 
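 *
 * Roughly, the itx write state is chosen as follows (mirroring the code
 * below):
 *
 *	logbias=throughput				-> WR_INDIRECT
 *	no slog, resid >= volblocksize and
 *	    volblocksize > zvol_immediate_write_sz	-> WR_INDIRECT
 *	synchronous write				-> WR_COPIED
 *	otherwise					-> WR_NEED_COPY
 *
 * A WR_COPIED record falls back to WR_NEED_COPY if it would exceed
 * ZIL_MAX_COPIED_DATA or if the dmu_read() of the data fails.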
1365 */ 1366ssize_t zvol_immediate_write_sz = 32768; 1367 1368static void 1369zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid, 1370 boolean_t sync) 1371{ 1372 uint32_t blocksize = zv->zv_volblocksize; 1373 zilog_t *zilog = zv->zv_zilog; 1374 itx_wr_state_t write_state; 1375 1376 if (zil_replaying(zilog, tx)) 1377 return; 1378 1379 if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT) 1380 write_state = WR_INDIRECT; 1381 else if (!spa_has_slogs(zilog->zl_spa) && 1382 resid >= blocksize && blocksize > zvol_immediate_write_sz) 1383 write_state = WR_INDIRECT; 1384 else if (sync) 1385 write_state = WR_COPIED; 1386 else 1387 write_state = WR_NEED_COPY; 1388 1389 while (resid) { 1390 itx_t *itx; 1391 lr_write_t *lr; 1392 itx_wr_state_t wr_state = write_state; 1393 ssize_t len = resid; 1394 1395 if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA) 1396 wr_state = WR_NEED_COPY; 1397 else if (wr_state == WR_INDIRECT) 1398 len = MIN(blocksize - P2PHASE(off, blocksize), resid); 1399 1400 itx = zil_itx_create(TX_WRITE, sizeof (*lr) + 1401 (wr_state == WR_COPIED ? len : 0)); 1402 lr = (lr_write_t *)&itx->itx_lr; 1403 if (wr_state == WR_COPIED && dmu_read(zv->zv_objset, 1404 ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) { 1405 zil_itx_destroy(itx); 1406 itx = zil_itx_create(TX_WRITE, sizeof (*lr)); 1407 lr = (lr_write_t *)&itx->itx_lr; 1408 wr_state = WR_NEED_COPY; 1409 } 1410 1411 itx->itx_wr_state = wr_state; 1412 lr->lr_foid = ZVOL_OBJ; 1413 lr->lr_offset = off; 1414 lr->lr_length = len; 1415 lr->lr_blkoff = 0; 1416 BP_ZERO(&lr->lr_blkptr); 1417 1418 itx->itx_private = zv; 1419 1420 if (!sync && (zv->zv_sync_cnt == 0)) 1421 itx->itx_sync = B_FALSE; 1422 1423 zil_itx_assign(zilog, itx, tx); 1424 1425 off += len; 1426 resid -= len; 1427 } 1428} 1429 1430#ifdef illumos 1431static int 1432zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset, 1433 uint64_t size, boolean_t doread, boolean_t isdump) 1434{ 1435 vdev_disk_t *dvd; 1436 int c; 1437 int numerrors = 0; 1438 1439 if (vd->vdev_ops == &vdev_mirror_ops || 1440 vd->vdev_ops == &vdev_replacing_ops || 1441 vd->vdev_ops == &vdev_spare_ops) { 1442 for (c = 0; c < vd->vdev_children; c++) { 1443 int err = zvol_dumpio_vdev(vd->vdev_child[c], 1444 addr, offset, origoffset, size, doread, isdump); 1445 if (err != 0) { 1446 numerrors++; 1447 } else if (doread) { 1448 break; 1449 } 1450 } 1451 } 1452 1453 if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops) 1454 return (numerrors < vd->vdev_children ? 0 : EIO); 1455 1456 if (doread && !vdev_readable(vd)) 1457 return (SET_ERROR(EIO)); 1458 else if (!doread && !vdev_writeable(vd)) 1459 return (SET_ERROR(EIO)); 1460 1461 if (vd->vdev_ops == &vdev_raidz_ops) { 1462 return (vdev_raidz_physio(vd, 1463 addr, size, offset, origoffset, doread, isdump)); 1464 } 1465 1466 offset += VDEV_LABEL_START_SIZE; 1467 1468 if (ddi_in_panic() || isdump) { 1469 ASSERT(!doread); 1470 if (doread) 1471 return (SET_ERROR(EIO)); 1472 dvd = vd->vdev_tsd; 1473 ASSERT3P(dvd, !=, NULL); 1474 return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset), 1475 lbtodb(size))); 1476 } else { 1477 dvd = vd->vdev_tsd; 1478 ASSERT3P(dvd, !=, NULL); 1479 return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size, 1480 offset, doread ? 
B_READ : B_WRITE)); 1481 } 1482} 1483 1484static int 1485zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size, 1486 boolean_t doread, boolean_t isdump) 1487{ 1488 vdev_t *vd; 1489 int error; 1490 zvol_extent_t *ze; 1491 spa_t *spa = dmu_objset_spa(zv->zv_objset); 1492 1493 /* Must be sector aligned, and not stradle a block boundary. */ 1494 if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) || 1495 P2BOUNDARY(offset, size, zv->zv_volblocksize)) { 1496 return (SET_ERROR(EINVAL)); 1497 } 1498 ASSERT(size <= zv->zv_volblocksize); 1499 1500 /* Locate the extent this belongs to */ 1501 ze = list_head(&zv->zv_extents); 1502 while (offset >= ze->ze_nblks * zv->zv_volblocksize) { 1503 offset -= ze->ze_nblks * zv->zv_volblocksize; 1504 ze = list_next(&zv->zv_extents, ze); 1505 } 1506 1507 if (ze == NULL) 1508 return (SET_ERROR(EINVAL)); 1509 1510 if (!ddi_in_panic()) 1511 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 1512 1513 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva)); 1514 offset += DVA_GET_OFFSET(&ze->ze_dva); 1515 error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva), 1516 size, doread, isdump); 1517 1518 if (!ddi_in_panic()) 1519 spa_config_exit(spa, SCL_STATE, FTAG); 1520 1521 return (error); 1522} 1523 1524int 1525zvol_strategy(buf_t *bp) 1526{ 1527 zfs_soft_state_t *zs = NULL; 1528#else /* !illumos */ 1529void 1530zvol_strategy(struct bio *bp) 1531{ 1532#endif /* illumos */ 1533 zvol_state_t *zv; 1534 uint64_t off, volsize; 1535 size_t resid; 1536 char *addr; 1537 objset_t *os; 1538 rl_t *rl; 1539 int error = 0; 1540#ifdef illumos 1541 boolean_t doread = bp->b_flags & B_READ; 1542#else 1543 boolean_t doread = 0; 1544#endif 1545 boolean_t is_dumpified; 1546 boolean_t sync; 1547 1548#ifdef illumos 1549 if (getminor(bp->b_edev) == 0) { 1550 error = SET_ERROR(EINVAL); 1551 } else { 1552 zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev)); 1553 if (zs == NULL) 1554 error = SET_ERROR(ENXIO); 1555 else if (zs->zss_type != ZSST_ZVOL) 1556 error = SET_ERROR(EINVAL); 1557 } 1558 1559 if (error) { 1560 bioerror(bp, error); 1561 biodone(bp); 1562 return (0); 1563 } 1564 1565 zv = zs->zss_data; 1566 1567 if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) { 1568 bioerror(bp, EROFS); 1569 biodone(bp); 1570 return (0); 1571 } 1572 1573 off = ldbtob(bp->b_blkno); 1574#else /* !illumos */ 1575 if (bp->bio_to) 1576 zv = bp->bio_to->private; 1577 else 1578 zv = bp->bio_dev->si_drv2; 1579 1580 if (zv == NULL) { 1581 error = SET_ERROR(ENXIO); 1582 goto out; 1583 } 1584 1585 if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) { 1586 error = SET_ERROR(EROFS); 1587 goto out; 1588 } 1589 1590 switch (bp->bio_cmd) { 1591 case BIO_FLUSH: 1592 goto sync; 1593 case BIO_READ: 1594 doread = 1; 1595 case BIO_WRITE: 1596 case BIO_DELETE: 1597 break; 1598 default: 1599 error = EOPNOTSUPP; 1600 goto out; 1601 } 1602 1603 off = bp->bio_offset; 1604#endif /* illumos */ 1605 volsize = zv->zv_volsize; 1606 1607 os = zv->zv_objset; 1608 ASSERT(os != NULL); 1609 1610#ifdef illumos 1611 bp_mapin(bp); 1612 addr = bp->b_un.b_addr; 1613 resid = bp->b_bcount; 1614 1615 if (resid > 0 && (off < 0 || off >= volsize)) { 1616 bioerror(bp, EIO); 1617 biodone(bp); 1618 return (0); 1619 } 1620 1621 is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED; 1622 sync = ((!(bp->b_flags & B_ASYNC) && 1623 !(zv->zv_flags & ZVOL_WCE)) || 1624 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) && 1625 !doread && !is_dumpified; 1626#else /* !illumos */ 1627 addr = bp->bio_data; 1628 
resid = bp->bio_length; 1629 1630 if (resid > 0 && (off < 0 || off >= volsize)) { 1631 error = SET_ERROR(EIO); 1632 goto out; 1633 } 1634 1635 is_dumpified = B_FALSE; 1636 sync = !doread && !is_dumpified && 1637 zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS; 1638#endif /* illumos */ 1639 1640 /* 1641 * There must be no buffer changes when doing a dmu_sync() because 1642 * we can't change the data whilst calculating the checksum. 1643 */ 1644 rl = zfs_range_lock(&zv->zv_znode, off, resid, 1645 doread ? RL_READER : RL_WRITER); 1646 1647#ifndef illumos 1648 if (bp->bio_cmd == BIO_DELETE) { 1649 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); 1650 error = dmu_tx_assign(tx, TXG_WAIT); 1651 if (error != 0) { 1652 dmu_tx_abort(tx); 1653 } else { 1654 zvol_log_truncate(zv, tx, off, resid, sync); 1655 dmu_tx_commit(tx); 1656 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 1657 off, resid); 1658 resid = 0; 1659 } 1660 goto unlock; 1661 } 1662#endif 1663 while (resid != 0 && off < volsize) { 1664 size_t size = MIN(resid, zvol_maxphys); 1665#ifdef illumos 1666 if (is_dumpified) { 1667 size = MIN(size, P2END(off, zv->zv_volblocksize) - off); 1668 error = zvol_dumpio(zv, addr, off, size, 1669 doread, B_FALSE); 1670 } else if (doread) { 1671#else 1672 if (doread) { 1673#endif 1674 error = dmu_read(os, ZVOL_OBJ, off, size, addr, 1675 DMU_READ_PREFETCH); 1676 } else { 1677 dmu_tx_t *tx = dmu_tx_create(os); 1678 dmu_tx_hold_write(tx, ZVOL_OBJ, off, size); 1679 error = dmu_tx_assign(tx, TXG_WAIT); 1680 if (error) { 1681 dmu_tx_abort(tx); 1682 } else { 1683 dmu_write(os, ZVOL_OBJ, off, size, addr, tx); 1684 zvol_log_write(zv, tx, off, size, sync); 1685 dmu_tx_commit(tx); 1686 } 1687 } 1688 if (error) { 1689 /* convert checksum errors into IO errors */ 1690 if (error == ECKSUM) 1691 error = SET_ERROR(EIO); 1692 break; 1693 } 1694 off += size; 1695 addr += size; 1696 resid -= size; 1697 } 1698#ifndef illumos 1699unlock: 1700#endif 1701 zfs_range_unlock(rl); 1702 1703#ifdef illumos 1704 if ((bp->b_resid = resid) == bp->b_bcount) 1705 bioerror(bp, off > volsize ? EINVAL : error); 1706 1707 if (sync) 1708 zil_commit(zv->zv_zilog, ZVOL_OBJ); 1709 biodone(bp); 1710 1711 return (0); 1712#else /* !illumos */ 1713 bp->bio_completed = bp->bio_length - resid; 1714 if (bp->bio_completed < bp->bio_length && off > volsize) 1715 error = EINVAL; 1716 1717 if (sync) { 1718sync: 1719 zil_commit(zv->zv_zilog, ZVOL_OBJ); 1720 } 1721out: 1722 if (bp->bio_to) 1723 g_io_deliver(bp, error); 1724 else 1725 biofinish(bp, NULL, error); 1726#endif /* illumos */ 1727} 1728 1729#ifdef illumos 1730/* 1731 * Set the buffer count to the zvol maximum transfer. 1732 * Using our own routine instead of the default minphys() 1733 * means that for larger writes we write bigger buffers on X86 1734 * (128K instead of 56K) and flush the disk write cache less often 1735 * (every zvol_maxphys - currently 1MB) instead of minphys (currently 1736 * 56K on X86 and 128K on sparc). 
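 * zvol_maxphys itself defaults to DMU_MAX_ACCESS / 2; see its definition
 * near the top of this file.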
1737 */ 1738void 1739zvol_minphys(struct buf *bp) 1740{ 1741 if (bp->b_bcount > zvol_maxphys) 1742 bp->b_bcount = zvol_maxphys; 1743} 1744 1745int 1746zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks) 1747{ 1748 minor_t minor = getminor(dev); 1749 zvol_state_t *zv; 1750 int error = 0; 1751 uint64_t size; 1752 uint64_t boff; 1753 uint64_t resid; 1754 1755 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL); 1756 if (zv == NULL) 1757 return (SET_ERROR(ENXIO)); 1758 1759 if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0) 1760 return (SET_ERROR(EINVAL)); 1761 1762 boff = ldbtob(blkno); 1763 resid = ldbtob(nblocks); 1764 1765 VERIFY3U(boff + resid, <=, zv->zv_volsize); 1766 1767 while (resid) { 1768 size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff); 1769 error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE); 1770 if (error) 1771 break; 1772 boff += size; 1773 addr += size; 1774 resid -= size; 1775 } 1776 1777 return (error); 1778} 1779 1780/*ARGSUSED*/ 1781int 1782zvol_read(dev_t dev, uio_t *uio, cred_t *cr) 1783{ 1784 minor_t minor = getminor(dev); 1785#else /* !illumos */ 1786int 1787zvol_read(struct cdev *dev, struct uio *uio, int ioflag) 1788{ 1789#endif /* illumos */ 1790 zvol_state_t *zv; 1791 uint64_t volsize; 1792 rl_t *rl; 1793 int error = 0; 1794 1795#ifdef illumos 1796 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL); 1797 if (zv == NULL) 1798 return (SET_ERROR(ENXIO)); 1799#else 1800 zv = dev->si_drv2; 1801#endif 1802 1803 volsize = zv->zv_volsize; 1804 /* uio_loffset == volsize isn't an error as its required for EOF processing. */ 1805 if (uio->uio_resid > 0 && 1806 (uio->uio_loffset < 0 || uio->uio_loffset > volsize)) 1807 return (SET_ERROR(EIO)); 1808 1809#ifdef illumos 1810 if (zv->zv_flags & ZVOL_DUMPIFIED) { 1811 error = physio(zvol_strategy, NULL, dev, B_READ, 1812 zvol_minphys, uio); 1813 return (error); 1814 } 1815#endif 1816 1817 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid, 1818 RL_READER); 1819 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) { 1820 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); 1821 1822 /* don't read past the end */ 1823 if (bytes > volsize - uio->uio_loffset) 1824 bytes = volsize - uio->uio_loffset; 1825 1826 error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes); 1827 if (error) { 1828 /* convert checksum errors into IO errors */ 1829 if (error == ECKSUM) 1830 error = SET_ERROR(EIO); 1831 break; 1832 } 1833 } 1834 zfs_range_unlock(rl); 1835 return (error); 1836} 1837 1838#ifdef illumos 1839/*ARGSUSED*/ 1840int 1841zvol_write(dev_t dev, uio_t *uio, cred_t *cr) 1842{ 1843 minor_t minor = getminor(dev); 1844#else /* !illumos */ 1845int 1846zvol_write(struct cdev *dev, struct uio *uio, int ioflag) 1847{ 1848#endif /* illumos */ 1849 zvol_state_t *zv; 1850 uint64_t volsize; 1851 rl_t *rl; 1852 int error = 0; 1853 boolean_t sync; 1854 1855#ifdef illumos 1856 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL); 1857 if (zv == NULL) 1858 return (SET_ERROR(ENXIO)); 1859#else 1860 zv = dev->si_drv2; 1861#endif 1862 1863 volsize = zv->zv_volsize; 1864 /* uio_loffset == volsize isn't an error as its required for EOF processing. 
*/ 1865 if (uio->uio_resid > 0 && 1866 (uio->uio_loffset < 0 || uio->uio_loffset > volsize)) 1867 return (SET_ERROR(EIO)); 1868 1869#ifdef illumos 1870 if (zv->zv_flags & ZVOL_DUMPIFIED) { 1871 error = physio(zvol_strategy, NULL, dev, B_WRITE, 1872 zvol_minphys, uio); 1873 return (error); 1874 } 1875 1876 sync = !(zv->zv_flags & ZVOL_WCE) || 1877#else 1878 sync = (ioflag & IO_SYNC) || 1879#endif 1880 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS); 1881 1882 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid, 1883 RL_WRITER); 1884 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) { 1885 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); 1886 uint64_t off = uio->uio_loffset; 1887 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); 1888 1889 if (bytes > volsize - off) /* don't write past the end */ 1890 bytes = volsize - off; 1891 1892 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes); 1893 error = dmu_tx_assign(tx, TXG_WAIT); 1894 if (error) { 1895 dmu_tx_abort(tx); 1896 break; 1897 } 1898 error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx); 1899 if (error == 0) 1900 zvol_log_write(zv, tx, off, bytes, sync); 1901 dmu_tx_commit(tx); 1902 1903 if (error) 1904 break; 1905 } 1906 zfs_range_unlock(rl); 1907 if (sync) 1908 zil_commit(zv->zv_zilog, ZVOL_OBJ); 1909 return (error); 1910} 1911 1912#ifdef illumos 1913int 1914zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs) 1915{ 1916 struct uuid uuid = EFI_RESERVED; 1917 efi_gpe_t gpe = { 0 }; 1918 uint32_t crc; 1919 dk_efi_t efi; 1920 int length; 1921 char *ptr; 1922 1923 if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag)) 1924 return (SET_ERROR(EFAULT)); 1925 ptr = (char *)(uintptr_t)efi.dki_data_64; 1926 length = efi.dki_length; 1927 /* 1928 * Some clients may attempt to request a PMBR for the 1929 * zvol. Currently this interface will return EINVAL to 1930 * such requests. These requests could be supported by 1931 * adding a check for lba == 0 and consing up an appropriate 1932 * PMBR. 1933 */ 1934 if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0) 1935 return (SET_ERROR(EINVAL)); 1936 1937 gpe.efi_gpe_StartingLBA = LE_64(34ULL); 1938 gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1); 1939 UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid); 1940 1941 if (efi.dki_lba == 1) { 1942 efi_gpt_t gpt = { 0 }; 1943 1944 gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE); 1945 gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT); 1946 gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt)); 1947 gpt.efi_gpt_MyLBA = LE_64(1ULL); 1948 gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL); 1949 gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1); 1950 gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL); 1951 gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1); 1952 gpt.efi_gpt_SizeOfPartitionEntry = 1953 LE_32(sizeof (efi_gpe_t)); 1954 CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table); 1955 gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc); 1956 CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table); 1957 gpt.efi_gpt_HeaderCRC32 = LE_32(~crc); 1958 if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length), 1959 flag)) 1960 return (SET_ERROR(EFAULT)); 1961 ptr += sizeof (gpt); 1962 length -= sizeof (gpt); 1963 } 1964 if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe), 1965 length), flag)) 1966 return (SET_ERROR(EFAULT)); 1967 return (0); 1968} 1969 1970/* 1971 * BEGIN entry points to allow external callers access to the volume. 1972 */ 1973/* 1974 * Return the volume parameters needed for access from an external caller. 
1975 * These values are invariant as long as the volume is held open. 1976 */ 1977int 1978zvol_get_volume_params(minor_t minor, uint64_t *blksize, 1979 uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl, 1980 void **rl_hdl, void **bonus_hdl) 1981{ 1982 zvol_state_t *zv; 1983 1984 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL); 1985 if (zv == NULL) 1986 return (SET_ERROR(ENXIO)); 1987 if (zv->zv_flags & ZVOL_DUMPIFIED) 1988 return (SET_ERROR(ENXIO)); 1989 1990 ASSERT(blksize && max_xfer_len && minor_hdl && 1991 objset_hdl && zil_hdl && rl_hdl && bonus_hdl); 1992 1993 *blksize = zv->zv_volblocksize; 1994 *max_xfer_len = (uint64_t)zvol_maxphys; 1995 *minor_hdl = zv; 1996 *objset_hdl = zv->zv_objset; 1997 *zil_hdl = zv->zv_zilog; 1998 *rl_hdl = &zv->zv_znode; 1999 *bonus_hdl = zv->zv_dbuf; 2000 return (0); 2001} 2002 2003/* 2004 * Return the current volume size to an external caller. 2005 * The size can change while the volume is open. 2006 */ 2007uint64_t 2008zvol_get_volume_size(void *minor_hdl) 2009{ 2010 zvol_state_t *zv = minor_hdl; 2011 2012 return (zv->zv_volsize); 2013} 2014 2015/* 2016 * Return the current WCE setting to an external caller. 2017 * The WCE setting can change while the volume is open. 2018 */ 2019int 2020zvol_get_volume_wce(void *minor_hdl) 2021{ 2022 zvol_state_t *zv = minor_hdl; 2023 2024 return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0); 2025} 2026 2027/* 2028 * Entry point for external callers to zvol_log_write 2029 */ 2030void 2031zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid, 2032 boolean_t sync) 2033{ 2034 zvol_state_t *zv = minor_hdl; 2035 2036 zvol_log_write(zv, tx, off, resid, sync); 2037} 2038/* 2039 * END entry points to allow external callers access to the volume. 2040 */ 2041#endif /* illumos */ 2042 2043/* 2044 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE. 2045 */ 2046static void 2047zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len, 2048 boolean_t sync) 2049{ 2050 itx_t *itx; 2051 lr_truncate_t *lr; 2052 zilog_t *zilog = zv->zv_zilog; 2053 2054 if (zil_replaying(zilog, tx)) 2055 return; 2056 2057 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr)); 2058 lr = (lr_truncate_t *)&itx->itx_lr; 2059 lr->lr_foid = ZVOL_OBJ; 2060 lr->lr_offset = off; 2061 lr->lr_length = len; 2062 2063 itx->itx_sync = (sync || zv->zv_sync_cnt != 0); 2064 zil_itx_assign(zilog, itx, tx); 2065} 2066 2067#ifdef illumos 2068/* 2069 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I). 2070 * Also a dirtbag dkio ioctl for unmap/free-block functionality. 
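 *
 * For example, a userland consumer with an open zvol device node would
 * punch a hole with roughly the following (illustrative only):
 *
 *	dkioc_free_t df = { 0 };
 *
 *	df.df_start = offset;
 *	df.df_length = length;
 *	df.df_flags = DF_WAIT_SYNC;	(optional: block until the txg syncs)
 *	error = ioctl(fd, DKIOCFREE, &df);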
2071 */ 2072/*ARGSUSED*/ 2073int 2074zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp) 2075{ 2076 zvol_state_t *zv; 2077 struct dk_callback *dkc; 2078 int error = 0; 2079 rl_t *rl; 2080 2081 mutex_enter(&zfsdev_state_lock); 2082 2083 zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL); 2084 2085 if (zv == NULL) { 2086 mutex_exit(&zfsdev_state_lock); 2087 return (SET_ERROR(ENXIO)); 2088 } 2089 ASSERT(zv->zv_total_opens > 0); 2090 2091 switch (cmd) { 2092 2093 case DKIOCINFO: 2094 { 2095 struct dk_cinfo dki; 2096 2097 bzero(&dki, sizeof (dki)); 2098 (void) strcpy(dki.dki_cname, "zvol"); 2099 (void) strcpy(dki.dki_dname, "zvol"); 2100 dki.dki_ctype = DKC_UNKNOWN; 2101 dki.dki_unit = getminor(dev); 2102 dki.dki_maxtransfer = 2103 1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs); 2104 mutex_exit(&zfsdev_state_lock); 2105 if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag)) 2106 error = SET_ERROR(EFAULT); 2107 return (error); 2108 } 2109 2110 case DKIOCGMEDIAINFO: 2111 { 2112 struct dk_minfo dkm; 2113 2114 bzero(&dkm, sizeof (dkm)); 2115 dkm.dki_lbsize = 1U << zv->zv_min_bs; 2116 dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs; 2117 dkm.dki_media_type = DK_UNKNOWN; 2118 mutex_exit(&zfsdev_state_lock); 2119 if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag)) 2120 error = SET_ERROR(EFAULT); 2121 return (error); 2122 } 2123 2124 case DKIOCGMEDIAINFOEXT: 2125 { 2126 struct dk_minfo_ext dkmext; 2127 2128 bzero(&dkmext, sizeof (dkmext)); 2129 dkmext.dki_lbsize = 1U << zv->zv_min_bs; 2130 dkmext.dki_pbsize = zv->zv_volblocksize; 2131 dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs; 2132 dkmext.dki_media_type = DK_UNKNOWN; 2133 mutex_exit(&zfsdev_state_lock); 2134 if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag)) 2135 error = SET_ERROR(EFAULT); 2136 return (error); 2137 } 2138 2139 case DKIOCGETEFI: 2140 { 2141 uint64_t vs = zv->zv_volsize; 2142 uint8_t bs = zv->zv_min_bs; 2143 2144 mutex_exit(&zfsdev_state_lock); 2145 error = zvol_getefi((void *)arg, flag, vs, bs); 2146 return (error); 2147 } 2148 2149 case DKIOCFLUSHWRITECACHE: 2150 dkc = (struct dk_callback *)arg; 2151 mutex_exit(&zfsdev_state_lock); 2152 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2153 if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) { 2154 (*dkc->dkc_callback)(dkc->dkc_cookie, error); 2155 error = 0; 2156 } 2157 return (error); 2158 2159 case DKIOCGETWCE: 2160 { 2161 int wce = (zv->zv_flags & ZVOL_WCE) ? 
1 : 0; 2162 if (ddi_copyout(&wce, (void *)arg, sizeof (int), 2163 flag)) 2164 error = SET_ERROR(EFAULT); 2165 break; 2166 } 2167 case DKIOCSETWCE: 2168 { 2169 int wce; 2170 if (ddi_copyin((void *)arg, &wce, sizeof (int), 2171 flag)) { 2172 error = SET_ERROR(EFAULT); 2173 break; 2174 } 2175 if (wce) { 2176 zv->zv_flags |= ZVOL_WCE; 2177 mutex_exit(&zfsdev_state_lock); 2178 } else { 2179 zv->zv_flags &= ~ZVOL_WCE; 2180 mutex_exit(&zfsdev_state_lock); 2181 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2182 } 2183 return (0); 2184 } 2185 2186 case DKIOCGGEOM: 2187 case DKIOCGVTOC: 2188 /* 2189 * Commands that use these (like prtvtoc) expect ENOTSUP, 2190 * since we're emulating an EFI label. 2191 */ 2192 error = SET_ERROR(ENOTSUP); 2193 break; 2194 2195 case DKIOCDUMPINIT: 2196 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize, 2197 RL_WRITER); 2198 error = zvol_dumpify(zv); 2199 zfs_range_unlock(rl); 2200 break; 2201 2202 case DKIOCDUMPFINI: 2203 if (!(zv->zv_flags & ZVOL_DUMPIFIED)) 2204 break; 2205 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize, 2206 RL_WRITER); 2207 error = zvol_dump_fini(zv); 2208 zfs_range_unlock(rl); 2209 break; 2210 2211 case DKIOCFREE: 2212 { 2213 dkioc_free_t df; 2214 dmu_tx_t *tx; 2215 2216 if (!zvol_unmap_enabled) 2217 break; 2218 2219 if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) { 2220 error = SET_ERROR(EFAULT); 2221 break; 2222 } 2223 2224 /* 2225 * Apply Postel's Law to length-checking: if the caller 2226 * overshoots the end of the volume, just free up to the end, 2227 * if there is anything left to free at all. 2228 */ 2229 if (df.df_start >= zv->zv_volsize) 2230 break; /* No need to do anything... */ 2231 2232 mutex_exit(&zfsdev_state_lock); 2233 2234 rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length, 2235 RL_WRITER); 2236 tx = dmu_tx_create(zv->zv_objset); 2237 dmu_tx_mark_netfree(tx); 2238 error = dmu_tx_assign(tx, TXG_WAIT); 2239 if (error != 0) { 2240 dmu_tx_abort(tx); 2241 } else { 2242 zvol_log_truncate(zv, tx, df.df_start, 2243 df.df_length, B_TRUE); 2244 dmu_tx_commit(tx); 2245 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 2246 df.df_start, df.df_length); 2247 } 2248 2249 zfs_range_unlock(rl); 2250 2251 if (error == 0) { 2252 /* 2253 * If the write-cache is disabled or the 'sync' property 2254 * is set to 'always', then treat this as a synchronous 2255 * operation (i.e. commit to the zil). 2256 */ 2257 if (!(zv->zv_flags & ZVOL_WCE) || 2258 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) 2259 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2260 2261 /* 2262 * If the caller asked for the free to reach stable storage 2263 * before we return (DF_WAIT_SYNC), wait for the transaction 2264 * group that carries it to sync.
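 *
 * A minimal sketch of a caller asking for such a stable free (the file
 * descriptor and the extent below are hypothetical; dkioc_free_t and
 * DF_WAIT_SYNC come from the dkio headers):
 *
 *	dkioc_free_t df;
 *
 *	bzero(&df, sizeof (df));
 *	df.df_flags = DF_WAIT_SYNC;
 *	df.df_start = 0;
 *	df.df_length = 1 << 20;
 *	(void) ioctl(fd, DKIOCFREE, &df);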
2265 */ 2266 if (df.df_flags & DF_WAIT_SYNC) { 2267 txg_wait_synced( 2268 dmu_objset_pool(zv->zv_objset), 0); 2269 } 2270 } 2271 return (error); 2272 } 2273 2274 default: 2275 error = SET_ERROR(ENOTTY); 2276 break; 2277 2278 } 2279 mutex_exit(&zfsdev_state_lock); 2280 return (error); 2281} 2282#endif /* illumos */ 2283 2284int 2285zvol_busy(void) 2286{ 2287 return (zvol_minors != 0); 2288} 2289 2290void 2291zvol_init(void) 2292{ 2293 VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t), 2294 1) == 0); 2295#ifdef illumos 2296 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL); 2297#else 2298 ZFS_LOG(1, "ZVOL Initialized."); 2299#endif 2300} 2301 2302void 2303zvol_fini(void) 2304{ 2305#ifdef illumos 2306 mutex_destroy(&zfsdev_state_lock); 2307#endif 2308 ddi_soft_state_fini(&zfsdev_state); 2309 ZFS_LOG(1, "ZVOL Deinitialized."); 2310} 2311 2312#ifdef illumos 2313/*ARGSUSED*/ 2314static int 2315zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx) 2316{ 2317 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 2318 2319 if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP)) 2320 return (1); 2321 return (0); 2322} 2323 2324/*ARGSUSED*/ 2325static void 2326zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx) 2327{ 2328 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 2329 2330 spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx); 2331} 2332 2333static int 2334zvol_dump_init(zvol_state_t *zv, boolean_t resize) 2335{ 2336 dmu_tx_t *tx; 2337 int error; 2338 objset_t *os = zv->zv_objset; 2339 spa_t *spa = dmu_objset_spa(os); 2340 vdev_t *vd = spa->spa_root_vdev; 2341 nvlist_t *nv = NULL; 2342 uint64_t version = spa_version(spa); 2343 uint64_t checksum, compress, refresrv, vbs, dedup; 2344 2345 ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 2346 ASSERT(vd->vdev_ops == &vdev_root_ops); 2347 2348 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0, 2349 DMU_OBJECT_END); 2350 if (error != 0) 2351 return (error); 2352 /* wait for dmu_free_long_range to actually free the blocks */ 2353 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0); 2354 2355 /* 2356 * If the pool on which the dump device is being initialized has more 2357 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is 2358 * enabled. If so, bump that feature's counter to indicate that the 2359 * feature is active. We also check the vdev type to handle the 2360 * following case: 2361 * # zpool create test raidz disk1 disk2 disk3 2362 * Now have spa_root_vdev->vdev_children == 1 (the raidz vdev), 2363 * the raidz vdev itself has 3 children. 
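 *
 * For comparison (pool names are illustrative):
 * # zpool create test disk1
 * has a single non-raidz top-level vdev, so no feature check is needed,
 * while
 * # zpool create test disk1 disk2
 * has spa_root_vdev->vdev_children == 2 and so requires the feature.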
2364 */ 2365 if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) { 2366 if (!spa_feature_is_enabled(spa, 2367 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP)) 2368 return (SET_ERROR(ENOTSUP)); 2369 (void) dsl_sync_task(spa_name(spa), 2370 zfs_mvdev_dump_feature_check, 2371 zfs_mvdev_dump_activate_feature_sync, NULL, 2372 2, ZFS_SPACE_CHECK_RESERVED); 2373 } 2374 2375 if (!resize) { 2376 error = dsl_prop_get_integer(zv->zv_name, 2377 zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL); 2378 if (error == 0) { 2379 error = dsl_prop_get_integer(zv->zv_name, 2380 zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, 2381 NULL); 2382 } 2383 if (error == 0) { 2384 error = dsl_prop_get_integer(zv->zv_name, 2385 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 2386 &refresrv, NULL); 2387 } 2388 if (error == 0) { 2389 error = dsl_prop_get_integer(zv->zv_name, 2390 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, 2391 NULL); 2392 } 2393 if (version >= SPA_VERSION_DEDUP && error == 0) { 2394 error = dsl_prop_get_integer(zv->zv_name, 2395 zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL); 2396 } 2397 } 2398 if (error != 0) 2399 return (error); 2400 2401 tx = dmu_tx_create(os); 2402 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 2403 dmu_tx_hold_bonus(tx, ZVOL_OBJ); 2404 error = dmu_tx_assign(tx, TXG_WAIT); 2405 if (error != 0) { 2406 dmu_tx_abort(tx); 2407 return (error); 2408 } 2409 2410 /* 2411 * If we are resizing the dump device, then we only need to 2412 * update the refreservation to match the new volume size. 2413 * Otherwise, we save off the zvol's original property values 2414 * so that we can restore them if the zvol is ever undumpified. 2415 */ 2416 if (resize) { 2417 error = zap_update(os, ZVOL_ZAP_OBJ, 2418 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, 2419 &zv->zv_volsize, tx); 2420 } else { 2421 error = zap_update(os, ZVOL_ZAP_OBJ, 2422 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, 2423 &compress, tx); 2424 if (error == 0) { 2425 error = zap_update(os, ZVOL_ZAP_OBJ, 2426 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, 2427 &checksum, tx); 2428 } 2429 if (error == 0) { 2430 error = zap_update(os, ZVOL_ZAP_OBJ, 2431 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, 2432 &refresrv, tx); 2433 } 2434 if (error == 0) { 2435 error = zap_update(os, ZVOL_ZAP_OBJ, 2436 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, 2437 &vbs, tx); 2438 } 2439 if (error == 0) { 2440 error = dmu_object_set_blocksize( 2441 os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx); 2442 } 2443 if (version >= SPA_VERSION_DEDUP && error == 0) { 2444 error = zap_update(os, ZVOL_ZAP_OBJ, 2445 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, 2446 &dedup, tx); 2447 } 2448 if (error == 0) 2449 zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE; 2450 } 2451 dmu_tx_commit(tx); 2452 2453 /* 2454 * We only need to update the zvol's properties if we are initializing 2455 * the dump area for the first time. 2456 */ 2457 if (error == 0 && !resize) { 2458 /* 2459 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum 2460 * function. Otherwise, use the old default -- OFF. 2461 */ 2462 checksum = spa_feature_is_active(spa, 2463 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ?
ZIO_CHECKSUM_NOPARITY : 2464 ZIO_CHECKSUM_OFF; 2465 2466 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2467 VERIFY(nvlist_add_uint64(nv, 2468 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0); 2469 VERIFY(nvlist_add_uint64(nv, 2470 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 2471 ZIO_COMPRESS_OFF) == 0); 2472 VERIFY(nvlist_add_uint64(nv, 2473 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 2474 checksum) == 0); 2475 if (version >= SPA_VERSION_DEDUP) { 2476 VERIFY(nvlist_add_uint64(nv, 2477 zfs_prop_to_name(ZFS_PROP_DEDUP), 2478 ZIO_CHECKSUM_OFF) == 0); 2479 } 2480 2481 error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL, 2482 nv, NULL); 2483 nvlist_free(nv); 2484 } 2485 2486 /* Allocate the space for the dump */ 2487 if (error == 0) 2488 error = zvol_prealloc(zv); 2489 return (error); 2490} 2491 2492static int 2493zvol_dumpify(zvol_state_t *zv) 2494{ 2495 int error = 0; 2496 uint64_t dumpsize = 0; 2497 dmu_tx_t *tx; 2498 objset_t *os = zv->zv_objset; 2499 2500 if (zv->zv_flags & ZVOL_RDONLY) 2501 return (SET_ERROR(EROFS)); 2502 2503 if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 2504 8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) { 2505 boolean_t resize = (dumpsize > 0); 2506 2507 if ((error = zvol_dump_init(zv, resize)) != 0) { 2508 (void) zvol_dump_fini(zv); 2509 return (error); 2510 } 2511 } 2512 2513 /* 2514 * Build up our lba mapping. 2515 */ 2516 error = zvol_get_lbas(zv); 2517 if (error) { 2518 (void) zvol_dump_fini(zv); 2519 return (error); 2520 } 2521 2522 tx = dmu_tx_create(os); 2523 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 2524 error = dmu_tx_assign(tx, TXG_WAIT); 2525 if (error) { 2526 dmu_tx_abort(tx); 2527 (void) zvol_dump_fini(zv); 2528 return (error); 2529 } 2530 2531 zv->zv_flags |= ZVOL_DUMPIFIED; 2532 error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1, 2533 &zv->zv_volsize, tx); 2534 dmu_tx_commit(tx); 2535 2536 if (error) { 2537 (void) zvol_dump_fini(zv); 2538 return (error); 2539 } 2540 2541 txg_wait_synced(dmu_objset_pool(os), 0); 2542 return (0); 2543} 2544 2545static int 2546zvol_dump_fini(zvol_state_t *zv) 2547{ 2548 dmu_tx_t *tx; 2549 objset_t *os = zv->zv_objset; 2550 nvlist_t *nv; 2551 int error = 0; 2552 uint64_t checksum, compress, refresrv, vbs, dedup; 2553 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset)); 2554 2555 /* 2556 * Attempt to restore the zvol back to its pre-dumpified state. 2557 * This is a best-effort attempt as it's possible that not all 2558 * of these properties were initialized during the dumpify process 2559 * (i.e. error during zvol_dump_init). 
2560 */ 2561 2562 tx = dmu_tx_create(os); 2563 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 2564 error = dmu_tx_assign(tx, TXG_WAIT); 2565 if (error) { 2566 dmu_tx_abort(tx); 2567 return (error); 2568 } 2569 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx); 2570 dmu_tx_commit(tx); 2571 2572 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2573 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum); 2574 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2575 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress); 2576 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2577 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv); 2578 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2579 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs); 2580 2581 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2582 (void) nvlist_add_uint64(nv, 2583 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum); 2584 (void) nvlist_add_uint64(nv, 2585 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress); 2586 (void) nvlist_add_uint64(nv, 2587 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv); 2588 if (version >= SPA_VERSION_DEDUP && 2589 zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 2590 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) { 2591 (void) nvlist_add_uint64(nv, 2592 zfs_prop_to_name(ZFS_PROP_DEDUP), dedup); 2593 } 2594 (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL, 2595 nv, NULL); 2596 nvlist_free(nv); 2597 2598 zvol_free_extents(zv); 2599 zv->zv_flags &= ~ZVOL_DUMPIFIED; 2600 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END); 2601 /* wait for dmu_free_long_range to actually free the blocks */ 2602 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0); 2603 tx = dmu_tx_create(os); 2604 dmu_tx_hold_bonus(tx, ZVOL_OBJ); 2605 error = dmu_tx_assign(tx, TXG_WAIT); 2606 if (error) { 2607 dmu_tx_abort(tx); 2608 return (error); 2609 } 2610 if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0) 2611 zv->zv_volblocksize = vbs; 2612 dmu_tx_commit(tx); 2613 2614 return (0); 2615} 2616#else /* !illumos */ 2617 2618static void 2619zvol_geom_run(zvol_state_t *zv) 2620{ 2621 struct g_provider *pp; 2622 2623 pp = zv->zv_provider; 2624 g_error_provider(pp, 0); 2625 2626 kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0, 2627 "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER)); 2628} 2629 2630static void 2631zvol_geom_destroy(zvol_state_t *zv) 2632{ 2633 struct g_provider *pp; 2634 2635 g_topology_assert(); 2636 2637 mtx_lock(&zv->zv_queue_mtx); 2638 zv->zv_state = 1; 2639 wakeup_one(&zv->zv_queue); 2640 while (zv->zv_state != 2) 2641 msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0); 2642 mtx_destroy(&zv->zv_queue_mtx); 2643 2644 pp = zv->zv_provider; 2645 zv->zv_provider = NULL; 2646 pp->private = NULL; 2647 g_wither_geom(pp->geom, ENXIO); 2648} 2649 2650static int 2651zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace) 2652{ 2653 int count, error, flags; 2654 2655 g_topology_assert(); 2656 2657 /* 2658 * To make it easier we expect either open or close, but not both 2659 * at the same time. 
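 *
 * The acr, acw and ace arguments are access-count deltas, as for any
 * GEOM access method.  A sketch of the consumer side (cp below is a
 * hypothetical struct g_consumer attached to this provider):
 *
 *	error = g_access(cp, 1, 1, 0);		(open for read and write)
 *	...
 *	(void) g_access(cp, -1, -1, 0);		(drop both references)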
2660 */ 2661 KASSERT((acr >= 0 && acw >= 0 && ace >= 0) || 2662 (acr <= 0 && acw <= 0 && ace <= 0), 2663 ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).", 2664 pp->name, acr, acw, ace)); 2665 2666 if (pp->private == NULL) { 2667 if (acr <= 0 && acw <= 0 && ace <= 0) 2668 return (0); 2669 return (pp->error); 2670 } 2671 2672 /* 2673 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0, 2674 * because GEOM already handles that and handles it a bit differently. 2675 * GEOM allows for multiple read/exclusive consumers and ZFS allows 2676 * only one exclusive consumer, no matter if it is reader or writer. 2677 * I like better the way GEOM works so I'll leave it for GEOM to 2678 * decide what to do. 2679 */ 2680 2681 count = acr + acw + ace; 2682 if (count == 0) 2683 return (0); 2684 2685 flags = 0; 2686 if (acr != 0 || ace != 0) 2687 flags |= FREAD; 2688 if (acw != 0) 2689 flags |= FWRITE; 2690 2691 g_topology_unlock(); 2692 if (count > 0) 2693 error = zvol_open(pp, flags, count); 2694 else 2695 error = zvol_close(pp, flags, -count); 2696 g_topology_lock(); 2697 return (error); 2698} 2699 2700static void 2701zvol_geom_start(struct bio *bp) 2702{ 2703 zvol_state_t *zv; 2704 boolean_t first; 2705 2706 zv = bp->bio_to->private; 2707 ASSERT(zv != NULL); 2708 switch (bp->bio_cmd) { 2709 case BIO_FLUSH: 2710 if (!THREAD_CAN_SLEEP()) 2711 goto enqueue; 2712 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2713 g_io_deliver(bp, 0); 2714 break; 2715 case BIO_READ: 2716 case BIO_WRITE: 2717 case BIO_DELETE: 2718 if (!THREAD_CAN_SLEEP()) 2719 goto enqueue; 2720 zvol_strategy(bp); 2721 break; 2722 case BIO_GETATTR: { 2723 spa_t *spa = dmu_objset_spa(zv->zv_objset); 2724 uint64_t refd, avail, usedobjs, availobjs, val; 2725 2726 if (g_handleattr_int(bp, "GEOM::candelete", 1)) 2727 return; 2728 if (strcmp(bp->bio_attribute, "blocksavail") == 0) { 2729 dmu_objset_space(zv->zv_objset, &refd, &avail, 2730 &usedobjs, &availobjs); 2731 if (g_handleattr_off_t(bp, "blocksavail", 2732 avail / DEV_BSIZE)) 2733 return; 2734 } else if (strcmp(bp->bio_attribute, "blocksused") == 0) { 2735 dmu_objset_space(zv->zv_objset, &refd, &avail, 2736 &usedobjs, &availobjs); 2737 if (g_handleattr_off_t(bp, "blocksused", 2738 refd / DEV_BSIZE)) 2739 return; 2740 } else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) { 2741 avail = metaslab_class_get_space(spa_normal_class(spa)); 2742 avail -= metaslab_class_get_alloc(spa_normal_class(spa)); 2743 if (g_handleattr_off_t(bp, "poolblocksavail", 2744 avail / DEV_BSIZE)) 2745 return; 2746 } else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) { 2747 refd = metaslab_class_get_alloc(spa_normal_class(spa)); 2748 if (g_handleattr_off_t(bp, "poolblocksused", 2749 refd / DEV_BSIZE)) 2750 return; 2751 } 2752 /* FALLTHROUGH */ 2753 } 2754 default: 2755 g_io_deliver(bp, EOPNOTSUPP); 2756 break; 2757 } 2758 return; 2759 2760enqueue: 2761 mtx_lock(&zv->zv_queue_mtx); 2762 first = (bioq_first(&zv->zv_queue) == NULL); 2763 bioq_insert_tail(&zv->zv_queue, bp); 2764 mtx_unlock(&zv->zv_queue_mtx); 2765 if (first) 2766 wakeup_one(&zv->zv_queue); 2767} 2768 2769static void 2770zvol_geom_worker(void *arg) 2771{ 2772 zvol_state_t *zv; 2773 struct bio *bp; 2774 2775 thread_lock(curthread); 2776 sched_prio(curthread, PRIBIO); 2777 thread_unlock(curthread); 2778 2779 zv = arg; 2780 for (;;) { 2781 mtx_lock(&zv->zv_queue_mtx); 2782 bp = bioq_takefirst(&zv->zv_queue); 2783 if (bp == NULL) { 2784 if (zv->zv_state == 1) { 2785 zv->zv_state = 2; 2786 wakeup(&zv->zv_state); 2787 
mtx_unlock(&zv->zv_queue_mtx); 2788 kthread_exit(); 2789 } 2790 msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP, 2791 "zvol:io", 0); 2792 continue; 2793 } 2794 mtx_unlock(&zv->zv_queue_mtx); 2795 switch (bp->bio_cmd) { 2796 case BIO_FLUSH: 2797 zil_commit(zv->zv_zilog, ZVOL_OBJ); 2798 g_io_deliver(bp, 0); 2799 break; 2800 case BIO_READ: 2801 case BIO_WRITE: 2802 case BIO_DELETE: 2803 zvol_strategy(bp); 2804 break; 2805 default: 2806 g_io_deliver(bp, EOPNOTSUPP); 2807 break; 2808 } 2809 } 2810} 2811 2812extern boolean_t dataset_name_hidden(const char *name); 2813 2814static int 2815zvol_create_snapshots(objset_t *os, const char *name) 2816{ 2817 uint64_t cookie, obj; 2818 char *sname; 2819 int error, len; 2820 2821 cookie = obj = 0; 2822 sname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2823 2824#if 0 2825 (void) dmu_objset_find(name, dmu_objset_prefetch, NULL, 2826 DS_FIND_SNAPSHOTS); 2827#endif 2828 2829 for (;;) { 2830 len = snprintf(sname, MAXPATHLEN, "%s@", name); 2831 if (len >= MAXPATHLEN) { 2832 dmu_objset_rele(os, FTAG); 2833 error = ENAMETOOLONG; 2834 break; 2835 } 2836 2837 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 2838 error = dmu_snapshot_list_next(os, MAXPATHLEN - len, 2839 sname + len, &obj, &cookie, NULL); 2840 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 2841 if (error != 0) { 2842 if (error == ENOENT) 2843 error = 0; 2844 break; 2845 } 2846 2847 error = zvol_create_minor(sname); 2848 if (error != 0 && error != EEXIST) { 2849 printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n", 2850 sname, error); 2851 break; 2852 } 2853 } 2854 2855 kmem_free(sname, MAXPATHLEN); 2856 return (error); 2857} 2858 2859int 2860zvol_create_minors(const char *name) 2861{ 2862 uint64_t cookie; 2863 objset_t *os; 2864 char *osname, *p; 2865 int error, len; 2866 2867 if (dataset_name_hidden(name)) 2868 return (0); 2869 2870 if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) { 2871 printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n", 2872 name, error); 2873 return (error); 2874 } 2875 if (dmu_objset_type(os) == DMU_OST_ZVOL) { 2876 dsl_dataset_long_hold(os->os_dsl_dataset, FTAG); 2877 dsl_pool_rele(dmu_objset_pool(os), FTAG); 2878 error = zvol_create_minor(name); 2879 if (error == 0 || error == EEXIST) { 2880 error = zvol_create_snapshots(os, name); 2881 } else { 2882 printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n", 2883 name, error); 2884 } 2885 dsl_dataset_long_rele(os->os_dsl_dataset, FTAG); 2886 dsl_dataset_rele(os->os_dsl_dataset, FTAG); 2887 return (error); 2888 } 2889 if (dmu_objset_type(os) != DMU_OST_ZFS) { 2890 dmu_objset_rele(os, FTAG); 2891 return (0); 2892 } 2893 2894 osname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2895 if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) { 2896 dmu_objset_rele(os, FTAG); 2897 kmem_free(osname, MAXPATHLEN); 2898 return (ENOENT); 2899 } 2900 p = osname + strlen(osname); 2901 len = MAXPATHLEN - (p - osname); 2902 2903#if 0 2904 /* Prefetch the datasets. 
*/ 2905 cookie = 0; 2906 while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) { 2907 if (!dataset_name_hidden(osname)) 2908 (void) dmu_objset_prefetch(osname, NULL); 2909 } 2910#endif 2911 2912 cookie = 0; 2913 while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL, 2914 &cookie) == 0) { 2915 dmu_objset_rele(os, FTAG); 2916 (void)zvol_create_minors(osname); 2917 if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) { 2918 printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n", 2919 name, error); 2920 return (error); 2921 } 2922 } 2923 2924 dmu_objset_rele(os, FTAG); 2925 kmem_free(osname, MAXPATHLEN); 2926 return (0); 2927} 2928 2929static void 2930zvol_rename_minor(zvol_state_t *zv, const char *newname) 2931{ 2932 struct g_geom *gp; 2933 struct g_provider *pp; 2934 struct cdev *dev; 2935 2936 ASSERT(MUTEX_HELD(&zfsdev_state_lock)); 2937 2938 if (zv->zv_volmode == ZFS_VOLMODE_GEOM) { 2939 g_topology_lock(); 2940 pp = zv->zv_provider; 2941 ASSERT(pp != NULL); 2942 gp = pp->geom; 2943 ASSERT(gp != NULL); 2944 2945 zv->zv_provider = NULL; 2946 g_wither_provider(pp, ENXIO); 2947 2948 pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname); 2949 pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND; 2950 pp->sectorsize = DEV_BSIZE; 2951 pp->mediasize = zv->zv_volsize; 2952 pp->private = zv; 2953 zv->zv_provider = pp; 2954 g_error_provider(pp, 0); 2955 g_topology_unlock(); 2956 } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) { 2957 struct make_dev_args args; 2958 2959 if ((dev = zv->zv_dev) != NULL) { 2960 zv->zv_dev = NULL; 2961 destroy_dev(dev); 2962 if (zv->zv_total_opens > 0) { 2963 zv->zv_flags &= ~ZVOL_EXCL; 2964 zv->zv_total_opens = 0; 2965 zvol_last_close(zv); 2966 } 2967 } 2968 2969 make_dev_args_init(&args); 2970 args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK; 2971 args.mda_devsw = &zvol_cdevsw; 2972 args.mda_cr = NULL; 2973 args.mda_uid = UID_ROOT; 2974 args.mda_gid = GID_OPERATOR; 2975 args.mda_mode = 0640; 2976 args.mda_si_drv2 = zv; 2977 if (make_dev_s(&args, &zv->zv_dev, 2978 "%s/%s", ZVOL_DRIVER, newname) == 0) 2979 zv->zv_dev->si_iosize_max = MAXPHYS; 2980 } 2981 strlcpy(zv->zv_name, newname, sizeof(zv->zv_name)); 2982} 2983 2984void 2985zvol_rename_minors(const char *oldname, const char *newname) 2986{ 2987 char name[MAXPATHLEN]; 2988 struct g_provider *pp; 2989 struct g_geom *gp; 2990 size_t oldnamelen, newnamelen; 2991 zvol_state_t *zv; 2992 char *namebuf; 2993 boolean_t locked = B_FALSE; 2994 2995 oldnamelen = strlen(oldname); 2996 newnamelen = strlen(newname); 2997 2998 DROP_GIANT(); 2999 /* See comment in zvol_open(). 
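 *
 * Every minor whose name is oldname itself, or that sits below it as
 * "oldname/..." or "oldname@...", has that prefix swapped for newname.
 * For example (dataset names are hypothetical), renaming tank/vol to
 * tank/vol2 also moves
 *
 *	tank/vol@snap  -> tank/vol2@snap
 *
 * and renaming a filesystem tank/ds to tank/ds2 moves a child volume
 *
 *	tank/ds/vol    -> tank/ds2/vol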
*/ 3000 if (!MUTEX_HELD(&zfsdev_state_lock)) { 3001 mutex_enter(&zfsdev_state_lock); 3002 locked = B_TRUE; 3003 } 3004 3005 LIST_FOREACH(zv, &all_zvols, zv_links) { 3006 if (strcmp(zv->zv_name, oldname) == 0) { 3007 zvol_rename_minor(zv, newname); 3008 } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 && 3009 (zv->zv_name[oldnamelen] == '/' || 3010 zv->zv_name[oldnamelen] == '@')) { 3011 snprintf(name, sizeof(name), "%s%c%s", newname, 3012 zv->zv_name[oldnamelen], 3013 zv->zv_name + oldnamelen + 1); 3014 zvol_rename_minor(zv, name); 3015 } 3016 } 3017 3018 if (locked) 3019 mutex_exit(&zfsdev_state_lock); 3020 PICKUP_GIANT(); 3021} 3022 3023static int 3024zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td) 3025{ 3026 zvol_state_t *zv = dev->si_drv2; 3027 int err = 0; 3028 3029 mutex_enter(&zfsdev_state_lock); 3030 if (zv->zv_total_opens == 0) 3031 err = zvol_first_open(zv); 3032 if (err) { 3033 mutex_exit(&zfsdev_state_lock); 3034 return (err); 3035 } 3036 if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) { 3037 err = SET_ERROR(EROFS); 3038 goto out; 3039 } 3040 if (zv->zv_flags & ZVOL_EXCL) { 3041 err = SET_ERROR(EBUSY); 3042 goto out; 3043 } 3044#ifdef FEXCL 3045 if (flags & FEXCL) { 3046 if (zv->zv_total_opens != 0) { 3047 err = SET_ERROR(EBUSY); 3048 goto out; 3049 } 3050 zv->zv_flags |= ZVOL_EXCL; 3051 } 3052#endif 3053 3054 zv->zv_total_opens++; 3055 if (flags & (FSYNC | FDSYNC)) { 3056 zv->zv_sync_cnt++; 3057 if (zv->zv_sync_cnt == 1) 3058 zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ); 3059 } 3060 mutex_exit(&zfsdev_state_lock); 3061 return (err); 3062out: 3063 if (zv->zv_total_opens == 0) 3064 zvol_last_close(zv); 3065 mutex_exit(&zfsdev_state_lock); 3066 return (err); 3067} 3068 3069static int 3070zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td) 3071{ 3072 zvol_state_t *zv = dev->si_drv2; 3073 3074 mutex_enter(&zfsdev_state_lock); 3075 if (zv->zv_flags & ZVOL_EXCL) { 3076 ASSERT(zv->zv_total_opens == 1); 3077 zv->zv_flags &= ~ZVOL_EXCL; 3078 } 3079 3080 /* 3081 * If the open count is zero, this is a spurious close. 3082 * That indicates a bug in the kernel / DDI framework. 3083 */ 3084 ASSERT(zv->zv_total_opens != 0); 3085 3086 /* 3087 * You may get multiple opens, but only one close. 
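 *
 * (Whether every close(2) actually reaches this routine depends on the
 * cdevsw flags: without D_TRACKCLOSE, devfs only delivers the final
 * close for the device.)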
3088 */ 3089 zv->zv_total_opens--; 3090 if (flags & (FSYNC | FDSYNC)) 3091 zv->zv_sync_cnt--; 3092 3093 if (zv->zv_total_opens == 0) 3094 zvol_last_close(zv); 3095 3096 mutex_exit(&zfsdev_state_lock); 3097 return (0); 3098} 3099 3100static int 3101zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) 3102{ 3103 zvol_state_t *zv; 3104 rl_t *rl; 3105 off_t offset, length; 3106 int i, error; 3107 boolean_t sync; 3108 3109 zv = dev->si_drv2; 3110 3111 error = 0; 3112 KASSERT(zv->zv_total_opens > 0, 3113 ("Device with zero access count in zvol_d_ioctl")); 3114 3115 i = IOCPARM_LEN(cmd); 3116 switch (cmd) { 3117 case DIOCGSECTORSIZE: 3118 *(u_int *)data = DEV_BSIZE; 3119 break; 3120 case DIOCGMEDIASIZE: 3121 *(off_t *)data = zv->zv_volsize; 3122 break; 3123 case DIOCGFLUSH: 3124 zil_commit(zv->zv_zilog, ZVOL_OBJ); 3125 break; 3126 case DIOCGDELETE: 3127 if (!zvol_unmap_enabled) 3128 break; 3129 3130 offset = ((off_t *)data)[0]; 3131 length = ((off_t *)data)[1]; 3132 if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 || 3133 offset < 0 || offset >= zv->zv_volsize || 3134 length <= 0) { 3135 printf("%s: offset=%jd length=%jd\n", __func__, offset, 3136 length); 3137 error = EINVAL; 3138 break; 3139 } 3140 3141 rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER); 3142 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); 3143 error = dmu_tx_assign(tx, TXG_WAIT); 3144 if (error != 0) { 3145 sync = FALSE; 3146 dmu_tx_abort(tx); 3147 } else { 3148 sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS); 3149 zvol_log_truncate(zv, tx, offset, length, sync); 3150 dmu_tx_commit(tx); 3151 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 3152 offset, length); 3153 } 3154 zfs_range_unlock(rl); 3155 if (sync) 3156 zil_commit(zv->zv_zilog, ZVOL_OBJ); 3157 break; 3158 case DIOCGSTRIPESIZE: 3159 *(off_t *)data = zv->zv_volblocksize; 3160 break; 3161 case DIOCGSTRIPEOFFSET: 3162 *(off_t *)data = 0; 3163 break; 3164 case DIOCGATTR: { 3165 spa_t *spa = dmu_objset_spa(zv->zv_objset); 3166 struct diocgattr_arg *arg = (struct diocgattr_arg *)data; 3167 uint64_t refd, avail, usedobjs, availobjs; 3168 3169 if (strcmp(arg->name, "GEOM::candelete") == 0) 3170 arg->value.i = 1; 3171 else if (strcmp(arg->name, "blocksavail") == 0) { 3172 dmu_objset_space(zv->zv_objset, &refd, &avail, 3173 &usedobjs, &availobjs); 3174 arg->value.off = avail / DEV_BSIZE; 3175 } else if (strcmp(arg->name, "blocksused") == 0) { 3176 dmu_objset_space(zv->zv_objset, &refd, &avail, 3177 &usedobjs, &availobjs); 3178 arg->value.off = refd / DEV_BSIZE; 3179 } else if (strcmp(arg->name, "poolblocksavail") == 0) { 3180 avail = metaslab_class_get_space(spa_normal_class(spa)); 3181 avail -= metaslab_class_get_alloc(spa_normal_class(spa)); 3182 arg->value.off = avail / DEV_BSIZE; 3183 } else if (strcmp(arg->name, "poolblocksused") == 0) { 3184 refd = metaslab_class_get_alloc(spa_normal_class(spa)); 3185 arg->value.off = refd / DEV_BSIZE; 3186 } else 3187 error = ENOIOCTL; 3188 break; 3189 } 3190 case FIOSEEKHOLE: 3191 case FIOSEEKDATA: { 3192 off_t *off = (off_t *)data; 3193 uint64_t noff; 3194 boolean_t hole; 3195 3196 hole = (cmd == FIOSEEKHOLE); 3197 noff = *off; 3198 error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff); 3199 *off = noff; 3200 break; 3201 } 3202 default: 3203 error = ENOIOCTL; 3204 } 3205 3206 return (error); 3207} 3208#endif /* illumos */ 3209
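
/*
 * A minimal sketch of a FreeBSD userland caller exercising the cdev ioctls
 * handled by zvol_d_ioctl() above (illustrative only; the device path is
 * hypothetical, and DIOCGDELETE expects a DEV_BSIZE-aligned offset/length
 * pair within the volume):
 *
 *	#include <sys/types.h>
 *	#include <sys/disk.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		off_t mediasize, args[2];
 *		int fd = open("/dev/zvol/tank/vol0", O_RDWR);
 *
 *		if (fd == -1)
 *			return (1);
 *		if (ioctl(fd, DIOCGMEDIASIZE, &mediasize) == 0)
 *			(void) printf("size=%jd\n", (intmax_t)mediasize);
 *		args[0] = 0;
 *		args[1] = 1 << 20;
 *		(void) ioctl(fd, DIOCGDELETE, args);
 *		(void) close(fd);
 *		return (0);
 *	}
 */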