zvol.c revision 297548
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
25 * All rights reserved.
26 *
27 * Portions Copyright 2010 Robert Milkowski
28 *
29 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
30 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
31 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
32 * Copyright (c) 2014 Integros [integros.com]
33 */
34
35/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
36
37/*
38 * ZFS volume emulation driver.
39 *
40 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
41 * Volumes are accessed through the symbolic links named:
42 *
43 * /dev/zvol/dsk/<pool_name>/<dataset_name>
44 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
45 *
46 * These links are created by the /dev filesystem (sdev_zvolops.c).
47 * Volumes are persistent through reboot.  No user command needs to be
48 * run before opening and using a device.
49 *
50 * FreeBSD notes.
51 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
52 * in the system.
53 */
54
55#include <sys/types.h>
56#include <sys/param.h>
57#include <sys/kernel.h>
58#include <sys/errno.h>
59#include <sys/uio.h>
60#include <sys/bio.h>
61#include <sys/buf.h>
62#include <sys/kmem.h>
63#include <sys/conf.h>
64#include <sys/cmn_err.h>
65#include <sys/stat.h>
66#include <sys/zap.h>
67#include <sys/spa.h>
68#include <sys/spa_impl.h>
69#include <sys/zio.h>
70#include <sys/disk.h>
71#include <sys/dmu_traverse.h>
72#include <sys/dnode.h>
73#include <sys/dsl_dataset.h>
74#include <sys/dsl_prop.h>
75#include <sys/dkio.h>
76#include <sys/byteorder.h>
77#include <sys/sunddi.h>
78#include <sys/dirent.h>
79#include <sys/policy.h>
80#include <sys/queue.h>
81#include <sys/fs/zfs.h>
82#include <sys/zfs_ioctl.h>
83#include <sys/zil.h>
84#include <sys/refcount.h>
85#include <sys/zfs_znode.h>
86#include <sys/zfs_rlock.h>
87#include <sys/vdev_impl.h>
88#include <sys/vdev_raidz.h>
89#include <sys/zvol.h>
90#include <sys/zil_impl.h>
91#include <sys/dbuf.h>
92#include <sys/dmu_tx.h>
93#include <sys/zfeature.h>
94#include <sys/zio_checksum.h>
95#include <sys/filio.h>
96
97#include <geom/geom.h>
98
99#include "zfs_namecheck.h"
100
101#ifndef illumos
102struct g_class zfs_zvol_class = {
103	.name = "ZFS::ZVOL",
104	.version = G_VERSION,
105};
106
107DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
108
109#endif
110void *zfsdev_state;
111static char *zvol_tag = "zvol_tag";
112
113#define	ZVOL_DUMPSIZE		"dumpsize"
114
115/*
116 * This lock protects the zfsdev_state structure from being modified
117 * while it's being used, e.g. an open that comes in before a create
118 * finishes.  It also protects temporary opens of the dataset so that,
119 * e.g., an open doesn't get a spurious EBUSY.
120 */
121#ifdef illumos
122kmutex_t zfsdev_state_lock;
123#else
124/*
125 * In FreeBSD we've replaced the upstream zfsdev_state_lock with the
126 * spa_namespace_lock in the ZVOL code.
127 */
128#define zfsdev_state_lock spa_namespace_lock
129#endif
130static uint32_t zvol_minors;
131
132#ifndef illumos
133SYSCTL_DECL(_vfs_zfs);
134SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
135static int	volmode = ZFS_VOLMODE_GEOM;
136TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
137SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
138    "Expose as GEOM providers (1), device files (2) or neither");
139
140#endif
141typedef struct zvol_extent {
142	list_node_t	ze_node;
143	dva_t		ze_dva;		/* dva associated with this extent */
144	uint64_t	ze_nblks;	/* number of blocks in extent */
145} zvol_extent_t;
146
147/*
148 * The in-core state of each volume.
149 */
150typedef struct zvol_state {
151#ifndef illumos
152	LIST_ENTRY(zvol_state)	zv_links;
153#endif
154	char		zv_name[MAXPATHLEN]; /* pool/dd name */
155	uint64_t	zv_volsize;	/* amount of space we advertise */
156	uint64_t	zv_volblocksize; /* volume block size */
157#ifdef illumos
158	minor_t		zv_minor;	/* minor number */
159#else
160	struct cdev	*zv_dev;	/* non-GEOM device */
161	struct g_provider *zv_provider;	/* GEOM provider */
162#endif
163	uint8_t		zv_min_bs;	/* minimum addressable block shift */
164	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
165	objset_t	*zv_objset;	/* objset handle */
166#ifdef illumos
167	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
168#endif
169	uint32_t	zv_total_opens;	/* total open count */
170	zilog_t		*zv_zilog;	/* ZIL handle */
171	list_t		zv_extents;	/* List of extents for dump */
172	znode_t		zv_znode;	/* for range locking */
173	dmu_buf_t	*zv_dbuf;	/* bonus handle */
174#ifndef illumos
175	int		zv_state;
176	int		zv_volmode;	/* Provide GEOM or cdev */
177	struct bio_queue_head zv_queue;
178	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
179#endif
180} zvol_state_t;
181
182#ifndef illumos
183static LIST_HEAD(, zvol_state) all_zvols;
184#endif
185/*
186 * zvol specific flags
187 */
188#define	ZVOL_RDONLY	0x1
189#define	ZVOL_DUMPIFIED	0x2
190#define	ZVOL_EXCL	0x4
191#define	ZVOL_WCE	0x8
192
193/*
194 * zvol maximum transfer in one DMU tx.
195 */
196int zvol_maxphys = DMU_MAX_ACCESS/2;
197
198/*
199 * Toggle unmap functionality.
200 */
201boolean_t zvol_unmap_enabled = B_TRUE;
202#ifndef illumos
203SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
204    &zvol_unmap_enabled, 0,
205    "Enable UNMAP functionality");
206
207static d_open_t		zvol_d_open;
208static d_close_t	zvol_d_close;
209static d_read_t		zvol_read;
210static d_write_t	zvol_write;
211static d_ioctl_t	zvol_d_ioctl;
212static d_strategy_t	zvol_strategy;
213
214static struct cdevsw zvol_cdevsw = {
215	.d_version =	D_VERSION,
216	.d_open =	zvol_d_open,
217	.d_close =	zvol_d_close,
218	.d_read =	zvol_read,
219	.d_write =	zvol_write,
220	.d_ioctl =	zvol_d_ioctl,
221	.d_strategy =	zvol_strategy,
222	.d_name =	"zvol",
223	.d_flags =	D_DISK | D_TRACKCLOSE,
224};
225
226static void zvol_geom_run(zvol_state_t *zv);
227static void zvol_geom_destroy(zvol_state_t *zv);
228static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
229static void zvol_geom_start(struct bio *bp);
230static void zvol_geom_worker(void *arg);
231static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
232    uint64_t len, boolean_t sync);
233#endif	/* !illumos */
234
235extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
236    nvlist_t *, nvlist_t *);
237static int zvol_remove_zv(zvol_state_t *);
238static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
239static int zvol_dumpify(zvol_state_t *zv);
240static int zvol_dump_fini(zvol_state_t *zv);
241static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
242
243static void
244zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
245{
246#ifdef illumos
247	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
248
249	zv->zv_volsize = volsize;
250	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
251	    "Size", volsize) == DDI_SUCCESS);
252	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
253	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
254
255	/* Notify specfs to invalidate the cached size */
256	spec_size_invalidate(dev, VBLK);
257	spec_size_invalidate(dev, VCHR);
258#else	/* !illumos */
259	zv->zv_volsize = volsize;
260	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
261		struct g_provider *pp;
262
263		pp = zv->zv_provider;
264		if (pp == NULL)
265			return;
266		g_topology_lock();
267		g_resize_provider(pp, zv->zv_volsize);
268		g_topology_unlock();
269	}
270#endif	/* illumos */
271}
272
273int
274zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
275{
276	if (volsize == 0)
277		return (SET_ERROR(EINVAL));
278
279	if (volsize % blocksize != 0)
280		return (SET_ERROR(EINVAL));
281
282#ifdef _ILP32
283	if (volsize - 1 > SPEC_MAXOFFSET_T)
284		return (SET_ERROR(EOVERFLOW));
285#endif
286	return (0);
287}
288
289int
290zvol_check_volblocksize(uint64_t volblocksize)
291{
292	if (volblocksize < SPA_MINBLOCKSIZE ||
293	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
294	    !ISP2(volblocksize))
295		return (SET_ERROR(EDOM));
296
297	return (0);
298}
299
300int
301zvol_get_stats(objset_t *os, nvlist_t *nv)
302{
303	int error;
304	dmu_object_info_t doi;
305	uint64_t val;
306
307	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
308	if (error)
309		return (error);
310
311	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
312
313	error = dmu_object_info(os, ZVOL_OBJ, &doi);
314
315	if (error == 0) {
316		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
317		    doi.doi_data_block_size);
318	}
319
320	return (error);
321}
322
323static zvol_state_t *
324zvol_minor_lookup(const char *name)
325{
326#ifdef illumos
327	minor_t minor;
328#endif
329	zvol_state_t *zv;
330
331	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
332
333#ifdef illumos
334	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
335		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
336		if (zv == NULL)
337			continue;
338#else
339	LIST_FOREACH(zv, &all_zvols, zv_links) {
340#endif
341		if (strcmp(zv->zv_name, name) == 0)
342			return (zv);
343	}
344
345	return (NULL);
346}
347
348/* extent mapping arg */
349struct maparg {
350	zvol_state_t	*ma_zv;
351	uint64_t	ma_blks;
352};
353
354/*ARGSUSED*/
355static int
356zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
357    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
358{
359	struct maparg *ma = arg;
360	zvol_extent_t *ze;
361	int bs = ma->ma_zv->zv_volblocksize;
362
363	if (bp == NULL || BP_IS_HOLE(bp) ||
364	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
365		return (0);
366
367	VERIFY(!BP_IS_EMBEDDED(bp));
368
369	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
370	ma->ma_blks++;
371
372	/* Abort immediately if we have encountered gang blocks */
373	if (BP_IS_GANG(bp))
374		return (SET_ERROR(EFRAGS));
375
376	/*
377	 * See if the block is at the end of the previous extent.
378	 */
379	ze = list_tail(&ma->ma_zv->zv_extents);
380	if (ze &&
381	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
382	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
383	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
384		ze->ze_nblks++;
385		return (0);
386	}
387
388	dprintf_bp(bp, "%s", "next blkptr:");
389
390	/* start a new extent */
391	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
392	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
393	ze->ze_nblks = 1;
394	list_insert_tail(&ma->ma_zv->zv_extents, ze);
395	return (0);
396}
397
398static void
399zvol_free_extents(zvol_state_t *zv)
400{
401	zvol_extent_t *ze;
402
403	while (ze = list_head(&zv->zv_extents)) {
404		list_remove(&zv->zv_extents, ze);
405		kmem_free(ze, sizeof (zvol_extent_t));
406	}
407}
408
409static int
410zvol_get_lbas(zvol_state_t *zv)
411{
412	objset_t *os = zv->zv_objset;
413	struct maparg	ma;
414	int		err;
415
416	ma.ma_zv = zv;
417	ma.ma_blks = 0;
418	zvol_free_extents(zv);
419
420	/* commit any in-flight changes before traversing the dataset */
421	txg_wait_synced(dmu_objset_pool(os), 0);
422	err = traverse_dataset(dmu_objset_ds(os), 0,
423	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
424	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
425		zvol_free_extents(zv);
426		return (err ? err : EIO);
427	}
428
429	return (0);
430}
431
432/* ARGSUSED */
433void
434zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
435{
436	zfs_creat_t *zct = arg;
437	nvlist_t *nvprops = zct->zct_props;
438	int error;
439	uint64_t volblocksize, volsize;
440
441	VERIFY(nvlist_lookup_uint64(nvprops,
442	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
443	if (nvlist_lookup_uint64(nvprops,
444	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
445		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
446
447	/*
448	 * These properties must be removed from the list so the generic
449	 * property setting step won't apply to them.
450	 */
451	VERIFY(nvlist_remove_all(nvprops,
452	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
453	(void) nvlist_remove_all(nvprops,
454	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
455
456	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
457	    DMU_OT_NONE, 0, tx);
458	ASSERT(error == 0);
459
460	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
461	    DMU_OT_NONE, 0, tx);
462	ASSERT(error == 0);
463
464	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
465	ASSERT(error == 0);
466}
467
468/*
469 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
470 * implement DKIOCFREE/free-long-range.
471 */
472static int
473zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
474{
475	uint64_t offset, length;
476
477	if (byteswap)
478		byteswap_uint64_array(lr, sizeof (*lr));
479
480	offset = lr->lr_offset;
481	length = lr->lr_length;
482
483	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
484}
485
486/*
487 * Replay a TX_WRITE ZIL transaction that didn't get committed
488 * after a system failure
489 */
490static int
491zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
492{
493	objset_t *os = zv->zv_objset;
494	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
495	uint64_t offset, length;
496	dmu_tx_t *tx;
497	int error;
498
499	if (byteswap)
500		byteswap_uint64_array(lr, sizeof (*lr));
501
502	offset = lr->lr_offset;
503	length = lr->lr_length;
504
505	/* If it's a dmu_sync() block, write the whole block */
506	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
507		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
508		if (length < blocksize) {
509			offset -= offset % blocksize;
510			length = blocksize;
511		}
512	}
513
514	tx = dmu_tx_create(os);
515	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
516	error = dmu_tx_assign(tx, TXG_WAIT);
517	if (error) {
518		dmu_tx_abort(tx);
519	} else {
520		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
521		dmu_tx_commit(tx);
522	}
523
524	return (error);
525}
526
527/* ARGSUSED */
528static int
529zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
530{
531	return (SET_ERROR(ENOTSUP));
532}
533
534/*
535 * Callback vectors for replaying records.
536 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
537 */
538zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
539	zvol_replay_err,	/* 0 no such transaction type */
540	zvol_replay_err,	/* TX_CREATE */
541	zvol_replay_err,	/* TX_MKDIR */
542	zvol_replay_err,	/* TX_MKXATTR */
543	zvol_replay_err,	/* TX_SYMLINK */
544	zvol_replay_err,	/* TX_REMOVE */
545	zvol_replay_err,	/* TX_RMDIR */
546	zvol_replay_err,	/* TX_LINK */
547	zvol_replay_err,	/* TX_RENAME */
548	zvol_replay_write,	/* TX_WRITE */
549	zvol_replay_truncate,	/* TX_TRUNCATE */
550	zvol_replay_err,	/* TX_SETATTR */
551	zvol_replay_err,	/* TX_ACL */
552	zvol_replay_err,	/* TX_CREATE_ACL */
553	zvol_replay_err,	/* TX_CREATE_ATTR */
554	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
555	zvol_replay_err,	/* TX_MKDIR_ACL */
556	zvol_replay_err,	/* TX_MKDIR_ATTR */
557	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
558	zvol_replay_err,	/* TX_WRITE2 */
559};
560
561#ifdef illumos
562int
563zvol_name2minor(const char *name, minor_t *minor)
564{
565	zvol_state_t *zv;
566
567	mutex_enter(&zfsdev_state_lock);
568	zv = zvol_minor_lookup(name);
569	if (minor && zv)
570		*minor = zv->zv_minor;
571	mutex_exit(&zfsdev_state_lock);
572	return (zv ? 0 : -1);
573}
574#endif	/* illumos */
575
576/*
577 * Create a minor node (plus a whole lot more) for the specified volume.
578 */
579int
580zvol_create_minor(const char *name)
581{
582	zfs_soft_state_t *zs;
583	zvol_state_t *zv;
584	objset_t *os;
585	dmu_object_info_t doi;
586#ifdef illumos
587	minor_t minor = 0;
588	char chrbuf[30], blkbuf[30];
589#else
590	struct g_provider *pp;
591	struct g_geom *gp;
592	uint64_t volsize, mode;
593#endif
594	int error;
595
596#ifndef illumos
597	ZFS_LOG(1, "Creating ZVOL %s...", name);
598#endif
599
600	mutex_enter(&zfsdev_state_lock);
601
602	if (zvol_minor_lookup(name) != NULL) {
603		mutex_exit(&zfsdev_state_lock);
604		return (SET_ERROR(EEXIST));
605	}
606
607	/* lie and say we're read-only */
608	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
609
610	if (error) {
611		mutex_exit(&zfsdev_state_lock);
612		return (error);
613	}
614
615#ifdef illumos
616	if ((minor = zfsdev_minor_alloc()) == 0) {
617		dmu_objset_disown(os, FTAG);
618		mutex_exit(&zfsdev_state_lock);
619		return (SET_ERROR(ENXIO));
620	}
621
622	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
623		dmu_objset_disown(os, FTAG);
624		mutex_exit(&zfsdev_state_lock);
625		return (SET_ERROR(EAGAIN));
626	}
627	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
628	    (char *)name);
629
630	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
631
632	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
633	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
634		ddi_soft_state_free(zfsdev_state, minor);
635		dmu_objset_disown(os, FTAG);
636		mutex_exit(&zfsdev_state_lock);
637		return (SET_ERROR(EAGAIN));
638	}
639
640	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
641
642	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
643	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
644		ddi_remove_minor_node(zfs_dip, chrbuf);
645		ddi_soft_state_free(zfsdev_state, minor);
646		dmu_objset_disown(os, FTAG);
647		mutex_exit(&zfsdev_state_lock);
648		return (SET_ERROR(EAGAIN));
649	}
650
651	zs = ddi_get_soft_state(zfsdev_state, minor);
652	zs->zss_type = ZSST_ZVOL;
653	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
654#else	/* !illumos */
655
656	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
657	zv->zv_state = 0;
658	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
659	if (error) {
660		kmem_free(zv, sizeof(*zv));
661		dmu_objset_disown(os, zvol_tag);
662		mutex_exit(&zfsdev_state_lock);
663		return (error);
664	}
665	error = dsl_prop_get_integer(name,
666	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
667	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
668		mode = volmode;
669
670	DROP_GIANT();
671	zv->zv_volsize = volsize;
672	zv->zv_volmode = mode;
673	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
674		g_topology_lock();
675		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
676		gp->start = zvol_geom_start;
677		gp->access = zvol_geom_access;
678		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
679		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
680		pp->sectorsize = DEV_BSIZE;
681		pp->mediasize = zv->zv_volsize;
682		pp->private = zv;
683
684		zv->zv_provider = pp;
685		bioq_init(&zv->zv_queue);
686		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
687	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
688		struct make_dev_args args;
689
690		make_dev_args_init(&args);
691		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
692		args.mda_devsw = &zvol_cdevsw;
693		args.mda_cr = NULL;
694		args.mda_uid = UID_ROOT;
695		args.mda_gid = GID_OPERATOR;
696		args.mda_mode = 0640;
697		args.mda_si_drv2 = zv;
698		error = make_dev_s(&args, &zv->zv_dev,
699		    "%s/%s", ZVOL_DRIVER, name);
700		if (error != 0) {
701			kmem_free(zv, sizeof(*zv));
702			dmu_objset_disown(os, FTAG);
703			mutex_exit(&zfsdev_state_lock);
704			return (error);
705		}
706		zv->zv_dev->si_iosize_max = MAXPHYS;
707	}
708	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
709#endif	/* illumos */
710
711	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
712	zv->zv_min_bs = DEV_BSHIFT;
713#ifdef illumos
714	zv->zv_minor = minor;
715#endif
716	zv->zv_objset = os;
717	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
718		zv->zv_flags |= ZVOL_RDONLY;
719	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
720	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
721	    sizeof (rl_t), offsetof(rl_t, r_node));
722	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
723	    offsetof(zvol_extent_t, ze_node));
724	/* get and cache the blocksize */
725	error = dmu_object_info(os, ZVOL_OBJ, &doi);
726	ASSERT(error == 0);
727	zv->zv_volblocksize = doi.doi_data_block_size;
728
729	if (spa_writeable(dmu_objset_spa(os))) {
730		if (zil_replay_disable)
731			zil_destroy(dmu_objset_zil(os), B_FALSE);
732		else
733			zil_replay(os, zv, zvol_replay_vector);
734	}
735	dmu_objset_disown(os, FTAG);
736	zv->zv_objset = NULL;
737
738	zvol_minors++;
739
740	mutex_exit(&zfsdev_state_lock);
741#ifndef illumos
742	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
743		zvol_geom_run(zv);
744		g_topology_unlock();
745	}
746	PICKUP_GIANT();
747
748	ZFS_LOG(1, "ZVOL %s created.", name);
749#endif
750
751	return (0);
752}
753
754/*
755 * Remove minor node for the specified volume.
756 */
757static int
758zvol_remove_zv(zvol_state_t *zv)
759{
760#ifdef illumos
761	char nmbuf[20];
762	minor_t minor = zv->zv_minor;
763#endif
764
765	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
766	if (zv->zv_total_opens != 0)
767		return (SET_ERROR(EBUSY));
768
769#ifdef illumos
770	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
771	ddi_remove_minor_node(zfs_dip, nmbuf);
772
773	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
774	ddi_remove_minor_node(zfs_dip, nmbuf);
775#else
776	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
777
778	LIST_REMOVE(zv, zv_links);
779	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
780		g_topology_lock();
781		zvol_geom_destroy(zv);
782		g_topology_unlock();
783	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
784		destroy_dev(zv->zv_dev);
785#endif
786
787	avl_destroy(&zv->zv_znode.z_range_avl);
788	mutex_destroy(&zv->zv_znode.z_range_lock);
789
790	kmem_free(zv, sizeof (zvol_state_t));
791#ifdef illumos
792	ddi_soft_state_free(zfsdev_state, minor);
793#endif
794	zvol_minors--;
795	return (0);
796}
797
798int
799zvol_remove_minor(const char *name)
800{
801	zvol_state_t *zv;
802	int rc;
803
804	mutex_enter(&zfsdev_state_lock);
805	if ((zv = zvol_minor_lookup(name)) == NULL) {
806		mutex_exit(&zfsdev_state_lock);
807		return (SET_ERROR(ENXIO));
808	}
809	rc = zvol_remove_zv(zv);
810	mutex_exit(&zfsdev_state_lock);
811	return (rc);
812}
813
814int
815zvol_first_open(zvol_state_t *zv)
816{
817	objset_t *os;
818	uint64_t volsize;
819	int error;
820	uint64_t readonly;
821
822	/* lie and say we're read-only */
823	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
824	    zvol_tag, &os);
825	if (error)
826		return (error);
827
828	zv->zv_objset = os;
829	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
830	if (error) {
831		ASSERT(error == 0);
832		dmu_objset_disown(os, zvol_tag);
833		return (error);
834	}
835
836	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
837	if (error) {
838		dmu_objset_disown(os, zvol_tag);
839		return (error);
840	}
841
842	zvol_size_changed(zv, volsize);
843	zv->zv_zilog = zil_open(os, zvol_get_data);
844
845	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
846	    NULL) == 0);
847	if (readonly || dmu_objset_is_snapshot(os) ||
848	    !spa_writeable(dmu_objset_spa(os)))
849		zv->zv_flags |= ZVOL_RDONLY;
850	else
851		zv->zv_flags &= ~ZVOL_RDONLY;
852	return (error);
853}
854
855void
856zvol_last_close(zvol_state_t *zv)
857{
858	zil_close(zv->zv_zilog);
859	zv->zv_zilog = NULL;
860
861	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
862	zv->zv_dbuf = NULL;
863
864	/*
865	 * Evict cached data
866	 */
867	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
868	    !(zv->zv_flags & ZVOL_RDONLY))
869		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
870	dmu_objset_evict_dbufs(zv->zv_objset);
871
872	dmu_objset_disown(zv->zv_objset, zvol_tag);
873	zv->zv_objset = NULL;
874}
875
876#ifdef illumos
877int
878zvol_prealloc(zvol_state_t *zv)
879{
880	objset_t *os = zv->zv_objset;
881	dmu_tx_t *tx;
882	uint64_t refd, avail, usedobjs, availobjs;
883	uint64_t resid = zv->zv_volsize;
884	uint64_t off = 0;
885
886	/* Check the space usage before attempting to allocate the space */
887	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
888	if (avail < zv->zv_volsize)
889		return (SET_ERROR(ENOSPC));
890
891	/* Free old extents if they exist */
892	zvol_free_extents(zv);
893
894	while (resid != 0) {
895		int error;
896		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
897
898		tx = dmu_tx_create(os);
899		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
900		error = dmu_tx_assign(tx, TXG_WAIT);
901		if (error) {
902			dmu_tx_abort(tx);
903			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
904			return (error);
905		}
906		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
907		dmu_tx_commit(tx);
908		off += bytes;
909		resid -= bytes;
910	}
911	txg_wait_synced(dmu_objset_pool(os), 0);
912
913	return (0);
914}
915#endif	/* illumos */
916
917static int
918zvol_update_volsize(objset_t *os, uint64_t volsize)
919{
920	dmu_tx_t *tx;
921	int error;
922
923	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
924
925	tx = dmu_tx_create(os);
926	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
927	dmu_tx_mark_netfree(tx);
928	error = dmu_tx_assign(tx, TXG_WAIT);
929	if (error) {
930		dmu_tx_abort(tx);
931		return (error);
932	}
933
934	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
935	    &volsize, tx);
936	dmu_tx_commit(tx);
937
938	if (error == 0)
939		error = dmu_free_long_range(os,
940		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
941	return (error);
942}
943
944void
945zvol_remove_minors(const char *name)
946{
947#ifdef illumos
948	zvol_state_t *zv;
949	char *namebuf;
950	minor_t minor;
951
952	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
953	(void) strncpy(namebuf, name, strlen(name));
954	(void) strcat(namebuf, "/");
955	mutex_enter(&zfsdev_state_lock);
956	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
957
958		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
959		if (zv == NULL)
960			continue;
961		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
962			(void) zvol_remove_zv(zv);
963	}
964	kmem_free(namebuf, strlen(name) + 2);
965
966	mutex_exit(&zfsdev_state_lock);
967#else	/* !illumos */
968	zvol_state_t *zv, *tzv;
969	size_t namelen;
970
971	namelen = strlen(name);
972
973	DROP_GIANT();
974	mutex_enter(&zfsdev_state_lock);
975
976	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
977		if (strcmp(zv->zv_name, name) == 0 ||
978		    (strncmp(zv->zv_name, name, namelen) == 0 &&
979		    strlen(zv->zv_name) > namelen && (zv->zv_name[namelen] == '/' ||
980		    zv->zv_name[namelen] == '@'))) {
981			(void) zvol_remove_zv(zv);
982		}
983	}
984
985	mutex_exit(&zfsdev_state_lock);
986	PICKUP_GIANT();
987#endif	/* illumos */
988}
989
990static int
991zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
992{
993	uint64_t old_volsize = 0ULL;
994	int error = 0;
995
996	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
997
998	/*
999	 * Reinitialize the dump area to the new size. If we
1000	 * failed to resize the dump area then restore it back to
1001	 * its original size.  We must set the new volsize prior
1002	 * to calling dumpvp_resize() to ensure that the devices'
1003	 * size(9P) is not visible by the dump subsystem.
1004	 */
1005	old_volsize = zv->zv_volsize;
1006	zvol_size_changed(zv, volsize);
1007
1008#ifdef ZVOL_DUMP
1009	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1010		if ((error = zvol_dumpify(zv)) != 0 ||
1011		    (error = dumpvp_resize()) != 0) {
1012			int dumpify_error;
1013
1014			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
1015			zvol_size_changed(zv, old_volsize);
1016			dumpify_error = zvol_dumpify(zv);
1017			error = dumpify_error ? dumpify_error : error;
1018		}
1019	}
1020#endif	/* ZVOL_DUMP */
1021
1022#ifdef illumos
1023	/*
1024	 * Generate a LUN expansion event.
1025	 */
1026	if (error == 0) {
1027		sysevent_id_t eid;
1028		nvlist_t *attr;
1029		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1030
1031		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
1032		    zv->zv_minor);
1033
1034		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1035		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
1036
1037		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
1038		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
1039
1040		nvlist_free(attr);
1041		kmem_free(physpath, MAXPATHLEN);
1042	}
1043#endif	/* illumos */
1044	return (error);
1045}
1046
1047int
1048zvol_set_volsize(const char *name, uint64_t volsize)
1049{
1050	zvol_state_t *zv = NULL;
1051	objset_t *os;
1052	int error;
1053	dmu_object_info_t doi;
1054	uint64_t readonly;
1055	boolean_t owned = B_FALSE;
1056
1057	error = dsl_prop_get_integer(name,
1058	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
1059	if (error != 0)
1060		return (error);
1061	if (readonly)
1062		return (SET_ERROR(EROFS));
1063
1064	mutex_enter(&zfsdev_state_lock);
1065	zv = zvol_minor_lookup(name);
1066
1067	if (zv == NULL || zv->zv_objset == NULL) {
1068		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
1069		    FTAG, &os)) != 0) {
1070			mutex_exit(&zfsdev_state_lock);
1071			return (error);
1072		}
1073		owned = B_TRUE;
1074		if (zv != NULL)
1075			zv->zv_objset = os;
1076	} else {
1077		os = zv->zv_objset;
1078	}
1079
1080	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
1081	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
1082		goto out;
1083
1084	error = zvol_update_volsize(os, volsize);
1085
1086	if (error == 0 && zv != NULL)
1087		error = zvol_update_live_volsize(zv, volsize);
1088out:
1089	if (owned) {
1090		dmu_objset_disown(os, FTAG);
1091		if (zv != NULL)
1092			zv->zv_objset = NULL;
1093	}
1094	mutex_exit(&zfsdev_state_lock);
1095	return (error);
1096}
1097
1098/*ARGSUSED*/
1099#ifdef illumos
1100int
1101zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
1102#else
1103static int
1104zvol_open(struct g_provider *pp, int flag, int count)
1105#endif
1106{
1107	zvol_state_t *zv;
1108	int err = 0;
1109#ifdef illumos
1110
1111	mutex_enter(&zfsdev_state_lock);
1112
1113	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
1114	if (zv == NULL) {
1115		mutex_exit(&zfsdev_state_lock);
1116		return (SET_ERROR(ENXIO));
1117	}
1118
1119	if (zv->zv_total_opens == 0)
1120		err = zvol_first_open(zv);
1121	if (err) {
1122		mutex_exit(&zfsdev_state_lock);
1123		return (err);
1124	}
1125#else	/* !illumos */
1126	boolean_t locked = B_FALSE;
1127
1128	/*
1129	 * Protect against recursively entering spa_namespace_lock
1130	 * when spa_open() is used for a pool on a (local) ZVOL(s).
1131	 * This is needed since we replaced upstream zfsdev_state_lock
1132	 * with spa_namespace_lock in the ZVOL code.
1133	 * We are using the same trick as spa_open().
1134	 * Note that calls in zvol_first_open which need to resolve
1135	 * pool name to a spa object will enter spa_open()
1136	 * recursively, but that function already has all the
1137	 * necessary protection.
1138	 */
1139	if (!MUTEX_HELD(&zfsdev_state_lock)) {
1140		mutex_enter(&zfsdev_state_lock);
1141		locked = B_TRUE;
1142	}
1143
1144	zv = pp->private;
1145	if (zv == NULL) {
1146		if (locked)
1147			mutex_exit(&zfsdev_state_lock);
1148		return (SET_ERROR(ENXIO));
1149	}
1150
1151	if (zv->zv_total_opens == 0) {
1152		err = zvol_first_open(zv);
1153		if (err) {
1154			if (locked)
1155				mutex_exit(&zfsdev_state_lock);
1156			return (err);
1157		}
1158		pp->mediasize = zv->zv_volsize;
1159		pp->stripeoffset = 0;
1160		pp->stripesize = zv->zv_volblocksize;
1161	}
1162#endif	/* illumos */
1163	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1164		err = SET_ERROR(EROFS);
1165		goto out;
1166	}
1167	if (zv->zv_flags & ZVOL_EXCL) {
1168		err = SET_ERROR(EBUSY);
1169		goto out;
1170	}
1171#ifdef FEXCL
1172	if (flag & FEXCL) {
1173		if (zv->zv_total_opens != 0) {
1174			err = SET_ERROR(EBUSY);
1175			goto out;
1176		}
1177		zv->zv_flags |= ZVOL_EXCL;
1178	}
1179#endif
1180
1181#ifdef illumos
1182	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
1183		zv->zv_open_count[otyp]++;
1184		zv->zv_total_opens++;
1185	}
1186	mutex_exit(&zfsdev_state_lock);
1187#else
1188	zv->zv_total_opens += count;
1189	if (locked)
1190		mutex_exit(&zfsdev_state_lock);
1191#endif
1192
1193	return (err);
1194out:
1195	if (zv->zv_total_opens == 0)
1196		zvol_last_close(zv);
1197#ifdef illumos
1198	mutex_exit(&zfsdev_state_lock);
1199#else
1200	if (locked)
1201		mutex_exit(&zfsdev_state_lock);
1202#endif
1203	return (err);
1204}
1205
1206/*ARGSUSED*/
1207#ifdef illumos
1208int
1209zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
1210{
1211	minor_t minor = getminor(dev);
1212	zvol_state_t *zv;
1213	int error = 0;
1214
1215	mutex_enter(&zfsdev_state_lock);
1216
1217	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1218	if (zv == NULL) {
1219		mutex_exit(&zfsdev_state_lock);
1220#else	/* !illumos */
1221static int
1222zvol_close(struct g_provider *pp, int flag, int count)
1223{
1224	zvol_state_t *zv;
1225	int error = 0;
1226	boolean_t locked = B_FALSE;
1227
1228	/* See comment in zvol_open(). */
1229	if (!MUTEX_HELD(&zfsdev_state_lock)) {
1230		mutex_enter(&zfsdev_state_lock);
1231		locked = B_TRUE;
1232	}
1233
1234	zv = pp->private;
1235	if (zv == NULL) {
1236		if (locked)
1237			mutex_exit(&zfsdev_state_lock);
1238#endif	/* illumos */
1239		return (SET_ERROR(ENXIO));
1240	}
1241
1242	if (zv->zv_flags & ZVOL_EXCL) {
1243		ASSERT(zv->zv_total_opens == 1);
1244		zv->zv_flags &= ~ZVOL_EXCL;
1245	}
1246
1247	/*
1248	 * If the open count is zero, this is a spurious close.
1249	 * That indicates a bug in the kernel / DDI framework.
1250	 */
1251#ifdef illumos
1252	ASSERT(zv->zv_open_count[otyp] != 0);
1253#endif
1254	ASSERT(zv->zv_total_opens != 0);
1255
1256	/*
1257	 * You may get multiple opens, but only one close.
1258	 */
1259#ifdef illumos
1260	zv->zv_open_count[otyp]--;
1261	zv->zv_total_opens--;
1262#else
1263	zv->zv_total_opens -= count;
1264#endif
1265
1266	if (zv->zv_total_opens == 0)
1267		zvol_last_close(zv);
1268
1269#ifdef illumos
1270	mutex_exit(&zfsdev_state_lock);
1271#else
1272	if (locked)
1273		mutex_exit(&zfsdev_state_lock);
1274#endif
1275	return (error);
1276}
1277
1278static void
1279zvol_get_done(zgd_t *zgd, int error)
1280{
1281	if (zgd->zgd_db)
1282		dmu_buf_rele(zgd->zgd_db, zgd);
1283
1284	zfs_range_unlock(zgd->zgd_rl);
1285
1286	if (error == 0 && zgd->zgd_bp)
1287		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1288
1289	kmem_free(zgd, sizeof (zgd_t));
1290}
1291
1292/*
1293 * Get data to generate a TX_WRITE intent log record.
1294 */
1295static int
1296zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1297{
1298	zvol_state_t *zv = arg;
1299	objset_t *os = zv->zv_objset;
1300	uint64_t object = ZVOL_OBJ;
1301	uint64_t offset = lr->lr_offset;
1302	uint64_t size = lr->lr_length;	/* length of user data */
1303	blkptr_t *bp = &lr->lr_blkptr;
1304	dmu_buf_t *db;
1305	zgd_t *zgd;
1306	int error;
1307
1308	ASSERT(zio != NULL);
1309	ASSERT(size != 0);
1310
1311	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1312	zgd->zgd_zilog = zv->zv_zilog;
1313	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
1314
1315	/*
1316	 * Write records come in two flavors: immediate and indirect.
1317	 * For small writes it's cheaper to store the data with the
1318	 * log record (immediate); for large writes it's cheaper to
1319	 * sync the data and get a pointer to it (indirect) so that
1320	 * we don't have to write the data twice.
1321	 */
1322	if (buf != NULL) {	/* immediate write */
1323		error = dmu_read(os, object, offset, size, buf,
1324		    DMU_READ_NO_PREFETCH);
1325	} else {
1326		size = zv->zv_volblocksize;
1327		offset = P2ALIGN(offset, size);
1328		error = dmu_buf_hold(os, object, offset, zgd, &db,
1329		    DMU_READ_NO_PREFETCH);
1330		if (error == 0) {
1331			blkptr_t *obp = dmu_buf_get_blkptr(db);
1332			if (obp) {
1333				ASSERT(BP_IS_HOLE(bp));
1334				*bp = *obp;
1335			}
1336
1337			zgd->zgd_db = db;
1338			zgd->zgd_bp = bp;
1339
1340			ASSERT(db->db_offset == offset);
1341			ASSERT(db->db_size == size);
1342
1343			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1344			    zvol_get_done, zgd);
1345
1346			if (error == 0)
1347				return (0);
1348		}
1349	}
1350
1351	zvol_get_done(zgd, error);
1352
1353	return (error);
1354}
1355
1356/*
1357 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1358 *
1359 * We store data in the log buffers if it's small enough.
1360 * Otherwise we will later flush the data out via dmu_sync().
1361 */
1362ssize_t zvol_immediate_write_sz = 32768;
1363
1364static void
1365zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1366    boolean_t sync)
1367{
1368	uint32_t blocksize = zv->zv_volblocksize;
1369	zilog_t *zilog = zv->zv_zilog;
1370	boolean_t slogging;
1371	ssize_t immediate_write_sz;
1372
1373	if (zil_replaying(zilog, tx))
1374		return;
1375
1376	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1377	    ? 0 : zvol_immediate_write_sz;
1378
1379	slogging = spa_has_slogs(zilog->zl_spa) &&
1380	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
1381
1382	while (resid) {
1383		itx_t *itx;
1384		lr_write_t *lr;
1385		ssize_t len;
1386		itx_wr_state_t write_state;
1387
1388		/*
1389		 * Unlike zfs_log_write() we can be called with
1390		 * upto DMU_MAX_ACCESS/2 (5MB) writes.
1391		 */
1392		if (blocksize > immediate_write_sz && !slogging &&
1393		    resid >= blocksize && off % blocksize == 0) {
1394			write_state = WR_INDIRECT; /* uses dmu_sync */
1395			len = blocksize;
1396		} else if (sync) {
1397			write_state = WR_COPIED;
1398			len = MIN(ZIL_MAX_LOG_DATA, resid);
1399		} else {
1400			write_state = WR_NEED_COPY;
1401			len = MIN(ZIL_MAX_LOG_DATA, resid);
1402		}
1403
1404		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1405		    (write_state == WR_COPIED ? len : 0));
1406		lr = (lr_write_t *)&itx->itx_lr;
1407		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1408		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1409			zil_itx_destroy(itx);
1410			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1411			lr = (lr_write_t *)&itx->itx_lr;
1412			write_state = WR_NEED_COPY;
1413		}
1414
1415		itx->itx_wr_state = write_state;
1416		if (write_state == WR_NEED_COPY)
1417			itx->itx_sod += len;
1418		lr->lr_foid = ZVOL_OBJ;
1419		lr->lr_offset = off;
1420		lr->lr_length = len;
1421		lr->lr_blkoff = 0;
1422		BP_ZERO(&lr->lr_blkptr);
1423
1424		itx->itx_private = zv;
1425		itx->itx_sync = sync;
1426
1427		zil_itx_assign(zilog, itx, tx);
1428
1429		off += len;
1430		resid -= len;
1431	}
1432}
1433
1434#ifdef illumos
1435static int
1436zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1437    uint64_t size, boolean_t doread, boolean_t isdump)
1438{
1439	vdev_disk_t *dvd;
1440	int c;
1441	int numerrors = 0;
1442
1443	if (vd->vdev_ops == &vdev_mirror_ops ||
1444	    vd->vdev_ops == &vdev_replacing_ops ||
1445	    vd->vdev_ops == &vdev_spare_ops) {
1446		for (c = 0; c < vd->vdev_children; c++) {
1447			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1448			    addr, offset, origoffset, size, doread, isdump);
1449			if (err != 0) {
1450				numerrors++;
1451			} else if (doread) {
1452				break;
1453			}
1454		}
1455	}
1456
1457	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1458		return (numerrors < vd->vdev_children ? 0 : EIO);
1459
1460	if (doread && !vdev_readable(vd))
1461		return (SET_ERROR(EIO));
1462	else if (!doread && !vdev_writeable(vd))
1463		return (SET_ERROR(EIO));
1464
1465	if (vd->vdev_ops == &vdev_raidz_ops) {
1466		return (vdev_raidz_physio(vd,
1467		    addr, size, offset, origoffset, doread, isdump));
1468	}
1469
1470	offset += VDEV_LABEL_START_SIZE;
1471
1472	if (ddi_in_panic() || isdump) {
1473		ASSERT(!doread);
1474		if (doread)
1475			return (SET_ERROR(EIO));
1476		dvd = vd->vdev_tsd;
1477		ASSERT3P(dvd, !=, NULL);
1478		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1479		    lbtodb(size)));
1480	} else {
1481		dvd = vd->vdev_tsd;
1482		ASSERT3P(dvd, !=, NULL);
1483		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1484		    offset, doread ? B_READ : B_WRITE));
1485	}
1486}
1487
1488static int
1489zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1490    boolean_t doread, boolean_t isdump)
1491{
1492	vdev_t *vd;
1493	int error;
1494	zvol_extent_t *ze;
1495	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1496
1497	/* Must be sector aligned, and not stradle a block boundary. */
1498	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1499	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1500		return (SET_ERROR(EINVAL));
1501	}
1502	ASSERT(size <= zv->zv_volblocksize);
1503
1504	/* Locate the extent this belongs to */
1505	ze = list_head(&zv->zv_extents);
1506	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1507		offset -= ze->ze_nblks * zv->zv_volblocksize;
1508		ze = list_next(&zv->zv_extents, ze);
1509	}
1510
1511	if (ze == NULL)
1512		return (SET_ERROR(EINVAL));
1513
1514	if (!ddi_in_panic())
1515		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1516
1517	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1518	offset += DVA_GET_OFFSET(&ze->ze_dva);
1519	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1520	    size, doread, isdump);
1521
1522	if (!ddi_in_panic())
1523		spa_config_exit(spa, SCL_STATE, FTAG);
1524
1525	return (error);
1526}
1527
1528int
1529zvol_strategy(buf_t *bp)
1530{
1531	zfs_soft_state_t *zs = NULL;
1532#else	/* !illumos */
1533void
1534zvol_strategy(struct bio *bp)
1535{
1536#endif	/* illumos */
1537	zvol_state_t *zv;
1538	uint64_t off, volsize;
1539	size_t resid;
1540	char *addr;
1541	objset_t *os;
1542	rl_t *rl;
1543	int error = 0;
1544#ifdef illumos
1545	boolean_t doread = bp->b_flags & B_READ;
1546#else
1547	boolean_t doread = 0;
1548#endif
1549	boolean_t is_dumpified;
1550	boolean_t sync;
1551
1552#ifdef illumos
1553	if (getminor(bp->b_edev) == 0) {
1554		error = SET_ERROR(EINVAL);
1555	} else {
1556		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1557		if (zs == NULL)
1558			error = SET_ERROR(ENXIO);
1559		else if (zs->zss_type != ZSST_ZVOL)
1560			error = SET_ERROR(EINVAL);
1561	}
1562
1563	if (error) {
1564		bioerror(bp, error);
1565		biodone(bp);
1566		return (0);
1567	}
1568
1569	zv = zs->zss_data;
1570
1571	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1572		bioerror(bp, EROFS);
1573		biodone(bp);
1574		return (0);
1575	}
1576
1577	off = ldbtob(bp->b_blkno);
1578#else	/* !illumos */
1579	if (bp->bio_to)
1580		zv = bp->bio_to->private;
1581	else
1582		zv = bp->bio_dev->si_drv2;
1583
1584	if (zv == NULL) {
1585		error = SET_ERROR(ENXIO);
1586		goto out;
1587	}
1588
1589	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
1590		error = SET_ERROR(EROFS);
1591		goto out;
1592	}
1593
1594	switch (bp->bio_cmd) {
1595	case BIO_FLUSH:
1596		goto sync;
1597	case BIO_READ:
1598		doread = 1;
1599	case BIO_WRITE:
1600	case BIO_DELETE:
1601		break;
1602	default:
1603		error = EOPNOTSUPP;
1604		goto out;
1605	}
1606
1607	off = bp->bio_offset;
1608#endif	/* illumos */
1609	volsize = zv->zv_volsize;
1610
1611	os = zv->zv_objset;
1612	ASSERT(os != NULL);
1613
1614#ifdef illumos
1615	bp_mapin(bp);
1616	addr = bp->b_un.b_addr;
1617	resid = bp->b_bcount;
1618
1619	if (resid > 0 && (off < 0 || off >= volsize)) {
1620		bioerror(bp, EIO);
1621		biodone(bp);
1622		return (0);
1623	}
1624
1625	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1626	sync = ((!(bp->b_flags & B_ASYNC) &&
1627	    !(zv->zv_flags & ZVOL_WCE)) ||
1628	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1629	    !doread && !is_dumpified;
1630#else	/* !illumos */
1631	addr = bp->bio_data;
1632	resid = bp->bio_length;
1633
1634	if (resid > 0 && (off < 0 || off >= volsize)) {
1635		error = SET_ERROR(EIO);
1636		goto out;
1637	}
1638
1639	is_dumpified = B_FALSE;
1640	sync = !doread && !is_dumpified &&
1641	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
1642#endif	/* illumos */
1643
1644	/*
1645	 * There must be no buffer changes when doing a dmu_sync() because
1646	 * we can't change the data whilst calculating the checksum.
1647	 */
1648	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1649	    doread ? RL_READER : RL_WRITER);
1650
1651#ifndef illumos
1652	if (bp->bio_cmd == BIO_DELETE) {
1653		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1654		error = dmu_tx_assign(tx, TXG_WAIT);
1655		if (error != 0) {
1656			dmu_tx_abort(tx);
1657		} else {
1658			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
1659			dmu_tx_commit(tx);
1660			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1661			    off, resid);
1662			resid = 0;
1663		}
1664		goto unlock;
1665	}
1666#endif
1667	while (resid != 0 && off < volsize) {
1668		size_t size = MIN(resid, zvol_maxphys);
1669#ifdef illumos
1670		if (is_dumpified) {
1671			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1672			error = zvol_dumpio(zv, addr, off, size,
1673			    doread, B_FALSE);
1674		} else if (doread) {
1675#else
1676		if (doread) {
1677#endif
1678			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1679			    DMU_READ_PREFETCH);
1680		} else {
1681			dmu_tx_t *tx = dmu_tx_create(os);
1682			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1683			error = dmu_tx_assign(tx, TXG_WAIT);
1684			if (error) {
1685				dmu_tx_abort(tx);
1686			} else {
1687				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1688				zvol_log_write(zv, tx, off, size, sync);
1689				dmu_tx_commit(tx);
1690			}
1691		}
1692		if (error) {
1693			/* convert checksum errors into IO errors */
1694			if (error == ECKSUM)
1695				error = SET_ERROR(EIO);
1696			break;
1697		}
1698		off += size;
1699		addr += size;
1700		resid -= size;
1701	}
1702#ifndef illumos
1703unlock:
1704#endif
1705	zfs_range_unlock(rl);
1706
1707#ifdef illumos
1708	if ((bp->b_resid = resid) == bp->b_bcount)
1709		bioerror(bp, off > volsize ? EINVAL : error);
1710
1711	if (sync)
1712		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1713	biodone(bp);
1714
1715	return (0);
1716#else	/* !illumos */
1717	bp->bio_completed = bp->bio_length - resid;
1718	if (bp->bio_completed < bp->bio_length && off > volsize)
1719		error = EINVAL;
1720
1721	if (sync) {
1722sync:
1723		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1724	}
1725out:
1726	if (bp->bio_to)
1727		g_io_deliver(bp, error);
1728	else
1729		biofinish(bp, NULL, error);
1730#endif	/* illumos */
1731}
1732
1733#ifdef illumos
1734/*
1735 * Set the buffer count to the zvol maximum transfer.
1736 * Using our own routine instead of the default minphys()
1737 * means that for larger writes we write bigger buffers on X86
1738 * (128K instead of 56K) and flush the disk write cache less often
1739 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1740 * 56K on X86 and 128K on sparc).
1741 */
1742void
1743zvol_minphys(struct buf *bp)
1744{
1745	if (bp->b_bcount > zvol_maxphys)
1746		bp->b_bcount = zvol_maxphys;
1747}
1748
1749int
1750zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1751{
1752	minor_t minor = getminor(dev);
1753	zvol_state_t *zv;
1754	int error = 0;
1755	uint64_t size;
1756	uint64_t boff;
1757	uint64_t resid;
1758
1759	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1760	if (zv == NULL)
1761		return (SET_ERROR(ENXIO));
1762
1763	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1764		return (SET_ERROR(EINVAL));
1765
1766	boff = ldbtob(blkno);
1767	resid = ldbtob(nblocks);
1768
1769	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1770
1771	while (resid) {
1772		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1773		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1774		if (error)
1775			break;
1776		boff += size;
1777		addr += size;
1778		resid -= size;
1779	}
1780
1781	return (error);
1782}
1783
1784/*ARGSUSED*/
1785int
1786zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1787{
1788	minor_t minor = getminor(dev);
1789#else	/* !illumos */
1790int
1791zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
1792{
1793#endif	/* illumos */
1794	zvol_state_t *zv;
1795	uint64_t volsize;
1796	rl_t *rl;
1797	int error = 0;
1798
1799#ifdef illumos
1800	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1801	if (zv == NULL)
1802		return (SET_ERROR(ENXIO));
1803#else
1804	zv = dev->si_drv2;
1805#endif
1806
1807	volsize = zv->zv_volsize;
1808	/* uio_loffset == volsize isn't an error as its required for EOF processing. */
1809	if (uio->uio_resid > 0 &&
1810	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1811		return (SET_ERROR(EIO));
1812
1813#ifdef illumos
1814	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1815		error = physio(zvol_strategy, NULL, dev, B_READ,
1816		    zvol_minphys, uio);
1817		return (error);
1818	}
1819#endif
1820
1821	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1822	    RL_READER);
1823	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1824		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1825
1826		/* don't read past the end */
1827		if (bytes > volsize - uio->uio_loffset)
1828			bytes = volsize - uio->uio_loffset;
1829
1830		error =  dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
1831		if (error) {
1832			/* convert checksum errors into IO errors */
1833			if (error == ECKSUM)
1834				error = SET_ERROR(EIO);
1835			break;
1836		}
1837	}
1838	zfs_range_unlock(rl);
1839	return (error);
1840}
1841
1842#ifdef illumos
1843/*ARGSUSED*/
1844int
1845zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1846{
1847	minor_t minor = getminor(dev);
1848#else	/* !illumos */
1849int
1850zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
1851{
1852#endif	/* illumos */
1853	zvol_state_t *zv;
1854	uint64_t volsize;
1855	rl_t *rl;
1856	int error = 0;
1857	boolean_t sync;
1858
1859#ifdef illumos
1860	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1861	if (zv == NULL)
1862		return (SET_ERROR(ENXIO));
1863#else
1864	zv = dev->si_drv2;
1865#endif
1866
1867	volsize = zv->zv_volsize;
1868	/* uio_loffset == volsize isn't an error as its required for EOF processing. */
1869	if (uio->uio_resid > 0 &&
1870	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1871		return (SET_ERROR(EIO));
1872
1873#ifdef illumos
1874	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1875		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1876		    zvol_minphys, uio);
1877		return (error);
1878	}
1879
1880	sync = !(zv->zv_flags & ZVOL_WCE) ||
1881#else
1882	sync = (ioflag & IO_SYNC) ||
1883#endif
1884	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1885
1886	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1887	    RL_WRITER);
1888	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1889		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1890		uint64_t off = uio->uio_loffset;
1891		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1892
1893		if (bytes > volsize - off)	/* don't write past the end */
1894			bytes = volsize - off;
1895
1896		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1897		error = dmu_tx_assign(tx, TXG_WAIT);
1898		if (error) {
1899			dmu_tx_abort(tx);
1900			break;
1901		}
1902		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1903		if (error == 0)
1904			zvol_log_write(zv, tx, off, bytes, sync);
1905		dmu_tx_commit(tx);
1906
1907		if (error)
1908			break;
1909	}
1910	zfs_range_unlock(rl);
1911	if (sync)
1912		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1913	return (error);
1914}
1915
1916#ifdef illumos
1917int
1918zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1919{
1920	struct uuid uuid = EFI_RESERVED;
1921	efi_gpe_t gpe = { 0 };
1922	uint32_t crc;
1923	dk_efi_t efi;
1924	int length;
1925	char *ptr;
1926
1927	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1928		return (SET_ERROR(EFAULT));
1929	ptr = (char *)(uintptr_t)efi.dki_data_64;
1930	length = efi.dki_length;
1931	/*
1932	 * Some clients may attempt to request a PMBR for the
1933	 * zvol.  Currently this interface will return EINVAL to
1934	 * such requests.  These requests could be supported by
1935	 * adding a check for lba == 0 and consing up an appropriate
1936	 * PMBR.
1937	 */
1938	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1939		return (SET_ERROR(EINVAL));
1940
1941	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1942	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1943	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1944
1945	if (efi.dki_lba == 1) {
1946		efi_gpt_t gpt = { 0 };
1947
1948		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1949		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1950		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1951		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1952		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1953		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1954		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1955		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1956		gpt.efi_gpt_SizeOfPartitionEntry =
1957		    LE_32(sizeof (efi_gpe_t));
1958		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1959		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1960		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1961		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1962		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1963		    flag))
1964			return (SET_ERROR(EFAULT));
1965		ptr += sizeof (gpt);
1966		length -= sizeof (gpt);
1967	}
1968	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1969	    length), flag))
1970		return (SET_ERROR(EFAULT));
1971	return (0);
1972}
1973
1974/*
1975 * BEGIN entry points to allow external callers access to the volume.
1976 */
1977/*
1978 * Return the volume parameters needed for access from an external caller.
1979 * These values are invariant as long as the volume is held open.
1980 */
1981int
1982zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1983    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1984    void **rl_hdl, void **bonus_hdl)
1985{
1986	zvol_state_t *zv;
1987
1988	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1989	if (zv == NULL)
1990		return (SET_ERROR(ENXIO));
1991	if (zv->zv_flags & ZVOL_DUMPIFIED)
1992		return (SET_ERROR(ENXIO));
1993
1994	ASSERT(blksize && max_xfer_len && minor_hdl &&
1995	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1996
1997	*blksize = zv->zv_volblocksize;
1998	*max_xfer_len = (uint64_t)zvol_maxphys;
1999	*minor_hdl = zv;
2000	*objset_hdl = zv->zv_objset;
2001	*zil_hdl = zv->zv_zilog;
2002	*rl_hdl = &zv->zv_znode;
2003	*bonus_hdl = zv->zv_dbuf;
2004	return (0);
2005}
2006
2007/*
2008 * Return the current volume size to an external caller.
2009 * The size can change while the volume is open.
2010 */
2011uint64_t
2012zvol_get_volume_size(void *minor_hdl)
2013{
2014	zvol_state_t *zv = minor_hdl;
2015
2016	return (zv->zv_volsize);
2017}
2018
2019/*
2020 * Return the current WCE setting to an external caller.
2021 * The WCE setting can change while the volume is open.
2022 */
2023int
2024zvol_get_volume_wce(void *minor_hdl)
2025{
2026	zvol_state_t *zv = minor_hdl;
2027
2028	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
2029}
2030
2031/*
2032 * Entry point for external callers to zvol_log_write
2033 */
2034void
2035zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
2036    boolean_t sync)
2037{
2038	zvol_state_t *zv = minor_hdl;
2039
2040	zvol_log_write(zv, tx, off, resid, sync);
2041}
2042/*
2043 * END entry points to allow external callers access to the volume.
2044 */
2045#endif	/* illumos */
2046
2047/*
2048 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
2049 */
2050static void
2051zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
2052    boolean_t sync)
2053{
2054	itx_t *itx;
2055	lr_truncate_t *lr;
2056	zilog_t *zilog = zv->zv_zilog;
2057
2058	if (zil_replaying(zilog, tx))
2059		return;
2060
2061	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
2062	lr = (lr_truncate_t *)&itx->itx_lr;
2063	lr->lr_foid = ZVOL_OBJ;
2064	lr->lr_offset = off;
2065	lr->lr_length = len;
2066
2067	itx->itx_sync = sync;
2068	zil_itx_assign(zilog, itx, tx);
2069}
2070
2071#ifdef illumos
2072/*
2073 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
2074 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
2075 */
2076/*ARGSUSED*/
2077int
2078zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
2079{
2080	zvol_state_t *zv;
2081	struct dk_callback *dkc;
2082	int error = 0;
2083	rl_t *rl;
2084
2085	mutex_enter(&zfsdev_state_lock);
2086
2087	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
2088
2089	if (zv == NULL) {
2090		mutex_exit(&zfsdev_state_lock);
2091		return (SET_ERROR(ENXIO));
2092	}
2093	ASSERT(zv->zv_total_opens > 0);
2094
2095	switch (cmd) {
2096
2097	case DKIOCINFO:
2098	{
2099		struct dk_cinfo dki;
2100
2101		bzero(&dki, sizeof (dki));
2102		(void) strcpy(dki.dki_cname, "zvol");
2103		(void) strcpy(dki.dki_dname, "zvol");
2104		dki.dki_ctype = DKC_UNKNOWN;
2105		dki.dki_unit = getminor(dev);
2106		dki.dki_maxtransfer =
2107		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
2108		mutex_exit(&zfsdev_state_lock);
2109		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
2110			error = SET_ERROR(EFAULT);
2111		return (error);
2112	}
2113
2114	case DKIOCGMEDIAINFO:
2115	{
2116		struct dk_minfo dkm;
2117
2118		bzero(&dkm, sizeof (dkm));
2119		dkm.dki_lbsize = 1U << zv->zv_min_bs;
2120		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2121		dkm.dki_media_type = DK_UNKNOWN;
2122		mutex_exit(&zfsdev_state_lock);
2123		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
2124			error = SET_ERROR(EFAULT);
2125		return (error);
2126	}
2127
2128	case DKIOCGMEDIAINFOEXT:
2129	{
2130		struct dk_minfo_ext dkmext;
2131
2132		bzero(&dkmext, sizeof (dkmext));
2133		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
2134		dkmext.dki_pbsize = zv->zv_volblocksize;
2135		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
2136		dkmext.dki_media_type = DK_UNKNOWN;
2137		mutex_exit(&zfsdev_state_lock);
2138		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
2139			error = SET_ERROR(EFAULT);
2140		return (error);
2141	}
2142
2143	case DKIOCGETEFI:
2144	{
2145		uint64_t vs = zv->zv_volsize;
2146		uint8_t bs = zv->zv_min_bs;
2147
2148		mutex_exit(&zfsdev_state_lock);
2149		error = zvol_getefi((void *)arg, flag, vs, bs);
2150		return (error);
2151	}
2152
2153	case DKIOCFLUSHWRITECACHE:
2154		dkc = (struct dk_callback *)arg;
2155		mutex_exit(&zfsdev_state_lock);
2156		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2157		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
2158			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
2159			error = 0;
2160		}
2161		return (error);
2162
2163	case DKIOCGETWCE:
2164	{
2165		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
2166		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
2167		    flag))
2168			error = SET_ERROR(EFAULT);
2169		break;
2170	}
2171	case DKIOCSETWCE:
2172	{
2173		int wce;
2174		if (ddi_copyin((void *)arg, &wce, sizeof (int),
2175		    flag)) {
2176			error = SET_ERROR(EFAULT);
2177			break;
2178		}
2179		if (wce) {
2180			zv->zv_flags |= ZVOL_WCE;
2181			mutex_exit(&zfsdev_state_lock);
2182		} else {
2183			zv->zv_flags &= ~ZVOL_WCE;
2184			mutex_exit(&zfsdev_state_lock);
2185			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2186		}
2187		return (0);
2188	}
2189
2190	case DKIOCGGEOM:
2191	case DKIOCGVTOC:
2192		/*
2193		 * commands using these (like prtvtoc) expect ENOTSUP
2194		 * since we're emulating an EFI label
2195		 */
2196		error = SET_ERROR(ENOTSUP);
2197		break;
2198
2199	case DKIOCDUMPINIT:
2200		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2201		    RL_WRITER);
2202		error = zvol_dumpify(zv);
2203		zfs_range_unlock(rl);
2204		break;
2205
2206	case DKIOCDUMPFINI:
2207		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
2208			break;
2209		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
2210		    RL_WRITER);
2211		error = zvol_dump_fini(zv);
2212		zfs_range_unlock(rl);
2213		break;
2214
2215	case DKIOCFREE:
2216	{
2217		dkioc_free_t df;
2218		dmu_tx_t *tx;
2219
2220		if (!zvol_unmap_enabled)
2221			break;
2222
2223		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
2224			error = SET_ERROR(EFAULT);
2225			break;
2226		}
2227
2228		/*
2229		 * Apply Postel's Law to length-checking.  If they overshoot,
2230		 * just blank out until the end, if there's a need to blank
2231		 * out anything.
2232		 */
2233		if (df.df_start >= zv->zv_volsize)
2234			break;	/* No need to do anything... */
2235
2236		mutex_exit(&zfsdev_state_lock);
2237
2238		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
2239		    RL_WRITER);
2240		tx = dmu_tx_create(zv->zv_objset);
2241		dmu_tx_mark_netfree(tx);
2242		error = dmu_tx_assign(tx, TXG_WAIT);
2243		if (error != 0) {
2244			dmu_tx_abort(tx);
2245		} else {
2246			zvol_log_truncate(zv, tx, df.df_start,
2247			    df.df_length, B_TRUE);
2248			dmu_tx_commit(tx);
2249			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2250			    df.df_start, df.df_length);
2251		}
2252
2253		zfs_range_unlock(rl);
2254
2255		if (error == 0) {
2256			/*
2257			 * If the write-cache is disabled or 'sync' property
2258			 * is set to 'always' then treat this as a synchronous
2259			 * operation (i.e. commit to zil).
2260			 */
2261			if (!(zv->zv_flags & ZVOL_WCE) ||
2262			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
2263				zil_commit(zv->zv_zilog, ZVOL_OBJ);
2264
2265			/*
2266			 * If the caller really wants synchronous writes, and
2267			 * can't wait for them, don't return until the write
2268			 * is done.
2269			 */
2270			if (df.df_flags & DF_WAIT_SYNC) {
2271				txg_wait_synced(
2272				    dmu_objset_pool(zv->zv_objset), 0);
2273			}
2274		}
2275		return (error);
2276	}
2277
2278	default:
2279		error = SET_ERROR(ENOTTY);
2280		break;
2281
2282	}
2283	mutex_exit(&zfsdev_state_lock);
2284	return (error);
2285}
2286#endif	/* illumos */
2287
2288int
2289zvol_busy(void)
2290{
2291	return (zvol_minors != 0);
2292}
2293
2294void
2295zvol_init(void)
2296{
2297	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2298	    1) == 0);
2299#ifdef illumos
2300	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
2301#else
2302	ZFS_LOG(1, "ZVOL Initialized.");
2303#endif
2304}
2305
2306void
2307zvol_fini(void)
2308{
2309#ifdef illumos
2310	mutex_destroy(&zfsdev_state_lock);
2311#endif
2312	ddi_soft_state_fini(&zfsdev_state);
2313	ZFS_LOG(1, "ZVOL Deinitialized.");
2314}
2315
2316#ifdef illumos
2317/*ARGSUSED*/
2318static int
2319zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2320{
2321	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2322
2323	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2324		return (1);
2325	return (0);
2326}
2327
2328/*ARGSUSED*/
2329static void
2330zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2331{
2332	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2333
2334	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2335}
2336
2337static int
2338zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2339{
2340	dmu_tx_t *tx;
2341	int error;
2342	objset_t *os = zv->zv_objset;
2343	spa_t *spa = dmu_objset_spa(os);
2344	vdev_t *vd = spa->spa_root_vdev;
2345	nvlist_t *nv = NULL;
2346	uint64_t version = spa_version(spa);
2347	uint64_t checksum, compress, refresrv, vbs, dedup;
2348
2349	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2350	ASSERT(vd->vdev_ops == &vdev_root_ops);
2351
2352	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2353	    DMU_OBJECT_END);
2354	if (error != 0)
2355		return (error);
2356	/* wait for dmu_free_long_range to actually free the blocks */
2357	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2358
2359	/*
2360	 * If the pool on which the dump device is being initialized has more
2361	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2362	 * enabled.  If so, bump that feature's counter to indicate that the
2363	 * feature is active. We also check the vdev type to handle the
2364	 * following case:
2365	 *   # zpool create test raidz disk1 disk2 disk3
2366	 *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
2367	 *   the raidz vdev itself has 3 children.
2368	 */
2369	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2370		if (!spa_feature_is_enabled(spa,
2371		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2372			return (SET_ERROR(ENOTSUP));
2373		(void) dsl_sync_task(spa_name(spa),
2374		    zfs_mvdev_dump_feature_check,
2375		    zfs_mvdev_dump_activate_feature_sync, NULL,
2376		    2, ZFS_SPACE_CHECK_RESERVED);
2377	}
2378
2379	if (!resize) {
2380		error = dsl_prop_get_integer(zv->zv_name,
2381		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2382		if (error == 0) {
2383			error = dsl_prop_get_integer(zv->zv_name,
2384			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
2385			    NULL);
2386		}
2387		if (error == 0) {
2388			error = dsl_prop_get_integer(zv->zv_name,
2389			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
2390			    &refresrv, NULL);
2391		}
2392		if (error == 0) {
2393			error = dsl_prop_get_integer(zv->zv_name,
2394			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
2395			    NULL);
2396		}
2397		if (version >= SPA_VERSION_DEDUP && error == 0) {
2398			error = dsl_prop_get_integer(zv->zv_name,
2399			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2400		}
2401	}
2402	if (error != 0)
2403		return (error);
2404
2405	tx = dmu_tx_create(os);
2406	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2407	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2408	error = dmu_tx_assign(tx, TXG_WAIT);
2409	if (error != 0) {
2410		dmu_tx_abort(tx);
2411		return (error);
2412	}
2413
2414	/*
2415	 * If we are resizing the dump device then we only need to
2416	 * update the refreservation to match the newly updated
2417	 * zvolsize. Otherwise, we save off the original state of the
2418	 * zvol so that we can restore them if the zvol is ever undumpified.
2419	 */
2420	if (resize) {
2421		error = zap_update(os, ZVOL_ZAP_OBJ,
2422		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2423		    &zv->zv_volsize, tx);
2424	} else {
2425		error = zap_update(os, ZVOL_ZAP_OBJ,
2426		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2427		    &compress, tx);
2428		if (error == 0) {
2429			error = zap_update(os, ZVOL_ZAP_OBJ,
2430			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2431			    &checksum, tx);
2432		}
2433		if (error == 0) {
2434			error = zap_update(os, ZVOL_ZAP_OBJ,
2435			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2436			    &refresrv, tx);
2437		}
2438		if (error == 0) {
2439			error = zap_update(os, ZVOL_ZAP_OBJ,
2440			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2441			    &vbs, tx);
2442		}
2443		if (error == 0) {
2444			error = dmu_object_set_blocksize(
2445			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2446		}
2447		if (version >= SPA_VERSION_DEDUP && error == 0) {
2448			error = zap_update(os, ZVOL_ZAP_OBJ,
2449			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2450			    &dedup, tx);
2451		}
2452		if (error == 0)
2453			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2454	}
2455	dmu_tx_commit(tx);
2456
2457	/*
2458	 * We only need update the zvol's property if we are initializing
2459	 * the dump area for the first time.
2460	 */
2461	if (error == 0 && !resize) {
2462		/*
2463		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2464		 * function.  Otherwise, use the old default -- OFF.
2465		 */
2466		checksum = spa_feature_is_active(spa,
2467		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2468		    ZIO_CHECKSUM_OFF;
2469
2470		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2471		VERIFY(nvlist_add_uint64(nv,
2472		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2473		VERIFY(nvlist_add_uint64(nv,
2474		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2475		    ZIO_COMPRESS_OFF) == 0);
2476		VERIFY(nvlist_add_uint64(nv,
2477		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2478		    checksum) == 0);
2479		if (version >= SPA_VERSION_DEDUP) {
2480			VERIFY(nvlist_add_uint64(nv,
2481			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2482			    ZIO_CHECKSUM_OFF) == 0);
2483		}
2484
2485		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2486		    nv, NULL);
2487		nvlist_free(nv);
2488	}
2489
2490	/* Allocate the space for the dump */
2491	if (error == 0)
2492		error = zvol_prealloc(zv);
2493	return (error);
2494}
2495
2496static int
2497zvol_dumpify(zvol_state_t *zv)
2498{
2499	int error = 0;
2500	uint64_t dumpsize = 0;
2501	dmu_tx_t *tx;
2502	objset_t *os = zv->zv_objset;
2503
2504	if (zv->zv_flags & ZVOL_RDONLY)
2505		return (SET_ERROR(EROFS));
2506
2507	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2508	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2509		boolean_t resize = (dumpsize > 0);
2510
2511		if ((error = zvol_dump_init(zv, resize)) != 0) {
2512			(void) zvol_dump_fini(zv);
2513			return (error);
2514		}
2515	}
2516
2517	/*
2518	 * Build up our lba mapping.
2519	 */
2520	error = zvol_get_lbas(zv);
2521	if (error) {
2522		(void) zvol_dump_fini(zv);
2523		return (error);
2524	}
2525
2526	tx = dmu_tx_create(os);
2527	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2528	error = dmu_tx_assign(tx, TXG_WAIT);
2529	if (error) {
2530		dmu_tx_abort(tx);
2531		(void) zvol_dump_fini(zv);
2532		return (error);
2533	}
2534
2535	zv->zv_flags |= ZVOL_DUMPIFIED;
2536	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2537	    &zv->zv_volsize, tx);
2538	dmu_tx_commit(tx);
2539
2540	if (error) {
2541		(void) zvol_dump_fini(zv);
2542		return (error);
2543	}
2544
2545	txg_wait_synced(dmu_objset_pool(os), 0);
2546	return (0);
2547}
2548
2549static int
2550zvol_dump_fini(zvol_state_t *zv)
2551{
2552	dmu_tx_t *tx;
2553	objset_t *os = zv->zv_objset;
2554	nvlist_t *nv;
2555	int error = 0;
2556	uint64_t checksum, compress, refresrv, vbs, dedup;
2557	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2558
2559	/*
2560	 * Attempt to restore the zvol back to its pre-dumpified state.
2561	 * This is a best-effort attempt as it's possible that not all
2562	 * of these properties were initialized during the dumpify process
2563	 * (i.e. error during zvol_dump_init).
2564	 */
2565
2566	tx = dmu_tx_create(os);
2567	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2568	error = dmu_tx_assign(tx, TXG_WAIT);
2569	if (error) {
2570		dmu_tx_abort(tx);
2571		return (error);
2572	}
2573	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2574	dmu_tx_commit(tx);
2575
2576	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2577	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2578	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2579	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2580	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2581	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2582	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2583	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2584
2585	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2586	(void) nvlist_add_uint64(nv,
2587	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2588	(void) nvlist_add_uint64(nv,
2589	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2590	(void) nvlist_add_uint64(nv,
2591	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2592	if (version >= SPA_VERSION_DEDUP &&
2593	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2594	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2595		(void) nvlist_add_uint64(nv,
2596		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2597	}
2598	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2599	    nv, NULL);
2600	nvlist_free(nv);
2601
2602	zvol_free_extents(zv);
2603	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2604	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2605	/* wait for dmu_free_long_range to actually free the blocks */
2606	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2607	tx = dmu_tx_create(os);
2608	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2609	error = dmu_tx_assign(tx, TXG_WAIT);
2610	if (error) {
2611		dmu_tx_abort(tx);
2612		return (error);
2613	}
2614	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2615		zv->zv_volblocksize = vbs;
2616	dmu_tx_commit(tx);
2617
2618	return (0);
2619}
2620#else	/* !illumos */
2621
2622static void
2623zvol_geom_run(zvol_state_t *zv)
2624{
2625	struct g_provider *pp;
2626
2627	pp = zv->zv_provider;
2628	g_error_provider(pp, 0);
2629
2630	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2631	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2632}
2633
2634static void
2635zvol_geom_destroy(zvol_state_t *zv)
2636{
2637	struct g_provider *pp;
2638
2639	g_topology_assert();
2640
2641	mtx_lock(&zv->zv_queue_mtx);
2642	zv->zv_state = 1;
2643	wakeup_one(&zv->zv_queue);
2644	while (zv->zv_state != 2)
2645		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2646	mtx_destroy(&zv->zv_queue_mtx);
2647
2648	pp = zv->zv_provider;
2649	zv->zv_provider = NULL;
2650	pp->private = NULL;
2651	g_wither_geom(pp->geom, ENXIO);
2652}
2653
2654static int
2655zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2656{
2657	int count, error, flags;
2658
2659	g_topology_assert();
2660
2661	/*
2662	 * To make it easier we expect either open or close, but not both
2663	 * at the same time.
2664	 */
2665	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2666	    (acr <= 0 && acw <= 0 && ace <= 0),
2667	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2668	    pp->name, acr, acw, ace));
2669
2670	if (pp->private == NULL) {
2671		if (acr <= 0 && acw <= 0 && ace <= 0)
2672			return (0);
2673		return (pp->error);
2674	}
2675
2676	/*
2677	 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2678	 * because GEOM already handles that and handles it a bit differently.
2679	 * GEOM allows for multiple read/exclusive consumers and ZFS allows
2680	 * only one exclusive consumer, no matter if it is reader or writer.
2681	 * I like better the way GEOM works so I'll leave it for GEOM to
2682	 * decide what to do.
2683	 */
2684
2685	count = acr + acw + ace;
2686	if (count == 0)
2687		return (0);
2688
2689	flags = 0;
2690	if (acr != 0 || ace != 0)
2691		flags |= FREAD;
2692	if (acw != 0)
2693		flags |= FWRITE;
2694
2695	g_topology_unlock();
2696	if (count > 0)
2697		error = zvol_open(pp, flags, count);
2698	else
2699		error = zvol_close(pp, flags, -count);
2700	g_topology_lock();
2701	return (error);
2702}
2703
2704static void
2705zvol_geom_start(struct bio *bp)
2706{
2707	zvol_state_t *zv;
2708	boolean_t first;
2709
2710	zv = bp->bio_to->private;
2711	ASSERT(zv != NULL);
2712	switch (bp->bio_cmd) {
2713	case BIO_FLUSH:
2714		if (!THREAD_CAN_SLEEP())
2715			goto enqueue;
2716		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2717		g_io_deliver(bp, 0);
2718		break;
2719	case BIO_READ:
2720	case BIO_WRITE:
2721	case BIO_DELETE:
2722		if (!THREAD_CAN_SLEEP())
2723			goto enqueue;
2724		zvol_strategy(bp);
2725		break;
2726	case BIO_GETATTR: {
2727		spa_t *spa = dmu_objset_spa(zv->zv_objset);
2728		uint64_t refd, avail, usedobjs, availobjs, val;
2729
2730		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2731			return;
2732		if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
2733			dmu_objset_space(zv->zv_objset, &refd, &avail,
2734			    &usedobjs, &availobjs);
2735			if (g_handleattr_off_t(bp, "blocksavail",
2736			    avail / DEV_BSIZE))
2737				return;
2738		} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
2739			dmu_objset_space(zv->zv_objset, &refd, &avail,
2740			    &usedobjs, &availobjs);
2741			if (g_handleattr_off_t(bp, "blocksused",
2742			    refd / DEV_BSIZE))
2743				return;
2744		} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
2745			avail = metaslab_class_get_space(spa_normal_class(spa));
2746			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
2747			if (g_handleattr_off_t(bp, "poolblocksavail",
2748			    avail / DEV_BSIZE))
2749				return;
2750		} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
2751			refd = metaslab_class_get_alloc(spa_normal_class(spa));
2752			if (g_handleattr_off_t(bp, "poolblocksused",
2753			    refd / DEV_BSIZE))
2754				return;
2755		}
2756		/* FALLTHROUGH */
2757	}
2758	default:
2759		g_io_deliver(bp, EOPNOTSUPP);
2760		break;
2761	}
2762	return;
2763
2764enqueue:
2765	mtx_lock(&zv->zv_queue_mtx);
2766	first = (bioq_first(&zv->zv_queue) == NULL);
2767	bioq_insert_tail(&zv->zv_queue, bp);
2768	mtx_unlock(&zv->zv_queue_mtx);
2769	if (first)
2770		wakeup_one(&zv->zv_queue);
2771}
2772
2773static void
2774zvol_geom_worker(void *arg)
2775{
2776	zvol_state_t *zv;
2777	struct bio *bp;
2778
2779	thread_lock(curthread);
2780	sched_prio(curthread, PRIBIO);
2781	thread_unlock(curthread);
2782
2783	zv = arg;
2784	for (;;) {
2785		mtx_lock(&zv->zv_queue_mtx);
2786		bp = bioq_takefirst(&zv->zv_queue);
2787		if (bp == NULL) {
2788			if (zv->zv_state == 1) {
2789				zv->zv_state = 2;
2790				wakeup(&zv->zv_state);
2791				mtx_unlock(&zv->zv_queue_mtx);
2792				kthread_exit();
2793			}
2794			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2795			    "zvol:io", 0);
2796			continue;
2797		}
2798		mtx_unlock(&zv->zv_queue_mtx);
2799		switch (bp->bio_cmd) {
2800		case BIO_FLUSH:
2801			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2802			g_io_deliver(bp, 0);
2803			break;
2804		case BIO_READ:
2805		case BIO_WRITE:
2806		case BIO_DELETE:
2807			zvol_strategy(bp);
2808			break;
2809		default:
2810			g_io_deliver(bp, EOPNOTSUPP);
2811			break;
2812		}
2813	}
2814}
2815
2816extern boolean_t dataset_name_hidden(const char *name);
2817
2818static int
2819zvol_create_snapshots(objset_t *os, const char *name)
2820{
2821	uint64_t cookie, obj;
2822	char *sname;
2823	int error, len;
2824
2825	cookie = obj = 0;
2826	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2827
2828#if 0
2829	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2830	    DS_FIND_SNAPSHOTS);
2831#endif
2832
2833	for (;;) {
2834		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2835		if (len >= MAXPATHLEN) {
2836			dmu_objset_rele(os, FTAG);
2837			error = ENAMETOOLONG;
2838			break;
2839		}
2840
2841		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2842		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2843		    sname + len, &obj, &cookie, NULL);
2844		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2845		if (error != 0) {
2846			if (error == ENOENT)
2847				error = 0;
2848			break;
2849		}
2850
2851		error = zvol_create_minor(sname);
2852		if (error != 0 && error != EEXIST) {
2853			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2854			    sname, error);
2855			break;
2856		}
2857	}
2858
2859	kmem_free(sname, MAXPATHLEN);
2860	return (error);
2861}
2862
2863int
2864zvol_create_minors(const char *name)
2865{
2866	uint64_t cookie;
2867	objset_t *os;
2868	char *osname, *p;
2869	int error, len;
2870
2871	if (dataset_name_hidden(name))
2872		return (0);
2873
2874	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2875		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2876		    name, error);
2877		return (error);
2878	}
2879	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2880		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2881		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2882		error = zvol_create_minor(name);
2883		if (error == 0 || error == EEXIST) {
2884			error = zvol_create_snapshots(os, name);
2885		} else {
2886			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2887			    name, error);
2888		}
2889		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2890		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2891		return (error);
2892	}
2893	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2894		dmu_objset_rele(os, FTAG);
2895		return (0);
2896	}
2897
2898	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2899	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2900		dmu_objset_rele(os, FTAG);
2901		kmem_free(osname, MAXPATHLEN);
2902		return (ENOENT);
2903	}
2904	p = osname + strlen(osname);
2905	len = MAXPATHLEN - (p - osname);
2906
2907#if 0
2908	/* Prefetch the datasets. */
2909	cookie = 0;
2910	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2911		if (!dataset_name_hidden(osname))
2912			(void) dmu_objset_prefetch(osname, NULL);
2913	}
2914#endif
2915
2916	cookie = 0;
2917	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2918	    &cookie) == 0) {
2919		dmu_objset_rele(os, FTAG);
2920		(void)zvol_create_minors(osname);
2921		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2922			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2923			    name, error);
2924			return (error);
2925		}
2926	}
2927
2928	dmu_objset_rele(os, FTAG);
2929	kmem_free(osname, MAXPATHLEN);
2930	return (0);
2931}
2932
2933static void
2934zvol_rename_minor(zvol_state_t *zv, const char *newname)
2935{
2936	struct g_geom *gp;
2937	struct g_provider *pp;
2938	struct cdev *dev;
2939
2940	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
2941
2942	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2943		g_topology_lock();
2944		pp = zv->zv_provider;
2945		ASSERT(pp != NULL);
2946		gp = pp->geom;
2947		ASSERT(gp != NULL);
2948
2949		zv->zv_provider = NULL;
2950		g_wither_provider(pp, ENXIO);
2951
2952		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2953		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2954		pp->sectorsize = DEV_BSIZE;
2955		pp->mediasize = zv->zv_volsize;
2956		pp->private = zv;
2957		zv->zv_provider = pp;
2958		g_error_provider(pp, 0);
2959		g_topology_unlock();
2960	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2961		struct make_dev_args args;
2962
2963		dev = zv->zv_dev;
2964		ASSERT(dev != NULL);
2965		zv->zv_dev = NULL;
2966		destroy_dev(dev);
2967
2968		make_dev_args_init(&args);
2969		args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
2970		args.mda_devsw = &zvol_cdevsw;
2971		args.mda_cr = NULL;
2972		args.mda_uid = UID_ROOT;
2973		args.mda_gid = GID_OPERATOR;
2974		args.mda_mode = 0640;
2975		args.mda_si_drv2 = zv;
2976		if (make_dev_s(&args, &zv->zv_dev,
2977		    "%s/%s", ZVOL_DRIVER, newname) == 0)
2978			zv->zv_dev->si_iosize_max = MAXPHYS;
2979	}
2980	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2981}
2982
2983void
2984zvol_rename_minors(const char *oldname, const char *newname)
2985{
2986	char name[MAXPATHLEN];
2987	struct g_provider *pp;
2988	struct g_geom *gp;
2989	size_t oldnamelen, newnamelen;
2990	zvol_state_t *zv;
2991	char *namebuf;
2992	boolean_t locked = B_FALSE;
2993
2994	oldnamelen = strlen(oldname);
2995	newnamelen = strlen(newname);
2996
2997	DROP_GIANT();
2998	/* See comment in zvol_open(). */
2999	if (!MUTEX_HELD(&zfsdev_state_lock)) {
3000		mutex_enter(&zfsdev_state_lock);
3001		locked = B_TRUE;
3002	}
3003
3004	LIST_FOREACH(zv, &all_zvols, zv_links) {
3005		if (strcmp(zv->zv_name, oldname) == 0) {
3006			zvol_rename_minor(zv, newname);
3007		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
3008		    (zv->zv_name[oldnamelen] == '/' ||
3009		     zv->zv_name[oldnamelen] == '@')) {
3010			snprintf(name, sizeof(name), "%s%c%s", newname,
3011			    zv->zv_name[oldnamelen],
3012			    zv->zv_name + oldnamelen + 1);
3013			zvol_rename_minor(zv, name);
3014		}
3015	}
3016
3017	if (locked)
3018		mutex_exit(&zfsdev_state_lock);
3019	PICKUP_GIANT();
3020}
3021
3022static int
3023zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
3024{
3025	zvol_state_t *zv = dev->si_drv2;
3026	int err = 0;
3027
3028	mutex_enter(&zfsdev_state_lock);
3029	if (zv->zv_total_opens == 0)
3030		err = zvol_first_open(zv);
3031	if (err) {
3032		mutex_exit(&zfsdev_state_lock);
3033		return (err);
3034	}
3035	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
3036		err = SET_ERROR(EROFS);
3037		goto out;
3038	}
3039	if (zv->zv_flags & ZVOL_EXCL) {
3040		err = SET_ERROR(EBUSY);
3041		goto out;
3042	}
3043#ifdef FEXCL
3044	if (flags & FEXCL) {
3045		if (zv->zv_total_opens != 0) {
3046			err = SET_ERROR(EBUSY);
3047			goto out;
3048		}
3049		zv->zv_flags |= ZVOL_EXCL;
3050	}
3051#endif
3052
3053	zv->zv_total_opens++;
3054	mutex_exit(&zfsdev_state_lock);
3055	return (err);
3056out:
3057	if (zv->zv_total_opens == 0)
3058		zvol_last_close(zv);
3059	mutex_exit(&zfsdev_state_lock);
3060	return (err);
3061}
3062
3063static int
3064zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3065{
3066	zvol_state_t *zv = dev->si_drv2;
3067
3068	mutex_enter(&zfsdev_state_lock);
3069	if (zv->zv_flags & ZVOL_EXCL) {
3070		ASSERT(zv->zv_total_opens == 1);
3071		zv->zv_flags &= ~ZVOL_EXCL;
3072	}
3073
3074	/*
3075	 * If the open count is zero, this is a spurious close.
3076	 * That indicates a bug in the kernel / DDI framework.
3077	 */
3078	ASSERT(zv->zv_total_opens != 0);
3079
3080	/*
3081	 * You may get multiple opens, but only one close.
3082	 */
3083	zv->zv_total_opens--;
3084
3085	if (zv->zv_total_opens == 0)
3086		zvol_last_close(zv);
3087
3088	mutex_exit(&zfsdev_state_lock);
3089	return (0);
3090}
3091
3092static int
3093zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
3094{
3095	zvol_state_t *zv;
3096	rl_t *rl;
3097	off_t offset, length, chunk;
3098	int i, error;
3099	u_int u;
3100
3101	zv = dev->si_drv2;
3102
3103	error = 0;
3104	KASSERT(zv->zv_total_opens > 0,
3105	    ("Device with zero access count in zvol_d_ioctl"));
3106
3107	i = IOCPARM_LEN(cmd);
3108	switch (cmd) {
3109	case DIOCGSECTORSIZE:
3110		*(u_int *)data = DEV_BSIZE;
3111		break;
3112	case DIOCGMEDIASIZE:
3113		*(off_t *)data = zv->zv_volsize;
3114		break;
3115	case DIOCGFLUSH:
3116		zil_commit(zv->zv_zilog, ZVOL_OBJ);
3117		break;
3118	case DIOCGDELETE:
3119		if (!zvol_unmap_enabled)
3120			break;
3121
3122		offset = ((off_t *)data)[0];
3123		length = ((off_t *)data)[1];
3124		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
3125		    offset < 0 || offset >= zv->zv_volsize ||
3126		    length <= 0) {
3127			printf("%s: offset=%jd length=%jd\n", __func__, offset,
3128			    length);
3129			error = EINVAL;
3130			break;
3131		}
3132
3133		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
3134		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
3135		error = dmu_tx_assign(tx, TXG_WAIT);
3136		if (error != 0) {
3137			dmu_tx_abort(tx);
3138		} else {
3139			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
3140			dmu_tx_commit(tx);
3141			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
3142			    offset, length);
3143		}
3144		zfs_range_unlock(rl);
3145		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
3146			zil_commit(zv->zv_zilog, ZVOL_OBJ);
3147		break;
3148	case DIOCGSTRIPESIZE:
3149		*(off_t *)data = zv->zv_volblocksize;
3150		break;
3151	case DIOCGSTRIPEOFFSET:
3152		*(off_t *)data = 0;
3153		break;
3154	case DIOCGATTR: {
3155		spa_t *spa = dmu_objset_spa(zv->zv_objset);
3156		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
3157		uint64_t refd, avail, usedobjs, availobjs;
3158
3159		if (strcmp(arg->name, "GEOM::candelete") == 0)
3160			arg->value.i = 1;
3161		else if (strcmp(arg->name, "blocksavail") == 0) {
3162			dmu_objset_space(zv->zv_objset, &refd, &avail,
3163			    &usedobjs, &availobjs);
3164			arg->value.off = avail / DEV_BSIZE;
3165		} else if (strcmp(arg->name, "blocksused") == 0) {
3166			dmu_objset_space(zv->zv_objset, &refd, &avail,
3167			    &usedobjs, &availobjs);
3168			arg->value.off = refd / DEV_BSIZE;
3169		} else if (strcmp(arg->name, "poolblocksavail") == 0) {
3170			avail = metaslab_class_get_space(spa_normal_class(spa));
3171			avail -= metaslab_class_get_alloc(spa_normal_class(spa));
3172			arg->value.off = avail / DEV_BSIZE;
3173		} else if (strcmp(arg->name, "poolblocksused") == 0) {
3174			refd = metaslab_class_get_alloc(spa_normal_class(spa));
3175			arg->value.off = refd / DEV_BSIZE;
3176		} else
3177			error = ENOIOCTL;
3178		break;
3179	}
3180	case FIOSEEKHOLE:
3181	case FIOSEEKDATA: {
3182		off_t *off = (off_t *)data;
3183		uint64_t noff;
3184		boolean_t hole;
3185
3186		hole = (cmd == FIOSEEKHOLE);
3187		noff = *off;
3188		error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
3189		*off = noff;
3190		break;
3191	}
3192	default:
3193		error = ENOIOCTL;
3194	}
3195
3196	return (error);
3197}
3198#endif	/* illumos */
3199