zvol.c revision 241297
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/zvol/dsk/<pool_name>/<dataset_name>
 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * FreeBSD notes.
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */
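/*
 * On FreeBSD the GEOM provider for each volume is named
 * "zvol/<pool_name>/<dataset_name>" (see zvol_geom_create() below), so
 * the device node shows up as /dev/zvol/<pool_name>/<dataset_name>.
 * A userland consumer can treat it like any other disk device; a
 * minimal sketch ("tank/vol" is a hypothetical dataset name):
 *
 *	int fd = open("/dev/zvol/tank/vol", O_RDWR);
 *
 * No additional setup command is required before the open.
 */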

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <geom/geom.h>

#include "zfs_namecheck.h"

struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static uint32_t zvol_minors;

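/*
 * An extent describes a run of physically contiguous blocks of the
 * volume.  The extent list is only populated for dumpified zvols
 * (see zvol_get_lbas()), where dump I/O must bypass the DMU.
 */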
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	struct g_provider *zv_provider;	/* GEOM provider */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
	int		zv_state;
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;
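/*
 * DMU_MAX_ACCESS is 10MB at this revision, so a single transfer is
 * capped at 5MB; zvol_strategy() iterates in zvol_maxphys-sized
 * chunks for larger requests.
 */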

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t **);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static zvol_state_t *zvol_geom_create(const char *name);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);

static void
zvol_size_changed(zvol_state_t *zv)
{
#ifdef sun
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !sun */
	struct g_provider *pp;

	pp = zv->zv_provider;
	if (pp == NULL)
		return;
	g_topology_lock();
	g_resize_provider(pp, zv->zv_volsize);
	g_topology_unlock();
#endif	/* !sun */
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (EINVAL);

	if (volsize % blocksize != 0)
		return (EINVAL);

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (EOVERFLOW);
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (EDOM);

	return (0);
}

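/*
 * With SPA_MINBLOCKSIZE at 512 and SPA_MAXBLOCKSIZE at 128K (the
 * values at this revision), the valid volblocksize settings are
 * exactly the powers of two in the range [512, 128K].
 */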
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

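/*
 * Find the in-core state for the named volume by walking the GEOM
 * geoms of our class.  Returns NULL if no minor exists for the name.
 */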
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	struct g_provider *pp;
	struct g_geom *gp;
	zvol_state_t *zv = NULL;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	g_topology_lock();
	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
		pp = LIST_FIRST(&gp->provider);
		if (pp == NULL)
			continue;
		zv = pp->private;
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			break;
	}
	g_topology_unlock();

	return (gp != NULL ? zv : NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

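/*
 * traverse_dataset() callback used by zvol_get_lbas(): records every
 * level-0 data block of ZVOL_OBJ, merging physically contiguous
 * blocks into a single extent.  Gang blocks abort the walk since they
 * have no single contiguous placement.
 */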
/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (EFRAGS);

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}


/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (ENOTSUP);
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_err,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

#ifdef sun
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&spa_namespace_lock);
	return (zv ? 0 : -1);
}
#endif	/* sun */


/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	uint64_t volsize;
	int error;

	ZFS_LOG(1, "Creating ZVOL %s...", name);

	mutex_enter(&spa_namespace_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

#ifdef sun
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (EAGAIN);
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (EAGAIN);
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (EAGAIN);
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !sun */

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	DROP_GIANT();
	g_topology_lock();
	zv = zvol_geom_create(name);
	zv->zv_volsize = volsize;
	zv->zv_provider->mediasize = zv->zv_volsize;

#endif	/* !sun */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&spa_namespace_lock);

	zvol_geom_run(zv);

	g_topology_unlock();
	PICKUP_GIANT();

	ZFS_LOG(1, "ZVOL %s created.", name);

	return (0);
}


/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef sun
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (zv->zv_total_opens != 0)
		return (EBUSY);

	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

#ifdef sun
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#endif	/* sun */

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	zvol_geom_destroy(zv);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&spa_namespace_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}
	g_topology_lock();
	rc = zvol_remove_zv(zv);
	g_topology_unlock();
	mutex_exit(&spa_namespace_lock);
	return (rc);
}

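/*
 * First open of a volume: take a long-term hold on the objset and set
 * up the ZIL handle and bonus buffer.  The caller is expected to hold
 * spa_namespace_lock (see zvol_open()).
 */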
int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}


#ifdef sun
int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (ENOSPC);

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
#endif	/* sun */

int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	struct g_geom *gp, *gptmp;
	struct g_provider *pp;
	zvol_state_t *zv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&spa_namespace_lock);
	g_topology_lock();

	LIST_FOREACH_SAFE(gp, &zfs_zvol_class.geom, geom, gptmp) {
		pp = LIST_FIRST(&gp->provider);
		if (pp == NULL)
			continue;
		zv = pp->private;
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		     zv->zv_name[namelen] == '/')) {
			(void) zvol_remove_zv(zv);
		}
	}

	g_topology_unlock();
	mutex_exit(&spa_namespace_lock);
	PICKUP_GIANT();
}

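/*
 * Change the volume size.  The new size must pass zvol_check_volsize()
 * against the existing volblocksize and the dataset must not be
 * readonly; on success the GEOM provider (if any) is resized as well.
 */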
int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size. If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
#ifdef ZVOL_DUMP
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
#endif	/* ZVOL_DUMP */
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(zv);
		}
	}

#ifdef sun
	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* sun */

out:
	dmu_objset_rele(os, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (error);
}


/*ARGSUSED*/
static int
zvol_open(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int err = 0;
	boolean_t locked = B_FALSE;

	/*
	 * Protect against recursively entering spa_namespace_lock
	 * when spa_open() is used for a pool on a (local) ZVOL(s).
	 * This is needed since we replaced upstream zfsdev_state_lock
	 * with spa_namespace_lock in the ZVOL code.
	 * We are using the same trick as spa_open().
	 * Note that calls in zvol_first_open which need to resolve
	 * pool name to a spa object will enter spa_open()
	 * recursively, but that function already has all the
	 * necessary protection.
	 */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = EROFS;
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = EBUSY;
		goto out;
	}
#ifdef FEXCL
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = EBUSY;
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}
#endif

	zv->zv_total_opens += count;
	if (locked)
		mutex_exit(&spa_namespace_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (err);
}

/*ARGSUSED*/
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	zv = pp->private;
	if (zv == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_total_opens -= count;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (error);
}


static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

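/*
 * The three per-record write states below are, roughly:
 *   WR_INDIRECT  - data is written via dmu_sync(); the log record
 *		    carries only a block pointer,
 *   WR_COPIED    - data is copied into the itx when it is created,
 *   WR_NEED_COPY - data is copied only when the itx is committed.
 */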

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}


#ifdef sun
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	for (c = 0; c < vd->vdev_children; c++) {
		ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
		    vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		int err = zvol_dumpio_vdev(vd->vdev_child[c],
		    addr, offset, size, doread, isdump);
		if (err != 0) {
			numerrors++;
		} else if (doread) {
			break;
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (EIO);
	else if (!doread && !vdev_writeable(vd))
		return (EIO);

	dvd = vd->vdev_tsd;
	ASSERT3P(dvd, !=, NULL);
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (EIO);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
		    doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (EINVAL);
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}
#endif	/* sun */

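/*
 * GEOM I/O path for BIO_READ and BIO_WRITE, normally called from the
 * per-volume worker thread.  The bio is always delivered from here;
 * a short transfer reports bio_completed < bio_length together with
 * a suitable bio_error.
 */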
int
zvol_strategy(struct bio *bp)
{
	zvol_state_t *zv = bp->bio_to->private;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = (bp->bio_cmd == BIO_READ);
	boolean_t sync;

	if (zv == NULL) {
		g_io_deliver(bp, ENXIO);
		return (0);
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		g_io_deliver(bp, EROFS);
		return (0);
	}

	off = bp->bio_offset;
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		g_io_deliver(bp, EIO);
		return (0);
	}

	sync = !doread && zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length)
		bp->bio_error = (off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	g_io_deliver(bp, 0);

	return (0);
}


#ifdef sun
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}


int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (EFAULT);
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (EINVAL);

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (EFAULT);
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (EFAULT);
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (ENXIO);

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */


/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&spa_namespace_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		{
			uint64_t vs = zv->zv_volsize;
			uint8_t bs = zv->zv_min_bs;

			mutex_exit(&spa_namespace_lock);
			error = zvol_getefi((void *)arg, flag, vs, bs);
			return (error);
		}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&spa_namespace_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
		{
			int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
			if (ddi_copyout(&wce, (void *)arg, sizeof (int),
			    flag))
				error = EFAULT;
			break;
		}
	case DKIOCSETWCE:
		{
			int wce;
			if (ddi_copyin((void *)arg, &wce, sizeof (int),
			    flag)) {
				error = EFAULT;
				break;
			}
			if (wce) {
				zv->zv_flags |= ZVOL_WCE;
				mutex_exit(&spa_namespace_lock);
			} else {
				zv->zv_flags &= ~ZVOL_WCE;
				mutex_exit(&spa_namespace_lock);
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			}
			return (0);
		}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = ENOTSUP;
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&spa_namespace_lock);
	return (error);
}
#endif	/* sun */


int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	ZFS_LOG(1, "ZVOL Initialized.");
}

void
zvol_fini(void)
{
	ddi_soft_state_fini(&zfsdev_state);
	ZFS_LOG(1, "ZVOL Deinitialized.");
}


#ifdef sun
static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize. Otherwise, we save off the original state of the
	 * zvol so that we can restore them if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}


static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (EROFS);

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}
#endif	/* sun */

1986
1987static zvol_state_t *
1988zvol_geom_create(const char *name)
1989{
1990	struct g_provider *pp;
1991	struct g_geom *gp;
1992	zvol_state_t *zv;
1993
1994	gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
1995	gp->start = zvol_geom_start;
1996	gp->access = zvol_geom_access;
1997	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
1998	pp->sectorsize = DEV_BSIZE;
1999
2000	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
2001	zv->zv_provider = pp;
2002	zv->zv_state = 0;
2003	bioq_init(&zv->zv_queue);
2004	mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
2005
2006	pp->private = zv;
2007
2008	return (zv);
2009}
2010
2011static void
2012zvol_geom_run(zvol_state_t *zv)
2013{
2014	struct g_provider *pp;
2015
2016	pp = zv->zv_provider;
2017	g_error_provider(pp, 0);
2018
2019	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2020	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2021}
2022
2023static void
2024zvol_geom_destroy(zvol_state_t *zv)
2025{
2026	struct g_provider *pp;
2027
2028	g_topology_assert();
2029
2030	mtx_lock(&zv->zv_queue_mtx);
2031	zv->zv_state = 1;
2032	wakeup_one(&zv->zv_queue);
2033	while (zv->zv_state != 2)
2034		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2035	mtx_destroy(&zv->zv_queue_mtx);
2036
2037	pp = zv->zv_provider;
2038	zv->zv_provider = NULL;
2039	pp->private = NULL;
2040	g_wither_geom(pp->geom, ENXIO);
2041
2042	kmem_free(zv, sizeof(*zv));
2043}
2044
2045static int
2046zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2047{
2048	int count, error, flags;
2049
2050	g_topology_assert();
2051
2052	/*
2053	 * For simplicity we expect either an open or a close, but not both
2054	 * at the same time.
2055	 */
2056	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2057	    (acr <= 0 && acw <= 0 && ace <= 0),
2058	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2059	    pp->name, acr, acw, ace));
2060
2061	if (pp->private == NULL) {
2062		if (acr <= 0 && acw <= 0 && ace <= 0)
2063			return (0);
2064		return (pp->error);
2065	}
2066
2067	/*
2068	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2069	 * because GEOM already handles exclusive access, and handles it a bit
2070	 * differently.  GEOM allows multiple read/exclusive consumers, while
2071	 * ZFS allows only one exclusive consumer, no matter whether it is a
2072	 * reader or a writer.  The GEOM semantics are preferable, so the
2073	 * exclusive-access decision is left to GEOM.
2074	 */
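	/*
	 * For example (values illustrative): a read-only open arrives as
	 * (acr=1, acw=0, ace=0) and is forwarded as zvol_open(pp, FREAD, 1);
	 * the matching close arrives as (acr=-1, acw=0, ace=0) and becomes
	 * zvol_close(pp, FREAD, 1).
	 */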
2075
2076	count = acr + acw + ace;
2077	if (count == 0)
2078		return (0);
2079
2080	flags = 0;
2081	if (acr != 0 || ace != 0)
2082		flags |= FREAD;
2083	if (acw != 0)
2084		flags |= FWRITE;
2085
2086	g_topology_unlock();
2087	if (count > 0)
2088		error = zvol_open(pp, flags, count);
2089	else
2090		error = zvol_close(pp, flags, -count);
2091	g_topology_lock();
2092	return (error);
2093}
2094
2095static void
2096zvol_geom_start(struct bio *bp)
2097{
2098	zvol_state_t *zv;
2099	boolean_t first;
2100
2101	switch (bp->bio_cmd) {
2102	case BIO_READ:
2103	case BIO_WRITE:
2104	case BIO_FLUSH:
2105		zv = bp->bio_to->private;
2106		ASSERT(zv != NULL);
2107		mtx_lock(&zv->zv_queue_mtx);
2108		first = (bioq_first(&zv->zv_queue) == NULL);
2109		bioq_insert_tail(&zv->zv_queue, bp);
2110		mtx_unlock(&zv->zv_queue_mtx);
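		/*
		 * Wake the worker only on the empty -> non-empty
		 * transition; otherwise it is already awake and will
		 * drain the queue itself.
		 */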
2111		if (first)
2112			wakeup_one(&zv->zv_queue);
2113		break;
2114	case BIO_GETATTR:
2115	case BIO_DELETE:
2116	default:
2117		g_io_deliver(bp, EOPNOTSUPP);
2118		break;
2119	}
2120}
2121
2122static void
2123zvol_geom_worker(void *arg)
2124{
2125	zvol_state_t *zv;
2126	struct bio *bp;
2127
2128	thread_lock(curthread);
2129	sched_prio(curthread, PRIBIO);
2130	thread_unlock(curthread);
2131
2132	zv = arg;
2133	for (;;) {
2134		mtx_lock(&zv->zv_queue_mtx);
2135		bp = bioq_takefirst(&zv->zv_queue);
2136		if (bp == NULL) {
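			/*
			 * Empty queue: if zvol_geom_destroy() requested
			 * shutdown (zv_state == 1), acknowledge and exit;
			 * otherwise sleep until new I/O is queued.
			 */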
2137			if (zv->zv_state == 1) {
2138				zv->zv_state = 2;
2139				wakeup(&zv->zv_state);
2140				mtx_unlock(&zv->zv_queue_mtx);
2141				kthread_exit();
2142			}
2143			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2144			    "zvol:io", 0);
2145			continue;
2146		}
2147		mtx_unlock(&zv->zv_queue_mtx);
2148		switch (bp->bio_cmd) {
2149		case BIO_FLUSH:
2150			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2151			g_io_deliver(bp, 0);
2152			break;
2153		case BIO_READ:
2154		case BIO_WRITE:
2155			zvol_strategy(bp);
2156			break;
2157		}
2158	}
2159}
2160
2161extern boolean_t dataset_name_hidden(const char *name);
2162
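/*
 * Create a device minor for every snapshot of the given volume by
 * iterating over "<name>@<snapshot>" names with dmu_snapshot_list_next().
 * The caller must hold the objset and is responsible for releasing it.
 */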
2163static int
2164zvol_create_snapshots(objset_t *os, const char *name)
2165{
2166	uint64_t cookie, obj;
2167	char *sname;
2168	int error, len;
2169
2170	cookie = obj = 0;
2171	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2172
2173	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2174	    DS_FIND_SNAPSHOTS);
2175
2176	for (;;) {
2177		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2178		if (len >= MAXPATHLEN) {
2179			/* Our caller holds the objset; don't release it here. */
2180			error = ENAMETOOLONG;
2181			break;
2182		}
2183
2184		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2185		    sname + len, &obj, &cookie, NULL);
2186		if (error != 0) {
2187			if (error == ENOENT)
2188				error = 0;
2189			break;
2190		}
2191
2192		if ((error = zvol_create_minor(sname)) != 0) {
2193			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2194			    sname, error);
2195			break;
2196		}
2197	}
2198
2199	kmem_free(sname, MAXPATHLEN);
2200	return (error);
2201}
2202
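/*
 * Recursively create device minors for a dataset tree: a volume gets a
 * minor plus minors for its snapshots, while a filesystem is walked for
 * child datasets.  The objset hold is dropped and re-acquired around the
 * recursion so that it is not held across nested dmu_objset_hold() calls.
 */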
2203int
2204zvol_create_minors(const char *name)
2205{
2206	uint64_t cookie;
2207	objset_t *os;
2208	char *osname, *p;
2209	int error, len;
2210
2211	if (dataset_name_hidden(name))
2212		return (0);
2213
2214	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2215		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2216		    name, error);
2217		return (error);
2218	}
2219	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2220		if ((error = zvol_create_minor(name)) == 0)
2221			error = zvol_create_snapshots(os, name);
2222		else {
2223			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2224			    name, error);
2225		}
2226		dmu_objset_rele(os, FTAG);
2227		return (error);
2228	}
2229	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2230		dmu_objset_rele(os, FTAG);
2231		return (0);
2232	}
2233
2234	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2235	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2236		dmu_objset_rele(os, FTAG);
2237		kmem_free(osname, MAXPATHLEN);
2238		return (ENAMETOOLONG);
2239	}
2240	p = osname + strlen(osname);
2241	len = MAXPATHLEN - (p - osname);
2242
2243	/* Prefetch the datasets. */
2244	cookie = 0;
2245	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2246		if (!dataset_name_hidden(osname))
2247			(void) dmu_objset_prefetch(osname, NULL);
2248	}
2249
2250	cookie = 0;
2251	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2252	    &cookie) == 0) {
2253		dmu_objset_rele(os, FTAG);
2254		(void) zvol_create_minors(osname);
2255		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2256			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2257			    name, error);
			kmem_free(osname, MAXPATHLEN);
2258			return (error);
2259		}
2260	}
2261
2262	dmu_objset_rele(os, FTAG);
2263	kmem_free(osname, MAXPATHLEN);
2264	return (0);
2265}
2266
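/*
 * Rename one minor: wither the old provider and create a new one with
 * the new name on the same geom, carrying the zvol state over.
 */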
2267static void
2268zvol_rename_minor(struct g_geom *gp, const char *newname)
2269{
2270	struct g_provider *pp;
2271	zvol_state_t *zv;
2272
2273	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2274	g_topology_assert();
2275
2276	pp = LIST_FIRST(&gp->provider);
2277	ASSERT(pp != NULL);
2278	zv = pp->private;
2279	ASSERT(zv != NULL);
2280
2281	zv->zv_provider = NULL;
2282	g_wither_provider(pp, ENXIO);
2283
2284	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2285	pp->sectorsize = DEV_BSIZE;
2286	pp->mediasize = zv->zv_volsize;
2287	pp->private = zv;
2288	zv->zv_provider = pp;
2289	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2290	g_error_provider(pp, 0);
2291}
2292
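/*
 * Rename every minor whose name is oldname itself, or a child ('/') or
 * snapshot ('@') of it, rewriting the matching prefix to newname.
 */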
2293void
2294zvol_rename_minors(const char *oldname, const char *newname)
2295{
2296	char name[MAXPATHLEN];
2297	struct g_provider *pp;
2298	struct g_geom *gp;
2299	size_t oldnamelen, newnamelen;
2300	zvol_state_t *zv;
2301	char *namebuf;
2302
2303	oldnamelen = strlen(oldname);
2304	newnamelen = strlen(newname);
2305
2306	DROP_GIANT();
2307	mutex_enter(&spa_namespace_lock);
2308	g_topology_lock();
2309
2310	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
2311		pp = LIST_FIRST(&gp->provider);
2312		if (pp == NULL)
2313			continue;
2314		zv = pp->private;
2315		if (zv == NULL)
2316			continue;
2317		if (strcmp(zv->zv_name, oldname) == 0) {
2318			zvol_rename_minor(gp, newname);
2319		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2320		    (zv->zv_name[oldnamelen] == '/' ||
2321		     zv->zv_name[oldnamelen] == '@')) {
2322			snprintf(name, sizeof(name), "%s%c%s", newname,
2323			    zv->zv_name[oldnamelen],
2324			    zv->zv_name + oldnamelen + 1);
2325			zvol_rename_minor(gp, name);
2326		}
2327	}
2328
2329	g_topology_unlock();
2330	mutex_exit(&spa_namespace_lock);
2331	PICKUP_GIANT();
2332}
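
#if 0	/* Illustrative sketch only; the dataset names are assumptions. */
/*
 * Renaming "tank/vol" to "tank/newvol" also renames descendants:
 * "tank/vol@snap" becomes "tank/newvol@snap" and "tank/vol/child"
 * becomes "tank/newvol/child".
 */
static void
zvol_rename_example(void)
{
	zvol_rename_minors("tank/vol", "tank/newvol");
}
#endif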
2333