zvol.c revision 268649
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
25 * All rights reserved.
26 * Copyright (c) 2013 by Delphix. All rights reserved.
27 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
28 *
29 * Portions Copyright 2010 Robert Milkowski
30 *
31 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
32 */
33
34/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
35
36/*
37 * ZFS volume emulation driver.
38 *
39 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
40 * Volumes are accessed through the symbolic links named:
41 *
42 * /dev/zvol/dsk/<pool_name>/<dataset_name>
43 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
44 *
45 * These links are created by the /dev filesystem (sdev_zvolops.c).
46 * Volumes are persistent through reboot.  No user command needs to be
47 * run before opening and using a device.
48 *
49 * FreeBSD notes.
50 * On FreeBSD, ZVOLs are simply GEOM providers, like any other storage
51 * device in the system.
52 */
53
54#include <sys/types.h>
55#include <sys/param.h>
56#include <sys/kernel.h>
57#include <sys/errno.h>
58#include <sys/uio.h>
59#include <sys/bio.h>
60#include <sys/buf.h>
61#include <sys/kmem.h>
62#include <sys/conf.h>
63#include <sys/cmn_err.h>
64#include <sys/stat.h>
65#include <sys/zap.h>
66#include <sys/spa.h>
67#include <sys/spa_impl.h>
68#include <sys/zio.h>
69#include <sys/disk.h>
70#include <sys/dmu_traverse.h>
71#include <sys/dnode.h>
72#include <sys/dsl_dataset.h>
73#include <sys/dsl_prop.h>
74#include <sys/dkio.h>
75#include <sys/byteorder.h>
76#include <sys/sunddi.h>
77#include <sys/dirent.h>
78#include <sys/policy.h>
79#include <sys/queue.h>
80#include <sys/fs/zfs.h>
81#include <sys/zfs_ioctl.h>
82#include <sys/zil.h>
83#include <sys/refcount.h>
84#include <sys/zfs_znode.h>
85#include <sys/zfs_rlock.h>
86#include <sys/vdev_impl.h>
87#include <sys/vdev_raidz.h>
88#include <sys/zvol.h>
89#include <sys/zil_impl.h>
90#include <sys/dbuf.h>
91#include <sys/dmu_tx.h>
92#include <sys/zfeature.h>
93#include <sys/zio_checksum.h>
94
95#include <geom/geom.h>
96
97#include "zfs_namecheck.h"
98
99struct g_class zfs_zvol_class = {
100	.name = "ZFS::ZVOL",
101	.version = G_VERSION,
102};
103
104DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
105
106void *zfsdev_state;
107static char *zvol_tag = "zvol_tag";
108
109#define	ZVOL_DUMPSIZE		"dumpsize"
110
111/*
112 * The spa_namespace_lock protects the zfsdev_state structure from being
113 * modified while it's being used, e.g. an open that comes in before a
114 * create finishes.  It also protects temporary opens of the dataset so that,
115 * e.g., an open doesn't get a spurious EBUSY.
116 */
117static uint32_t zvol_minors;
118
119SYSCTL_DECL(_vfs_zfs);
120SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
121static int	volmode = ZFS_VOLMODE_GEOM;
122TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
123SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
124    "Expose as GEOM providers (1), device files (2) or neither");
125
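/*
 * For example (an illustrative note, not from the original source): setting
 * the vfs.zfs.vol.mode tunable/sysctl to 2 makes newly created zvols appear
 * only as /dev/zvol/<pool>/<dataset> character devices instead of GEOM
 * providers; the per-dataset "volmode" property, read in zvol_create_minor()
 * below, overrides this global default.
 */
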
126typedef struct zvol_extent {
127	list_node_t	ze_node;
128	dva_t		ze_dva;		/* dva associated with this extent */
129	uint64_t	ze_nblks;	/* number of blocks in extent */
130} zvol_extent_t;
131
132/*
133 * The in-core state of each volume.
134 */
135typedef struct zvol_state {
136	LIST_ENTRY(zvol_state)	zv_links;
137	char		zv_name[MAXPATHLEN]; /* pool/dd name */
138	uint64_t	zv_volsize;	/* amount of space we advertise */
139	uint64_t	zv_volblocksize; /* volume block size */
140	struct cdev	*zv_dev;	/* non-GEOM device */
141	struct g_provider *zv_provider;	/* GEOM provider */
142	uint8_t		zv_min_bs;	/* minimum addressable block shift */
143	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
144	objset_t	*zv_objset;	/* objset handle */
145	uint32_t	zv_total_opens;	/* total open count */
146	zilog_t		*zv_zilog;	/* ZIL handle */
147	list_t		zv_extents;	/* List of extents for dump */
148	znode_t		zv_znode;	/* for range locking */
149	dmu_buf_t	*zv_dbuf;	/* bonus handle */
150	int		zv_state;
151	int		zv_volmode;	/* Provide GEOM or cdev */
152	struct bio_queue_head zv_queue;
153	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
154} zvol_state_t;
155
156static LIST_HEAD(, zvol_state) all_zvols;
157
158/*
159 * zvol specific flags
160 */
161#define	ZVOL_RDONLY	0x1
162#define	ZVOL_DUMPIFIED	0x2
163#define	ZVOL_EXCL	0x4
164#define	ZVOL_WCE	0x8
165
166/*
167 * zvol maximum transfer in one DMU tx.
168 */
169int zvol_maxphys = DMU_MAX_ACCESS/2;
170
171static d_open_t		zvol_d_open;
172static d_close_t	zvol_d_close;
173static d_read_t		zvol_read;
174static d_write_t	zvol_write;
175static d_ioctl_t	zvol_d_ioctl;
176static d_strategy_t	zvol_strategy;
177
178static struct cdevsw zvol_cdevsw = {
179	.d_version =	D_VERSION,
180	.d_open =	zvol_d_open,
181	.d_close =	zvol_d_close,
182	.d_read =	zvol_read,
183	.d_write =	zvol_write,
184	.d_ioctl =	zvol_d_ioctl,
185	.d_strategy =	zvol_strategy,
186	.d_name =	"zvol",
187	.d_flags =	D_DISK | D_TRACKCLOSE,
188};
189
190extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
191    nvlist_t *, nvlist_t *);
192static void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
193    uint64_t len, boolean_t sync);
194static int zvol_remove_zv(zvol_state_t *);
195static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
196static int zvol_dumpify(zvol_state_t *zv);
197static int zvol_dump_fini(zvol_state_t *zv);
198static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
199
200static void zvol_geom_run(zvol_state_t *zv);
201static void zvol_geom_destroy(zvol_state_t *zv);
202static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
203static void zvol_geom_start(struct bio *bp);
204static void zvol_geom_worker(void *arg);
205
206static void
207zvol_size_changed(zvol_state_t *zv)
208{
209#ifdef sun
210	dev_t dev = makedevice(maj, min);
211
212	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
213	    "Size", volsize) == DDI_SUCCESS);
214	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
215	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
216
217	/* Notify specfs to invalidate the cached size */
218	spec_size_invalidate(dev, VBLK);
219	spec_size_invalidate(dev, VCHR);
220#else	/* !sun */
221	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
222		struct g_provider *pp;
223
224		pp = zv->zv_provider;
225		if (pp == NULL)
226			return;
227		g_topology_lock();
228		g_resize_provider(pp, zv->zv_volsize);
229		g_topology_unlock();
230	}
231#endif	/* !sun */
232}
233
234int
235zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
236{
237	if (volsize == 0)
238		return (SET_ERROR(EINVAL));
239
240	if (volsize % blocksize != 0)
241		return (SET_ERROR(EINVAL));
242
243#ifdef _ILP32
244	if (volsize - 1 > SPEC_MAXOFFSET_T)
245		return (SET_ERROR(EOVERFLOW));
246#endif
247	return (0);
248}
249
250int
251zvol_check_volblocksize(uint64_t volblocksize)
252{
253	if (volblocksize < SPA_MINBLOCKSIZE ||
254	    volblocksize > SPA_MAXBLOCKSIZE ||
255	    !ISP2(volblocksize))
256		return (SET_ERROR(EDOM));
257
258	return (0);
259}
260
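/*
 * A minimal sketch (not part of the original driver) of how a hypothetical
 * caller might chain the two checks above before creating a volume; the
 * helper name is made up for illustration.
 */
static int
zvol_check_geometry_sketch(uint64_t volsize, uint64_t volblocksize)
{
	int error;

	if ((error = zvol_check_volblocksize(volblocksize)) != 0)
		return (error);
	return (zvol_check_volsize(volsize, volblocksize));
}
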
261int
262zvol_get_stats(objset_t *os, nvlist_t *nv)
263{
264	int error;
265	dmu_object_info_t doi;
266	uint64_t val;
267
268	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
269	if (error)
270		return (error);
271
272	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
273
274	error = dmu_object_info(os, ZVOL_OBJ, &doi);
275
276	if (error == 0) {
277		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
278		    doi.doi_data_block_size);
279	}
280
281	return (error);
282}
283
284static zvol_state_t *
285zvol_minor_lookup(const char *name)
286{
287	zvol_state_t *zv;
288
289	ASSERT(MUTEX_HELD(&spa_namespace_lock));
290
291	LIST_FOREACH(zv, &all_zvols, zv_links) {
292		if (strcmp(zv->zv_name, name) == 0)
293			break;
294	}
295
296	return (zv);
297}
298
299/* extent mapping arg */
300struct maparg {
301	zvol_state_t	*ma_zv;
302	uint64_t	ma_blks;
303};
304
305/*ARGSUSED*/
306static int
307zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
308    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
309{
310	struct maparg *ma = arg;
311	zvol_extent_t *ze;
312	int bs = ma->ma_zv->zv_volblocksize;
313
314	if (BP_IS_HOLE(bp) ||
315	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
316		return (0);
317
318	VERIFY(!BP_IS_EMBEDDED(bp));
319
320	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
321	ma->ma_blks++;
322
323	/* Abort immediately if we have encountered gang blocks */
324	if (BP_IS_GANG(bp))
325		return (SET_ERROR(EFRAGS));
326
327	/*
328	 * See if the block is at the end of the previous extent.
329	 */
330	ze = list_tail(&ma->ma_zv->zv_extents);
331	if (ze &&
332	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
333	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
334	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
335		ze->ze_nblks++;
336		return (0);
337	}
338
339	dprintf_bp(bp, "%s", "next blkptr:");
340
341	/* start a new extent */
342	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
343	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
344	ze->ze_nblks = 1;
345	list_insert_tail(&ma->ma_zv->zv_extents, ze);
346	return (0);
347}
348
349static void
350zvol_free_extents(zvol_state_t *zv)
351{
352	zvol_extent_t *ze;
353
354	while ((ze = list_head(&zv->zv_extents)) != NULL) {
355		list_remove(&zv->zv_extents, ze);
356		kmem_free(ze, sizeof (zvol_extent_t));
357	}
358}
359
360static int
361zvol_get_lbas(zvol_state_t *zv)
362{
363	objset_t *os = zv->zv_objset;
364	struct maparg	ma;
365	int		err;
366
367	ma.ma_zv = zv;
368	ma.ma_blks = 0;
369	zvol_free_extents(zv);
370
371	/* commit any in-flight changes before traversing the dataset */
372	txg_wait_synced(dmu_objset_pool(os), 0);
373	err = traverse_dataset(dmu_objset_ds(os), 0,
374	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
375	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
376		zvol_free_extents(zv);
377		return (err ? err : EIO);
378	}
379
380	return (0);
381}
382
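/*
 * A minimal sketch (illustrative only, not part of the driver) of walking
 * the extent list built by zvol_get_lbas() to find the extent covering a
 * logical offset.  zvol_dumpio() in the illumos-only code below performs the
 * real translation, including the vdev label offset and SCL_STATE config
 * locking.
 */
static zvol_extent_t *
zvol_extent_lookup_sketch(zvol_state_t *zv, uint64_t *offset)
{
	zvol_extent_t *ze;

	for (ze = list_head(&zv->zv_extents); ze != NULL;
	    ze = list_next(&zv->zv_extents, ze)) {
		uint64_t len = ze->ze_nblks * zv->zv_volblocksize;

		if (*offset < len)
			return (ze);	/* physical = DVA offset + *offset */
		*offset -= len;
	}
	return (NULL);
}
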
383/* ARGSUSED */
384void
385zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
386{
387	zfs_creat_t *zct = arg;
388	nvlist_t *nvprops = zct->zct_props;
389	int error;
390	uint64_t volblocksize, volsize;
391
392	VERIFY(nvlist_lookup_uint64(nvprops,
393	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
394	if (nvlist_lookup_uint64(nvprops,
395	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
396		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
397
398	/*
399	 * These properties must be removed from the list so the generic
400	 * property setting step won't apply to them.
401	 */
402	VERIFY(nvlist_remove_all(nvprops,
403	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
404	(void) nvlist_remove_all(nvprops,
405	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
406
407	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
408	    DMU_OT_NONE, 0, tx);
409	ASSERT(error == 0);
410
411	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
412	    DMU_OT_NONE, 0, tx);
413	ASSERT(error == 0);
414
415	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
416	ASSERT(error == 0);
417}
418
419/*
420 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
421 * implement DKIOCFREE/free-long-range.
422 */
423static int
424zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
425{
426	uint64_t offset, length;
427
428	if (byteswap)
429		byteswap_uint64_array(lr, sizeof (*lr));
430
431	offset = lr->lr_offset;
432	length = lr->lr_length;
433
434	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
435}
436
437/*
438 * Replay a TX_WRITE ZIL transaction that didn't get committed
439 * after a system failure.
440 */
441static int
442zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
443{
444	objset_t *os = zv->zv_objset;
445	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
446	uint64_t offset, length;
447	dmu_tx_t *tx;
448	int error;
449
450	if (byteswap)
451		byteswap_uint64_array(lr, sizeof (*lr));
452
453	offset = lr->lr_offset;
454	length = lr->lr_length;
455
456	/* If it's a dmu_sync() block, write the whole block */
457	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
458		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
459		if (length < blocksize) {
460			offset -= offset % blocksize;
461			length = blocksize;
462		}
463	}
464
465	tx = dmu_tx_create(os);
466	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
467	error = dmu_tx_assign(tx, TXG_WAIT);
468	if (error) {
469		dmu_tx_abort(tx);
470	} else {
471		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
472		dmu_tx_commit(tx);
473	}
474
475	return (error);
476}
477
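/*
 * A worked example of the rounding above (illustrative): with an 8K
 * volblocksize, a 512-byte dmu_sync() record at offset 12288 is replayed
 * as the full block [8192, 16384):
 *
 *	offset -= offset % blocksize;	12288 - (12288 % 8192) = 8192
 *	length = blocksize;		8192
 */
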
478/* ARGSUSED */
479static int
480zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
481{
482	return (SET_ERROR(ENOTSUP));
483}
484
485/*
486 * Callback vectors for replaying records.
487 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
488 */
489zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
490	zvol_replay_err,	/* 0 no such transaction type */
491	zvol_replay_err,	/* TX_CREATE */
492	zvol_replay_err,	/* TX_MKDIR */
493	zvol_replay_err,	/* TX_MKXATTR */
494	zvol_replay_err,	/* TX_SYMLINK */
495	zvol_replay_err,	/* TX_REMOVE */
496	zvol_replay_err,	/* TX_RMDIR */
497	zvol_replay_err,	/* TX_LINK */
498	zvol_replay_err,	/* TX_RENAME */
499	zvol_replay_write,	/* TX_WRITE */
500	zvol_replay_truncate,	/* TX_TRUNCATE */
501	zvol_replay_err,	/* TX_SETATTR */
502	zvol_replay_err,	/* TX_ACL */
503	zvol_replay_err,	/* TX_CREATE_ACL */
504	zvol_replay_err,	/* TX_CREATE_ATTR */
505	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
506	zvol_replay_err,	/* TX_MKDIR_ACL */
507	zvol_replay_err,	/* TX_MKDIR_ATTR */
508	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
509	zvol_replay_err,	/* TX_WRITE2 */
510};
511
512#ifdef sun
513int
514zvol_name2minor(const char *name, minor_t *minor)
515{
516	zvol_state_t *zv;
517
518	mutex_enter(&spa_namespace_lock);
519	zv = zvol_minor_lookup(name);
520	if (minor && zv)
521		*minor = zv->zv_minor;
522	mutex_exit(&spa_namespace_lock);
523	return (zv ? 0 : -1);
524}
525#endif	/* sun */
526
527/*
528 * Create a minor node (plus a whole lot more) for the specified volume.
529 */
530int
531zvol_create_minor(const char *name)
532{
533	zfs_soft_state_t *zs;
534	zvol_state_t *zv;
535	objset_t *os;
536	struct cdev *dev;
537	struct g_provider *pp;
538	struct g_geom *gp;
539	dmu_object_info_t doi;
540	uint64_t volsize, mode;
541	int error;
542
543	ZFS_LOG(1, "Creating ZVOL %s...", name);
544
545	mutex_enter(&spa_namespace_lock);
546
547	if (zvol_minor_lookup(name) != NULL) {
548		mutex_exit(&spa_namespace_lock);
549		return (SET_ERROR(EEXIST));
550	}
551
552	/* lie and say we're read-only */
553	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
554
555	if (error) {
556		mutex_exit(&spa_namespace_lock);
557		return (error);
558	}
559
560#ifdef sun
561	if ((minor = zfsdev_minor_alloc()) == 0) {
562		dmu_objset_disown(os, FTAG);
563		mutex_exit(&spa_namespace_lock);
564		return (SET_ERROR(ENXIO));
565	}
566
567	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
568		dmu_objset_disown(os, FTAG);
569		mutex_exit(&spa_namespace_lock);
570		return (SET_ERROR(EAGAIN));
571	}
572	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
573	    (char *)name);
574
575	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
576
577	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
578	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
579		ddi_soft_state_free(zfsdev_state, minor);
580		dmu_objset_disown(os, FTAG);
581		mutex_exit(&spa_namespace_lock);
582		return (SET_ERROR(EAGAIN));
583	}
584
585	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
586
587	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
588	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
589		ddi_remove_minor_node(zfs_dip, chrbuf);
590		ddi_soft_state_free(zfsdev_state, minor);
591		dmu_objset_disown(os, FTAG);
592		mutex_exit(&spa_namespace_lock);
593		return (SET_ERROR(EAGAIN));
594	}
595
596	zs = ddi_get_soft_state(zfsdev_state, minor);
597	zs->zss_type = ZSST_ZVOL;
598	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
599#else	/* !sun */
600
601	zv = kmem_zalloc(sizeof (*zv), KM_SLEEP);
602	zv->zv_state = 0;
603	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
604	if (error) {
605		kmem_free(zv, sizeof (*zv));
606		dmu_objset_disown(os, FTAG);
607		mutex_exit(&spa_namespace_lock);
608		return (error);
609	}
610	error = dsl_prop_get_integer(name,
611	    zfs_prop_to_name(ZFS_PROP_VOLMODE), &mode, NULL);
612	if (error != 0 || mode == ZFS_VOLMODE_DEFAULT)
613		mode = volmode;
614
615	DROP_GIANT();
616	zv->zv_volsize = volsize;
617	zv->zv_volmode = mode;
618	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
619		g_topology_lock();
620		gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
621		gp->start = zvol_geom_start;
622		gp->access = zvol_geom_access;
623		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
624		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
625		pp->sectorsize = DEV_BSIZE;
626		pp->mediasize = zv->zv_volsize;
627		pp->private = zv;
628
629		zv->zv_provider = pp;
630		bioq_init(&zv->zv_queue);
631		mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);
632	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
633		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
634		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
635		    0640, "%s/%s", ZVOL_DRIVER, name) != 0) {
636			kmem_free(zv, sizeof (*zv));
637			dmu_objset_disown(os, FTAG);
638			mutex_exit(&spa_namespace_lock);
639			return (SET_ERROR(ENXIO));
640		}
641		zv->zv_dev = dev;
642		dev->si_iosize_max = MAXPHYS;
643		dev->si_drv2 = zv;
644	}
645	LIST_INSERT_HEAD(&all_zvols, zv, zv_links);
646#endif	/* !sun */
647
648	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
649	zv->zv_min_bs = DEV_BSHIFT;
650	zv->zv_objset = os;
651	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
652		zv->zv_flags |= ZVOL_RDONLY;
653	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
654	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
655	    sizeof (rl_t), offsetof(rl_t, r_node));
656	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
657	    offsetof(zvol_extent_t, ze_node));
658	/* get and cache the blocksize */
659	error = dmu_object_info(os, ZVOL_OBJ, &doi);
660	ASSERT(error == 0);
661	zv->zv_volblocksize = doi.doi_data_block_size;
662
663	if (spa_writeable(dmu_objset_spa(os))) {
664		if (zil_replay_disable)
665			zil_destroy(dmu_objset_zil(os), B_FALSE);
666		else
667			zil_replay(os, zv, zvol_replay_vector);
668	}
669	dmu_objset_disown(os, FTAG);
670	zv->zv_objset = NULL;
671
672	zvol_minors++;
673
674	mutex_exit(&spa_namespace_lock);
675
676#ifndef sun
677	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
678		zvol_geom_run(zv);
679		g_topology_unlock();
680	}
681	PICKUP_GIANT();
682#endif
683
684	ZFS_LOG(1, "ZVOL %s created.", name);
685
686	return (0);
687}
688
689/*
690 * Remove minor node for the specified volume.
691 */
692static int
693zvol_remove_zv(zvol_state_t *zv)
694{
695#ifdef sun
696	minor_t minor = zv->zv_minor;
697#endif
698
699	ASSERT(MUTEX_HELD(&spa_namespace_lock));
700	if (zv->zv_total_opens != 0)
701		return (SET_ERROR(EBUSY));
702
703	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
704
705#ifdef sun
706	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
707	ddi_remove_minor_node(zfs_dip, nmbuf);
708#else
709	LIST_REMOVE(zv, zv_links);
710	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
711		g_topology_lock();
712		zvol_geom_destroy(zv);
713		g_topology_unlock();
714	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV)
715		destroy_dev(zv->zv_dev);
716#endif	/* sun */
717
718	avl_destroy(&zv->zv_znode.z_range_avl);
719	mutex_destroy(&zv->zv_znode.z_range_lock);
720
721	kmem_free(zv, sizeof (*zv));
722
723	zvol_minors--;
724	return (0);
725}
726
727int
728zvol_remove_minor(const char *name)
729{
730	zvol_state_t *zv;
731	int rc;
732
733	mutex_enter(&spa_namespace_lock);
734	if ((zv = zvol_minor_lookup(name)) == NULL) {
735		mutex_exit(&spa_namespace_lock);
736		return (SET_ERROR(ENXIO));
737	}
738	rc = zvol_remove_zv(zv);
739	mutex_exit(&spa_namespace_lock);
740	return (rc);
741}
742
743int
744zvol_first_open(zvol_state_t *zv)
745{
746	objset_t *os;
747	uint64_t volsize;
748	int error;
749	uint64_t readonly;
750
751	/* lie and say we're read-only */
752	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
753	    zvol_tag, &os);
754	if (error)
755		return (error);
756
757	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
758	if (error) {
759		ASSERT(error == 0);
760		dmu_objset_disown(os, zvol_tag);
761		return (error);
762	}
763	zv->zv_objset = os;
764	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
765	if (error) {
766		dmu_objset_disown(os, zvol_tag);
767		return (error);
768	}
769	zv->zv_volsize = volsize;
770	zv->zv_zilog = zil_open(os, zvol_get_data);
771	zvol_size_changed(zv);
772
773	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
774	    NULL) == 0);
775	if (readonly || dmu_objset_is_snapshot(os) ||
776	    !spa_writeable(dmu_objset_spa(os)))
777		zv->zv_flags |= ZVOL_RDONLY;
778	else
779		zv->zv_flags &= ~ZVOL_RDONLY;
780	return (error);
781}
782
783void
784zvol_last_close(zvol_state_t *zv)
785{
786	zil_close(zv->zv_zilog);
787	zv->zv_zilog = NULL;
788
789	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
790	zv->zv_dbuf = NULL;
791
792	/*
793	 * Evict cached data
794	 */
795	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
796	    !(zv->zv_flags & ZVOL_RDONLY))
797		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
798	dmu_objset_evict_dbufs(zv->zv_objset);
799
800	dmu_objset_disown(zv->zv_objset, zvol_tag);
801	zv->zv_objset = NULL;
802}
803
804#ifdef sun
805int
806zvol_prealloc(zvol_state_t *zv)
807{
808	objset_t *os = zv->zv_objset;
809	dmu_tx_t *tx;
810	uint64_t refd, avail, usedobjs, availobjs;
811	uint64_t resid = zv->zv_volsize;
812	uint64_t off = 0;
813
814	/* Check the space usage before attempting to allocate the space */
815	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
816	if (avail < zv->zv_volsize)
817		return (SET_ERROR(ENOSPC));
818
819	/* Free old extents if they exist */
820	zvol_free_extents(zv);
821
822	while (resid != 0) {
823		int error;
824		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);
825
826		tx = dmu_tx_create(os);
827		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
828		error = dmu_tx_assign(tx, TXG_WAIT);
829		if (error) {
830			dmu_tx_abort(tx);
831			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
832			return (error);
833		}
834		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
835		dmu_tx_commit(tx);
836		off += bytes;
837		resid -= bytes;
838	}
839	txg_wait_synced(dmu_objset_pool(os), 0);
840
841	return (0);
842}
843#endif	/* sun */
844
845static int
846zvol_update_volsize(objset_t *os, uint64_t volsize)
847{
848	dmu_tx_t *tx;
849	int error;
850
851	ASSERT(MUTEX_HELD(&spa_namespace_lock));
852
853	tx = dmu_tx_create(os);
854	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
855	error = dmu_tx_assign(tx, TXG_WAIT);
856	if (error) {
857		dmu_tx_abort(tx);
858		return (error);
859	}
860
861	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
862	    &volsize, tx);
863	dmu_tx_commit(tx);
864
865	if (error == 0)
866		error = dmu_free_long_range(os,
867		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
868	return (error);
869}
870
871void
872zvol_remove_minors(const char *name)
873{
874	zvol_state_t *zv, *tzv;
875	size_t namelen;
876
877	namelen = strlen(name);
878
879	DROP_GIANT();
880	mutex_enter(&spa_namespace_lock);
881
882	LIST_FOREACH_SAFE(zv, &all_zvols, zv_links, tzv) {
883		if (strcmp(zv->zv_name, name) == 0 ||
884		    (strncmp(zv->zv_name, name, namelen) == 0 &&
885		     zv->zv_name[namelen] == '/')) {
886			(void) zvol_remove_zv(zv);
887		}
888	}
889
890	mutex_exit(&spa_namespace_lock);
891	PICKUP_GIANT();
892}
893
894int
895zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
896{
897	zvol_state_t *zv = NULL;
898	objset_t *os;
899	int error;
900	dmu_object_info_t doi;
901	uint64_t old_volsize = 0ULL;
902	uint64_t readonly;
903
904	mutex_enter(&spa_namespace_lock);
905	zv = zvol_minor_lookup(name);
906	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
907		mutex_exit(&spa_namespace_lock);
908		return (error);
909	}
910
911	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
912	    (error = zvol_check_volsize(volsize,
913	    doi.doi_data_block_size)) != 0)
914		goto out;
915
916	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
917	    NULL) == 0);
918	if (readonly) {
919		error = SET_ERROR(EROFS);
920		goto out;
921	}
922
923	error = zvol_update_volsize(os, volsize);
924	/*
925	 * Reinitialize the dump area to the new size. If we
926	 * failed to resize the dump area then restore it back to
927	 * its original size.
928	 */
929	if (zv && error == 0) {
930#ifdef ZVOL_DUMP
931		if (zv->zv_flags & ZVOL_DUMPIFIED) {
932			old_volsize = zv->zv_volsize;
933			zv->zv_volsize = volsize;
934			if ((error = zvol_dumpify(zv)) != 0 ||
935			    (error = dumpvp_resize()) != 0) {
936				(void) zvol_update_volsize(os, old_volsize);
937				zv->zv_volsize = old_volsize;
938				error = zvol_dumpify(zv);
939			}
940		}
941#endif	/* ZVOL_DUMP */
942		if (error == 0) {
943			zv->zv_volsize = volsize;
944			zvol_size_changed(zv);
945		}
946	}
947
948#ifdef sun
949	/*
950	 * Generate a LUN expansion event.
951	 */
952	if (zv && error == 0) {
953		sysevent_id_t eid;
954		nvlist_t *attr;
955		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
956
957		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
958		    zv->zv_minor);
959
960		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
961		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
962
963		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
964		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
965
966		nvlist_free(attr);
967		kmem_free(physpath, MAXPATHLEN);
968	}
969#endif	/* sun */
970
971out:
972	dmu_objset_rele(os, FTAG);
973
974	mutex_exit(&spa_namespace_lock);
975
976	return (error);
977}
978
979/*ARGSUSED*/
980static int
981zvol_open(struct g_provider *pp, int flag, int count)
982{
983	zvol_state_t *zv;
984	int err = 0;
985	boolean_t locked = B_FALSE;
986
987	/*
988	 * Protect against recursively entering spa_namespace_lock
989	 * when spa_open() is used for a pool backed by (local) ZVOLs.
990	 * This is needed since we replaced upstream zfsdev_state_lock
991	 * with spa_namespace_lock in the ZVOL code.
992	 * We are using the same trick as spa_open().
993	 * Note that calls in zvol_first_open() that need to resolve
994	 * the pool name to a spa object will enter spa_open()
995	 * recursively, but that function already has all the
996	 * necessary protection.
997	 */
998	if (!MUTEX_HELD(&spa_namespace_lock)) {
999		mutex_enter(&spa_namespace_lock);
1000		locked = B_TRUE;
1001	}
1002
1003	zv = pp->private;
1004	if (zv == NULL) {
1005		if (locked)
1006			mutex_exit(&spa_namespace_lock);
1007		return (SET_ERROR(ENXIO));
1008	}
1009
1010	if (zv->zv_total_opens == 0) {
1011		err = zvol_first_open(zv);
1012		if (err) {
1013			if (locked)
1014				mutex_exit(&spa_namespace_lock);
1015			return (err);
1016		}
1017		pp->mediasize = zv->zv_volsize;
1018		pp->stripeoffset = 0;
1019		pp->stripesize = zv->zv_volblocksize;
1020	}
1021	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1022		err = SET_ERROR(EROFS);
1023		goto out;
1024	}
1025	if (zv->zv_flags & ZVOL_EXCL) {
1026		err = SET_ERROR(EBUSY);
1027		goto out;
1028	}
1029#ifdef FEXCL
1030	if (flag & FEXCL) {
1031		if (zv->zv_total_opens != 0) {
1032			err = SET_ERROR(EBUSY);
1033			goto out;
1034		}
1035		zv->zv_flags |= ZVOL_EXCL;
1036	}
1037#endif
1038
1039	zv->zv_total_opens += count;
1040	if (locked)
1041		mutex_exit(&spa_namespace_lock);
1042
1043	return (err);
1044out:
1045	if (zv->zv_total_opens == 0)
1046		zvol_last_close(zv);
1047	if (locked)
1048		mutex_exit(&spa_namespace_lock);
1049	return (err);
1050}
1051
1052/*ARGSUSED*/
1053static int
1054zvol_close(struct g_provider *pp, int flag, int count)
1055{
1056	zvol_state_t *zv;
1057	int error = 0;
1058	boolean_t locked = B_FALSE;
1059
1060	/* See comment in zvol_open(). */
1061	if (!MUTEX_HELD(&spa_namespace_lock)) {
1062		mutex_enter(&spa_namespace_lock);
1063		locked = B_TRUE;
1064	}
1065
1066	zv = pp->private;
1067	if (zv == NULL) {
1068		if (locked)
1069			mutex_exit(&spa_namespace_lock);
1070		return (SET_ERROR(ENXIO));
1071	}
1072
1073	if (zv->zv_flags & ZVOL_EXCL) {
1074		ASSERT(zv->zv_total_opens == 1);
1075		zv->zv_flags &= ~ZVOL_EXCL;
1076	}
1077
1078	/*
1079	 * If the open count is zero, this is a spurious close.
1080	 * That indicates a bug in the kernel / DDI framework.
1081	 */
1082	ASSERT(zv->zv_total_opens != 0);
1083
1084	/*
1085	 * You may get multiple opens, but only one close.
1086	 */
1087	zv->zv_total_opens -= count;
1088
1089	if (zv->zv_total_opens == 0)
1090		zvol_last_close(zv);
1091
1092	if (locked)
1093		mutex_exit(&spa_namespace_lock);
1094	return (error);
1095}
1096
1097static void
1098zvol_get_done(zgd_t *zgd, int error)
1099{
1100	if (zgd->zgd_db)
1101		dmu_buf_rele(zgd->zgd_db, zgd);
1102
1103	zfs_range_unlock(zgd->zgd_rl);
1104
1105	if (error == 0 && zgd->zgd_bp)
1106		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1107
1108	kmem_free(zgd, sizeof (zgd_t));
1109}
1110
1111/*
1112 * Get data to generate a TX_WRITE intent log record.
1113 */
1114static int
1115zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1116{
1117	zvol_state_t *zv = arg;
1118	objset_t *os = zv->zv_objset;
1119	uint64_t object = ZVOL_OBJ;
1120	uint64_t offset = lr->lr_offset;
1121	uint64_t size = lr->lr_length;	/* length of user data */
1122	blkptr_t *bp = &lr->lr_blkptr;
1123	dmu_buf_t *db;
1124	zgd_t *zgd;
1125	int error;
1126
1127	ASSERT(zio != NULL);
1128	ASSERT(size != 0);
1129
1130	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1131	zgd->zgd_zilog = zv->zv_zilog;
1132	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
1133
1134	/*
1135	 * Write records come in two flavors: immediate and indirect.
1136	 * For small writes it's cheaper to store the data with the
1137	 * log record (immediate); for large writes it's cheaper to
1138	 * sync the data and get a pointer to it (indirect) so that
1139	 * we don't have to write the data twice.
1140	 */
1141	if (buf != NULL) {	/* immediate write */
1142		error = dmu_read(os, object, offset, size, buf,
1143		    DMU_READ_NO_PREFETCH);
1144	} else {
1145		size = zv->zv_volblocksize;
1146		offset = P2ALIGN(offset, size);
1147		error = dmu_buf_hold(os, object, offset, zgd, &db,
1148		    DMU_READ_NO_PREFETCH);
1149		if (error == 0) {
1150			blkptr_t *obp = dmu_buf_get_blkptr(db);
1151			if (obp) {
1152				ASSERT(BP_IS_HOLE(bp));
1153				*bp = *obp;
1154			}
1155
1156			zgd->zgd_db = db;
1157			zgd->zgd_bp = bp;
1158
1159			ASSERT(db->db_offset == offset);
1160			ASSERT(db->db_size == size);
1161
1162			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1163			    zvol_get_done, zgd);
1164
1165			if (error == 0)
1166				return (0);
1167		}
1168	}
1169
1170	zvol_get_done(zgd, error);
1171
1172	return (error);
1173}
1174
1175/*
1176 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1177 *
1178 * We store data in the log buffers if it's small enough.
1179 * Otherwise we will later flush the data out via dmu_sync().
1180 */
1181ssize_t zvol_immediate_write_sz = 32768;
1182
1183static void
1184zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1185    boolean_t sync)
1186{
1187	uint32_t blocksize = zv->zv_volblocksize;
1188	zilog_t *zilog = zv->zv_zilog;
1189	boolean_t slogging;
1190	ssize_t immediate_write_sz;
1191
1192	if (zil_replaying(zilog, tx))
1193		return;
1194
1195	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1196	    ? 0 : zvol_immediate_write_sz;
1197
1198	slogging = spa_has_slogs(zilog->zl_spa) &&
1199	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
1200
1201	while (resid) {
1202		itx_t *itx;
1203		lr_write_t *lr;
1204		ssize_t len;
1205		itx_wr_state_t write_state;
1206
1207		/*
1208		 * Unlike zfs_log_write(), we can be called with
1209		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
1210		 */
1211		if (blocksize > immediate_write_sz && !slogging &&
1212		    resid >= blocksize && off % blocksize == 0) {
1213			write_state = WR_INDIRECT; /* uses dmu_sync */
1214			len = blocksize;
1215		} else if (sync) {
1216			write_state = WR_COPIED;
1217			len = MIN(ZIL_MAX_LOG_DATA, resid);
1218		} else {
1219			write_state = WR_NEED_COPY;
1220			len = MIN(ZIL_MAX_LOG_DATA, resid);
1221		}
1222
1223		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1224		    (write_state == WR_COPIED ? len : 0));
1225		lr = (lr_write_t *)&itx->itx_lr;
1226		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1227		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1228			zil_itx_destroy(itx);
1229			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1230			lr = (lr_write_t *)&itx->itx_lr;
1231			write_state = WR_NEED_COPY;
1232		}
1233
1234		itx->itx_wr_state = write_state;
1235		if (write_state == WR_NEED_COPY)
1236			itx->itx_sod += len;
1237		lr->lr_foid = ZVOL_OBJ;
1238		lr->lr_offset = off;
1239		lr->lr_length = len;
1240		lr->lr_blkoff = 0;
1241		BP_ZERO(&lr->lr_blkptr);
1242
1243		itx->itx_private = zv;
1244		itx->itx_sync = sync;
1245
1246		zil_itx_assign(zilog, itx, tx);
1247
1248		off += len;
1249		resid -= len;
1250	}
1251}
1252
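/*
 * A minimal sketch (illustrative; zvol_strategy() and zvol_write() below are
 * the real callers) of the write path that pairs a DMU write with
 * zvol_log_write().  The helper name is made up for illustration.
 */
static int
zvol_write_one_sketch(zvol_state_t *zv, uint64_t off, uint64_t len,
    const char *buf, boolean_t sync)
{
	dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
	int error;

	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
	if ((error = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
		dmu_tx_abort(tx);
		return (error);
	}
	dmu_write(zv->zv_objset, ZVOL_OBJ, off, len, buf, tx);
	zvol_log_write(zv, tx, off, len, sync);	/* log before commit */
	dmu_tx_commit(tx);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (0);
}
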
1253#ifdef sun
1254static int
1255zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1256    uint64_t size, boolean_t doread, boolean_t isdump)
1257{
1258	vdev_disk_t *dvd;
1259	int c;
1260	int numerrors = 0;
1261
1262	if (vd->vdev_ops == &vdev_mirror_ops ||
1263	    vd->vdev_ops == &vdev_replacing_ops ||
1264	    vd->vdev_ops == &vdev_spare_ops) {
1265		for (c = 0; c < vd->vdev_children; c++) {
1266			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1267			    addr, offset, origoffset, size, doread, isdump);
1268			if (err != 0) {
1269				numerrors++;
1270			} else if (doread) {
1271				break;
1272			}
1273		}
1274	}
1275
1276	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1277		return (numerrors < vd->vdev_children ? 0 : EIO);
1278
1279	if (doread && !vdev_readable(vd))
1280		return (SET_ERROR(EIO));
1281	else if (!doread && !vdev_writeable(vd))
1282		return (SET_ERROR(EIO));
1283
1284	if (vd->vdev_ops == &vdev_raidz_ops) {
1285		return (vdev_raidz_physio(vd,
1286		    addr, size, offset, origoffset, doread, isdump));
1287	}
1288
1289	offset += VDEV_LABEL_START_SIZE;
1290
1291	if (ddi_in_panic() || isdump) {
1292		ASSERT(!doread);
1293		if (doread)
1294			return (SET_ERROR(EIO));
1295		dvd = vd->vdev_tsd;
1296		ASSERT3P(dvd, !=, NULL);
1297		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1298		    lbtodb(size)));
1299	} else {
1300		dvd = vd->vdev_tsd;
1301		ASSERT3P(dvd, !=, NULL);
1302		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1303		    offset, doread ? B_READ : B_WRITE));
1304	}
1305}
1306
1307static int
1308zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1309    boolean_t doread, boolean_t isdump)
1310{
1311	vdev_t *vd;
1312	int error;
1313	zvol_extent_t *ze;
1314	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1315
1316	/* Must be sector-aligned, and must not straddle a block boundary. */
1317	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1318	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1319		return (SET_ERROR(EINVAL));
1320	}
1321	ASSERT(size <= zv->zv_volblocksize);
1322
1323	/* Locate the extent this belongs to */
1324	ze = list_head(&zv->zv_extents);
1325	while (ze != NULL && offset >= ze->ze_nblks * zv->zv_volblocksize) {
1326		offset -= ze->ze_nblks * zv->zv_volblocksize;
1327		ze = list_next(&zv->zv_extents, ze);
1328	}
1329
1330	if (ze == NULL)
1331		return (SET_ERROR(EINVAL));
1332
1333	if (!ddi_in_panic())
1334		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1335
1336	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1337	offset += DVA_GET_OFFSET(&ze->ze_dva);
1338	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1339	    size, doread, isdump);
1340
1341	if (!ddi_in_panic())
1342		spa_config_exit(spa, SCL_STATE, FTAG);
1343
1344	return (error);
1345}
1346#endif	/* sun */
1347
1348void
1349zvol_strategy(struct bio *bp)
1350{
1351	zvol_state_t *zv;
1352	uint64_t off, volsize;
1353	size_t resid;
1354	char *addr;
1355	objset_t *os;
1356	rl_t *rl;
1357	int error = 0;
1358	boolean_t doread = 0;
1359	boolean_t is_dumpified;
1360	boolean_t sync;
1361
1362	if (bp->bio_to)
1363		zv = bp->bio_to->private;
1364	else
1365		zv = bp->bio_dev->si_drv2;
1366
1367	if (zv == NULL) {
1368		error = ENXIO;
1369		goto out;
1370	}
1371
1372	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
1373		error = EROFS;
1374		goto out;
1375	}
1376
1377	switch (bp->bio_cmd) {
1378	case BIO_FLUSH:
1379		goto sync;
1380	case BIO_READ:
1381		doread = 1;	/* FALLTHROUGH */
1382	case BIO_WRITE:
1383	case BIO_DELETE:
1384		break;
1385	default:
1386		error = EOPNOTSUPP;
1387		goto out;
1388	}
1389
1390	off = bp->bio_offset;
1391	volsize = zv->zv_volsize;
1392
1393	os = zv->zv_objset;
1394	ASSERT(os != NULL);
1395
1396	addr = bp->bio_data;
1397	resid = bp->bio_length;
1398
1399	if (resid > 0 && (off < 0 || off >= volsize)) {
1400		error = EIO;
1401		goto out;
1402	}
1403
1404#ifdef illumos
1405	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1406#else
1407	is_dumpified = B_FALSE;
1408#endif
1409	sync = !doread && !is_dumpified &&
1410	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
1411
1412	/*
1413	 * There must be no buffer changes when doing a dmu_sync() because
1414	 * we can't change the data whilst calculating the checksum.
1415	 */
1416	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1417	    doread ? RL_READER : RL_WRITER);
1418
1419	if (bp->bio_cmd == BIO_DELETE) {
1420		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1421		error = dmu_tx_assign(tx, TXG_WAIT);
1422		if (error != 0) {
1423			dmu_tx_abort(tx);
1424		} else {
1425			zvol_log_truncate(zv, tx, off, resid, B_TRUE);
1426			dmu_tx_commit(tx);
1427			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1428			    off, resid);
1429			resid = 0;
1430		}
1431		goto unlock;
1432	}
1433
1434	while (resid != 0 && off < volsize) {
1435		size_t size = MIN(resid, zvol_maxphys);
1436#ifdef illumos
1437		if (is_dumpified) {
1438			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1439			error = zvol_dumpio(zv, addr, off, size,
1440			    doread, B_FALSE);
1441		} else if (doread) {
1442#else
1443		if (doread) {
1444#endif
1445			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1446			    DMU_READ_PREFETCH);
1447		} else {
1448			dmu_tx_t *tx = dmu_tx_create(os);
1449			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1450			error = dmu_tx_assign(tx, TXG_WAIT);
1451			if (error) {
1452				dmu_tx_abort(tx);
1453			} else {
1454				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1455				zvol_log_write(zv, tx, off, size, sync);
1456				dmu_tx_commit(tx);
1457			}
1458		}
1459		if (error) {
1460			/* convert checksum errors into IO errors */
1461			if (error == ECKSUM)
1462				error = SET_ERROR(EIO);
1463			break;
1464		}
1465		off += size;
1466		addr += size;
1467		resid -= size;
1468	}
1469unlock:
1470	zfs_range_unlock(rl);
1471
1472	bp->bio_completed = bp->bio_length - resid;
1473	if (bp->bio_completed < bp->bio_length && off > volsize)
1474		error = EINVAL;
1475
1476	if (sync) {
1477sync:
1478		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1479	}
1480out:
1481	if (bp->bio_to)
1482		g_io_deliver(bp, error);
1483	else
1484		biofinish(bp, NULL, error);
1485}
1486
1487#ifdef sun
1488/*
1489 * Set the buffer count to the zvol maximum transfer.
1490 * Using our own routine instead of the default minphys()
1491 * means that for larger writes we write bigger buffers on X86
1492 * (128K instead of 56K) and flush the disk write cache less often
1493 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1494 * 56K on X86 and 128K on sparc).
1495 */
1496void
1497zvol_minphys(struct buf *bp)
1498{
1499	if (bp->b_bcount > zvol_maxphys)
1500		bp->b_bcount = zvol_maxphys;
1501}
1502
1503int
1504zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1505{
1506	minor_t minor = getminor(dev);
1507	zvol_state_t *zv;
1508	int error = 0;
1509	uint64_t size;
1510	uint64_t boff;
1511	uint64_t resid;
1512
1513	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1514	if (zv == NULL)
1515		return (SET_ERROR(ENXIO));
1516
1517	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1518		return (SET_ERROR(EINVAL));
1519
1520	boff = ldbtob(blkno);
1521	resid = ldbtob(nblocks);
1522
1523	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1524
1525	while (resid) {
1526		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1527		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1528		if (error)
1529			break;
1530		boff += size;
1531		addr += size;
1532		resid -= size;
1533	}
1534
1535	return (error);
1536}
1537
1538/*ARGSUSED*/
1539int
1540zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1541{
1542	minor_t minor = getminor(dev);
1543#else
1544int
1545zvol_read(struct cdev *dev, struct uio *uio, int ioflag)
1546{
1547#endif
1548	zvol_state_t *zv;
1549	uint64_t volsize;
1550	rl_t *rl;
1551	int error = 0;
1552
1553#ifdef sun
1554	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1555	if (zv == NULL)
1556		return (SET_ERROR(ENXIO));
1557#else
1558	zv = dev->si_drv2;
1559#endif
1560
1561	volsize = zv->zv_volsize;
1562	if (uio->uio_resid > 0 &&
1563	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1564		return (SET_ERROR(EIO));
1565
1566#ifdef illumos
1567	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1568		error = physio(zvol_strategy, NULL, dev, B_READ,
1569		    zvol_minphys, uio);
1570		return (error);
1571	}
1572#endif
1573
1574	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1575	    RL_READER);
1576	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1577		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1578
1579		/* don't read past the end */
1580		if (bytes > volsize - uio->uio_loffset)
1581			bytes = volsize - uio->uio_loffset;
1582
1583		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1584		if (error) {
1585			/* convert checksum errors into IO errors */
1586			if (error == ECKSUM)
1587				error = SET_ERROR(EIO);
1588			break;
1589		}
1590	}
1591	zfs_range_unlock(rl);
1592	return (error);
1593}
1594
1595#ifdef sun
1596/*ARGSUSED*/
1597int
1598zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1599{
1600	minor_t minor = getminor(dev);
1601#else
1602int
1603zvol_write(struct cdev *dev, struct uio *uio, int ioflag)
1604{
1605#endif
1606	zvol_state_t *zv;
1607	uint64_t volsize;
1608	rl_t *rl;
1609	int error = 0;
1610	boolean_t sync;
1611
1612#ifdef sun
1613	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1614	if (zv == NULL)
1615		return (SET_ERROR(ENXIO));
1616#else
1617	zv = dev->si_drv2;
1618#endif
1619
1620	volsize = zv->zv_volsize;
1621	if (uio->uio_resid > 0 &&
1622	    (uio->uio_loffset < 0 || uio->uio_loffset > volsize))
1623		return (SET_ERROR(EIO));
1624
1625#ifdef illumos
1626	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1627		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1628		    zvol_minphys, uio);
1629		return (error);
1630	}
1631#endif
1632
1633#ifdef sun
1634	sync = !(zv->zv_flags & ZVOL_WCE) ||
1635#else
1636	sync =
1637#endif
1638	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1639
1640	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1641	    RL_WRITER);
1642	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1643		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1644		uint64_t off = uio->uio_loffset;
1645		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1646
1647		if (bytes > volsize - off)	/* don't write past the end */
1648			bytes = volsize - off;
1649
1650		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1651		error = dmu_tx_assign(tx, TXG_WAIT);
1652		if (error) {
1653			dmu_tx_abort(tx);
1654			break;
1655		}
1656		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1657		if (error == 0)
1658			zvol_log_write(zv, tx, off, bytes, sync);
1659		dmu_tx_commit(tx);
1660
1661		if (error)
1662			break;
1663	}
1664	zfs_range_unlock(rl);
1665	if (sync)
1666		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1667	return (error);
1668}
1669
1670#ifdef sun
1671int
1672zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1673{
1674	struct uuid uuid = EFI_RESERVED;
1675	efi_gpe_t gpe = { 0 };
1676	uint32_t crc;
1677	dk_efi_t efi;
1678	int length;
1679	char *ptr;
1680
1681	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1682		return (SET_ERROR(EFAULT));
1683	ptr = (char *)(uintptr_t)efi.dki_data_64;
1684	length = efi.dki_length;
1685	/*
1686	 * Some clients may attempt to request a PMBR for the
1687	 * zvol.  Currently this interface will return EINVAL to
1688	 * such requests.  These requests could be supported by
1689	 * adding a check for lba == 0 and consing up an appropriate
1690	 * PMBR.
1691	 */
1692	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1693		return (SET_ERROR(EINVAL));
1694
1695	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1696	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1697	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1698
1699	if (efi.dki_lba == 1) {
1700		efi_gpt_t gpt = { 0 };
1701
1702		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1703		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1704		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1705		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1706		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1707		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1708		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1709		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1710		gpt.efi_gpt_SizeOfPartitionEntry =
1711		    LE_32(sizeof (efi_gpe_t));
1712		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1713		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1714		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1715		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1716		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1717		    flag))
1718			return (SET_ERROR(EFAULT));
1719		ptr += sizeof (gpt);
1720		length -= sizeof (gpt);
1721	}
1722	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1723	    length), flag))
1724		return (SET_ERROR(EFAULT));
1725	return (0);
1726}
1727
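/*
 * A worked example of the geometry above (illustrative): for a 1 GB volume
 * (vs = 1 << 30) with 512-byte logical blocks (bs = 9), the label reports
 * FirstUsableLBA = 34 and LastUsableLBA = (vs >> bs) - 1 = 2097151, and the
 * single reserved partition spans exactly that range.
 */
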
1728/*
1729 * BEGIN entry points to allow external callers access to the volume.
1730 */
1731/*
1732 * Return the volume parameters needed for access from an external caller.
1733 * These values are invariant as long as the volume is held open.
1734 */
1735int
1736zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1737    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1738    void **rl_hdl, void **bonus_hdl)
1739{
1740	zvol_state_t *zv;
1741
1742	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1743	if (zv == NULL)
1744		return (SET_ERROR(ENXIO));
1745	if (zv->zv_flags & ZVOL_DUMPIFIED)
1746		return (SET_ERROR(ENXIO));
1747
1748	ASSERT(blksize && max_xfer_len && minor_hdl &&
1749	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1750
1751	*blksize = zv->zv_volblocksize;
1752	*max_xfer_len = (uint64_t)zvol_maxphys;
1753	*minor_hdl = zv;
1754	*objset_hdl = zv->zv_objset;
1755	*zil_hdl = zv->zv_zilog;
1756	*rl_hdl = &zv->zv_znode;
1757	*bonus_hdl = zv->zv_dbuf;
1758	return (0);
1759}
1760
1761/*
1762 * Return the current volume size to an external caller.
1763 * The size can change while the volume is open.
1764 */
1765uint64_t
1766zvol_get_volume_size(void *minor_hdl)
1767{
1768	zvol_state_t *zv = minor_hdl;
1769
1770	return (zv->zv_volsize);
1771}
1772
1773/*
1774 * Return the current WCE setting to an external caller.
1775 * The WCE setting can change while the volume is open.
1776 */
1777int
1778zvol_get_volume_wce(void *minor_hdl)
1779{
1780	zvol_state_t *zv = minor_hdl;
1781
1782	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1783}
1784
1785/*
1786 * Entry point for external callers to zvol_log_write
1787 */
1788void
1789zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1790    boolean_t sync)
1791{
1792	zvol_state_t *zv = minor_hdl;
1793
1794	zvol_log_write(zv, tx, off, resid, sync);
1795}
1796/*
1797 * END entry points to allow external callers access to the volume.
1798 */
1799#endif	/* sun */
1800
1801/*
1802 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1803 */
1804static void
1805zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1806    boolean_t sync)
1807{
1808	itx_t *itx;
1809	lr_truncate_t *lr;
1810	zilog_t *zilog = zv->zv_zilog;
1811
1812	if (zil_replaying(zilog, tx))
1813		return;
1814
1815	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1816	lr = (lr_truncate_t *)&itx->itx_lr;
1817	lr->lr_foid = ZVOL_OBJ;
1818	lr->lr_offset = off;
1819	lr->lr_length = len;
1820
1821	itx->itx_sync = sync;
1822	zil_itx_assign(zilog, itx, tx);
1823}
1824
1825#ifdef sun
1826/*
1827 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1828 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1829 */
1830/*ARGSUSED*/
1831int
1832zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1833{
1834	zvol_state_t *zv;
1835	struct dk_callback *dkc;
1836	int error = 0;
1837	rl_t *rl;
1838
1839	mutex_enter(&spa_namespace_lock);
1840
1841	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1842
1843	if (zv == NULL) {
1844		mutex_exit(&spa_namespace_lock);
1845		return (SET_ERROR(ENXIO));
1846	}
1847	ASSERT(zv->zv_total_opens > 0);
1848
1849	switch (cmd) {
1850
1851	case DKIOCINFO:
1852	{
1853		struct dk_cinfo dki;
1854
1855		bzero(&dki, sizeof (dki));
1856		(void) strcpy(dki.dki_cname, "zvol");
1857		(void) strcpy(dki.dki_dname, "zvol");
1858		dki.dki_ctype = DKC_UNKNOWN;
1859		dki.dki_unit = getminor(dev);
1860		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
1861		mutex_exit(&spa_namespace_lock);
1862		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1863			error = SET_ERROR(EFAULT);
1864		return (error);
1865	}
1866
1867	case DKIOCGMEDIAINFO:
1868	{
1869		struct dk_minfo dkm;
1870
1871		bzero(&dkm, sizeof (dkm));
1872		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1873		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1874		dkm.dki_media_type = DK_UNKNOWN;
1875		mutex_exit(&spa_namespace_lock);
1876		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1877			error = SET_ERROR(EFAULT);
1878		return (error);
1879	}
1880
1881	case DKIOCGMEDIAINFOEXT:
1882	{
1883		struct dk_minfo_ext dkmext;
1884
1885		bzero(&dkmext, sizeof (dkmext));
1886		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1887		dkmext.dki_pbsize = zv->zv_volblocksize;
1888		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1889		dkmext.dki_media_type = DK_UNKNOWN;
1890		mutex_exit(&spa_namespace_lock);
1891		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1892			error = SET_ERROR(EFAULT);
1893		return (error);
1894	}
1895
1896	case DKIOCGETEFI:
1897	{
1898		uint64_t vs = zv->zv_volsize;
1899		uint8_t bs = zv->zv_min_bs;
1900
1901		mutex_exit(&spa_namespace_lock);
1902		error = zvol_getefi((void *)arg, flag, vs, bs);
1903		return (error);
1904	}
1905
1906	case DKIOCFLUSHWRITECACHE:
1907		dkc = (struct dk_callback *)arg;
1908		mutex_exit(&spa_namespace_lock);
1909		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1910		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1911			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1912			error = 0;
1913		}
1914		return (error);
1915
1916	case DKIOCGETWCE:
1917	{
1918		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1919		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1920		    flag))
1921			error = SET_ERROR(EFAULT);
1922		break;
1923	}
1924	case DKIOCSETWCE:
1925	{
1926		int wce;
1927		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1928		    flag)) {
1929			error = SET_ERROR(EFAULT);
1930			break;
1931		}
1932		if (wce) {
1933			zv->zv_flags |= ZVOL_WCE;
1934			mutex_exit(&spa_namespace_lock);
1935		} else {
1936			zv->zv_flags &= ~ZVOL_WCE;
1937			mutex_exit(&spa_namespace_lock);
1938			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1939		}
1940		return (0);
1941	}
1942
1943	case DKIOCGGEOM:
1944	case DKIOCGVTOC:
1945		/*
1946		 * commands using these (like prtvtoc) expect ENOTSUP
1947		 * since we're emulating an EFI label
1948		 */
1949		error = SET_ERROR(ENOTSUP);
1950		break;
1951
1952	case DKIOCDUMPINIT:
1953		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1954		    RL_WRITER);
1955		error = zvol_dumpify(zv);
1956		zfs_range_unlock(rl);
1957		break;
1958
1959	case DKIOCDUMPFINI:
1960		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1961			break;
1962		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1963		    RL_WRITER);
1964		error = zvol_dump_fini(zv);
1965		zfs_range_unlock(rl);
1966		break;
1967
1968	case DKIOCFREE:
1969	{
1970		dkioc_free_t df;
1971		dmu_tx_t *tx;
1972
1973		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
1974			error = SET_ERROR(EFAULT);
1975			break;
1976		}
1977
1978		/*
1979		 * Apply Postel's Law to length-checking.  If the caller
1980		 * overshoots the end of the volume, just free through to
1981		 * the end; a start past the end means nothing to do.
1982		 */
1983		if (df.df_start >= zv->zv_volsize)
1984			break;	/* No need to do anything... */
1985		if (df.df_start + df.df_length > zv->zv_volsize)
1986			df.df_length = DMU_OBJECT_END;
1987
1988		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
1989		    RL_WRITER);
1990		tx = dmu_tx_create(zv->zv_objset);
1991		error = dmu_tx_assign(tx, TXG_WAIT);
1992		if (error != 0) {
1993			dmu_tx_abort(tx);
1994		} else {
1995			zvol_log_truncate(zv, tx, df.df_start,
1996			    df.df_length, B_TRUE);
1997			dmu_tx_commit(tx);
1998			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1999			    df.df_start, df.df_length);
2000		}
2001
2002		zfs_range_unlock(rl);
2003
2004		if (error == 0) {
2005			/*
2006			 * If the write-cache is disabled or 'sync' property
2007			 * is set to 'always' then treat this as a synchronous
2008			 * operation (i.e. commit to zil).
2009			 */
2010			if (!(zv->zv_flags & ZVOL_WCE) ||
2011			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
2012				zil_commit(zv->zv_zilog, ZVOL_OBJ);
2013
2014			/*
2015			 * If the caller really wants synchronous writes, and
2016			 * can't wait for them, don't return until the write
2017			 * is done.
2018			 */
2019			if (df.df_flags & DF_WAIT_SYNC) {
2020				txg_wait_synced(
2021				    dmu_objset_pool(zv->zv_objset), 0);
2022			}
2023		}
2024		break;
2025	}
2026
2027	default:
2028		error = SET_ERROR(ENOTTY);
2029		break;
2030
2031	}
2032	mutex_exit(&spa_namespace_lock);
2033	return (error);
2034}
2035#endif	/* sun */
2036
2037int
2038zvol_busy(void)
2039{
2040	return (zvol_minors != 0);
2041}
2042
2043void
2044zvol_init(void)
2045{
2046	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
2047	    1) == 0);
2048	ZFS_LOG(1, "ZVOL Initialized.");
2049}
2050
2051void
2052zvol_fini(void)
2053{
2054	ddi_soft_state_fini(&zfsdev_state);
2055	ZFS_LOG(1, "ZVOL Deinitialized.");
2056}
2057
2058#ifdef sun
2059/*ARGSUSED*/
2060static int
2061zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
2062{
2063	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2064
2065	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2066		return (1);
2067	return (0);
2068}
2069
2070/*ARGSUSED*/
2071static void
2072zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
2073{
2074	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
2075
2076	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
2077}
2078
2079static int
2080zvol_dump_init(zvol_state_t *zv, boolean_t resize)
2081{
2082	dmu_tx_t *tx;
2083	int error;
2084	objset_t *os = zv->zv_objset;
2085	spa_t *spa = dmu_objset_spa(os);
2086	vdev_t *vd = spa->spa_root_vdev;
2087	nvlist_t *nv = NULL;
2088	uint64_t version = spa_version(spa);
2089	enum zio_checksum checksum;
2090
2091	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2092	ASSERT(vd->vdev_ops == &vdev_root_ops);
2093
2094	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
2095	    DMU_OBJECT_END);
2096	/* wait for dmu_free_long_range to actually free the blocks */
2097	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2098
2099	/*
2100	 * If the pool on which the dump device is being initialized has more
2101	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
2102	 * enabled.  If so, bump that feature's counter to indicate that the
2103	 * feature is active. We also check the vdev type to handle the
2104	 * following case:
2105	 *   # zpool create test raidz disk1 disk2 disk3
2106	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
2107	 *   the raidz vdev itself has 3 children.
2108	 */
2109	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
2110		if (!spa_feature_is_enabled(spa,
2111		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
2112			return (SET_ERROR(ENOTSUP));
2113		(void) dsl_sync_task(spa_name(spa),
2114		    zfs_mvdev_dump_feature_check,
2115		    zfs_mvdev_dump_activate_feature_sync, NULL, 2);
2116	}
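	/*
	 * For reference, a hedged sketch of the userland side: the
	 * feature is presumably enabled on the pool before dumpifying,
	 * e.g.:
	 *	# zpool set feature@multi_vdev_crash_dump=enabled test
	 */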
2117
2118	tx = dmu_tx_create(os);
2119	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2120	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2121	error = dmu_tx_assign(tx, TXG_WAIT);
2122	if (error) {
2123		dmu_tx_abort(tx);
2124		return (error);
2125	}
2126
2127	/*
2128	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2129	 * function.  Otherwise, use the old default -- OFF.
2130	 */
2131	checksum = spa_feature_is_active(spa,
2132	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2133	    ZIO_CHECKSUM_OFF;
2134
2135	/*
2136	 * If we are resizing the dump device then we only need to
2137	 * update the refreservation to match the newly updated
2138	 * zvolsize. Otherwise, we save off the zvol's original properties
2139	 * so that we can restore them if the zvol is ever undumpified.
2140	 */
2141	if (resize) {
2142		error = zap_update(os, ZVOL_ZAP_OBJ,
2143		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2144		    &zv->zv_volsize, tx);
2145	} else {
2146		uint64_t checksum, compress, refresrv, vbs, dedup;
2147
2148		error = dsl_prop_get_integer(zv->zv_name,
2149		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
2150		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2151		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
2152		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2153		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
2154		error = error ? error : dsl_prop_get_integer(zv->zv_name,
2155		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
2156		if (version >= SPA_VERSION_DEDUP) {
2157			error = error ? error :
2158			    dsl_prop_get_integer(zv->zv_name,
2159			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2160		}
2161
2162		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2163		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2164		    &compress, tx);
2165		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2166		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
2167		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2168		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2169		    &refresrv, tx);
2170		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2171		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2172		    &vbs, tx);
2173		error = error ? error : dmu_object_set_blocksize(
2174		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
2175		if (version >= SPA_VERSION_DEDUP) {
2176			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
2177			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2178			    &dedup, tx);
2179		}
2180		if (error == 0)
2181			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
2182	}
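	/*
	 * The "error = error ? error : ..." chains above carry the first
	 * failure through and short-circuit every later call, so a single
	 * check of "error" after the chain covers them all.
	 */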
2183	dmu_tx_commit(tx);
2184
2185	/*
2186	 * We only need to update the zvol's properties if we are initializing
2187	 * the dump area for the first time.
2188	 */
2189	if (!resize) {
2190		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2191		VERIFY(nvlist_add_uint64(nv,
2192		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2193		VERIFY(nvlist_add_uint64(nv,
2194		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2195		    ZIO_COMPRESS_OFF) == 0);
2196		VERIFY(nvlist_add_uint64(nv,
2197		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2198		    checksum) == 0);
2199		if (version >= SPA_VERSION_DEDUP) {
2200			VERIFY(nvlist_add_uint64(nv,
2201			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2202			    ZIO_CHECKSUM_OFF) == 0);
2203		}
2204
2205		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2206		    nv, NULL);
2207		nvlist_free(nv);
2208
2209		if (error)
2210			return (error);
2211	}
2212
2213	/* Allocate the space for the dump */
2214	error = zvol_prealloc(zv);
2215	return (error);
2216}
2217
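/*
 * Prepare the zvol for use as a dump device: (re)initialize its on-disk
 * state whenever the recorded dumpsize disagrees with the current
 * volsize, build the LBA map, and record the new dumpsize in the ZAP.
 */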
2218static int
2219zvol_dumpify(zvol_state_t *zv)
2220{
2221	int error = 0;
2222	uint64_t dumpsize = 0;
2223	dmu_tx_t *tx;
2224	objset_t *os = zv->zv_objset;
2225
2226	if (zv->zv_flags & ZVOL_RDONLY)
2227		return (SET_ERROR(EROFS));
2228
2229	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2230	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2231		boolean_t resize = (dumpsize > 0);
2232
2233		if ((error = zvol_dump_init(zv, resize)) != 0) {
2234			(void) zvol_dump_fini(zv);
2235			return (error);
2236		}
2237	}
2238
2239	/*
2240	 * Build up our LBA mapping.
2241	 */
2242	error = zvol_get_lbas(zv);
2243	if (error) {
2244		(void) zvol_dump_fini(zv);
2245		return (error);
2246	}
2247
2248	tx = dmu_tx_create(os);
2249	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2250	error = dmu_tx_assign(tx, TXG_WAIT);
2251	if (error) {
2252		dmu_tx_abort(tx);
2253		(void) zvol_dump_fini(zv);
2254		return (error);
2255	}
2256
2257	zv->zv_flags |= ZVOL_DUMPIFIED;
2258	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2259	    &zv->zv_volsize, tx);
2260	dmu_tx_commit(tx);
2261
2262	if (error) {
2263		(void) zvol_dump_fini(zv);
2264		return (error);
2265	}
2266
2267	txg_wait_synced(dmu_objset_pool(os), 0);
2268	return (0);
2269}
2270
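/*
 * Undo zvol_dumpify(): remove the dumpsize ZAP entry, restore the saved
 * checksum/compression/refreservation (and, where supported, dedup)
 * properties, free the preallocated blocks, and restore the original
 * volume block size.
 */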
2271static int
2272zvol_dump_fini(zvol_state_t *zv)
2273{
2274	dmu_tx_t *tx;
2275	objset_t *os = zv->zv_objset;
2276	nvlist_t *nv;
2277	int error = 0;
2278	uint64_t checksum, compress, refresrv, vbs, dedup;
2279	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2280
2281	/*
2282	 * Attempt to restore the zvol back to its pre-dumpified state.
2283	 * This is a best-effort attempt as it's possible that not all
2284	 * of these properties were initialized during the dumpify process
2285	 * (e.g. an error during zvol_dump_init).
2286	 */
2287
2288	tx = dmu_tx_create(os);
2289	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2290	error = dmu_tx_assign(tx, TXG_WAIT);
2291	if (error) {
2292		dmu_tx_abort(tx);
2293		return (error);
2294	}
2295	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2296	dmu_tx_commit(tx);
2297
2298	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2299	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2300	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2301	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2302	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2303	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2304	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2305	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2306
2307	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2308	(void) nvlist_add_uint64(nv,
2309	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2310	(void) nvlist_add_uint64(nv,
2311	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2312	(void) nvlist_add_uint64(nv,
2313	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2314	if (version >= SPA_VERSION_DEDUP &&
2315	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2316	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2317		(void) nvlist_add_uint64(nv,
2318		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2319	}
2320	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2321	    nv, NULL);
2322	nvlist_free(nv);
2323
2324	zvol_free_extents(zv);
2325	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2326	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2327	/* wait for dmu_free_long_range to actually free the blocks */
2328	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2329	tx = dmu_tx_create(os);
2330	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2331	error = dmu_tx_assign(tx, TXG_WAIT);
2332	if (error) {
2333		dmu_tx_abort(tx);
2334		return (error);
2335	}
2336	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2337		zv->zv_volblocksize = vbs;
2338	dmu_tx_commit(tx);
2339
2340	return (0);
2341}
2342#endif	/* sun */
2343
2344static void
2345zvol_geom_run(zvol_state_t *zv)
2346{
2347	struct g_provider *pp;
2348
2349	pp = zv->zv_provider;
2350	g_error_provider(pp, 0);
2351
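	/*
	 * pp->name has the form "zvol/<dataset>"; skipping
	 * sizeof(ZVOL_DRIVER) bytes (the prefix plus its NUL, which
	 * stands in for the '/') leaves just the dataset name for the
	 * thread title.  This assumes ZVOL_DRIVER is the "zvol" prefix
	 * used when the provider was created.
	 */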
2352	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
2353	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
2354}
2355
2356static void
2357zvol_geom_destroy(zvol_state_t *zv)
2358{
2359	struct g_provider *pp;
2360
2361	g_topology_assert();
2362
2363	mtx_lock(&zv->zv_queue_mtx);
2364	zv->zv_state = 1;
2365	wakeup_one(&zv->zv_queue);
2366	while (zv->zv_state != 2)
2367		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
2368	mtx_destroy(&zv->zv_queue_mtx);
2369
2370	pp = zv->zv_provider;
2371	zv->zv_provider = NULL;
2372	pp->private = NULL;
2373	g_wither_geom(pp->geom, ENXIO);
2374}
2375
2376static int
2377zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
2378{
2379	int count, error, flags;
2380
2381	g_topology_assert();
2382
2383	/*
2384	 * To keep things simple we expect either an open or a close, but
2385	 * not both at the same time.
2386	 */
2387	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
2388	    (acr <= 0 && acw <= 0 && ace <= 0),
2389	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
2390	    pp->name, acr, acw, ace));
2391
2392	if (pp->private == NULL) {
2393		if (acr <= 0 && acw <= 0 && ace <= 0)
2394			return (0);
2395		return (pp->error);
2396	}
2397
2398	/*
2399	 * We don't pass the FEXCL flag to zvol_open()/zvol_close() if ace != 0,
2400	 * because GEOM already handles that and handles it a bit differently.
2401	 * GEOM allows for multiple read/exclusive consumers and ZFS allows
2402	 * only one exclusive consumer, whether it is a reader or a writer.
2403	 * I prefer the way GEOM works, so I'll leave it to GEOM to decide
2404	 * what to do.
2405	 */
2406
2407	count = acr + acw + ace;
2408	if (count == 0)
2409		return (0);
2410
2411	flags = 0;
2412	if (acr != 0 || ace != 0)
2413		flags |= FREAD;
2414	if (acw != 0)
2415		flags |= FWRITE;
2416
2417	g_topology_unlock();
2418	if (count > 0)
2419		error = zvol_open(pp, flags, count);
2420	else
2421		error = zvol_close(pp, flags, -count);
2422	g_topology_lock();
2423	return (error);
2424}
2425
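/*
 * GEOM ->start method.  Requests are served inline when the calling
 * thread is allowed to sleep; otherwise they are queued for the
 * per-volume worker thread (zvol_geom_worker), since serving them may
 * block on ZFS I/O.
 */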
2426static void
2427zvol_geom_start(struct bio *bp)
2428{
2429	zvol_state_t *zv;
2430	boolean_t first;
2431
2432	zv = bp->bio_to->private;
2433	ASSERT(zv != NULL);
2434	switch (bp->bio_cmd) {
2435	case BIO_FLUSH:
2436		if (!THREAD_CAN_SLEEP())
2437			goto enqueue;
2438		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2439		g_io_deliver(bp, 0);
2440		break;
2441	case BIO_READ:
2442	case BIO_WRITE:
2443	case BIO_DELETE:
2444		if (!THREAD_CAN_SLEEP())
2445			goto enqueue;
2446		zvol_strategy(bp);
2447		break;
2448	case BIO_GETATTR:
2449		if (g_handleattr_int(bp, "GEOM::candelete", 1))
2450			return;
2451		/* FALLTHROUGH */
2452	default:
2453		g_io_deliver(bp, EOPNOTSUPP);
2454		break;
2455	}
2456	return;
2457
2458enqueue:
2459	mtx_lock(&zv->zv_queue_mtx);
2460	first = (bioq_first(&zv->zv_queue) == NULL);
2461	bioq_insert_tail(&zv->zv_queue, bp);
2462	mtx_unlock(&zv->zv_queue_mtx);
2463	if (first)
2464		wakeup_one(&zv->zv_queue);
2465}
2466
2467static void
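/*
 * Per-volume worker thread: drains the bio queue, committing the ZIL
 * for BIO_FLUSH and passing reads, writes and deletes to
 * zvol_strategy().  Exits once zvol_geom_destroy() sets zv_state to 1
 * and the queue has been emptied.
 */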
2468zvol_geom_worker(void *arg)
2469{
2470	zvol_state_t *zv;
2471	struct bio *bp;
2472
2473	thread_lock(curthread);
2474	sched_prio(curthread, PRIBIO);
2475	thread_unlock(curthread);
2476
2477	zv = arg;
2478	for (;;) {
2479		mtx_lock(&zv->zv_queue_mtx);
2480		bp = bioq_takefirst(&zv->zv_queue);
2481		if (bp == NULL) {
2482			if (zv->zv_state == 1) {
2483				zv->zv_state = 2;
2484				wakeup(&zv->zv_state);
2485				mtx_unlock(&zv->zv_queue_mtx);
2486				kthread_exit();
2487			}
2488			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
2489			    "zvol:io", 0);
2490			continue;
2491		}
2492		mtx_unlock(&zv->zv_queue_mtx);
2493		switch (bp->bio_cmd) {
2494		case BIO_FLUSH:
2495			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2496			g_io_deliver(bp, 0);
2497			break;
2498		case BIO_READ:
2499		case BIO_WRITE:
2500		case BIO_DELETE:
2501			zvol_strategy(bp);
2501			break;
2502		}
2503	}
2504}
2505
2506extern boolean_t dataset_name_hidden(const char *name);
2507
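/*
 * Create a minor for every snapshot of dataset "name": iterate the
 * snapshot list under the pool config lock, appending each snapshot
 * name to "name@" and creating the corresponding minor.
 */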
2508static int
2509zvol_create_snapshots(objset_t *os, const char *name)
2510{
2511	uint64_t cookie, obj;
2512	char *sname;
2513	int error, len;
2514
2515	cookie = obj = 0;
2516	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2517
2518#if 0
2519	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
2520	    DS_FIND_SNAPSHOTS);
2521#endif
2522
2523	for (;;) {
2524		len = snprintf(sname, MAXPATHLEN, "%s@", name);
2525		if (len >= MAXPATHLEN) {
2526			dmu_objset_rele(os, FTAG);
2527			error = ENAMETOOLONG;
2528			error = SET_ERROR(ENAMETOOLONG);
2529		}
2530
2531		dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
2532		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
2533		    sname + len, &obj, &cookie, NULL);
2534		dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
2535		if (error != 0) {
2536			if (error == ENOENT)
2537				error = 0;
2538			break;
2539		}
2540
2541		if ((error = zvol_create_minor(sname)) != 0) {
2542			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2543			    sname, error);
2544			break;
2545		}
2546	}
2547
2548	kmem_free(sname, MAXPATHLEN);
2549	return (error);
2550}
2551
2552int
2553zvol_create_minors(const char *name)
2554{
2555	uint64_t cookie;
2556	objset_t *os;
2557	char *osname, *p;
2558	int error, len;
2559
2560	if (dataset_name_hidden(name))
2561		return (0);
2562
2563	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2564		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2565		    name, error);
2566		return (error);
2567	}
2568	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
2569		dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
2570		dsl_pool_rele(dmu_objset_pool(os), FTAG);
2571		if ((error = zvol_create_minor(name)) == 0)
2572			error = zvol_create_snapshots(os, name);
2573		else {
2574			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
2575			    name, error);
2576		}
2577		dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
2578		dsl_dataset_rele(os->os_dsl_dataset, FTAG);
2579		return (error);
2580	}
2581	if (dmu_objset_type(os) != DMU_OST_ZFS) {
2582		dmu_objset_rele(os, FTAG);
2583		return (0);
2584	}
2585
2586	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2587	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
2588		dmu_objset_rele(os, FTAG);
2589		kmem_free(osname, MAXPATHLEN);
2590		return (ENOENT);
2591	}
2592	p = osname + strlen(osname);
2593	len = MAXPATHLEN - (p - osname);
2594
2595#if 0
2596	/* Prefetch the datasets. */
2597	cookie = 0;
2598	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
2599		if (!dataset_name_hidden(osname))
2600			(void) dmu_objset_prefetch(osname, NULL);
2601	}
2602#endif
2603
2604	cookie = 0;
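	/*
	 * Recurse into child datasets.  The hold on "os" is dropped
	 * before each recursive call and reacquired afterwards,
	 * presumably so that no hold is retained while an entire subtree
	 * of minors is being created.
	 */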
2605	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
2606	    &cookie) == 0) {
2607		dmu_objset_rele(os, FTAG);
2608		(void) zvol_create_minors(osname);
2609		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
2610			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
2611			    name, error);
2612			return (error);
2613		}
2614	}
2615
2616	dmu_objset_rele(os, FTAG);
2617	kmem_free(osname, MAXPATHLEN);
2618	return (0);
2619}
2620
2621static void
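/*
 * Rename one minor.  In GEOM mode the old provider is withered and a
 * fresh one is created under the new name; in DEV mode the character
 * device is destroyed and re-created.  The caller must hold
 * spa_namespace_lock.
 */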
2622zvol_rename_minor(zvol_state_t *zv, const char *newname)
2623{
2624	struct g_geom *gp;
2625	struct g_provider *pp;
2626	struct cdev *dev;
2627
2628	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2629
2630	if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
2631		g_topology_lock();
2632		pp = zv->zv_provider;
2633		ASSERT(pp != NULL);
2634		gp = pp->geom;
2635		ASSERT(gp != NULL);
2636
2637		zv->zv_provider = NULL;
2638		g_wither_provider(pp, ENXIO);
2639
2640		pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
2641		pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
2642		pp->sectorsize = DEV_BSIZE;
2643		pp->mediasize = zv->zv_volsize;
2644		pp->private = zv;
2645		zv->zv_provider = pp;
2646		g_error_provider(pp, 0);
2647		g_topology_unlock();
2648	} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
2649		dev = zv->zv_dev;
2650		ASSERT(dev != NULL);
2651		zv->zv_dev = NULL;
2652		destroy_dev(dev);
2653
2654		if (make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
2655		    &dev, &zvol_cdevsw, NULL, UID_ROOT, GID_OPERATOR,
2656		    0640, "%s/%s", ZVOL_DRIVER, newname) == 0) {
2657			zv->zv_dev = dev;
2658			dev->si_iosize_max = MAXPHYS;
2659			dev->si_drv2 = zv;
2660		}
2661	}
2662	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
2663}
2664
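/*
 * Rename every minor whose dataset name equals "oldname" or lies below
 * it (a child separated by '/' or a snapshot separated by '@'); e.g.
 * renaming tank/vol also renames tank/vol@snap.
 */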
2665void
2666zvol_rename_minors(const char *oldname, const char *newname)
2667{
2668	char name[MAXPATHLEN];
2669	struct g_provider *pp;
2670	struct g_geom *gp;
2671	size_t oldnamelen, newnamelen;
2672	zvol_state_t *zv;
2673	char *namebuf;
2674
2675	oldnamelen = strlen(oldname);
2676	newnamelen = strlen(newname);
2677
2678	DROP_GIANT();
2679	mutex_enter(&spa_namespace_lock);
2680
2681	LIST_FOREACH(zv, &all_zvols, zv_links) {
2682		if (strcmp(zv->zv_name, oldname) == 0) {
2683			zvol_rename_minor(zv, newname);
2684		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2685		    (zv->zv_name[oldnamelen] == '/' ||
2686		     zv->zv_name[oldnamelen] == '@')) {
2687			snprintf(name, sizeof(name), "%s%c%s", newname,
2688			    zv->zv_name[oldnamelen],
2689			    zv->zv_name + oldnamelen + 1);
2690			zvol_rename_minor(zv, name);
2691		}
2692	}
2693
2694	mutex_exit(&spa_namespace_lock);
2695	PICKUP_GIANT();
2696}
2697
2698static int
2699zvol_d_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2700{
2701	zvol_state_t *zv;
2702	int err = 0;
2703
2704	mutex_enter(&spa_namespace_lock);
2705	zv = dev->si_drv2;
2706	if (zv == NULL) {
2707		mutex_exit(&spa_namespace_lock);
2708		return (ENXIO);		/* zvol_create_minor() not done yet */
2709	}
2710
2711	if (zv->zv_total_opens == 0)
2712		err = zvol_first_open(zv);
2713	if (err) {
2714		mutex_exit(&spa_namespace_lock);
2715		return (err);
2716	}
2717	if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
2718		err = SET_ERROR(EROFS);
2719		goto out;
2720	}
2721	if (zv->zv_flags & ZVOL_EXCL) {
2722		err = SET_ERROR(EBUSY);
2723		goto out;
2724	}
2725#ifdef FEXCL
2726	if (flags & FEXCL) {
2727		if (zv->zv_total_opens != 0) {
2728			err = SET_ERROR(EBUSY);
2729			goto out;
2730		}
2731		zv->zv_flags |= ZVOL_EXCL;
2732	}
2733#endif
2734
2735	zv->zv_total_opens++;
2736	mutex_exit(&spa_namespace_lock);
2737	return (err);
2738out:
2739	if (zv->zv_total_opens == 0)
2740		zvol_last_close(zv);
2741	mutex_exit(&spa_namespace_lock);
2742	return (err);
2743}
2744
2745static int
2746zvol_d_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2747{
2748	zvol_state_t *zv;
2749	int err = 0;
2750
2751	mutex_enter(&spa_namespace_lock);
2752	zv = dev->si_drv2;
2753	if (zv == NULL) {
2754		mutex_exit(&spa_namespace_lock);
2755		return (ENXIO);
2756	}
2757
2758	if (zv->zv_flags & ZVOL_EXCL) {
2759		ASSERT(zv->zv_total_opens == 1);
2760		zv->zv_flags &= ~ZVOL_EXCL;
2761	}
2762
2763	/*
2764	 * If the open count is zero, this is a spurious close.
2765	 * That indicates a bug in the kernel / DDI framework.
2766	 */
2767	ASSERT(zv->zv_total_opens != 0);
2768
2769	/*
2770	 * You may get multiple opens, but only one close.
2771	 */
2772	zv->zv_total_opens--;
2773
2774	if (zv->zv_total_opens == 0)
2775		zvol_last_close(zv);
2776
2777	mutex_exit(&spa_namespace_lock);
2778	return (0);
2779}
2780
2781static int
2782zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
2783{
2784	zvol_state_t *zv;
2785	rl_t *rl;
2786	off_t offset, length, chunk;
2787	int i, error;
2788	u_int u;
2789
2790	zv = dev->si_drv2;
2791
2792	error = 0;
2793	KASSERT(zv->zv_total_opens > 0,
2794	    ("Device with zero access count in zvol_d_ioctl"));
2795
2796	i = IOCPARM_LEN(cmd);
2797	switch (cmd) {
2798	case DIOCGSECTORSIZE:
2799		*(u_int *)data = DEV_BSIZE;
2800		break;
2801	case DIOCGMEDIASIZE:
2802		*(off_t *)data = zv->zv_volsize;
2803		break;
2804	case DIOCGFLUSH:
2805		zil_commit(zv->zv_zilog, ZVOL_OBJ);
2806		break;
2807	case DIOCGDELETE:
2808		offset = ((off_t *)data)[0];
2809		length = ((off_t *)data)[1];
2810		if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
2811		    offset < 0 || offset >= zv->zv_volsize ||
2812		    length <= 0) {
2813			printf("%s: offset=%jd length=%jd\n", __func__,
2814			    (intmax_t)offset, (intmax_t)length);
2815			error = EINVAL;
2816			break;
2817		}
2818
2819		rl = zfs_range_lock(&zv->zv_znode, offset, length, RL_WRITER);
2820		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
2821		error = dmu_tx_assign(tx, TXG_WAIT);
2822		if (error != 0) {
2823			dmu_tx_abort(tx);
2824		} else {
2825			zvol_log_truncate(zv, tx, offset, length, B_TRUE);
2826			dmu_tx_commit(tx);
2827			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
2828			    offset, length);
2829		}
2830		zfs_range_unlock(rl);
2831		if (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
2832			zil_commit(zv->zv_zilog, ZVOL_OBJ);
2833		break;
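	/*
	 * Illustrative sketch (not part of the driver): userland passes
	 * DIOCGDELETE an { offset, length } pair of off_t, both multiples
	 * of DEV_BSIZE; the device path below is hypothetical.
	 *
	 *	off_t args[2] = { 0, 1024 * 1024 };
	 *	int fd = open("/dev/zvol/tank/vol", O_RDWR);
	 *	if (fd != -1 && ioctl(fd, DIOCGDELETE, args) == -1)
	 *		err(1, "DIOCGDELETE");
	 */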
2834	case DIOCGSTRIPESIZE:
2835		*(off_t *)data = zv->zv_volblocksize;
2836		break;
2837	case DIOCGSTRIPEOFFSET:
2838		*(off_t *)data = 0;
2839		break;
2840	default:
2841		error = ENOIOCTL;
2842	}
2843
2844	return (error);
2845}
2846