/* zfs_ctldir.c revision 197515 */
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28/*
29 * ZFS control directory (a.k.a. ".zfs")
30 *
31 * This directory provides a common location for all ZFS meta-objects.
32 * Currently, this is only the 'snapshot' directory, but this may expand in the
33 * future.  The elements are built using the GFS primitives, as the hierarchy
34 * does not actually exist on disk.
35 *
36 * For 'snapshot', we don't want to have all snapshots always mounted, because
37 * this would take up a huge amount of space in /etc/mnttab.  We have three
38 * types of objects:
39 *
40 * 	ctldir ------> snapshotdir -------> snapshot
41 *                                             |
42 *                                             |
43 *                                             V
44 *                                         mounted fs
45 *
46 * The 'snapshot' node contains just enough information to lookup '..' and act
47 * as a mountpoint for the snapshot.  Whenever we lookup a specific snapshot, we
48 * perform an automount of the underlying filesystem and return the
49 * corresponding vnode.
50 *
51 * All mounts are handled automatically by the kernel, but unmounts are
52 * (currently) handled from user land.  The main reason is that there is no
53 * reliable way to auto-unmount the filesystem when it's "no longer in use".
54 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
55 * unmounts any snapshots within the snapshot directory.
56 *
57 * The '.zfs', '.zfs/snapshot', and all directories created under
58 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
59 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
60 *
61 * File systems mounted ontop of the GFS nodes '.zfs/snapshot/<snapname>'
62 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
63 * However, vnodes within these mounted on file systems have their v_vfsp
64 * fields set to the head filesystem to make NFS happy (see
65 * zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t
66 * so that it cannot be freed until all snapshots have been unmounted.
67 */
68
69#include <sys/zfs_context.h>
70#include <sys/zfs_ctldir.h>
71#include <sys/zfs_ioctl.h>
72#include <sys/zfs_vfsops.h>
73#include <sys/namei.h>
74#include <sys/gfs.h>
75#include <sys/stat.h>
76#include <sys/dmu.h>
77#include <sys/dsl_deleg.h>
78#include <sys/mount.h>
79#include <sys/sunddi.h>
80
81#include "zfs_namecheck.h"
82
/*
 * Common private data for every GFS node under '.zfs': the embedded GFS
 * directory state, the node's inode/object id, and its creation time.
 */
typedef struct zfsctl_node {
	gfs_dir_t	zc_gfs_private;
	uint64_t	zc_id;
	timestruc_t	zc_cmtime;	/* ctime and mtime, always the same */
} zfsctl_node_t;

/*
 * Private data for the '.zfs/snapshot' directory: the common node plus a
 * mutex-protected AVL tree of the snapshots currently known/mounted.
 */
typedef struct zfsctl_snapdir {
	zfsctl_node_t	sd_node;
	kmutex_t	sd_lock;	/* protects sd_snaps */
	avl_tree_t	sd_snaps;	/* zfs_snapentry_t, ordered by name */
} zfsctl_snapdir_t;

/*
 * One entry per snapshot under '.zfs/snapshot', keyed by snapshot name.
 */
typedef struct {
	char		*se_name;	/* snapshot name (kmem-allocated) */
	vnode_t		*se_root;	/* GFS mountpoint vnode for the snap */
	avl_node_t	se_node;	/* linkage into sd_snaps */
} zfs_snapentry_t;
100
101static int
102snapentry_compare(const void *a, const void *b)
103{
104	const zfs_snapentry_t *sa = a;
105	const zfs_snapentry_t *sb = b;
106	int ret = strcmp(sa->se_name, sb->se_name);
107
108	if (ret < 0)
109		return (-1);
110	else if (ret > 0)
111		return (1);
112	else
113		return (0);
114}
115
/* Vnode operation vectors, defined later in this file. */
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;

/* Constructors and the unmount helper for the GFS nodes under '.zfs'. */
static vnode_t *zfsctl_mknode_snapdir(vnode_t *);
static vnode_t *zfsctl_snapshot_mknode(vnode_t *, uint64_t objset);
static int zfsctl_unmount_snap(zfs_snapentry_t *, int, cred_t *);
123
124/*
125 * Root directory elements.  We have only a single static entry, 'snapshot'.
126 */
/* Static contents of '.zfs': a single cached 'snapshot' subdirectory. */
static gfs_dirent_t zfsctl_root_entries[] = {
	{ "snapshot", zfsctl_mknode_snapdir, GFS_CACHE_VNODE },
	{ NULL }
};

/* include . and .. in the calculation */
#define	NROOT_ENTRIES	((sizeof (zfsctl_root_entries) / \
    sizeof (gfs_dirent_t)) + 1)
135
136
137/*
138 * Initialize the various GFS pieces we'll need to create and manipulate .zfs
139 * directories.  This is called from the ZFS init routine, and initializes the
140 * vnode ops vectors that we'll be using.
141 */
void
zfsctl_init(void)
{
	/* Nothing to do on FreeBSD: the vop vectors are statically built. */
}
146
void
zfsctl_fini(void)
{
	/* Nothing to tear down; see zfsctl_init(). */
}
151
152/*
153 * Return the inode number associated with the 'snapshot' directory.
154 */
/* ARGSUSED */
static ino64_t
zfsctl_root_inode_cb(vnode_t *vp, int index)
{
	/* Only entry 0 ('snapshot') exists; see zfsctl_root_entries. */
	ASSERT(index == 0);
	return (ZFSCTL_INO_SNAPDIR);
}
162
163/*
164 * Create the '.zfs' directory.  This directory is cached as part of the VFS
165 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
166 * therefore checks against a vfs_count of 2 instead of 1.  This reference
167 * is removed when the ctldir is destroyed in the unmount.
168 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	vnode_t *vp, *rvp;
	zfsctl_node_t *zcp;

	ASSERT(zfsvfs->z_ctldir == NULL);

	/*
	 * Create the GFS root node for '.zfs'.  Caching it on the zfsvfs
	 * keeps a hold on the vnode until zfsctl_destroy().
	 */
	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    &zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = ZFSCTL_INO_ROOT;

	/*
	 * Use the creation time of the filesystem root for '.zfs' c/mtime.
	 * VFS_ROOT returns the root vnode locked (LK_EXCLUSIVE);
	 * VN_URELE drops both the lock and the reference.
	 */
	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	ZFS_TIME_DECODE(&zcp->zc_cmtime, VTOZ(rvp)->z_phys->zp_crtime);
	VN_URELE(rvp);

	/*
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	 */
	vp->v_vflag &= ~VV_ROOT;

	zfsvfs->z_ctldir = vp;

	/* The vnode is still locked from creation; release the lock. */
	VOP_UNLOCK(vp, 0);
}
198
199/*
200 * Destroy the '.zfs' directory.  Only called when the filesystem is unmounted.
201 * There might still be more references if we were force unmounted, but only
202 * new zfs_inactive() calls can occur and they don't reference .zfs
203 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	/* Drop the hold taken when the ctldir vnode was created. */
	VN_RELE(zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}
210
211/*
212 * Given a root znode, retrieve the associated .zfs directory.
213 * Add a hold to the vnode and return it.
214 */
215vnode_t *
216zfsctl_root(znode_t *zp)
217{
218	ASSERT(zfs_has_ctldir(zp));
219	VN_HOLD(zp->z_zfsvfs->z_ctldir);
220	return (zp->z_zfsvfs->z_ctldir);
221}
222
223/*
224 * Common open routine.  Disallow any write access.
225 */
226/* ARGSUSED */
227static int
228zfsctl_common_open(struct vop_open_args *ap)
229{
230	int flags = ap->a_mode;
231
232	if (flags & FWRITE)
233		return (EACCES);
234
235	return (0);
236}
237
238/*
239 * Common close routine.  Nothing to do here.
240 */
241/* ARGSUSED */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
	/* No per-open state to release. */
	return (0);
}
247
248/*
249 * Common access routine.  Disallow writes.
250 */
251/* ARGSUSED */
252static int
253zfsctl_common_access(ap)
254	struct vop_access_args /* {
255		struct vnode *a_vp;
256		int  a_accmode;
257		struct ucred *a_cred;
258		struct thread *a_td;
259	} */ *ap;
260{
261	int mode = ap->a_accmode;
262
263	if (mode & VWRITE)
264		return (EACCES);
265
266	return (0);
267}
268
269/*
270 * Common getattr function.  Fill in basic information.
271 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	zfsctl_node_t	*zcp = vp->v_data;
	timestruc_t	now;

	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rdev = 0;
	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_seq = 0;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	/* Directory readable and searchable (r-xr-xr-x) by everyone. */
	vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
	    S_IROTH | S_IXOTH;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&now);
	vap->va_atime = now;
	/* mtime/ctime/birthtime all track the filesystem creation time. */
	vap->va_mtime = vap->va_ctime = vap->va_birthtime = zcp->zc_cmtime;
	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_flags = 0;
}
301
/*
 * Encode a stable NFS file handle for a '.zfs' node: the node's object
 * id in little-endian byte order plus a generation number of zero.
 */
/*ARGSUSED*/
static int
zfsctl_common_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t		*vp = ap->a_vp;
	fid_t		*fidp = (void *)ap->a_fid;
	zfsvfs_t	*zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t	*zcp = vp->v_data;
	uint64_t	object = zcp->zc_id;
	zfid_short_t	*zfid;
	int		i;

	ZFS_ENTER(zfsvfs);

	fidp->fid_len = SHORT_FID_LEN;

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = SHORT_FID_LEN;

	/* Pack zc_id one byte at a time, least-significant byte first. */
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs znodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	ZFS_EXIT(zfsvfs);
	return (0);
}
336
/*
 * Reclaim a '.zfs' vnode: tear down its VM object and detach the
 * private data (the GFS node itself is freed by the inactive path).
 */
static int
zfsctl_common_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);
	VI_LOCK(vp);
	vp->v_data = NULL;
	VI_UNLOCK(vp);
	return (0);
}
355
356/*
357 * .zfs inode namespace
358 *
359 * We need to generate unique inode numbers for all files and directories
360 * within the .zfs pseudo-filesystem.  We use the following scheme:
361 *
362 * 	ENTRY			ZFSCTL_INODE
363 * 	.zfs			1
364 * 	.zfs/snapshot		2
365 * 	.zfs/snapshot/<snap>	objectid(snap)
366 */
367
#define	ZFSCTL_INO_SNAP(id)	(id)	/* snapshots use their objset id */
369
370/*
371 * Get root directory attributes.
372 */
/* ARGSUSED */
static int
zfsctl_root_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;

	ZFS_ENTER(zfsvfs);
	vap->va_nodeid = ZFSCTL_INO_ROOT;
	/* '.', '..' and the static 'snapshot' entry. */
	vap->va_nlink = vap->va_size = NROOT_ENTRIES;

	zfsctl_common_getattr(vp, vap);
	ZFS_EXIT(zfsvfs);

	return (0);
}
396
397/*
398 * Special case the handling of "..".
399 */
/* ARGSUSED */
int
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ZFS_ENTER(zfsvfs);

	if (strcmp(nm, "..") == 0) {
		/*
		 * '..' of '.zfs' is the filesystem root.  VFS_ROOT returns
		 * it locked; drop the lock to match gfs_vop_lookup(), which
		 * returns an unlocked, held vnode.
		 */
		err = VFS_ROOT(dvp->v_vfsp, LK_EXCLUSIVE, vpp);
		if (err == 0)
			VOP_UNLOCK(*vpp, 0);
	} else {
		/* Everything else ('.', 'snapshot') is handled by GFS. */
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
	}

	ZFS_EXIT(zfsvfs);

	return (err);
}
430
431/*
432 * Special case the handling of "..".
433 */
/* ARGSUSED */
int
zfsctl_freebsd_root_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	cred_t *cr = ap->a_cnp->cn_cred;
	int flags = ap->a_cnp->cn_flags;
	int nameiop = ap->a_cnp->cn_nameiop;
	char nm[NAME_MAX + 1];
	int err;

	/* '.zfs' is immutable: nothing may be created or renamed here. */
	if ((flags & ISLASTCN) && (nameiop == RENAME || nameiop == CREATE))
		return (EOPNOTSUPP);

	/* Make a NUL-terminated copy of the component name. */
	ASSERT(ap->a_cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);

	err = zfsctl_root_lookup(dvp, nm, vpp, NULL, 0, NULL, cr, NULL, NULL, NULL);
	/*
	 * zfsctl_root_lookup() returns the vnode unlocked; FreeBSD VOP_LOOKUP
	 * expects it locked, except for '.' which is dvp itself.
	 */
	if (err == 0 && (nm[0] != '.' || nm[1] != '\0'))
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);

	return (err);
}
463
/* Vnode operations for the '.zfs' directory itself. */
static struct vop_vector zfsctl_ops_root = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_root_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_readdir =	gfs_vop_readdir,
	.vop_lookup =	zfsctl_freebsd_root_lookup,
	.vop_inactive =	gfs_vop_inactive,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_fid =	zfsctl_common_fid,
};
477
478static int
479zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
480{
481	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
482
483	if (snapshot_namecheck(name, NULL, NULL) != 0)
484		return (EILSEQ);
485	dmu_objset_name(os, zname);
486	if (strlen(zname) + 1 + strlen(name) >= len)
487		return (ENAMETOOLONG);
488	(void) strcat(zname, "@");
489	(void) strcat(zname, name);
490	return (0);
491}
492
/*
 * Unmount the filesystem mounted on a snapshot's mountpoint vnode.
 * The vnode must currently be a mount point.
 */
static int
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
{
	vnode_t *svp = sep->se_root;
	int error;

	ASSERT(vn_ismntpt(svp));

	/* this will be dropped by dounmount() */
	if ((error = vn_vfswlock(svp)) != 0)
		return (error);

	return (dounmount(vn_mountedvfs(svp), fflags, curthread));
}
507
#if 0
/*
 * NOTE(review): disabled in this FreeBSD port; retained from OpenSolaris
 * for reference.  Renames a mounted snapshot's AVL entry and rewrites the
 * tail of the vfs mountpoint/resource strings to match the new name.
 */
static void
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
{
	avl_index_t where;
	vfs_t *vfsp;
	refstr_t *pathref;
	char newpath[MAXNAMELEN];
	char *tail;

	ASSERT(MUTEX_HELD(&sdp->sd_lock));
	ASSERT(sep != NULL);

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	vfs_lock_wait(vfsp);

	/*
	 * Change the name in the AVL tree.
	 */
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	/*
	 * Change the current mountpoint info:
	 * 	- update the tail of the mntpoint path
	 *	- update the tail of the resource path
	 */
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath);

	vfs_unlock(vfsp);
}
#endif
562
#if 0
/*
 * NOTE(review): disabled in this FreeBSD port; retained from OpenSolaris.
 * Would implement rename of a snapshot within '.zfs/snapshot' by renaming
 * the underlying dataset and patching the AVL entry.
 */
/*ARGSUSED*/
static int
zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = sdvp->v_data;
	zfs_snapentry_t search, *sep;
	zfsvfs_t *zfsvfs;
	avl_index_t where;
	char from[MAXNAMELEN], to[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = sdvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			snm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(sdvp, snm, MAXNAMELEN, from);
	if (!err)
		err = zfsctl_snapshot_zname(tdvp, tnm, MAXNAMELEN, to);
	if (!err)
		err = zfs_secpolicy_rename_perms(from, to, cr);
	if (err)
		return (err);

	/*
	 * Cannot move snapshots out of the snapdir.
	 */
	if (sdvp != tdvp)
		return (EINVAL);

	if (strcmp(snm, tnm) == 0)
		return (0);

	mutex_enter(&sdp->sd_lock);

	search.se_name = (char *)snm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) {
		mutex_exit(&sdp->sd_lock);
		return (ENOENT);
	}

	err = dmu_objset_rename(from, to, B_FALSE);
	if (err == 0)
		zfsctl_rename_snap(sdp, sep, tnm);

	mutex_exit(&sdp->sd_lock);

	return (err);
}
#endif
627
#if 0
/*
 * NOTE(review): disabled in this FreeBSD port; retained from OpenSolaris.
 * Would implement 'rmdir' of a snapshot under '.zfs/snapshot' by force
 * unmounting it and destroying the underlying dataset.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	zfsvfs_t *zfsvfs;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = dvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {

		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			name = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname);
	if (!err)
		err = zfs_secpolicy_destroy_perms(snapname, cr);
	if (err)
		return (err);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		if (err) {
			avl_index_t where;

			if (avl_find(&sdp->sd_snaps, sep, &where) == NULL)
				avl_insert(&sdp->sd_snaps, sep, where);
		} else
			err = dmu_objset_destroy(snapname);
	} else {
		err = ENOENT;
	}

	mutex_exit(&sdp->sd_lock);

	return (err);
}
#endif
688
689/*
690 * This creates a snapshot under '.zfs/snapshot'.
691 */
692/* ARGSUSED */
693static int
694zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t  **vpp,
695    cred_t *cr, caller_context_t *cc, int flags, vsecattr_t *vsecp)
696{
697	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
698	char name[MAXNAMELEN];
699	int err;
700	static enum symfollow follow = NO_FOLLOW;
701	static enum uio_seg seg = UIO_SYSSPACE;
702
703	if (snapshot_namecheck(dirname, NULL, NULL) != 0)
704		return (EILSEQ);
705
706	dmu_objset_name(zfsvfs->z_os, name);
707
708	*vpp = NULL;
709
710	err = zfs_secpolicy_snapshot_perms(name, cr);
711	if (err)
712		return (err);
713
714	if (err == 0) {
715		err = dmu_objset_snapshot(name, dirname, B_FALSE);
716		if (err)
717			return (err);
718		err = lookupnameat(dirname, seg, follow, NULL, vpp, dvp);
719	}
720
721	return (err);
722}
723
/* FreeBSD VOP_MKDIR glue: 'mkdir' in '.zfs/snapshot' creates a snapshot. */
static int
zfsctl_freebsd_snapdir_mkdir(ap)
        struct vop_mkdir_args /* {
                struct vnode *a_dvp;
                struct vnode **a_vpp;
                struct componentname *a_cnp;
                struct vattr *a_vap;
        } */ *ap;
{

	ASSERT(ap->a_cnp->cn_flags & SAVENAME);

	return (zfsctl_snapdir_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, NULL,
	    ap->a_vpp, ap->a_cnp->cn_cred, NULL, 0, NULL));
}
739
740/*
741 * Lookup entry point for the 'snapshot' directory.  Try to open the
742 * snapshot if it exist, creating the pseudo filesystem vnode as necessary.
743 * Perform a mount of the associated dataset on top of the vnode.
744 */
/* ARGSUSED */
int
zfsctl_snapdir_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char nm[NAME_MAX + 1];
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;
	/*
	 * NOTE(review): 'flags' is always 0 on FreeBSD, so the
	 * LOOKUP_XATTR and FIGNORECASE branches below are never taken;
	 * they are retained from the Solaris interface.
	 */
	int flags = 0;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);
	/* Make a NUL-terminated copy of the component name. */
	ASSERT(ap->a_cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);

	ASSERT(dvp->v_type == VDIR);

	/* '.' and '..' are handled generically by GFS. */
	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0)
		return (0);

	*vpp = NULL;

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (ENOENT);

	ZFS_ENTER(zfsvfs);

	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			strlcpy(nm, real, sizeof(nm));
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
#if 0
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
#endif
	}

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		/*
		 * The snapshot is already known.  traverse() follows the
		 * mount to the snapshot's root vnode.
		 */
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp, LK_EXCLUSIVE | LK_RETRY);
		if (err) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * handle "ls *" or "?" in a graceful manner,
		 * forcing EILSEQ to ENOENT.
		 * Since shell ultimately passes "*" or "?" as name to lookup
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_open(snapname, DMU_OST_ZFS,
	    DS_MODE_USER | DS_MODE_READONLY, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		/* Translate errors and add SAVENAME when needed. */
		if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
			err = EJUSTRETURN;
			cnp->cn_flags |= SAVENAME;
		} else {
			err = ENOENT;
		}
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/* Record the snapshot in sd_snaps and build its mountpoint vnode. */
	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	VN_HOLD(*vpp);
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_close(snap);
domount:
	/* Mount the snapshot dataset on <mntpoint>/.zfs/snapshot/<name>. */
	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len, "%s/.zfs/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, nm);
	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, snapname, 0);
	kmem_free(mountpoint, mountpoint_len);
	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);
	if (err != 0)
		*vpp = NULL;
	return (err);
}
905
/*
 * GFS readdir callback for '.zfs/snapshot': emits one snapshot name per
 * call, using the DMU snapshot iterator cookie as the directory offset.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_readdir_cb(vnode_t *vp, void *dp, int *eofp,
    offset_t *offp, offset_t *nextp, void *data, int flags)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	char snapname[MAXNAMELEN];
	uint64_t id, cookie;
	boolean_t case_conflict;
	int error;

	ZFS_ENTER(zfsvfs);

	cookie = *offp;
	error = dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN, snapname, &id,
	    &cookie, &case_conflict);
	if (error) {
		ZFS_EXIT(zfsvfs);
		/* ENOENT from the iterator means no more snapshots: EOF. */
		if (error == ENOENT) {
			*eofp = 1;
			return (0);
		}
		return (error);
	}

	if (flags & V_RDDIR_ENTFLAGS) {
		/* Extended entries carry case-conflict information. */
		edirent_t *eodp = dp;

		(void) strcpy(eodp->ed_name, snapname);
		eodp->ed_ino = ZFSCTL_INO_SNAP(id);
		eodp->ed_eflags = case_conflict ? ED_CASE_CONFLICT : 0;
	} else {
		struct dirent64 *odp = dp;

		(void) strcpy(odp->d_name, snapname);
		odp->d_ino = ZFSCTL_INO_SNAP(id);
	}
	*nextp = cookie;

	ZFS_EXIT(zfsvfs);

	return (0);
}
949
950/*
951 * pvp is the '.zfs' directory (zfsctl_node_t).
952 * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t).
953 *
954 * This function is the callback to create a GFS vnode for '.zfs/snapshot'
955 * when a lookup is performed on .zfs for "snapshot".
956 */
vnode_t *
zfsctl_mknode_snapdir(vnode_t *pvp)
{
	vnode_t *vp;
	zfsctl_snapdir_t *sdp;

	vp = gfs_dir_create(sizeof (zfsctl_snapdir_t), pvp, pvp->v_vfsp,
	    &zfsctl_ops_snapdir, NULL, NULL, MAXNAMELEN,
	    zfsctl_snapdir_readdir_cb, NULL);
	sdp = vp->v_data;
	sdp->sd_node.zc_id = ZFSCTL_INO_SNAPDIR;
	/* Inherit c/mtime from the parent '.zfs' node. */
	sdp->sd_node.zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	mutex_init(&sdp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&sdp->sd_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	/* gfs_dir_create() returned the vnode locked; release the lock. */
	VOP_UNLOCK(vp, 0);
	return (vp);
}
975
/* ARGSUSED */
static int
zfsctl_snapdir_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_snapdir_t *sdp = vp->v_data;

	ZFS_ENTER(zfsvfs);
	zfsctl_common_getattr(vp, vap);
	vap->va_nodeid = gfs_file_inode(vp);
	/* Known snapshot entries plus '.' and '..'. */
	vap->va_nlink = vap->va_size = avl_numnodes(&sdp->sd_snaps) + 2;
	ZFS_EXIT(zfsvfs);

	return (0);
}
999
/* ARGSUSED */
static int
zfsctl_snapdir_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	zfsctl_snapdir_t *sdp = vp->v_data;
	zfs_snapentry_t *sep;

	/*
	 * On forced unmount we have to free snapshots from here.
	 */
	mutex_enter(&sdp->sd_lock);
	while ((sep = avl_first(&sdp->sd_snaps)) != NULL) {
		avl_remove(&sdp->sd_snaps, sep);
		kmem_free(sep->se_name, strlen(sep->se_name) + 1);
		kmem_free(sep, sizeof (zfs_snapentry_t));
	}
	mutex_exit(&sdp->sd_lock);
	/*
	 * NOTE(review): sdp is still referenced after gfs_dir_inactive();
	 * this assumes gfs_dir_inactive() does not free v_data here —
	 * confirm against the FreeBSD gfs implementation.
	 */
	gfs_dir_inactive(vp);
	ASSERT(avl_numnodes(&sdp->sd_snaps) == 0);
	mutex_destroy(&sdp->sd_lock);
	avl_destroy(&sdp->sd_snaps);
	kmem_free(sdp, sizeof (zfsctl_snapdir_t));

	return (0);
}
1030
/* Vnode operations for the '.zfs/snapshot' directory. */
static struct vop_vector zfsctl_ops_snapdir = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_snapdir_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_mkdir =	zfsctl_freebsd_snapdir_mkdir,
	.vop_readdir =	gfs_vop_readdir,
	.vop_lookup =	zfsctl_snapdir_lookup,
	.vop_inactive =	zfsctl_snapdir_inactive,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_fid =	zfsctl_common_fid,
};
1045
1046/*
1047 * pvp is the GFS vnode '.zfs/snapshot'.
1048 *
1049 * This creates a GFS node under '.zfs/snapshot' representing each
1050 * snapshot.  This newly created GFS node is what we mount snapshot
1051 * vfs_t's ontop of.
1052 */
static vnode_t *
zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset)
{
	vnode_t *vp;
	zfsctl_node_t *zcp;

	vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp, pvp->v_vfsp,
	    &zfsctl_ops_snapshot, NULL, NULL, MAXNAMELEN, NULL, NULL);
	VN_HOLD(vp);
	zcp = vp->v_data;
	zcp->zc_id = objset;
	/* Hold the head vfs_t; released in zfsctl_snapshot_inactive(). */
	VFS_HOLD(vp->v_vfsp);
	/* gfs_dir_create() returned the vnode locked; release the lock. */
	VOP_UNLOCK(vp, 0);

	return (vp);
}
1069
/*
 * Inactivate a snapshot mountpoint vnode: remove its entry from the
 * parent snapdir's AVL tree (unless the vnode became active again),
 * drop the head filesystem hold, and let GFS dispose of the vnode.
 */
static int
zfsctl_snapshot_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	cred_t *cr = ap->a_td->td_ucred;
	struct vop_inactive_args iap;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep, *next;
	int locked;
	vnode_t *dvp;

	/* The vnode gained a new reference; don't tear it down. */
	if (vp->v_count > 0)
		goto end;

	VERIFY(gfs_dir_lookup(vp, "..", &dvp, cr, 0, NULL, NULL) == 0);
	sdp = dvp->v_data;
	VOP_UNLOCK(dvp, 0);

	/* sd_lock may already be held when called from the snapdir path. */
	if (!(locked = MUTEX_HELD(&sdp->sd_lock)))
		mutex_enter(&sdp->sd_lock);

	ASSERT(!vn_ismntpt(vp));

	/* Find and remove this vnode's entry in the snapshot list. */
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&sdp->sd_snaps, sep);

		if (sep->se_root == vp) {
			avl_remove(&sdp->sd_snaps, sep);
			kmem_free(sep->se_name, strlen(sep->se_name) + 1);
			kmem_free(sep, sizeof (zfs_snapentry_t));
			break;
		}
		sep = next;
	}
	/* Invariant: an inactive snapshot vnode is always in the tree. */
	ASSERT(sep != NULL);

	if (!locked)
		mutex_exit(&sdp->sd_lock);
	VN_RELE(dvp);
end:
	/* Matches the VFS_HOLD taken in zfsctl_snapshot_mknode(). */
	VFS_RELE(vp->v_vfsp);

	/*
	 * Dispose of the vnode for the snapshot mount point.
	 * This is safe to do because once this entry has been removed
	 * from the AVL tree, it can't be found again, so cannot become
	 * "active".  If we lookup the same name again we will end up
	 * creating a new vnode.
	 */
	iap.a_vp = vp;
	return (gfs_vop_inactive(&iap));
}
1127
/*
 * Take a hold on *vpp and follow the mount to the snapshot's root vnode,
 * locking it with 'lktype'.  Always paired with zfsctl_traverse_end(),
 * which releases the hold (and lock) regardless of the outcome here.
 */
static int
zfsctl_traverse_begin(vnode_t **vpp, int lktype)
{

	VN_HOLD(*vpp);
	/* Snapshot should be already mounted, but just in case. */
	if (vn_mountedvfs(*vpp) == NULL)
		return (ENOENT);
	return (traverse(vpp, lktype));
}
1138
1139static void
1140zfsctl_traverse_end(vnode_t *vp, int err)
1141{
1142
1143	if (err == 0)
1144		vput(vp);
1145	else
1146		VN_RELE(vp);
1147}
1148
/*
 * Forward getattr through the snapshot mount to the mounted
 * filesystem's root vnode.
 */
static int
zfsctl_snapshot_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	int err;

	err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY);
	if (err == 0)
		err = VOP_GETATTR(vp, ap->a_vap, ap->a_cred);
	zfsctl_traverse_end(vp, err);
	return (err);
}
1166
/*
 * Forward file-handle generation through the snapshot mount to the
 * mounted filesystem's root vnode.
 */
static int
zfsctl_snapshot_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	int err;

	err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY);
	if (err == 0)
		err = VOP_VPTOFH(vp, (void *)ap->a_fid);
	zfsctl_traverse_end(vp, err);
	return (err);
}
1183
1184static int
1185zfsctl_snapshot_lookup(ap)
1186	struct vop_lookup_args /* {
1187		struct vnode *a_dvp;
1188		struct vnode **a_vpp;
1189		struct componentname *a_cnp;
1190	} */ *ap;
1191{
1192	vnode_t *dvp = ap->a_dvp;
1193	vnode_t **vpp = ap->a_vpp;
1194	struct componentname *cnp = ap->a_cnp;
1195	cred_t *cr = ap->a_cnp->cn_cred;
1196	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
1197	int error;
1198
1199	if (cnp->cn_namelen != 2 || cnp->cn_nameptr[0] != '.' ||
1200	    cnp->cn_nameptr[1] != '.') {
1201		return (ENOENT);
1202	}
1203
1204	ASSERT(dvp->v_type == VDIR);
1205	ASSERT(zfsvfs->z_ctldir != NULL);
1206
1207	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", vpp,
1208	    NULL, 0, NULL, cr, NULL, NULL, NULL);
1209	if (error == 0)
1210		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
1211	return (error);
1212}
1213
/*
 * VOP_VPTOCNP for a snapshot mountpoint vnode: translate the vnode back
 * into its name and parent directory (the .zfs/snapshot GFS directory),
 * used by reverse path lookups such as vn_fullpath().
 *
 * Walks the snapshot directory's AVL tree of active snapshot entries
 * looking for the one whose root vnode is ap->a_vp, copies the snapshot
 * name into the tail of the caller's buffer (vptocnp fills the buffer
 * backwards from a_buflen), and returns the parent directory held with
 * vhold() as the VOP_VPTOCNP protocol requires.  Returns ENOENT if the
 * vnode is not a known snapshot root.
 */
static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
	zfsvfs_t *zfsvfs = ap->a_vp->v_vfsp->vfs_data;
	vnode_t *dvp, *vp;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	/* Look up .zfs/snapshot; on success this gives us a reference on dvp. */
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, kcred, NULL, NULL, NULL);
	if (error != 0)
		return (error);
	sdp = dvp->v_data;

	/* sd_lock protects the sd_snaps tree and the entry names. */
	mutex_enter(&sdp->sd_lock);
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		vp = sep->se_root;
		if (vp == ap->a_vp)
			break;
		sep = AVL_NEXT(&sdp->sd_snaps, sep);
	}
	if (sep == NULL) {
		mutex_exit(&sdp->sd_lock);
		error = ENOENT;
	} else {
		size_t len;

		/* Copy the name into the tail of the buffer (grows leftward). */
		len = strlen(sep->se_name);
		*ap->a_buflen -= len;
		bcopy(sep->se_name, ap->a_buf + *ap->a_buflen, len);
		mutex_exit(&sdp->sd_lock);
		/* vptocnp returns the parent with a hold, not a use reference. */
		vhold(dvp);
		*ap->a_vpp = dvp;
	}
	/* Drop the reference taken by zfsctl_root_lookup() in either case. */
	VN_RELE(dvp);

	return (error);
}
1255
/*
 * Vnode operations for the snapshot mountpoint vnodes (the GFS nodes at
 * .zfs/snapshot/<name>).  These VP's should never see the light of day:
 * they should always be covered by a mounted snapshot filesystem.  The
 * handlers here either forward across the mount (getattr, fid), resolve
 * ".." (lookup), or clean up state when the mount goes away (inactive,
 * reclaim, vptocnp).
 */
static struct vop_vector zfsctl_ops_snapshot = {
	.vop_default =	&default_vnodeops,
	.vop_inactive =	zfsctl_snapshot_inactive,
	.vop_lookup =	zfsctl_snapshot_lookup,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_getattr =	zfsctl_snapshot_getattr,
	.vop_fid =	zfsctl_snapshot_fid,
	.vop_vptocnp =	zfsctl_snapshot_vptocnp,
};
1269
/*
 * Given a snapshot's objset id, find the corresponding snapshot mounted
 * under .zfs/snapshot and return its zfsvfs_t through 'zfsvfsp'.
 *
 * Returns 0 on success, EINVAL if no mounted snapshot with that objset
 * id is found (or the mountpoint is not actually covered), or the error
 * from looking up the .zfs/snapshot directory.
 */
int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	vnode_t *dvp, *vp;
	zfsctl_snapdir_t *sdp;
	zfsctl_node_t *zcp;
	zfs_snapentry_t *sep;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	/* Look up .zfs/snapshot; on success this gives us a reference on dvp. */
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, kcred, NULL, NULL, NULL);
	if (error != 0)
		return (error);
	sdp = dvp->v_data;

	mutex_enter(&sdp->sd_lock);
	/* Scan the snapshot entries for the one whose GFS node has this id. */
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		vp = sep->se_root;
		zcp = vp->v_data;
		if (zcp->zc_id == objsetid)
			break;

		sep = AVL_NEXT(&sdp->sd_snaps, sep);
	}

	if (sep != NULL) {
		VN_HOLD(vp);
		/*
		 * Return the mounted root rather than the covered mount point.
		 * Takes the GFS vnode at .zfs/snapshot/<snapshot objsetid>
		 * and returns the ZFS vnode mounted on top of the GFS node.
		 * This ZFS vnode is the root of the vfs for objset 'objsetid'.
		 *
		 * NOTE(review): traverse() is called with sd_lock held;
		 * confirm it cannot block in a way that deadlocks with
		 * other sd_lock holders.
		 */
		error = traverse(&vp, LK_SHARED | LK_RETRY);
		if (error == 0) {
			/* vp unchanged means nothing was mounted on top. */
			if (vp == sep->se_root)
				error = EINVAL;
			else
				*zfsvfsp = VTOZ(vp)->z_zfsvfs;
		}
		mutex_exit(&sdp->sd_lock);
		/* On success traverse() returned vp locked: unlock + release. */
		if (error == 0)
			VN_URELE(vp);
		else
			VN_RELE(vp);
	} else {
		error = EINVAL;
		mutex_exit(&sdp->sd_lock);
	}

	/* Drop the reference taken by zfsctl_root_lookup(). */
	VN_RELE(dvp);

	return (error);
}
1327
/*
 * Unmount any snapshots for the given filesystem.  This is called from
 * zfs_umount() - if we have a ctldir, then go through and unmount all the
 * snapshots.
 *
 * 'fflags' are the unmount flags (e.g. forced unmount) passed through to
 * zfsctl_unmount_snap() for each mounted snapshot.  Returns 0 when every
 * mounted snapshot was unmounted, otherwise the first unmount error (the
 * scan stops at the first failure).
 */
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	vnode_t *dvp;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep, *next;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	/* Look up .zfs/snapshot; on success this gives us a reference on dvp. */
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, cr, NULL, NULL, NULL);
	if (error != 0)
		return (error);
	sdp = dvp->v_data;

	mutex_enter(&sdp->sd_lock);

	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		/* Grab the successor first; unmounting may free 'sep'. */
		next = AVL_NEXT(&sdp->sd_snaps, sep);

		/*
		 * If this snapshot is not mounted, then it must
		 * have just been unmounted by somebody else, and
		 * will be cleaned up by zfsctl_snapdir_inactive().
		 */
		if (vn_ismntpt(sep->se_root)) {
			error = zfsctl_unmount_snap(sep, fflags, cr);
			if (error) {
				avl_index_t where;

				/*
				 * Before reinserting snapshot to the tree,
				 * check if it was actually removed. For example
				 * when snapshot mount point is busy, we will
				 * have an error here, but there will be no need
				 * to reinsert snapshot.
				 */
				if (avl_find(&sdp->sd_snaps, sep, &where) == NULL)
					avl_insert(&sdp->sd_snaps, sep, where);
				break;
			}
		}
		sep = next;
	}

	mutex_exit(&sdp->sd_lock);
	/* Drop the reference taken by zfsctl_root_lookup(). */
	VN_RELE(dvp);

	return (error);
}
1385