/* zfs_ctldir.c — imported at revision 191990 */
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28/*
29 * ZFS control directory (a.k.a. ".zfs")
30 *
31 * This directory provides a common location for all ZFS meta-objects.
32 * Currently, this is only the 'snapshot' directory, but this may expand in the
33 * future.  The elements are built using the GFS primitives, as the hierarchy
34 * does not actually exist on disk.
35 *
36 * For 'snapshot', we don't want to have all snapshots always mounted, because
37 * this would take up a huge amount of space in /etc/mnttab.  We have three
38 * types of objects:
39 *
40 * 	ctldir ------> snapshotdir -------> snapshot
41 *                                             |
42 *                                             |
43 *                                             V
44 *                                         mounted fs
45 *
46 * The 'snapshot' node contains just enough information to lookup '..' and act
47 * as a mountpoint for the snapshot.  Whenever we lookup a specific snapshot, we
48 * perform an automount of the underlying filesystem and return the
49 * corresponding vnode.
50 *
51 * All mounts are handled automatically by the kernel, but unmounts are
52 * (currently) handled from user land.  The main reason is that there is no
53 * reliable way to auto-unmount the filesystem when it's "no longer in use".
54 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
55 * unmounts any snapshots within the snapshot directory.
56 *
57 * The '.zfs', '.zfs/snapshot', and all directories created under
58 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
59 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
60 *
61 * File systems mounted ontop of the GFS nodes '.zfs/snapshot/<snapname>'
62 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
63 * However, vnodes within these mounted on file systems have their v_vfsp
64 * fields set to the head filesystem to make NFS happy (see
65 * zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t
66 * so that it cannot be freed until all snapshots have been unmounted.
67 */
68
69#include <sys/zfs_context.h>
70#include <sys/zfs_ctldir.h>
71#include <sys/zfs_ioctl.h>
72#include <sys/zfs_vfsops.h>
73#include <sys/namei.h>
74#include <sys/gfs.h>
75#include <sys/stat.h>
76#include <sys/dmu.h>
77#include <sys/dsl_deleg.h>
78#include <sys/mount.h>
79#include <sys/sunddi.h>
80
81#include "zfs_namecheck.h"
82
/*
 * Common private data for the '.zfs' and '.zfs/snapshot' GFS directories.
 */
typedef struct zfsctl_node {
	gfs_dir_t	zc_gfs_private;	/* GFS directory state; must be first */
	uint64_t	zc_id;		/* inode number for this node */
	timestruc_t	zc_cmtime;	/* ctime and mtime, always the same */
} zfsctl_node_t;

/*
 * Private data for '.zfs/snapshot': extends zfsctl_node_t with a
 * mutex-protected AVL tree of the currently known snapshot entries.
 */
typedef struct zfsctl_snapdir {
	zfsctl_node_t	sd_node;	/* common node data; must be first */
	kmutex_t	sd_lock;	/* protects sd_snaps */
	avl_tree_t	sd_snaps;	/* zfs_snapentry_t's, keyed by se_name */
} zfsctl_snapdir_t;

/*
 * One entry per snapshot that currently has a vnode under '.zfs/snapshot'.
 */
typedef struct {
	char		*se_name;	/* snapshot name (kmem-allocated) */
	vnode_t		*se_root;	/* snapshot mount-point vnode */
	avl_node_t	se_node;	/* linkage in sd_snaps */
} zfs_snapentry_t;
100
101static int
102snapentry_compare(const void *a, const void *b)
103{
104	const zfs_snapentry_t *sa = a;
105	const zfs_snapentry_t *sb = b;
106	int ret = strcmp(sa->se_name, sb->se_name);
107
108	if (ret < 0)
109		return (-1);
110	else if (ret > 0)
111		return (1);
112	else
113		return (0);
114}
115
/* Forward declarations for the vnode operation vectors defined below. */
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;

static vnode_t *zfsctl_mknode_snapdir(vnode_t *);
static vnode_t *zfsctl_snapshot_mknode(vnode_t *, uint64_t objset);
static int zfsctl_unmount_snap(zfs_snapentry_t *, int, cred_t *);

/*
 * Root directory elements.  We have only a single static entry, 'snapshot'.
 */
static gfs_dirent_t zfsctl_root_entries[] = {
	{ "snapshot", zfsctl_mknode_snapdir, GFS_CACHE_VNODE },
	{ NULL }
};

/* include . and .. in the calculation */
#define	NROOT_ENTRIES	((sizeof (zfsctl_root_entries) / \
    sizeof (gfs_dirent_t)) + 1)
135
136
/*
 * Initialize the various GFS pieces we'll need to create and manipulate .zfs
 * directories.  This is called from the ZFS init routine, and initializes the
 * vnode ops vectors that we'll be using.
 */
void
zfsctl_init(void)
{
	/* Nothing to do here: the vop vectors are statically initialized. */
}
146
void
zfsctl_fini(void)
{
	/* Nothing to tear down; zfsctl_init() allocates nothing. */
}
151
/*
 * Return the inode number associated with the 'snapshot' directory.
 */
/* ARGSUSED */
static ino64_t
zfsctl_root_inode_cb(vnode_t *vp, int index)
{
	/* 'snapshot' is the only entry in zfsctl_root_entries. */
	ASSERT(index == 0);
	return (ZFSCTL_INO_SNAPDIR);
}
162
/*
 * Create the '.zfs' directory.  This directory is cached as part of the VFS
 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
 * therefore checks against a vfs_count of 2 instead of 1.  This reference
 * is removed when the ctldir is destroyed in the unmount.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	vnode_t *vp, *rvp;
	zfsctl_node_t *zcp;

	ASSERT(zfsvfs->z_ctldir == NULL);

	/* Build the GFS root node for '.zfs' from the static entry table. */
	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    &zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = ZFSCTL_INO_ROOT;

	/*
	 * Inherit ctime/mtime from the creation time of the filesystem's
	 * root znode, so '.zfs' appears as old as the filesystem itself.
	 */
	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	ZFS_TIME_DECODE(&zcp->zc_cmtime, VTOZ(rvp)->z_phys->zp_crtime);
	VN_URELE(rvp);

	/*
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	 */
	vp->v_vflag &= ~VV_ROOT;

	zfsvfs->z_ctldir = vp;

	VOP_UNLOCK(vp, 0);
}
198
/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is unmounted.
 * There might still be more references if we were force unmounted, but only
 * new zfs_inactive() calls can occur and they don't reference .zfs
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	/* Drop the cached reference on the '.zfs' vnode and forget it. */
	VN_RELE(zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}
210
211/*
212 * Given a root znode, retrieve the associated .zfs directory.
213 * Add a hold to the vnode and return it.
214 */
215vnode_t *
216zfsctl_root(znode_t *zp)
217{
218	ASSERT(zfs_has_ctldir(zp));
219	VN_HOLD(zp->z_zfsvfs->z_ctldir);
220	return (zp->z_zfsvfs->z_ctldir);
221}
222
223/*
224 * Common open routine.  Disallow any write access.
225 */
226/* ARGSUSED */
227static int
228zfsctl_common_open(struct vop_open_args *ap)
229{
230	int flags = ap->a_mode;
231
232	if (flags & FWRITE)
233		return (EACCES);
234
235	return (0);
236}
237
/*
 * Common close routine.  Nothing to do here.
 */
/* ARGSUSED */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
	/* Opens acquire no per-open state, so there is nothing to release. */
	return (0);
}
247
248/*
249 * Common access routine.  Disallow writes.
250 */
251/* ARGSUSED */
252static int
253zfsctl_common_access(ap)
254	struct vop_access_args /* {
255		struct vnode *a_vp;
256		int  a_accmode;
257		struct ucred *a_cred;
258		struct thread *a_td;
259	} */ *ap;
260{
261	int mode = ap->a_accmode;
262
263	if (mode & VWRITE)
264		return (EACCES);
265
266	return (0);
267}
268
/*
 * Common getattr function.  Fill in the attributes shared by all control
 * directory nodes; callers set va_nodeid, va_nlink and va_size themselves.
 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	zfsctl_node_t	*zcp = vp->v_data;
	timestruc_t	now;

	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rdev = 0;
	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_seq = 0;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	/* Read-only directory: r-x for everyone, no write bits. */
	vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
	    S_IROTH | S_IXOTH;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&now);
	vap->va_atime = now;
	/* mtime/ctime/birthtime were inherited from the fs root's crtime. */
	vap->va_mtime = vap->va_ctime = vap->va_birthtime = zcp->zc_cmtime;
	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_flags = 0;
}
301
/*
 * Common VOP_FID: encode a short-form ZFS file handle for a control
 * directory node.  The object id is the node's zc_id; the generation
 * number is always zero.
 */
/*ARGSUSED*/
static int
zfsctl_common_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t		*vp = ap->a_vp;
	fid_t		*fidp = (void *)ap->a_fid;
	zfsvfs_t	*zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t	*zcp = vp->v_data;
	uint64_t	object = zcp->zc_id;
	zfid_short_t	*zfid;
	int		i;

	ZFS_ENTER(zfsvfs);

	fidp->fid_len = SHORT_FID_LEN;

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = SHORT_FID_LEN;

	/* Store the object id byte-by-byte, least-significant byte first. */
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs znodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	ZFS_EXIT(zfsvfs);
	return (0);
}
336
/*
 * Common VOP_RECLAIM: release the vnode's VM object and detach the GFS
 * private data before the vnode is recycled.
 */
static int
zfsctl_common_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);
	/* Detach the private data under the vnode interlock. */
	VI_LOCK(vp);
	vp->v_data = NULL;
	VI_UNLOCK(vp);
	return (0);
}
355
356/*
357 * .zfs inode namespace
358 *
359 * We need to generate unique inode numbers for all files and directories
360 * within the .zfs pseudo-filesystem.  We use the following scheme:
361 *
362 * 	ENTRY			ZFSCTL_INODE
363 * 	.zfs			1
364 * 	.zfs/snapshot		2
365 * 	.zfs/snapshot/<snap>	objectid(snap)
366 */
367
/* A snapshot's inode number is simply its objset id. */
#define	ZFSCTL_INO_SNAP(id)	(id)
369
/*
 * Get root directory attributes.
 */
/* ARGSUSED */
static int
zfsctl_root_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;

	ZFS_ENTER(zfsvfs);
	/* Fixed inode; nlink/size count '.', '..' and the 'snapshot' entry. */
	vap->va_nodeid = ZFSCTL_INO_ROOT;
	vap->va_nlink = vap->va_size = NROOT_ENTRIES;

	zfsctl_common_getattr(vp, vap);
	ZFS_EXIT(zfsvfs);

	return (0);
}
396
/*
 * Special case the handling of "..".
 *
 * Lookup in the '.zfs' directory: ".." resolves to the root of the host
 * filesystem (returned held and unlocked); everything else goes through
 * the generic GFS lookup over the static entry table.
 */
/* ARGSUSED */
int
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ZFS_ENTER(zfsvfs);

	if (strcmp(nm, "..") == 0) {
		err = VFS_ROOT(dvp->v_vfsp, LK_EXCLUSIVE, vpp);
		if (err == 0)
			VOP_UNLOCK(*vpp, 0);
	} else {
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
	}

	ZFS_EXIT(zfsvfs);

	return (err);
}
430
/*
 * Special case the handling of "..".
 *
 * FreeBSD VOP_LOOKUP wrapper around zfsctl_root_lookup(): copies the
 * component name into a NUL-terminated buffer, refuses creation and
 * rename of entries in '.zfs', and locks the result for the caller.
 */
/* ARGSUSED */
int
zfsctl_freebsd_root_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	cred_t *cr = ap->a_cnp->cn_cred;
	int flags = ap->a_cnp->cn_flags;
	int nameiop = ap->a_cnp->cn_nameiop;
	char nm[NAME_MAX + 1];
	int err;

	/* '.zfs' is immutable: no entries may be created or renamed. */
	if ((flags & ISLASTCN) && (nameiop == RENAME || nameiop == CREATE))
		return (EOPNOTSUPP);

	ASSERT(ap->a_cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);

	err = zfsctl_root_lookup(dvp, nm, vpp, NULL, 0, NULL, cr, NULL, NULL, NULL);
	/* Lock the result unless the lookup returned dvp itself ("." case). */
	if (err == 0 && (nm[0] != '.' || nm[1] != '\0'))
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);

	return (err);
}
463
/* Vnode operations for the '.zfs' directory itself. */
static struct vop_vector zfsctl_ops_root = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_root_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_readdir =	gfs_vop_readdir,
	.vop_lookup =	zfsctl_freebsd_root_lookup,
	.vop_inactive =	gfs_vop_inactive,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_fid =	zfsctl_common_fid,
};
477
/*
 * Build the full dataset name "<fs>@<name>" for snapshot 'name' of the
 * filesystem that 'vp' belongs to, into 'zname' of size 'len'.  Returns
 * EILSEQ for an invalid snapshot name and ENAMETOOLONG if the result
 * would not fit.
 */
static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	if (snapshot_namecheck(name, NULL, NULL) != 0)
		return (EILSEQ);
	dmu_objset_name(os, zname);
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (ENAMETOOLONG);
	/* The length check above makes these unbounded strcat()s safe. */
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}
492
/*
 * Unmount the snapshot filesystem mounted on the given entry's vnode.
 * 'cr' is accepted for interface symmetry with the upstream code but is
 * unused on FreeBSD.
 */
static int
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
{
	vnode_t *svp = sep->se_root;
	int error;

	ASSERT(vn_ismntpt(svp));

	/* this will be dropped by dounmount() */
	if ((error = vn_vfswlock(svp)) != 0)
		return (error);

	return (dounmount(vn_mountedvfs(svp), fflags, curthread));
}
507
#if 0
/*
 * NOTE(review): dead code -- snapshot rename is not wired up on FreeBSD
 * (zfsctl_ops_snapdir has no rename entry).  Kept for reference against
 * the OpenSolaris implementation.
 */
static void
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
{
	avl_index_t where;
	vfs_t *vfsp;
	refstr_t *pathref;
	char newpath[MAXNAMELEN];
	char *tail;

	ASSERT(MUTEX_HELD(&sdp->sd_lock));
	ASSERT(sep != NULL);

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	vfs_lock_wait(vfsp);

	/*
	 * Change the name in the AVL tree.
	 */
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	/*
	 * Change the current mountpoint info:
	 * 	- update the tail of the mntpoint path
	 *	- update the tail of the resource path
	 */
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath);

	vfs_unlock(vfsp);
}
#endif
562
#if 0
/*
 * NOTE(review): dead code -- snapshot rename is not wired up on FreeBSD
 * (zfsctl_ops_snapdir has no rename entry).  Kept for reference.
 */
/*ARGSUSED*/
static int
zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = sdvp->v_data;
	zfs_snapentry_t search, *sep;
	zfsvfs_t *zfsvfs;
	avl_index_t where;
	char from[MAXNAMELEN], to[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = sdvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			snm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(sdvp, snm, MAXNAMELEN, from);
	if (!err)
		err = zfsctl_snapshot_zname(tdvp, tnm, MAXNAMELEN, to);
	if (!err)
		err = zfs_secpolicy_rename_perms(from, to, cr);
	if (err)
		return (err);

	/*
	 * Cannot move snapshots out of the snapdir.
	 */
	if (sdvp != tdvp)
		return (EINVAL);

	if (strcmp(snm, tnm) == 0)
		return (0);

	mutex_enter(&sdp->sd_lock);

	search.se_name = (char *)snm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) {
		mutex_exit(&sdp->sd_lock);
		return (ENOENT);
	}

	err = dmu_objset_rename(from, to, B_FALSE);
	if (err == 0)
		zfsctl_rename_snap(sdp, sep, tnm);

	mutex_exit(&sdp->sd_lock);

	return (err);
}
#endif
627
#if 0
/*
 * NOTE(review): dead code -- snapshot removal via rmdir is not wired up
 * on FreeBSD (zfsctl_ops_snapdir has no rmdir entry).  Kept for reference.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	zfsvfs_t *zfsvfs;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = dvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {

		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			name = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname);
	if (!err)
		err = zfs_secpolicy_destroy_perms(snapname, cr);
	if (err)
		return (err);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		if (err)
			avl_add(&sdp->sd_snaps, sep);
		else
			err = dmu_objset_destroy(snapname);
	} else {
		err = ENOENT;
	}

	mutex_exit(&sdp->sd_lock);

	return (err);
}
#endif
685
686/*
687 * This creates a snapshot under '.zfs/snapshot'.
688 */
689/* ARGSUSED */
690static int
691zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t  **vpp,
692    cred_t *cr, caller_context_t *cc, int flags, vsecattr_t *vsecp)
693{
694	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
695	char name[MAXNAMELEN];
696	int err;
697	static enum symfollow follow = NO_FOLLOW;
698	static enum uio_seg seg = UIO_SYSSPACE;
699
700	if (snapshot_namecheck(dirname, NULL, NULL) != 0)
701		return (EILSEQ);
702
703	dmu_objset_name(zfsvfs->z_os, name);
704
705	*vpp = NULL;
706
707	err = zfs_secpolicy_snapshot_perms(name, cr);
708	if (err)
709		return (err);
710
711	if (err == 0) {
712		err = dmu_objset_snapshot(name, dirname, B_FALSE);
713		if (err)
714			return (err);
715		err = lookupnameat(dirname, seg, follow, NULL, vpp, dvp);
716	}
717
718	return (err);
719}
720
/*
 * FreeBSD VOP_MKDIR entry point: creating a directory in '.zfs/snapshot'
 * takes a snapshot named after the new directory.
 */
static int
zfsctl_freebsd_snapdir_mkdir(ap)
        struct vop_mkdir_args /* {
                struct vnode *a_dvp;
                struct vnode **a_vpp;
                struct componentname *a_cnp;
                struct vattr *a_vap;
        } */ *ap;
{

	ASSERT(ap->a_cnp->cn_flags & SAVENAME);

	return (zfsctl_snapdir_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, NULL,
	    ap->a_vpp, ap->a_cnp->cn_cred, NULL, 0, NULL));
}
736
/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exist, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 */
/* ARGSUSED */
int
zfsctl_snapdir_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char nm[NAME_MAX + 1];
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;
	int flags = 0;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);
	/* Copy the component name into a NUL-terminated buffer. */
	ASSERT(ap->a_cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);

	ASSERT(dvp->v_type == VDIR);

	/* "." and ".." are handled by the generic GFS helper. */
	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0)
		return (0);

	*vpp = NULL;

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (ENOENT);

	ZFS_ENTER(zfsvfs);

	/* NOTE(review): 'flags' is always 0 here, so this branch is dead. */
	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			strlcpy(nm, real, sizeof(nm));
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
#if 0
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
#endif
	}

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		/* Already known: reuse the existing mount-point vnode. */
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		if ((*vpp)->v_mountedhere == NULL) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * handle "ls *" or "?" in a graceful manner,
		 * forcing EILSEQ to ENOENT.
		 * Since shell ultimately passes "*" or "?" as name to lookup
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_open(snapname, DMU_OST_ZFS,
	    DS_MODE_USER | DS_MODE_READONLY, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		/* Translate errors and add SAVENAME when needed. */
		if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
			err = EJUSTRETURN;
			cnp->cn_flags |= SAVENAME;
		} else {
			err = ENOENT;
		}
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/* Create a new entry and a GFS mount-point vnode for the snapshot. */
	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	VN_HOLD(*vpp);
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_close(snap);
domount:
	/* Mount the snapshot dataset on top of the GFS vnode. */
	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len, "%s/.zfs/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, nm);
	err = domount(curthread, *vpp, "zfs", mountpoint, snapname, 0);
	kmem_free(mountpoint, mountpoint_len);
	/* FreeBSD: This line was moved from below to avoid a lock recursion. */
	if (err == 0)
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	mutex_exit(&sdp->sd_lock);
	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
	ZFS_EXIT(zfsvfs);
	return (err);
}
897
/*
 * GFS readdir callback for '.zfs/snapshot': emit the snapshot entry at
 * cookie *offp, filling an extended dirent when V_RDDIR_ENTFLAGS is set
 * (to carry case-conflict info) or a plain dirent64 otherwise.  Stores
 * the next cookie in *nextp and sets *eofp when the list is exhausted.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_readdir_cb(vnode_t *vp, void *dp, int *eofp,
    offset_t *offp, offset_t *nextp, void *data, int flags)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	char snapname[MAXNAMELEN];
	uint64_t id, cookie;
	boolean_t case_conflict;
	int error;

	ZFS_ENTER(zfsvfs);

	cookie = *offp;
	error = dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN, snapname, &id,
	    &cookie, &case_conflict);
	if (error) {
		ZFS_EXIT(zfsvfs);
		/* ENOENT from the DMU means no more snapshots: report EOF. */
		if (error == ENOENT) {
			*eofp = 1;
			return (0);
		}
		return (error);
	}

	if (flags & V_RDDIR_ENTFLAGS) {
		edirent_t *eodp = dp;

		(void) strcpy(eodp->ed_name, snapname);
		eodp->ed_ino = ZFSCTL_INO_SNAP(id);
		eodp->ed_eflags = case_conflict ? ED_CASE_CONFLICT : 0;
	} else {
		struct dirent64 *odp = dp;

		(void) strcpy(odp->d_name, snapname);
		odp->d_ino = ZFSCTL_INO_SNAP(id);
	}
	*nextp = cookie;

	ZFS_EXIT(zfsvfs);

	return (0);
}
941
/*
 * pvp is the '.zfs' directory (zfsctl_node_t).
 * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t).
 *
 * This function is the callback to create a GFS vnode for '.zfs/snapshot'
 * when a lookup is performed on .zfs for "snapshot".
 */
vnode_t *
zfsctl_mknode_snapdir(vnode_t *pvp)
{
	vnode_t *vp;
	zfsctl_snapdir_t *sdp;

	vp = gfs_dir_create(sizeof (zfsctl_snapdir_t), pvp, pvp->v_vfsp,
	    &zfsctl_ops_snapdir, NULL, NULL, MAXNAMELEN,
	    zfsctl_snapdir_readdir_cb, NULL);
	sdp = vp->v_data;
	sdp->sd_node.zc_id = ZFSCTL_INO_SNAPDIR;
	/* Inherit the parent's ctime/mtime (the fs root's creation time). */
	sdp->sd_node.zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	/* Set up the snapshot-entry tree protected by sd_lock. */
	mutex_init(&sdp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&sdp->sd_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	VOP_UNLOCK(vp, 0);
	return (vp);
}
967
/* Get '.zfs/snapshot' directory attributes. */
/* ARGSUSED */
static int
zfsctl_snapdir_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_snapdir_t *sdp = vp->v_data;

	ZFS_ENTER(zfsvfs);
	zfsctl_common_getattr(vp, vap);
	vap->va_nodeid = gfs_file_inode(vp);
	/* One entry per known snapshot, plus '.' and '..'. */
	vap->va_nlink = vap->va_size = avl_numnodes(&sdp->sd_snaps) + 2;
	ZFS_EXIT(zfsvfs);

	return (0);
}
991
/*
 * VOP_INACTIVE for '.zfs/snapshot': let GFS decide whether the vnode can
 * be torn down; when it hands back the private data, free the snapdir
 * state (lock and snapshot-entry tree) as well.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	zfsctl_snapdir_t *sdp = vp->v_data;
	void *private;

	private = gfs_dir_inactive(vp);
	if (private != NULL) {
		/* All snapshots must already be gone before teardown. */
		ASSERT(avl_numnodes(&sdp->sd_snaps) == 0);
		mutex_destroy(&sdp->sd_lock);
		avl_destroy(&sdp->sd_snaps);
		kmem_free(private, sizeof (zfsctl_snapdir_t));
	}
	return (0);
}
1013
/* Vnode operations for the '.zfs/snapshot' directory. */
static struct vop_vector zfsctl_ops_snapdir = {
	.vop_default =	&default_vnodeops,
	.vop_open =	zfsctl_common_open,
	.vop_close =	zfsctl_common_close,
	.vop_ioctl =	VOP_EINVAL,
	.vop_getattr =	zfsctl_snapdir_getattr,
	.vop_access =	zfsctl_common_access,
	.vop_mkdir =	zfsctl_freebsd_snapdir_mkdir,
	.vop_readdir =	gfs_vop_readdir,
	.vop_lookup =	zfsctl_snapdir_lookup,
	.vop_inactive =	zfsctl_snapdir_inactive,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_fid =	zfsctl_common_fid,
};
1028
/*
 * pvp is the GFS vnode '.zfs/snapshot'.
 *
 * This creates a GFS node under '.zfs/snapshot' representing each
 * snapshot.  This newly created GFS node is what we mount snapshot
 * vfs_t's ontop of.
 */
static vnode_t *
zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset)
{
	vnode_t *vp;
	zfsctl_node_t *zcp;

	vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp, pvp->v_vfsp,
	    &zfsctl_ops_snapshot, NULL, NULL, MAXNAMELEN, NULL, NULL);
	VN_HOLD(vp);
	zcp = vp->v_data;
	/* The snapshot's objset id doubles as its inode number. */
	zcp->zc_id = objset;
	/* Keep the head filesystem's vfs_t alive while this node exists. */
	VFS_HOLD(vp->v_vfsp);
	VOP_UNLOCK(vp, 0);

	return (vp);
}
1052
1053static int
1054zfsctl_snapshot_inactive(ap)
1055	struct vop_inactive_args /* {
1056		struct vnode *a_vp;
1057		struct thread *a_td;
1058	} */ *ap;
1059{
1060	vnode_t *vp = ap->a_vp;
1061	cred_t *cr = ap->a_td->td_ucred;
1062	struct vop_inactive_args iap;
1063	zfsctl_snapdir_t *sdp;
1064	zfs_snapentry_t *sep, *next;
1065	int locked;
1066	vnode_t *dvp;
1067
1068	VERIFY(gfs_dir_lookup(vp, "..", &dvp, cr, 0, NULL, NULL) == 0);
1069	sdp = dvp->v_data;
1070	VOP_UNLOCK(dvp, 0);
1071
1072	if (!(locked = MUTEX_HELD(&sdp->sd_lock)))
1073		mutex_enter(&sdp->sd_lock);
1074
1075	if (vp->v_count > 1) {
1076		if (!locked)
1077			mutex_exit(&sdp->sd_lock);
1078		return (0);
1079	}
1080	ASSERT(!vn_ismntpt(vp));
1081
1082	sep = avl_first(&sdp->sd_snaps);
1083	while (sep != NULL) {
1084		next = AVL_NEXT(&sdp->sd_snaps, sep);
1085
1086		if (sep->se_root == vp) {
1087			avl_remove(&sdp->sd_snaps, sep);
1088			kmem_free(sep->se_name, strlen(sep->se_name) + 1);
1089			kmem_free(sep, sizeof (zfs_snapentry_t));
1090			break;
1091		}
1092		sep = next;
1093	}
1094	ASSERT(sep != NULL);
1095
1096	if (!locked)
1097		mutex_exit(&sdp->sd_lock);
1098	VN_RELE(dvp);
1099	VFS_RELE(vp->v_vfsp);
1100
1101	/*
1102	 * Dispose of the vnode for the snapshot mount point.
1103	 * This is safe to do because once this entry has been removed
1104	 * from the AVL tree, it can't be found again, so cannot become
1105	 * "active".  If we lookup the same name again we will end up
1106	 * creating a new vnode.
1107	 */
1108	iap.a_vp = vp;
1109	return (gfs_vop_inactive(&iap));
1110}
1111
/*
 * Take a hold on *vpp and cross into the filesystem mounted on it,
 * replacing *vpp with the mounted filesystem's root vnode (locked per
 * 'lktype').  Returns ENOENT if nothing is mounted there; callers are
 * expected to call zfsctl_traverse_end() in either case.
 */
static int
zfsctl_traverse_begin(vnode_t **vpp, int lktype)
{

	VN_HOLD(*vpp);
	/* Snapshot should be already mounted, but just in case. */
	if (vn_mountedvfs(*vpp) == NULL)
		return (ENOENT);
	return (traverse(vpp, lktype));
}
1122
1123static void
1124zfsctl_traverse_end(vnode_t *vp, int err)
1125{
1126
1127	if (err == 0)
1128		vput(vp);
1129	else
1130		VN_RELE(vp);
1131}
1132
/*
 * Forward VOP_GETATTR to the root of the filesystem mounted on this
 * snapshot mount-point vnode.
 */
static int
zfsctl_snapshot_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	int err;

	err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY);
	if (err == 0)
		err = VOP_GETATTR(vp, ap->a_vap, ap->a_cred);
	zfsctl_traverse_end(vp, err);
	return (err);
}
1150
/*
 * Forward VOP_FID to the root of the filesystem mounted on this
 * snapshot mount-point vnode.
 */
static int
zfsctl_snapshot_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	int err;

	err = zfsctl_traverse_begin(&vp, LK_SHARED | LK_RETRY);
	if (err == 0)
		err = VOP_VPTOFH(vp, (void *)ap->a_fid);
	zfsctl_traverse_end(vp, err);
	return (err);
}
1167
/*
 * Lookup on a snapshot mount-point vnode.  These vnodes are normally
 * covered by a mounted snapshot, so the only name we ever service here
 * is "..", which resolves to the '.zfs/snapshot' directory.
 */
static int
zfsctl_snapshot_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	cred_t *cr = ap->a_cnp->cn_cred;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int error;

	/* Anything other than ".." does not exist at this level. */
	if (cnp->cn_namelen != 2 || cnp->cn_nameptr[0] != '.' ||
	    cnp->cn_nameptr[1] != '.') {
		return (ENOENT);
	}

	ASSERT(dvp->v_type == VDIR);
	ASSERT(zfsvfs->z_ctldir != NULL);

	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", vpp,
	    NULL, 0, NULL, cr, NULL, NULL, NULL);
	if (error == 0)
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	return (error);
}
1197
/*
 * These VP's should never see the light of day.  They should always
 * be covered.
 */
static struct vop_vector zfsctl_ops_snapshot = {
	.vop_default =	&default_vnodeops,
	.vop_inactive =	zfsctl_snapshot_inactive,
	.vop_lookup =	zfsctl_snapshot_lookup,
	.vop_reclaim =	zfsctl_common_reclaim,
	.vop_getattr =	zfsctl_snapshot_getattr,
	.vop_fid =	zfsctl_snapshot_fid,
};
1210
/*
 * Map a snapshot objset id back to the zfsvfs_t of the snapshot mounted
 * under '.zfs/snapshot'.  Scans the snapdir's entry tree for the node
 * carrying 'objsetid' and traverses into the filesystem mounted on it.
 * Returns EINVAL if no matching mounted snapshot is found.
 */
int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	vnode_t *dvp, *vp;
	zfsctl_snapdir_t *sdp;
	zfsctl_node_t *zcp;
	zfs_snapentry_t *sep;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, kcred, NULL, NULL, NULL);
	if (error != 0)
		return (error);
	sdp = dvp->v_data;

	mutex_enter(&sdp->sd_lock);
	/* Linear scan of known snapshots for the matching objset id. */
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		vp = sep->se_root;
		zcp = vp->v_data;
		if (zcp->zc_id == objsetid)
			break;

		sep = AVL_NEXT(&sdp->sd_snaps, sep);
	}

	if (sep != NULL) {
		VN_HOLD(vp);
		/*
		 * Return the mounted root rather than the covered mount point.
		 * Takes the GFS vnode at .zfs/snapshot/<snapshot objsetid>
		 * and returns the ZFS vnode mounted on top of the GFS node.
		 * This ZFS vnode is the root of the vfs for objset 'objsetid'.
		 */
		error = traverse(&vp, LK_SHARED | LK_RETRY);
		if (error == 0) {
			/* Unchanged vp means nothing was mounted after all. */
			if (vp == sep->se_root)
				error = EINVAL;
			else
				*zfsvfsp = VTOZ(vp)->z_zfsvfs;
		}
		mutex_exit(&sdp->sd_lock);
		if (error == 0)
			VN_URELE(vp);
		else
			VN_RELE(vp);
	} else {
		error = EINVAL;
		mutex_exit(&sdp->sd_lock);
	}

	VN_RELE(dvp);

	return (error);
}
1268
1269/*
1270 * Unmount any snapshots for the given filesystem.  This is called from
1271 * zfs_umount() - if we have a ctldir, then go through and unmount all the
1272 * snapshots.
1273 */
1274int
1275zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
1276{
1277	zfsvfs_t *zfsvfs = vfsp->vfs_data;
1278	vnode_t *dvp;
1279	zfsctl_snapdir_t *sdp;
1280	zfs_snapentry_t *sep, *next;
1281	int error;
1282
1283	ASSERT(zfsvfs->z_ctldir != NULL);
1284	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
1285	    NULL, 0, NULL, cr, NULL, NULL, NULL);
1286	if (error != 0)
1287		return (error);
1288	sdp = dvp->v_data;
1289
1290	mutex_enter(&sdp->sd_lock);
1291
1292	sep = avl_first(&sdp->sd_snaps);
1293	while (sep != NULL) {
1294		next = AVL_NEXT(&sdp->sd_snaps, sep);
1295
1296		/*
1297		 * If this snapshot is not mounted, then it must
1298		 * have just been unmounted by somebody else, and
1299		 * will be cleaned up by zfsctl_snapdir_inactive().
1300		 */
1301		if (vn_ismntpt(sep->se_root)) {
1302			error = zfsctl_unmount_snap(sep, fflags, cr);
1303			if (error) {
1304				avl_add(&sdp->sd_snaps, sep);
1305				break;
1306			}
1307		}
1308		sep = next;
1309	}
1310
1311	mutex_exit(&sdp->sd_lock);
1312	VN_RELE(dvp);
1313
1314	return (error);
1315}
1316