/*	$NetBSD: puffs_vfsops.c,v 1.100.8.2 2012/08/12 13:13:21 martin Exp $	*/

/*
 * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vfsops.c,v 1.100.8.2 2012/08/12 13:13:21 martin Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/extattr.h>
#include <sys/queue.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/module.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>

#include <dev/putter/putter_sys.h>

#include <miscfs/genfs/genfs.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <lib/libkern/libkern.h>

#include <nfs/nfsproto.h> /* for fh sizes */

MODULE(MODULE_CLASS_VFS, puffs, "putter");

VFS_PROTOS(puffs_vfsop);

#ifndef PUFFS_PNODEBUCKETS
#define PUFFS_PNODEBUCKETS 256
#endif
#ifndef PUFFS_MAXPNODEBUCKETS
#define PUFFS_MAXPNODEBUCKETS 8192
#endif
int puffs_pnodebuckets_default = PUFFS_PNODEBUCKETS;
int puffs_maxpnodebuckets = PUFFS_MAXPNODEBUCKETS;

#define BUCKETALLOC(a) (sizeof(struct puffs_pnode_hashlist *) * (a))

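/*
 * Callbacks registered with putter_attach() below; the putter
 * transport uses them to fetch outgoing messages, dispatch incoming
 * ones and tell us when the server has closed its descriptor.
 */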
static struct putter_ops puffs_putter = {
	.pop_getout	= puffs_msgif_getout,
	.pop_releaseout	= puffs_msgif_releaseout,
	.pop_waitcount	= puffs_msgif_waitcount,
	.pop_dispatch	= puffs_msgif_dispatch,
	.pop_close	= puffs_msgif_close,
};

/*
 * Try to ensure data structures used by the puffs protocol
 * do not unexpectedly change.
 */
#ifdef __i386__
CTASSERT(sizeof(struct puffs_kargs) == 3928);
CTASSERT(sizeof(struct vattr) == 136);
CTASSERT(sizeof(struct puffs_req) == 44);
#endif

int
puffs_vfsop_mount(struct mount *mp, const char *path, void *data,
	size_t *data_len)
{
	struct puffs_mount *pmp = NULL;
	struct puffs_kargs *args;
	char fstype[_VFS_NAMELEN];
	char *p;
	int error = 0, i;
	pid_t mntpid = curlwp->l_proc->p_pid;

	if (data == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		pmp = MPTOPUFFSMP(mp);
		*(struct puffs_kargs *)data = pmp->pmp_args;
		*data_len = sizeof *args;
		return 0;
	}

	/* update is not supported currently */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	args = (struct puffs_kargs *)data;

	if (args->pa_vers != PUFFSVERSION) {
		printf("puffs_mount: development version mismatch: "
		    "kernel %d, lib %d\n", PUFFSVERSION, args->pa_vers);
		error = EINVAL;
		goto out;
	}

	if ((args->pa_flags & ~PUFFS_KFLAG_MASK) != 0) {
		printf("puffs_mount: invalid KFLAGs 0x%x\n", args->pa_flags);
		error = EINVAL;
		goto out;
	}
	if ((args->pa_fhflags & ~PUFFS_FHFLAG_MASK) != 0) {
		printf("puffs_mount: invalid FHFLAGs 0x%x\n", args->pa_fhflags);
		error = EINVAL;
		goto out;
	}

	for (i = 0; i < __arraycount(args->pa_spare); i++) {
		if (args->pa_spare[i] != 0) {
			printf("puffs_mount: pa_spare[%d] = 0x%x\n",
			    i, args->pa_spare[i]);
			error = EINVAL;
			goto out;
		}
	}

	/* use dummy value for passthrough */
	if (args->pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		args->pa_fhsize = sizeof(struct fid);

	/* sanitize file handle length */
	if (PUFFS_TOFHSIZE(args->pa_fhsize) > FHANDLE_SIZE_MAX) {
		printf("puffs_mount: handle size %zu too large\n",
		    args->pa_fhsize);
		error = EINVAL;
		goto out;
	}
	/* sanity check file handle max sizes */
	if (args->pa_fhsize && args->pa_fhflags & PUFFS_FHFLAG_PROTOMASK) {
		size_t kfhsize = PUFFS_TOFHSIZE(args->pa_fhsize);

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV2) {
			if (NFSX_FHTOOBIG_P(kfhsize, 0)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv2 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V2FH));
				error = EINVAL;
				goto out;
			}
		}

		if (args->pa_fhflags & PUFFS_FHFLAG_NFSV3) {
			if (NFSX_FHTOOBIG_P(kfhsize, 1)) {
				printf("puffs_mount: fhsize larger than "
				    "NFSv3 max %d\n",
				    PUFFS_FROMFHSIZE(NFSX_V3FHMAX));
				error = EINVAL;
				goto out;
			}
		}
	}

	/* don't allow non-printing characters (like my sweet umlauts.. snif) */
	args->pa_typename[sizeof(args->pa_typename)-1] = '\0';
	for (p = args->pa_typename; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	args->pa_mntfromname[sizeof(args->pa_mntfromname)-1] = '\0';
	for (p = args->pa_mntfromname; *p; p++)
		if (*p < ' ' || *p > '~')
			*p = '.';

	/* build real name */
	(void)strlcpy(fstype, PUFFS_TYPEPREFIX, sizeof(fstype));
	(void)strlcat(fstype, args->pa_typename, sizeof(fstype));

	/* inform user server if it got the max request size it wanted */
	if (args->pa_maxmsglen == 0 || args->pa_maxmsglen > PUFFS_MSG_MAXSIZE)
		args->pa_maxmsglen = PUFFS_MSG_MAXSIZE;
	else if (args->pa_maxmsglen < 2*PUFFS_MSGSTRUCT_MAX)
		args->pa_maxmsglen = 2*PUFFS_MSGSTRUCT_MAX;

	(void)strlcpy(args->pa_typename, fstype, sizeof(args->pa_typename));

	if (args->pa_nhashbuckets == 0)
		args->pa_nhashbuckets = puffs_pnodebuckets_default;
	if (args->pa_nhashbuckets < 1)
		args->pa_nhashbuckets = 1;
	if (args->pa_nhashbuckets > PUFFS_MAXPNODEBUCKETS) {
		args->pa_nhashbuckets = puffs_maxpnodebuckets;
		printf("puffs_mount: using %d hash buckets. "
		    "adjust puffs_maxpnodebuckets for more\n",
		    puffs_maxpnodebuckets);
	}

	error = set_statvfs_info(path, UIO_USERSPACE, args->pa_mntfromname,
	    UIO_SYSSPACE, fstype, mp, curlwp);
	if (error)
		goto out;
	mp->mnt_stat.f_iosize = DEV_BSIZE;
	mp->mnt_stat.f_namemax = args->pa_svfsb.f_namemax;

	/*
	 * We can't handle the VFS_STATVFS() mount_domount() does
	 * after VFS_MOUNT() because we'd deadlock, so handle it
	 * here already.
	 */
	copy_statvfs_info(&args->pa_svfsb, mp);
	(void)memcpy(&mp->mnt_stat, &args->pa_svfsb, sizeof(mp->mnt_stat));

	KASSERT(curlwp != uvm.pagedaemon_lwp);
	pmp = kmem_zalloc(sizeof(struct puffs_mount), KM_SLEEP);

	mp->mnt_fs_bshift = DEV_BSHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag &= ~MNT_LOCAL; /* we don't really know, so ... */
	mp->mnt_data = pmp;

#if 0
	/*
	 * XXX: puffs code is MPSAFE.  However, VFS really isn't.
	 * Currently, there is nothing which protects an inode from
	 * reclaim while there are threads inside the file system.
	 * This means that in the event of a server crash, an MPSAFE
	 * mount is likely to end up accessing invalid memory.  For the
	 * non-mpsafe case, the kernel lock, general structure of
	 * puffs and pmp_refcount protect the threads during escape.
	 *
	 * Fixing this will require:
	 *  a) fixing vfs
	 * OR
	 *  b) adding a small sleep to puffs_msgif_close() between
	 *     userdead() and dounmount().
	 *     (well, this isn't really a fix, but would solve
	 *     99.999% of the race conditions).
	 *
	 * Also, in the event of "b", unmount -f should be used,
	 * like with any other file system, sparingly and only when
	 * it is "known" to be safe.
	 */
	mp->mnt_iflags |= IMNT_MPSAFE;
#endif

	pmp->pmp_status = PUFFSTAT_MOUNTING;
	pmp->pmp_mp = mp;
	pmp->pmp_msg_maxsize = args->pa_maxmsglen;
	pmp->pmp_args = *args;

	pmp->pmp_npnodehash = args->pa_nhashbuckets;
	pmp->pmp_pnodehash = kmem_alloc(BUCKETALLOC(pmp->pmp_npnodehash),
	    KM_SLEEP);
	for (i = 0; i < pmp->pmp_npnodehash; i++)
		LIST_INIT(&pmp->pmp_pnodehash[i]);
	LIST_INIT(&pmp->pmp_newcookie);

	/*
	 * Inform the fileops processing code that we have a mountpoint.
	 * If it doesn't know about anyone with our pid/fd having the
	 * device open, punt
	 */
	if ((pmp->pmp_pi
	    = putter_attach(mntpid, args->pa_fd, pmp, &puffs_putter)) == NULL) {
		error = ENOENT;
		goto out;
	}

	/* XXX: check parameters */
	pmp->pmp_root_cookie = args->pa_root_cookie;
	pmp->pmp_root_vtype = args->pa_root_vtype;
	pmp->pmp_root_vsize = args->pa_root_vsize;
	pmp->pmp_root_rdev = args->pa_root_rdev;
	pmp->pmp_docompat = args->pa_time32;

	mutex_init(&pmp->pmp_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pmp->pmp_sopmtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pmp->pmp_msg_waiter_cv, "puffsget");
	cv_init(&pmp->pmp_refcount_cv, "puffsref");
	cv_init(&pmp->pmp_unmounting_cv, "puffsum");
	cv_init(&pmp->pmp_sopcv, "puffsop");
	TAILQ_INIT(&pmp->pmp_msg_touser);
	TAILQ_INIT(&pmp->pmp_msg_replywait);
	TAILQ_INIT(&pmp->pmp_sopfastreqs);
	TAILQ_INIT(&pmp->pmp_sopnodereqs);

	if ((error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    puffs_sop_thread, pmp, NULL, "puffsop")) != 0)
		goto out;
	pmp->pmp_sopthrcount = 1;

	DPRINTF(("puffs_mount: mount point at %p, puffs specific at %p\n",
	    mp, MPTOPUFFSMP(mp)));

	vfs_getnewfsid(mp);

 out:
	if (error && pmp && pmp->pmp_pi)
		putter_detach(pmp->pmp_pi);
	if (error && pmp && pmp->pmp_pnodehash)
		kmem_free(pmp->pmp_pnodehash, BUCKETALLOC(pmp->pmp_npnodehash));
	if (error && pmp)
		kmem_free(pmp, sizeof(struct puffs_mount));
	return error;
}

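/*
 * VFS_START(): the mount is established, so move from MOUNTING to
 * RUNNING.  puffs_vfsop_statvfs() refuses to talk to the server
 * until this has happened.
 */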
int
puffs_vfsop_start(struct mount *mp, int flags)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);

	KASSERT(pmp->pmp_status == PUFFSTAT_MOUNTING);
	pmp->pmp_status = PUFFSTAT_RUNNING;

	return 0;
}

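/*
 * Unmount: flush vnodes, let the server have its say unless it is
 * already dead or MNT_FORCE was given, and then tear down the
 * transport, the sop thread and the puffs_mount itself.
 */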
int
puffs_vfsop_unmount(struct mount *mp, int mntflags)
{
	PUFFS_MSG_VARS(vfs, unmount);
	struct puffs_mount *pmp;
	int error, force;

	error = 0;
	force = mntflags & MNT_FORCE;
	pmp = MPTOPUFFSMP(mp);

	DPRINTF(("puffs_unmount: detach filesystem from vfs, current "
	    "status 0x%x\n", pmp->pmp_status));

	/*
	 * flush all the vnodes.  VOP_RECLAIM() takes care that the
	 * root vnode does not get flushed until unmount.  The
	 * userspace root node cookie is stored in the mount
	 * structure, so we can always re-instantiate a root vnode,
	 * should userspace unmount decide it doesn't want to
	 * cooperate.
	 */
	error = vflush(mp, NULLVP, force ? FORCECLOSE : 0);
	if (error)
		goto out;

	/*
	 * If we are not DYING, we should ask userspace's opinion
	 * about the situation
	 */
	mutex_enter(&pmp->pmp_lock);
	if (pmp->pmp_status != PUFFSTAT_DYING) {
		pmp->pmp_unmounting = 1;
		mutex_exit(&pmp->pmp_lock);

		PUFFS_MSG_ALLOC(vfs, unmount);
		puffs_msg_setinfo(park_unmount,
		    PUFFSOP_VFS, PUFFS_VFS_UNMOUNT, NULL);
		unmount_msg->pvfsr_flags = mntflags;

		PUFFS_MSG_ENQUEUEWAIT(pmp, park_unmount, error);
		PUFFS_MSG_RELEASE(unmount);

		error = checkerr(pmp, error, __func__);
		DPRINTF(("puffs_unmount: error %d force %d\n", error, force));

		mutex_enter(&pmp->pmp_lock);
		pmp->pmp_unmounting = 0;
		cv_broadcast(&pmp->pmp_unmounting_cv);
	}

	/*
	 * if userspace cooperated or we really need to die,
	 * screw what userland thinks and just die.
	 */
	if (error == 0 || force) {
		struct puffs_sopreq *psopr;

		/* tell waiters & other resources to go unwait themselves */
		puffs_userdead(pmp);
		putter_detach(pmp->pmp_pi);

		/*
		 * Wait until there are no more users for the mount resource.
		 * Notice that this is hooked against transport_close
		 * and return from touser.  In an ideal world, it would
		 * be hooked against final return from all operations.
		 * But currently it works well enough, since nobody
		 * does weird blocking voodoo after return from touser().
		 */
		while (pmp->pmp_refcount != 0)
			cv_wait(&pmp->pmp_refcount_cv, &pmp->pmp_lock);
		mutex_exit(&pmp->pmp_lock);

		/*
		 * Release kernel thread now that there is nothing
		 * it would be wanting to lock.
		 */
		KASSERT(curlwp != uvm.pagedaemon_lwp);
		psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
		psopr->psopr_sopreq = PUFFS_SOPREQSYS_EXIT;
		mutex_enter(&pmp->pmp_sopmtx);
		if (pmp->pmp_sopthrcount == 0) {
			mutex_exit(&pmp->pmp_sopmtx);
			kmem_free(psopr, sizeof(*psopr));
			mutex_enter(&pmp->pmp_sopmtx);
			KASSERT(pmp->pmp_sopthrcount == 0);
		} else {
			TAILQ_INSERT_TAIL(&pmp->pmp_sopfastreqs,
			    psopr, psopr_entries);
			cv_signal(&pmp->pmp_sopcv);
		}
		while (pmp->pmp_sopthrcount > 0)
			cv_wait(&pmp->pmp_sopcv, &pmp->pmp_sopmtx);
		mutex_exit(&pmp->pmp_sopmtx);

		/* free resources now that we hopefully have no waiters left */
		cv_destroy(&pmp->pmp_unmounting_cv);
		cv_destroy(&pmp->pmp_refcount_cv);
		cv_destroy(&pmp->pmp_msg_waiter_cv);
		cv_destroy(&pmp->pmp_sopcv);
		mutex_destroy(&pmp->pmp_lock);
		mutex_destroy(&pmp->pmp_sopmtx);

		kmem_free(pmp->pmp_pnodehash, BUCKETALLOC(pmp->pmp_npnodehash));
		kmem_free(pmp, sizeof(struct puffs_mount));
		error = 0;
	} else {
		mutex_exit(&pmp->pmp_lock);
	}

 out:
	DPRINTF(("puffs_unmount: return %d\n", error));
	return error;
}

/*
 * This doesn't need to travel to userspace
 */
int
puffs_vfsop_root(struct mount *mp, struct vnode **vpp)
{
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int rv;

	rv = puffs_cookie2vnode(pmp, pmp->pmp_root_cookie, 1, 1, vpp);
	KASSERT(rv != PUFFS_NOSUCHCOOKIE);
	return rv;
}

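/*
 * Ask the server for statvfs information.  On success the result is
 * merged with the kernel-maintained fields via copy_statvfs_info();
 * on failure we fall back to the data already cached in the struct
 * mount.
 */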
int
puffs_vfsop_statvfs(struct mount *mp, struct statvfs *sbp)
{
	PUFFS_MSG_VARS(vfs, statvfs);
	struct puffs_mount *pmp;
	int error = 0;

	pmp = MPTOPUFFSMP(mp);

	/*
	 * If we are mounting, it means that the userspace counterpart
	 * is calling mount(2), but mount(2) also calls statvfs.  So
	 * requesting statvfs from userspace would mean a deadlock.
	 * Compensate.
	 */
	if (__predict_false(pmp->pmp_status == PUFFSTAT_MOUNTING))
		return EINPROGRESS;

	PUFFS_MSG_ALLOC(vfs, statvfs);
	puffs_msg_setinfo(park_statvfs, PUFFSOP_VFS, PUFFS_VFS_STATVFS, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_statvfs, error);
	error = checkerr(pmp, error, __func__);
	statvfs_msg->pvfsr_sb.f_iosize = DEV_BSIZE;

	/*
	 * Try to produce a sensible result even in the event
	 * of userspace error.
	 *
	 * XXX: cache the copy in non-error case
	 */
	if (!error) {
		copy_statvfs_info(&statvfs_msg->pvfsr_sb, mp);
		(void)memcpy(sbp, &statvfs_msg->pvfsr_sb,
		    sizeof(struct statvfs));
	} else {
		copy_statvfs_info(sbp, mp);
	}

	PUFFS_MSG_RELEASE(statvfs);
	return error;
}

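/*
 * Helper for VFS_SYNC: walk the mount's vnode list and VOP_FSYNC()
 * every regular vnode with dirty pages.  For MNT_LAZY the node is
 * temporarily marked PNODE_FAF so the flush does not block on the
 * file server.
 */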
static int
pageflush(struct mount *mp, kauth_cred_t cred, int waitfor)
{
	struct puffs_node *pn;
	struct vnode *vp, *mvp;
	int error, rv, fsyncwait;

	error = 0;
	fsyncwait = (waitfor == MNT_WAIT) ? FSYNC_WAIT : 0;

	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);

	/*
	 * Sync all cached data from regular vnodes (which are not
	 * currently locked, see below).  After this we call VFS_SYNC
	 * for the fs server, which should handle data and metadata for
	 * all the nodes it knows to exist.
	 */
	mutex_enter(&mntvnode_lock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;

		mutex_enter(vp->v_interlock);
		pn = VPTOPP(vp);
		if (vp->v_type != VREG || UVM_OBJ_IS_CLEAN(&vp->v_uobj)) {
			mutex_exit(vp->v_interlock);
			continue;
		}

		mutex_exit(&mntvnode_lock);

		/*
		 * Here we try to get a reference to the vnode and to
		 * lock it.  This is mostly cargo-culted, but I will
		 * offer an explanation to why I believe this might
		 * actually do the right thing.
		 *
		 * If the vnode is a goner, we quite obviously don't need
		 * to sync it.
		 *
		 * If the vnode was busy, we don't need to sync it because
		 * this is never called with MNT_WAIT except from
		 * dounmount(), when we are wait-flushing all the dirty
		 * vnodes through other routes in any case.  So there,
		 * sync() doesn't actually sync.  Happy now?
		 */
		rv = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (rv) {
			mutex_enter(&mntvnode_lock);
			if (rv == ENOENT) {
				(void)vunmark(mvp);
				goto loop;
			}
			continue;
		}

		/* hmm.. is the FAF thing entirely sensible? */
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat |= PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		rv = VOP_FSYNC(vp, cred, fsyncwait, 0, 0);
		if (waitfor == MNT_LAZY) {
			mutex_enter(vp->v_interlock);
			pn->pn_stat &= ~PNODE_FAF;
			mutex_exit(vp->v_interlock);
		}
		if (rv)
			error = rv;
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);

	return error;
}

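/*
 * VFS_SYNC: flush our own page cache first, then send PUFFS_VFS_SYNC
 * so the server can flush whatever it caches on its side.
 */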
int
puffs_vfsop_sync(struct mount *mp, int waitfor, struct kauth_cred *cred)
{
	PUFFS_MSG_VARS(vfs, sync);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int error, rv;

	error = pageflush(mp, cred, waitfor);

	/* sync fs */
	PUFFS_MSG_ALLOC(vfs, sync);
	sync_msg->pvfsr_waitfor = waitfor;
	puffs_credcvt(&sync_msg->pvfsr_cred, cred);
	puffs_msg_setinfo(park_sync, PUFFSOP_VFS, PUFFS_VFS_SYNC, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_sync, rv);
	rv = checkerr(pmp, rv, __func__);
	if (rv)
		error = rv;

	PUFFS_MSG_RELEASE(sync);
	return error;
}

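/*
 * NFS export: turn a file handle back into a vnode.  The handle
 * length is validated against what the server advertised at mount
 * time (exact for static handles, an upper bound for dynamic ones,
 * raw struct fid for passthrough) before asking the server for the
 * matching cookie.
 */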
int
puffs_vfsop_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	PUFFS_MSG_VARS(vfs, fhtonode);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct vnode *vp;
	void *fhdata;
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
		fhlen = fhp->fid_len;
		fhdata = fhp;
	} else {
		fhlen = PUFFS_FROMFHSIZE(fhp->fid_len);
		fhdata = fhp->fid_data;

		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) {
			if (pmp->pmp_args.pa_fhsize < fhlen)
				return EINVAL;
		} else {
			if (pmp->pmp_args.pa_fhsize != fhlen)
				return EINVAL;
		}
	}

	argsize = sizeof(struct puffs_vfsmsg_fhtonode) + fhlen;
	puffs_msgmem_alloc(argsize, &park_fhtonode, (void *)&fhtonode_msg, 1);
	fhtonode_msg->pvfsr_dsize = fhlen;
	memcpy(fhtonode_msg->pvfsr_data, fhdata, fhlen);
	puffs_msg_setinfo(park_fhtonode, PUFFSOP_VFS, PUFFS_VFS_FHTOVP, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_fhtonode, error);
	error = checkerr(pmp, error, __func__);
	if (error)
		goto out;

	error = puffs_cookie2vnode(pmp, fhtonode_msg->pvfsr_fhcookie, 1,1,&vp);
	DPRINTF(("puffs_fhtovp: got cookie %p, existing vnode %p\n",
	    fhtonode_msg->pvfsr_fhcookie, vp));
	if (error == PUFFS_NOSUCHCOOKIE) {
		error = puffs_getvnode(mp, fhtonode_msg->pvfsr_fhcookie,
		    fhtonode_msg->pvfsr_vtype, fhtonode_msg->pvfsr_size,
		    fhtonode_msg->pvfsr_rdev, &vp);
		if (error)
			goto out;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else if (error) {
		goto out;
	}

	*vpp = vp;
 out:
	puffs_msgmem_release(park_fhtonode);
	return error;
}

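/*
 * Reverse of the above: ask the server for a file handle matching
 * the cookie behind the vnode.  Most of the dance below is about
 * agreeing on the handle length for the static, dynamic and
 * passthrough handle flavours.
 */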
int
puffs_vfsop_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
{
	PUFFS_MSG_VARS(vfs, nodetofh);
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	size_t argsize, fhlen;
	int error;

	if (pmp->pmp_args.pa_fhsize == 0)
		return EOPNOTSUPP;

	/* if file handles are static len, we can test len immediately */
	if (((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC) == 0)
	    && ((pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) == 0)
	    && (PUFFS_FROMFHSIZE(*fh_size) < pmp->pmp_args.pa_fhsize)) {
		*fh_size = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);
		return E2BIG;
	}

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = *fh_size;
	else
		fhlen = PUFFS_FROMFHSIZE(*fh_size);

	argsize = sizeof(struct puffs_vfsmsg_nodetofh) + fhlen;
	puffs_msgmem_alloc(argsize, &park_nodetofh, (void *)&nodetofh_msg, 1);
	nodetofh_msg->pvfsr_fhcookie = VPTOPNC(vp);
	nodetofh_msg->pvfsr_dsize = fhlen;
	puffs_msg_setinfo(park_nodetofh, PUFFSOP_VFS, PUFFS_VFS_VPTOFH, NULL);

	PUFFS_MSG_ENQUEUEWAIT(pmp, park_nodetofh, error);
	error = checkerr(pmp, error, __func__);

	if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH)
		fhlen = nodetofh_msg->pvfsr_dsize;
	else if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_DYNAMIC)
		fhlen = PUFFS_TOFHSIZE(nodetofh_msg->pvfsr_dsize);
	else
		fhlen = PUFFS_TOFHSIZE(pmp->pmp_args.pa_fhsize);

	if (error) {
		if (error == E2BIG)
			*fh_size = fhlen;
		goto out;
	}

	if (fhlen > FHANDLE_SIZE_MAX) {
		puffs_senderr(pmp, PUFFS_ERR_VPTOFH, E2BIG,
		    "file handle too big", VPTOPNC(vp));
		error = EPROTO;
		goto out;
	}

	if (*fh_size < fhlen) {
		*fh_size = fhlen;
		error = E2BIG;
		goto out;
	}
	*fh_size = fhlen;

	if (fhp) {
		if (pmp->pmp_args.pa_fhflags & PUFFS_FHFLAG_PASSTHROUGH) {
			memcpy(fhp, nodetofh_msg->pvfsr_data, fhlen);
		} else {
			fhp->fid_len = *fh_size;
			memcpy(fhp->fid_data, nodetofh_msg->pvfsr_data,
			    nodetofh_msg->pvfsr_dsize);
		}
	}

 out:
	puffs_msgmem_release(park_nodetofh);
	return error;
}

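/*
 * Module glue: set up the pools for puffs nodes and vattrs and
 * initialize the message interface; puffs_vfsop_done() undoes this.
 */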
void
puffs_vfsop_init(void)
{

	/* some checks depend on this */
	KASSERT(VNOVAL == VSIZENOTSET);

	pool_init(&puffs_pnpool, sizeof(struct puffs_node), 0, 0, 0,
	    "puffpnpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&puffs_vapool, sizeof(struct vattr), 0, 0, 0,
	    "puffvapl", &pool_allocator_nointr, IPL_NONE);
	puffs_msgif_init();
}

void
puffs_vfsop_done(void)
{

	puffs_msgif_destroy();
	pool_destroy(&puffs_pnpool);
	pool_destroy(&puffs_vapool);
}

int
puffs_vfsop_snapshot(struct mount *mp, struct vnode *vp, struct timespec *ts)
{

	return EOPNOTSUPP;
}

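/*
 * extattrctl: relay the request to the server.  If a vnode was
 * passed in, it is unlocked for the duration of the wait and a
 * reference on the puffs node is held across it.
 */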
int
puffs_vfsop_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
	int attrnamespace, const char *attrname)
{
	PUFFS_MSG_VARS(vfs, extattrctl);
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	struct puffs_node *pnp;
	puffs_cookie_t pnc;
	int error, flags;

	if (vp) {
		/* doesn't make sense for puffs servers */
		if (vp->v_mount != mp)
			return EXDEV;
		pnp = vp->v_data;
		pnc = pnp->pn_cookie;
		flags = PUFFS_EXTATTRCTL_HASNODE;
	} else {
		pnp = pnc = NULL;
		flags = 0;
	}

	PUFFS_MSG_ALLOC(vfs, extattrctl);
	extattrctl_msg->pvfsr_cmd = cmd;
	extattrctl_msg->pvfsr_attrnamespace = attrnamespace;
	extattrctl_msg->pvfsr_flags = flags;
	if (attrname) {
		strlcpy(extattrctl_msg->pvfsr_attrname, attrname,
		    sizeof(extattrctl_msg->pvfsr_attrname));
		extattrctl_msg->pvfsr_flags |= PUFFS_EXTATTRCTL_HASATTRNAME;
	}
	puffs_msg_setinfo(park_extattrctl,
	    PUFFSOP_VFS, PUFFS_VFS_EXTATTRCTL, pnc);

	puffs_msg_enqueue(pmp, park_extattrctl);
	if (vp) {
		mutex_enter(&pnp->pn_mtx);
		puffs_referencenode(pnp);
		mutex_exit(&pnp->pn_mtx);
		VOP_UNLOCK(vp);
	}
	error = puffs_msg_wait2(pmp, park_extattrctl, pnp, NULL);
	PUFFS_MSG_RELEASE(extattrctl);
	if (vp) {
		puffs_releasenode(pnp);
	}

	return checkerr(pmp, error, __func__);
}

const struct vnodeopv_desc * const puffs_vnodeopv_descs[] = {
	&puffs_vnodeop_opv_desc,
	&puffs_specop_opv_desc,
	&puffs_fifoop_opv_desc,
	&puffs_msgop_opv_desc,
	NULL,
};

struct vfsops puffs_vfsops = {
	MOUNT_PUFFS,
	sizeof (struct puffs_kargs),
	puffs_vfsop_mount,		/* mount	*/
	puffs_vfsop_start,		/* start	*/
	puffs_vfsop_unmount,		/* unmount	*/
	puffs_vfsop_root,		/* root		*/
	(void *)eopnotsupp,		/* quotactl	*/
	puffs_vfsop_statvfs,		/* statvfs	*/
	puffs_vfsop_sync,		/* sync		*/
	(void *)eopnotsupp,		/* vget		*/
	puffs_vfsop_fhtovp,		/* fhtovp	*/
	puffs_vfsop_vptofh,		/* vptofh	*/
	puffs_vfsop_init,		/* init		*/
	NULL,				/* reinit	*/
	puffs_vfsop_done,		/* done		*/
	NULL,				/* mountroot	*/
	puffs_vfsop_snapshot,		/* snapshot	*/
	puffs_vfsop_extattrctl,		/* extattrctl	*/
	(void *)eopnotsupp,		/* suspendctl	*/
	genfs_renamelock_enter,
	genfs_renamelock_exit,
	(void *)eopnotsupp,
	puffs_vnodeopv_descs,		/* vnodeops	*/
	0,				/* refcount	*/
	{ NULL, NULL }
};

static int
puffs_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&puffs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&puffs_vfsops);
	default:
		return ENOTTY;
	}
}