/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

static MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);
static void	tmpfs_susp_clean(struct mount *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "size", NULL
};

/*
 * Handle updates of time from writes to mmaped regions.  Use
 * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
 * unmap of the tmpfs-backed vnode does not call vinactive(); the
 * vm object type is OBJT_SWAP.
 * If lazy, only handle the delayed update of mtime due to writes to
 * mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In the lazy case, do an unlocked read to avoid
		 * taking the vnode lock if it is not needed.  A lost
		 * update will be handled on the next call.
		 * In the non-lazy case, all pending metadata changes
		 * must be flushed now.
		 */
		if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}

struct tmpfs_check_rw_maps_arg {
	bool found;
};

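/*
 * Callback for tmpfs_all_rw_maps(): note that a writable mapping was
 * found and terminate the scan early by returning true.
 */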
static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: would it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in
	 * any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

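/*
 * Walk the map entries of every process in the system and invoke cb on
 * each writable mapping of a regular file that belongs to the given
 * tmpfs mount.  The walk terminates early once cb returns true.
 */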
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
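		/*
		 * Drop allproc_lock while this map is examined; the
		 * vmspace reference and the process hold taken above
		 * keep both from being destroyed.
		 */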
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		for (entry = map->header.next; entry != &map->header;
		    entry = entry->next) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != OBJT_SWAP ||
			    (object->flags & OBJ_TMPFS_NODE) == 0)
				continue;
			/*
			 * No need to dig into the shadow chain: a
			 * mapping of an object that is not at the top
			 * of the chain is read-only.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
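	/*
	 * The process list may have changed while allproc_lock was
	 * dropped; if so, rescan from the beginning.
	 */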
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

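/*
 * Report whether any process currently maps a regular file from the
 * given tmpfs mount with write permission.
 */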
static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

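/*
 * Downgrade a read-write tmpfs mount to read-only: suspend writers,
 * fail with EBUSY if writable mappings exist (unless forced), then
 * revoke write access from existing mappings and flush all vnodes.
 */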
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
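	/*
	 * New writable mappings can appear while write access is being
	 * revoked; repeat the revocation and vnode flush until none
	 * remain.
	 */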
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime(mp, false);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		tmp = VFS_TO_TMPFS(mp);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).
			 * "size" must still be listed as supported;
			 * otherwise updating a file system that has a
			 * size parameter in fstab, e.g. to change rw
			 * to ro or vice versa, would make
			 * vfs_filteropt() bail.
			 */
			if (size_max != tmp->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !tmp->tm_ronly) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    tmp->tm_ronly) {
			/* RO -> RW */
			tmp->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered, 0);
	if (error)
		return (error);

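	/*
	 * Only the superuser may override the root node attributes;
	 * everyone else inherits uid, gid and mode from the covered
	 * vnode.
	 */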
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

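	/*
	 * If no inode limit was requested (or it is too small), derive
	 * it from the page limit: roughly one node plus one directory
	 * entry per page, capped at INT_MAX.
	 */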
	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
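	/* Unique inode numbers are handed out starting at 2, so the
	 * root node, allocated first, receives ino 2 (see the KASSERT
	 * below). */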
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_TEXT_REFS;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

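	/*
	 * Flush all vnodes.  New vnodes may be instantiated
	 * concurrently, so repeat until the mount's vnode list drains;
	 * only a forced unmount keeps retrying.
	 */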
	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

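/*
 * Release a reference on the given tmpfs_mount; the last reference
 * frees the structure.  Called locked (TMPFS_LOCK); the lock is
 * always released before returning.
 */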
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{
	TMPFS_MP_ASSERT_LOCKED(tmp);
	MPASS(tmp->tm_refcount > 0);

	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid_data tfd;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	if (fhp->fid_len != sizeof(tfd))
		return (EINVAL);

	/*
	 * Copy from fid_data onto the stack to avoid unaligned pointer use.
	 * See the comment in sys/mount.h on struct fid for details.
	 */
	memcpy(&tfd, fhp->fid_data, fhp->fid_len);

	tmp = VFS_TO_TMPFS(mp);

	if (tfd.tfd_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfd.tfd_id &&
		    node->tn_gen == tfd.tfd_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

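	/*
	 * An unlimited mount has tm_pages_max set to SIZE_MAX, which
	 * the ULONG_MAX comparison below is meant to catch; in that
	 * case report the pages in use plus whatever memory is still
	 * available.
	 */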
	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

static int
tmpfs_sync(struct mount *mp, int waitfor)
{

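	/*
	 * tmpfs keeps all file data in memory, so there is nothing to
	 * write back here.  MNT_SUSPEND only needs the suspension
	 * flags set; MNT_LAZY flushes delayed mtime updates for
	 * written mmaped files.
	 */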
	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime(mp, true);
	}
	return (0);
}

/*
 * The presence of a susp_clean method tells the VFS to track writes.
 */
static void
tmpfs_susp_clean(struct mount *mp __unused)
{
}

static int
tmpfs_init(struct vfsconf *conf)
{
	tmpfs_subr_init();
	return (0);
}

static int
tmpfs_uninit(struct vfsconf *conf)
{
	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			tmpfs_sync,
	.vfs_susp_clean =		tmpfs_susp_clean,
	.vfs_init =			tmpfs_init,
	.vfs_uninit =			tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);