/*	$NetBSD: tmpfs_vfsops.c,v 1.73 2018/08/09 08:43:56 christos Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
 * (the well-known UVM) to store file data and metadata in an efficient
 * way.  This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
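
/*
 * Illustrative usage (hypothetical values; see mount_tmpfs(8) for the
 * authoritative option list): a size-limited tmpfs can be mounted from
 * userland with, e.g.:
 *
 *	mount_tmpfs -s 32M tmpfs /tmp
 *
 * The size, node and root-ownership options arrive here packed into
 * struct tmpfs_args and are handled by tmpfs_mount() below.
 */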

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.73 2018/08/09 08:43:56 christos Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/kauth.h>
#include <sys/module.h>

#include <miscfs/genfs/genfs.h>
#include <fs/tmpfs/tmpfs.h>
#include <fs/tmpfs/tmpfs_args.h>

MODULE(MODULE_CLASS_VFS, tmpfs, NULL);

struct pool	tmpfs_dirent_pool;
struct pool	tmpfs_node_pool;

void
tmpfs_init(void)
{

	pool_init(&tmpfs_dirent_pool, sizeof(tmpfs_dirent_t), 0, 0, 0,
	    "tmpfs_dirent", &pool_allocator_nointr, IPL_NONE);
	pool_init(&tmpfs_node_pool, sizeof(tmpfs_node_t), 0, 0, 0,
	    "tmpfs_node", &pool_allocator_nointr, IPL_NONE);
}

void
tmpfs_done(void)
{

	pool_destroy(&tmpfs_dirent_pool);
	pool_destroy(&tmpfs_node_pool);
}

int
tmpfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct tmpfs_args *args = data;
	tmpfs_mount_t *tmp;
	tmpfs_node_t *root;
	struct vattr va;
	struct vnode *vp;
	uint64_t memlimit;
	ino_t nodes;
	int error, flags;
	bool set_memlimit;
	bool set_nodes;

	if (args == NULL)
		return EINVAL;

	/* Validate the version. */
	if (*data_len < sizeof(*args) ||
	    args->ta_version != TMPFS_ARGS_VERSION)
		return EINVAL;

	/* Handle retrieval of mount point arguments. */
	if (mp->mnt_flag & MNT_GETARGS) {
		if (mp->mnt_data == NULL)
			return EIO;
		tmp = VFS_TO_TMPFS(mp);

		args->ta_version = TMPFS_ARGS_VERSION;
		args->ta_nodes_max = tmp->tm_nodes_max;
		args->ta_size_max = tmp->tm_mem_limit;

		root = tmp->tm_root;
		args->ta_root_uid = root->tn_uid;
		args->ta_root_gid = root->tn_gid;
		args->ta_root_mode = root->tn_mode;

		*data_len = sizeof(*args);
		return 0;
	}

	/* Prohibit mounts if there is not enough memory. */
	if (tmpfs_mem_info(true) < uvmexp.freetarg)
		return EINVAL;

	/* Check for invalid uid and gid arguments. */
	if (args->ta_root_uid == VNOVAL || args->ta_root_gid == VNOVAL)
		return EINVAL;

	/*
	 * Check for an invalid mode.  Note that ta_root_mode & ALLPERMS
	 * can never equal VNOVAL (-1), so this check cannot trigger.
	 */
	if ((args->ta_root_mode & ALLPERMS) == VNOVAL)
		return EINVAL;

	/* Get the memory usage limit for this file-system. */
	if (args->ta_size_max < PAGE_SIZE) {
		memlimit = UINT64_MAX;
		set_memlimit = false;
	} else {
		memlimit = args->ta_size_max;
		set_memlimit = true;
	}
	KASSERT(memlimit > 0);

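	/* Get the node limit: by default, 3 + one per KB of the memory limit. */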
	if (args->ta_nodes_max <= 3) {
		nodes = 3 + (memlimit / 1024);
		set_nodes = false;
	} else {
		nodes = args->ta_nodes_max;
		set_nodes = true;
	}
	nodes = MIN(nodes, INT_MAX);
	KASSERT(nodes >= 3);

	if (mp->mnt_flag & MNT_UPDATE) {
		tmp = VFS_TO_TMPFS(mp);
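		/* Refuse to shrink the node limit below the nodes in use. */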
		if (set_nodes && nodes < tmp->tm_nodes_cnt)
			return EBUSY;
		if ((mp->mnt_iflag & IMNT_WANTRDONLY)) {
			/* Changing from read/write to read-only. */
			flags = WRITECLOSE;
			if ((mp->mnt_flag & MNT_FORCE))
				flags |= FORCECLOSE;
			error = vflush(mp, NULL, flags);
			if (error)
				return error;
		}
		if (set_memlimit) {
			if ((error = tmpfs_mntmem_set(tmp, memlimit)) != 0)
				return error;
		}
		if (set_nodes)
			tmp->tm_nodes_max = nodes;
		root = tmp->tm_root;
		root->tn_uid = args->ta_root_uid;
		root->tn_gid = args->ta_root_gid;
		root->tn_mode = args->ta_root_mode;
		return 0;
	}

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_stat.f_namemax = TMPFS_MAXNAMLEN;
	mp->mnt_fs_bshift = PAGE_SHIFT;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
	vfs_getnewfsid(mp);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = kmem_zalloc(sizeof(tmpfs_mount_t), KM_SLEEP);
	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_cnt = 0;
	LIST_INIT(&tmp->tm_nodes);

	mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE);
	tmpfs_mntmem_init(tmp, memlimit);
	mp->mnt_data = tmp;

	/* Allocate the root node. */
	vattr_null(&va);
	va.va_type = VDIR;
	va.va_mode = args->ta_root_mode & ALLPERMS;
	va.va_uid = args->ta_root_uid;
	va.va_gid = args->ta_root_gid;
	error = vcache_new(mp, NULL, &va, NOCRED, &vp);
	if (error) {
		mp->mnt_data = NULL;
		tmpfs_mntmem_destroy(tmp);
		mutex_destroy(&tmp->tm_lock);
		kmem_free(tmp, sizeof(*tmp));
		return error;
	}
	KASSERT(vp != NULL);
	root = VP_TO_TMPFS_NODE(vp);
	KASSERT(root != NULL);

	/*
	 * Parent of the root inode is itself.  Also, root inode has no
	 * directory entry (i.e. is never attached), thus hold an extra
	 * reference (link) for it.
	 */
	root->tn_links++;
	root->tn_spec.tn_dir.tn_parent = root;
	tmp->tm_root = root;
	vrele(vp);

	error = set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, curlwp);
	if (error) {
		(void)tmpfs_unmount(mp, MNT_FORCE);
	}
	return error;
}

int
tmpfs_start(struct mount *mp, int flags)
{

	return 0;
}

int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node, *cnode;
	int error, flags = 0;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, NULL, flags);
	if (error != 0)
		return error;

	/*
	 * First round, detach and destroy all directory entries.
	 * Also, clear the pointers to the vnodes - they are gone.
	 */
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		tmpfs_dirent_t *de;

		node->tn_vnode = NULL;
		if (node->tn_type != VDIR) {
			continue;
		}
		while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
			cnode = de->td_node;
			if (cnode && cnode != TMPFS_NODE_WHITEOUT) {
				cnode->tn_vnode = NULL;
			}
			tmpfs_dir_detach(node, de);
			tmpfs_free_dirent(tmp, de);
		}
		/* Extra virtual entry (itself for the root). */
		node->tn_links--;
	}

	/* Release the reference on root (diagnostic). */
	node = tmp->tm_root;
	node->tn_links--;

	/* Second round, destroy all inodes. */
	while ((node = LIST_FIRST(&tmp->tm_nodes)) != NULL) {
		tmpfs_free_node(tmp, node);
	}

	/* Throw away the tmpfs_mount structure. */
	tmpfs_mntmem_destroy(tmp);
	mutex_destroy(&tmp->tm_lock);
	kmem_free(tmp, sizeof(*tmp));
	mp->mnt_data = NULL;

	return 0;
}

int
tmpfs_root(struct mount *mp, vnode_t **vpp)
{
	tmpfs_node_t *node = VFS_TO_TMPFS(mp)->tm_root;
	int error;

	error = vcache_get(mp, &node, sizeof(node), vpp);
	if (error)
		return error;
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}

	return 0;
}

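/*
 * Note: lookup by bare inode number is not supported; NFS file handles
 * are instead resolved through tmpfs_fhtovp() below.
 */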
int
tmpfs_vget(struct mount *mp, ino_t ino, vnode_t **vpp)
{

	return EOPNOTSUPP;
}

int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, vnode_t **vpp)
{
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(mp);
	tmpfs_node_t *node;
	tmpfs_fid_t tfh;
	int error;

	if (fhp->fid_len != sizeof(tmpfs_fid_t)) {
		return EINVAL;
	}
	memcpy(&tfh, fhp, sizeof(tmpfs_fid_t));

	mutex_enter(&tmp->tm_lock);
	LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
		if (node->tn_id == tfh.tf_id) {
			/* Prevent this node from disappearing. */
			atomic_inc_32(&node->tn_holdcount);
			break;
		}
	}
	mutex_exit(&tmp->tm_lock);
	if (node == NULL)
		return ESTALE;

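	/*
	 * Look up or create the vnode.  The hold taken above keeps the
	 * node's memory valid even if the vnode is reclaimed while we
	 * sleep in vcache_get().
	 */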
	error = vcache_get(mp, &node, sizeof(node), vpp);
	/* If this node has been reclaimed free it now. */
	if (atomic_dec_32_nv(&node->tn_holdcount) == TMPFS_NODE_RECLAIMED) {
		KASSERT(error != 0);
		tmpfs_free_node(tmp, node);
	}
	if (error)
		return (error == ENOENT ? ESTALE : error);
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}
	if (TMPFS_NODE_GEN(node) != tfh.tf_gen) {
		vput(*vpp);
		*vpp = NULL;
		return ESTALE;
	}

	return 0;
}

int
tmpfs_vptofh(vnode_t *vp, struct fid *fhp, size_t *fh_size)
{
	tmpfs_fid_t tfh;
	tmpfs_node_t *node;

	if (*fh_size < sizeof(tmpfs_fid_t)) {
		*fh_size = sizeof(tmpfs_fid_t);
		return E2BIG;
	}
	*fh_size = sizeof(tmpfs_fid_t);
	node = VP_TO_TMPFS_NODE(vp);

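	/*
	 * Record the generation number so that tmpfs_fhtovp() can detect
	 * a stale handle once the inode number has been reused.
	 */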
	memset(&tfh, 0, sizeof(tfh));
	tfh.tf_len = sizeof(tmpfs_fid_t);
	tfh.tf_gen = TMPFS_NODE_GEN(node);
	tfh.tf_id = node->tn_id;
	memcpy(fhp, &tfh, sizeof(tfh));

	return 0;
}

int
tmpfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	tmpfs_mount_t *tmp;
	fsfilcnt_t freenodes;
	size_t avail;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = sbp->f_frsize = sbp->f_bsize = PAGE_SIZE;

	mutex_enter(&tmp->tm_acc_lock);
	avail = tmpfs_pages_avail(tmp);
	sbp->f_blocks = (tmpfs_bytes_max(tmp) >> PAGE_SHIFT);
	sbp->f_bavail = sbp->f_bfree = avail;
	sbp->f_bresvd = 0;

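	/*
	 * The number of free inodes is bounded both by the node limit and
	 * by the memory left, as each inode consumes sizeof(tmpfs_node_t).
	 */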
	freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
	    avail * PAGE_SIZE / sizeof(tmpfs_node_t));

	sbp->f_files = tmp->tm_nodes_cnt + freenodes;
	sbp->f_favail = sbp->f_ffree = freenodes;
	sbp->f_fresvd = 0;
	mutex_exit(&tmp->tm_acc_lock);

	copy_statvfs_info(sbp, mp);

	return 0;
}

int
tmpfs_sync(struct mount *mp, int waitfor, kauth_cred_t uc)
{

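	/* All tmpfs data lives in memory; there is nothing to write back. */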
	return 0;
}

int
tmpfs_snapshot(struct mount *mp, vnode_t *vp, struct timespec *ctime)
{

	return EOPNOTSUPP;
}

/*
 * tmpfs vfs operations.
 */

extern const struct vnodeopv_desc tmpfs_fifoop_opv_desc;
extern const struct vnodeopv_desc tmpfs_specop_opv_desc;
extern const struct vnodeopv_desc tmpfs_vnodeop_opv_desc;

const struct vnodeopv_desc * const tmpfs_vnodeopv_descs[] = {
	&tmpfs_fifoop_opv_desc,
	&tmpfs_specop_opv_desc,
	&tmpfs_vnodeop_opv_desc,
	NULL,
};

struct vfsops tmpfs_vfsops = {
	.vfs_name = MOUNT_TMPFS,
	.vfs_min_mount_data = sizeof (struct tmpfs_args),
	.vfs_mount = tmpfs_mount,
	.vfs_start = tmpfs_start,
	.vfs_unmount = tmpfs_unmount,
	.vfs_root = tmpfs_root,
	.vfs_quotactl = (void *)eopnotsupp,
	.vfs_statvfs = tmpfs_statvfs,
	.vfs_sync = tmpfs_sync,
	.vfs_vget = tmpfs_vget,
	.vfs_loadvnode = tmpfs_loadvnode,
	.vfs_newvnode = tmpfs_newvnode,
	.vfs_fhtovp = tmpfs_fhtovp,
	.vfs_vptofh = tmpfs_vptofh,
	.vfs_init = tmpfs_init,
	.vfs_done = tmpfs_done,
	.vfs_snapshot = tmpfs_snapshot,
	.vfs_extattrctl = vfs_stdextattrctl,
	.vfs_suspendctl = genfs_suspendctl,
	.vfs_renamelock_enter = genfs_renamelock_enter,
	.vfs_renamelock_exit = genfs_renamelock_exit,
	.vfs_fsync = (void *)eopnotsupp,
	.vfs_opv_descs = tmpfs_vnodeopv_descs
};

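/*
 * Module control: attach the tmpfs vfsops on load and detach on unload.
 */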
static int
tmpfs_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return vfs_attach(&tmpfs_vfsops);
	case MODULE_CMD_FINI:
		return vfs_detach(&tmpfs_vfsops);
	default:
		return ENOTTY;
	}
}