tmpfs_vfsops.c, r253573 -> r254741 (delphij): allow tmpfs to be mounted from within a jail. The change adds a prison_allow(PR_ALLOW_MOUNT_TMPFS) check to tmpfs_mount() and switches the VFS_SET() flags from 0 to VFCF_JAIL. The cleaned r254741 source follows.
/* $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $ */

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
 * (the well-known UVM) to store file data and metadata in an efficient
 * way. This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to. Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/fs/tmpfs/tmpfs_vfsops.c 254741 2013-08-23 22:52:20Z delphij $");

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
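/* <sys/proc.h> supplies curthread; <sys/jail.h> supplies prison_allow() and PR_ALLOW_MOUNT_TMPFS. */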
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

/* --------------------------------------------------------------------- */

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);

/* --------------------------------------------------------------------- */

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", NULL
};

/* --------------------------------------------------------------------- */

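/*
 * UMA callbacks for the "TMPFS node" zone created in tmpfs_mount():
 * ctor/dtor run on every allocation and free from the zone, init/fini
 * run when items are added to and removed from the zone's backing store.
 */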
static int
tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_vnode = NULL;
	node->tn_vpstate = 0;

	return (0);
}

static void
tmpfs_node_dtor(void *mem, int size, void *arg)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;
	node->tn_type = VNON;
}

static int
tmpfs_node_init(void *mem, int size, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;
	node->tn_id = 0;

	mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
	node->tn_gen = arc4random();

	return (0);
}

static void
tmpfs_node_fini(void *mem, int size)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	mtx_destroy(&node->tn_interlock);
}

static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	struct thread *td = curthread;
	int error;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

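	/* Refuse to mount from within a jail unless the jail grants
	 * PR_ALLOW_MOUNT_TMPFS (the allow.mount.tmpfs jail parameter). */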
	if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_TMPFS))
		return (EPERM);

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) !=
		    ((struct tmpfs_mount *)mp->mnt_data)->tm_ronly)
			return (EOPNOTSUPP);
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered, 0);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return ENOSPC;

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure. A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max < PAGE_SIZE || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else
		pages = howmany(size_max, PAGE_SIZE);
	MPASS(pages > 0);

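	/* If no usable inode limit was given, derive one from the page
	 * limit; in every case clamp it to INT_MAX. */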
	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->allnode_lock);
	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
	    sizeof(struct tmpfs_dirent),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_node_pool = uma_zcreate("TMPFS node",
	    sizeof(struct tmpfs_node),
	    tmpfs_node_ctor, tmpfs_node_dtor,
	    tmpfs_node_init, tmpfs_node_fini,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(tmp, VDIR, root_uid,
	    root_gid, root_mode & ALLPERMS, NULL, NULL,
	    VNOVAL, &root);

	if (error != 0 || root == NULL) {
		uma_zdestroy(tmp->tm_node_pool);
		uma_zdestroy(tmp->tm_dirent_pool);
		delete_unrhdr(tmp->tm_ino_unr);
		free(tmp, M_TMPFSMNT);
		return error;
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return 0;
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	int error;
	int flags = 0;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, 0, flags, curthread);
	if (error != 0)
		return error;

	tmp = VFS_TO_TMPFS(mp);

	/* Free all associated data. The loop iterates over the linked list
	 * we have containing all used nodes. For each of them that is
	 * a directory, we free all its directory entries. Note that after
	 * freeing a node, it will automatically go to the available list,
	 * so we will later have to iterate over it to release its items. */
	node = LIST_FIRST(&tmp->tm_nodes_used);
	while (node != NULL) {
		struct tmpfs_node *next;

		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);

		next = LIST_NEXT(node, tn_entries);
		tmpfs_free_node(tmp, node);
		node = next;
	}

	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);
	delete_unrhdr(tmp->tm_ino_unr);

	mtx_destroy(&tmp->allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	/* Throw away the tmpfs_mount structure. */
	free(mp->mnt_data, M_TMPFSMNT);
	mp->mnt_data = NULL;

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;
	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);

	if (!error)
		(*vpp)->v_vflag |= VV_ROOT;

	return error;
}

/* --------------------------------------------------------------------- */

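/*
 * Translate a file handle into a vnode: validate the handle's length and
 * inode number, then scan the in-use node list for a node whose id and
 * generation both match the handle.
 */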
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	boolean_t found;
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return EINVAL;

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return EINVAL;

	found = FALSE;

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			found = TRUE;
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (found)
		return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp));

	return (EINVAL);
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != SIZE_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return 0;
}

/* --------------------------------------------------------------------- */

/*
 * tmpfs vfs operations.
 */

struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
};
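/* VFCF_JAIL marks tmpfs as mountable from within a jail, subject to the
 * PR_ALLOW_MOUNT_TMPFS check in tmpfs_mount(); it was previously 0. */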
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);