tmpfs_vfsops.c: deleted lines are from stable/11 r341074, added lines from r346286
1/* $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $ */
2
3/*-
4 * Copyright (c) 2005 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code

--- 27 unchanged lines hidden ---

36 * tmpfs is a file system that uses FreeBSD's virtual memory
37 * sub-system to store file data and metadata in an efficient way.
38 * This means that it does not follow the structure of an on-disk file
39 * system because it simply does not need to. Instead, it uses
40 * memory-specific data structures and algorithms to automatically
41 * allocate and release resources.
42 */
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: stable/11/sys/fs/tmpfs/tmpfs_vfsops.c 341074 2018-11-27 16:51:18Z markj $");
44__FBSDID("$FreeBSD: stable/11/sys/fs/tmpfs/tmpfs_vfsops.c 346286 2019-04-16 17:43:14Z kib $");
45
46#include <sys/param.h>
47#include <sys/limits.h>
48#include <sys/lock.h>
49#include <sys/mutex.h>
50#include <sys/proc.h>
51#include <sys/jail.h>
52#include <sys/kernel.h>
53#include <sys/rwlock.h>
54#include <sys/stat.h>
55#include <sys/sx.h>
55#include <sys/sysctl.h>
56
57#include <vm/vm.h>
59#include <vm/vm_param.h>
60#include <vm/pmap.h>
61#include <vm/vm_extern.h>
62#include <vm/vm_map.h>
58#include <vm/vm_object.h>
59#include <vm/vm_param.h>
60
61#include <fs/tmpfs/tmpfs.h>
62
63/*
64 * Default permission for root node
65 */

--- 11 unchanged lines hidden ---

77static void tmpfs_susp_clean(struct mount *);
78
79static const char *tmpfs_opts[] = {
80 "from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
81 "union", "nonc", NULL
82};
83
84static const char *tmpfs_updateopts[] = {
85 "from", "export", NULL
90 "from", "export", "size", NULL
86};
87
88static int
89tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
90{
91 struct tmpfs_node *node = (struct tmpfs_node *)mem;
92
93 node->tn_gen++;

--- 29 unchanged lines hidden ---

123static void
124tmpfs_node_fini(void *mem, int size)
125{
126 struct tmpfs_node *node = (struct tmpfs_node *)mem;
127
128 mtx_destroy(&node->tn_interlock);
129}
130
136/*
137 * Handle updates of time from writes to mmapped regions. Use
138 * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
139 * unmapping a tmpfs-backed vnode does not call vinactive(), because
140 * the vm object type is OBJT_SWAP.
141 * If lazy, only handle the delayed mtime updates caused by writes to
142 * mapped files.
143 */
144static void
145tmpfs_update_mtime(struct mount *mp, bool lazy)
146{
147 struct vnode *vp, *mvp;
148 struct vm_object *obj;
149
150 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
151 if (vp->v_type != VREG) {
152 VI_UNLOCK(vp);
153 continue;
154 }
155 obj = vp->v_object;
156 KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
157 (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
158
159 /*
160 * In the lazy case, do an unlocked read and avoid
161 * taking the vnode lock if it is not needed. A lost
162 * update will be handled on the next call.
163 * In the non-lazy case, we must flush all pending
164 * metadata changes now.
165 */
166 if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
167 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
168 curthread) != 0)
169 continue;
170 tmpfs_check_mtime(vp);
171 if (!lazy)
172 tmpfs_update(vp);
173 vput(vp);
174 } else {
175 VI_UNLOCK(vp);
176 continue;
177 }
178 }
179}
180
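The function above exists because stores through a shared mapping bypass write(2): the pages are dirtied directly and the timestamp catch-up is keyed off OBJ_TMPFS_DIRTY. A minimal userland sketch of the behavior being covered; the file name /tmp/mtime_demo and the assumption that /tmp is a tmpfs are illustrative only:

#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	char *p;
	int fd;

	fd = open("/tmp/mtime_demo", O_RDWR | O_CREAT, 0644);
	if (fd == -1)
		err(1, "open");
	if (ftruncate(fd, 4096) == -1)
		err(1, "ftruncate");
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 'x';	/* dirties the page; no write(2) is involved */
	if (munmap(p, 4096) == -1)
		err(1, "munmap");
	/*
	 * The mtime catch-up happens once the lazy path above runs
	 * (the syncer periodically calls VFS_SYNC with MNT_LAZY, see
	 * tmpfs_sync() below) or when metadata is flushed eagerly,
	 * e.g. on unmount or on the read-only downgrade.
	 */
	close(fd);
	return (0);
}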
181struct tmpfs_check_rw_maps_arg {
182 bool found;
183};
184
185static bool
186tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
187 vm_map_entry_t entry __unused, void *arg)
188{
189 struct tmpfs_check_rw_maps_arg *a;
190
191 a = arg;
192 a->found = true;
193 return (true);
194}
195
196/*
197 * Revoke write permissions from all mappings of regular files
198 * belonging to the specified tmpfs mount.
199 */
200static bool
201tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
202 vm_map_entry_t entry, void *arg __unused)
203{
204
205 /*
206 * XXXKIB: might it be better to invalidate
207 * the mapping instead? The process is not
208 * going to be happy in any case.
209 */
210 entry->max_protection &= ~VM_PROT_WRITE;
211 if ((entry->protection & VM_PROT_WRITE) != 0) {
212 entry->protection &= ~VM_PROT_WRITE;
213 pmap_protect(map->pmap, entry->start, entry->end,
214 entry->protection);
215 }
216 return (false);
217}
218
219static void
220tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
221 vm_map_entry_t, void *), void *cb_arg)
222{
223 struct proc *p;
224 struct vmspace *vm;
225 vm_map_t map;
226 vm_map_entry_t entry;
227 vm_object_t object;
228 struct vnode *vp;
229 int gen;
230 bool terminate;
231
232 terminate = false;
233 sx_slock(&allproc_lock);
234again:
235 gen = allproc_gen;
236 FOREACH_PROC_IN_SYSTEM(p) {
237 PROC_LOCK(p);
238 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
239 P_SYSTEM | P_WEXIT)) != 0) {
240 PROC_UNLOCK(p);
241 continue;
242 }
243 vm = vmspace_acquire_ref(p);
244 _PHOLD_LITE(p);
245 PROC_UNLOCK(p);
246 if (vm == NULL) {
247 PRELE(p);
248 continue;
249 }
250 sx_sunlock(&allproc_lock);
251 map = &vm->vm_map;
252
253 vm_map_lock(map);
254 if (map->busy)
255 vm_map_wait_busy(map);
256 for (entry = map->header.next; entry != &map->header;
257 entry = entry->next) {
258 if ((entry->eflags & (MAP_ENTRY_GUARD |
259 MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
260 (entry->max_protection & VM_PROT_WRITE) == 0)
261 continue;
262 object = entry->object.vm_object;
263 if (object == NULL || object->type != OBJT_SWAP ||
264 (object->flags & OBJ_TMPFS_NODE) == 0)
265 continue;
266 /*
267 * No need to dig into the shadow chain; a mapping
268 * of an object that is not at the top is read-only.
269 */
270
271 VM_OBJECT_RLOCK(object);
272 if (object->type == OBJT_DEAD) {
273 VM_OBJECT_RUNLOCK(object);
274 continue;
275 }
276 MPASS(object->ref_count > 1);
277 if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
278 (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
279 VM_OBJECT_RUNLOCK(object);
280 continue;
281 }
282 vp = object->un_pager.swp.swp_tmpfs;
283 if (vp->v_mount != mp) {
284 VM_OBJECT_RUNLOCK(object);
285 continue;
286 }
287
288 terminate = cb(mp, map, entry, cb_arg);
289 VM_OBJECT_RUNLOCK(object);
290 if (terminate)
291 break;
292 }
293 vm_map_unlock(map);
294
295 vmspace_free(vm);
296 sx_slock(&allproc_lock);
297 PRELE(p);
298 if (terminate)
299 break;
300 }
301 if (!terminate && gen != allproc_gen)
302 goto again;
303 sx_sunlock(&allproc_lock);
304}
305
306static bool
307tmpfs_check_rw_maps(struct mount *mp)
308{
309 struct tmpfs_check_rw_maps_arg ca;
310
311 ca.found = false;
312 tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
313 return (ca.found);
314}
315
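tmpfs_all_rw_maps() walks every process's map once (retrying if allproc_gen changes underneath it) and hands each writable mapping of this mount's regular files to the callback, which returns true to stop the scan early. As a hedged illustration of the pattern, a hypothetical callback that counts all such mappings instead of stopping at the first one; the tmpfs_count_* names are invented for this sketch and are not part of the file:

struct tmpfs_count_rw_maps_arg {
	int count;
};

static bool
tmpfs_count_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_count_rw_maps_arg *a;

	a = arg;
	a->count++;
	return (false);	/* keep scanning instead of terminating early */
}

static int
tmpfs_count_rw_maps(struct mount *mp)
{
	struct tmpfs_count_rw_maps_arg ca;

	ca.count = 0;
	tmpfs_all_rw_maps(mp, tmpfs_count_rw_maps_cb, &ca);
	return (ca.count);
}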
131static int
317tmpfs_rw_to_ro(struct mount *mp)
318{
319 int error, flags;
320 bool forced;
321
322 forced = (mp->mnt_flag & MNT_FORCE) != 0;
323 flags = WRITECLOSE | (forced ? FORCECLOSE : 0);
324
325 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
326 return (error);
327 error = vfs_write_suspend_umnt(mp);
328 if (error != 0)
329 return (error);
330 if (!forced && tmpfs_check_rw_maps(mp)) {
331 error = EBUSY;
332 goto out;
333 }
334 VFS_TO_TMPFS(mp)->tm_ronly = 1;
335 MNT_ILOCK(mp);
336 mp->mnt_flag |= MNT_RDONLY;
337 MNT_IUNLOCK(mp);
338 for (;;) {
339 tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
340 tmpfs_update_mtime(mp, false);
341 error = vflush(mp, 0, flags, curthread);
342 if (error != 0) {
343 VFS_TO_TMPFS(mp)->tm_ronly = 0;
344 MNT_ILOCK(mp);
345 mp->mnt_flag &= ~MNT_RDONLY;
346 MNT_IUNLOCK(mp);
347 goto out;
348 }
349 if (!tmpfs_check_rw_maps(mp))
350 break;
351 }
352out:
353 vfs_write_resume(mp, 0);
354 return (error);
355}
356
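tmpfs_rw_to_ro() must loop because a new writable mapping can appear between the revocation pass and the vflush(); it only commits to read-only once a full pass finds no writable mappings left. From userland, a plain downgrade fails with EBUSY while such mappings exist, and a forced one revokes write access out from under the mapper, as the XXXKIB comment above anticipates. A hedged demonstration, assuming root privileges and a scratch tmpfs mounted at /mnt/tmp; the path, the one-second settle time, and the opt() helper are all illustrative:

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
opt(struct iovec *iov, const char *name, const char *val)
{
	iov[0].iov_base = __DECONST(char *, name);
	iov[0].iov_len = strlen(name) + 1;
	iov[1].iov_base = __DECONST(char *, val);
	iov[1].iov_len = val != NULL ? strlen(val) + 1 : 0;
}

int
main(void)
{
	struct iovec iov[6];
	pid_t pid;
	char *p;
	int fd, status;

	fd = open("/mnt/tmp/demo", O_RDWR | O_CREAT, 0644);
	if (fd == -1 || ftruncate(fd, PAGE_SIZE) == -1)
		err(1, "setup");
	p = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0)
		for (;;)	/* child: keep dirtying the page */
			p[0]++;
	sleep(1);
	opt(&iov[0], "fstype", "tmpfs");
	opt(&iov[2], "fspath", "/mnt/tmp");
	opt(&iov[4], "ro", NULL);
	/* Without MNT_FORCE this nmount(2) would fail with EBUSY. */
	if (nmount(iov, 6, MNT_UPDATE | MNT_FORCE) == -1)
		err(1, "nmount");
	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");
	printf("child %s\n", WIFSIGNALED(status) &&
	    WTERMSIG(status) == SIGSEGV ? "got SIGSEGV after revocation" :
	    "exited some other way");
	return (0);
}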
357static int
132tmpfs_mount(struct mount *mp)
133{
134 const size_t nodes_per_page = howmany(PAGE_SIZE,
135 sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
136 struct tmpfs_mount *tmp;
137 struct tmpfs_node *root;
138 struct thread *td = curthread;
139 int error;

--- 14 unchanged lines hidden ---

154
155 if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
156 return (EINVAL);
157
158 if (mp->mnt_flag & MNT_UPDATE) {
159 /* Only support update mounts for certain options. */
160 if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
161 return (EOPNOTSUPP);
162 if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) !=
163 ((struct tmpfs_mount *)mp->mnt_data)->tm_ronly)
164 return (EOPNOTSUPP);
388 if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
389 /*
390 * On-the-fly resizing is not supported (yet). We still
391 * need to have "size" listed as a supported option;
392 * otherwise, trying to update a file system listed in
393 * fstab with a size parameter, say to change rw to ro
394 * or vice versa, would cause vfs_filteropt() to bail.
395 */
396 if (size_max != VFS_TO_TMPFS(mp)->tm_size_max)
397 return (EOPNOTSUPP);
398 }
399 if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
400 !(VFS_TO_TMPFS(mp)->tm_ronly)) {
401 /* RW -> RO */
402 return (tmpfs_rw_to_ro(mp));
403 } else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
404 VFS_TO_TMPFS(mp)->tm_ronly) {
405 /* RO -> RW */
406 VFS_TO_TMPFS(mp)->tm_ronly = 0;
407 MNT_ILOCK(mp);
408 mp->mnt_flag &= ~MNT_RDONLY;
409 MNT_IUNLOCK(mp);
410 }
165 return (0);
166 }
167
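The net effect of the MNT_UPDATE handling above: "size" may be re-sent (as happens when the mount comes from an fstab entry with size=...), but only with the value already in effect, while rw/ro transitions are carried out explicitly. A hedged userland sketch using nmount(2); the mount point /tmp and the size 64m are assumptions, and the opt() helper mirrors the one in the earlier example:

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>
#include <string.h>

static void
opt(struct iovec *iov, const char *name, const char *val)
{
	iov[0].iov_base = __DECONST(char *, name);
	iov[0].iov_len = strlen(name) + 1;
	iov[1].iov_base = __DECONST(char *, val);
	iov[1].iov_len = val != NULL ? strlen(val) + 1 : 0;
}

int
main(void)
{
	struct iovec iov[8];

	opt(&iov[0], "fstype", "tmpfs");
	opt(&iov[2], "fspath", "/tmp");
	opt(&iov[4], "size", "64m");	/* accepted only if unchanged */
	opt(&iov[6], "ro", NULL);	/* request the RW -> RO path */
	/*
	 * Fails with EOPNOTSUPP if "64m" differs from the mount's
	 * current tm_size_max, and with EBUSY if writable mappings of
	 * files on the mount still exist (no MNT_FORCE here).
	 */
	if (nmount(iov, 8, MNT_UPDATE) == -1)
		err(1, "nmount");
	return (0);
}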
168 vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
169 error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
170 VOP_UNLOCK(mp->mnt_vnodecovered, 0);
171 if (error)
172 return (error);

--- 49 unchanged lines hidden (view full) ---

222
223 mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
224 tmp->tm_nodes_max = nodes_max;
225 tmp->tm_nodes_inuse = 0;
226 tmp->tm_refcount = 1;
227 tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
228 LIST_INIT(&tmp->tm_nodes_used);
229
476 tmp->tm_size_max = size_max;
230 tmp->tm_pages_max = pages;
231 tmp->tm_pages_used = 0;
232 tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->tm_allnode_lock);
233 tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
234 sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
235 UMA_ALIGN_PTR, 0);
236 tmp->tm_node_pool = uma_zcreate("TMPFS node",
237 sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,

--- 190 unchanged lines hidden ---

428 /* sbp->f_owner = tmp->tn_uid; */
429
430 return 0;
431}
432
433static int
434tmpfs_sync(struct mount *mp, int waitfor)
435{
436 struct vnode *vp, *mvp;
437 struct vm_object *obj;
438
439 if (waitfor == MNT_SUSPEND) {
440 MNT_ILOCK(mp);
441 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
442 MNT_IUNLOCK(mp);
443 } else if (waitfor == MNT_LAZY) {
444 /*
445 * Handle lazy updates of mtime from writes to mmapped
446 * regions. Use MNT_VNODE_FOREACH_ALL instead of
447 * MNT_VNODE_FOREACH_ACTIVE, since unmapping the
448 * tmpfs-backed vnode does not call vinactive(), because
449 * the vm object type is OBJT_SWAP.
450 */
451 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
452 if (vp->v_type != VREG) {
453 VI_UNLOCK(vp);
454 continue;
455 }
456 obj = vp->v_object;
457 KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
458 (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
459
460 /*
461 * Unlocked read, avoid taking vnode lock if
462 * not needed. Lost update will be handled on
463 * the next call.
464 */
465 if ((obj->flags & OBJ_TMPFS_DIRTY) == 0) {
466 VI_UNLOCK(vp);
467 continue;
468 }
469 if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
470 curthread) != 0)
471 continue;
472 tmpfs_check_mtime(vp);
473 vput(vp);
474 }
689 tmpfs_update_mtime(mp, true);
475 }
476 return (0);
477}
478
479/*
480 * The presence of a susp_clean method tells the VFS to track writes.
481 */
482static void

--- 18 unchanged lines hidden ---