null_vnops.c: diff of revisions 139984 -> 140165 ("-" = deleted line, "+" = added line)
1/*-
2 * Copyright (c) 1992, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * John Heidemann of the UCLA Ficus project.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)null_vnops.c 8.6 (Berkeley) 5/27/95
33 *
34 * Ancestors:
35 * @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92
36 * ...and...
37 * @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
38 *
- 39 * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 139984 2005-01-10 13:09:33Z phk $
+ 39 * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 140165 2005-01-13 07:53:01Z phk $
40 */
41
42/*
43 * Null Layer
44 *
45 * (See mount_nullfs(8) for more information.)
46 *
47 * The null layer duplicates a portion of the filesystem
48 * name space under a new name. In this respect, it is
49 * similar to the loopback filesystem. It differs from
50 * the loopback fs in two respects: it is implemented using
 51 * stackable-layer techniques, and its null-nodes stack above
52 * all lower-layer vnodes, not just over directory vnodes.
53 *
54 * The null layer has two purposes. First, it serves as a demonstration
 55 * of layering by providing a layer which does nothing. (It actually
56 * does everything the loopback filesystem does, which is slightly
57 * more than nothing.) Second, the null layer can serve as a prototype
58 * layer. Since it provides all necessary layer framework,
 59 * new filesystem layers can be created very easily by starting
60 * with a null layer.
61 *
62 * The remainder of this man page examines the null layer as a basis
63 * for constructing new layers.
64 *
65 *
66 * INSTANTIATING NEW NULL LAYERS
67 *
68 * New null layers are created with mount_nullfs(8).
69 * Mount_nullfs(8) takes two arguments, the pathname
70 * of the lower vfs (target-pn) and the pathname where the null
71 * layer will appear in the namespace (alias-pn). After
72 * the null layer is put into place, the contents
 73 * of the target-pn subtree will be aliased under alias-pn.
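 *	A concrete invocation (hypothetical paths, chosen only for
 *	illustration):
 *		mount_nullfs /usr/src /mnt/src
 *	Here /usr/src is target-pn and /mnt/src is alias-pn.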
74 *
75 *
76 * OPERATION OF A NULL LAYER
77 *
78 * The null layer is the minimum filesystem layer,
79 * simply bypassing all possible operations to the lower layer
80 * for processing there. The majority of its activity centers
81 * on the bypass routine, through which nearly all vnode operations
82 * pass.
83 *
84 * The bypass routine accepts arbitrary vnode operations for
 85 * handling by the lower layer. It begins by examining vnode
 86 * operation arguments and replacing any null-nodes by their
 87 * lower-layer equivalents. It then invokes the operation
 88 * on the lower layer. Finally, it replaces the null-nodes
 89 * in the arguments and, if a vnode is returned by the operation,
90 * stacks a null-node on top of the returned vnode.
91 *
92 * Although bypass handles most operations, vop_getattr, vop_lock,
93 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
94 * bypassed. Vop_getattr must change the fsid being returned.
95 * Vop_lock and vop_unlock must handle any locking for the
96 * current vnode as well as pass the lock request down.
97 * Vop_inactive and vop_reclaim are not bypassed so that
98 * they can handle freeing null-layer specific data. Vop_print
99 * is not bypassed to avoid excessive debugging information.
100 * Also, certain vnode operations change the locking state within
101 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
102 * and symlink). Ideally these operations should not change the
103 * lock state, but should be changed to let the caller of the
104 * function unlock them. Otherwise all intermediate vnode layers
105 * (such as union, umapfs, etc) must catch these functions to do
106 * the necessary locking at their layer.
107 *
108 *
109 * INSTANTIATING VNODE STACKS
110 *
111 * Mounting associates the null layer with a lower layer,
 112 * effectively stacking two VFSes. Vnode stacks are instead
113 * created on demand as files are accessed.
114 *
115 * The initial mount creates a single vnode stack for the
116 * root of the new null layer. All other vnode stacks
117 * are created as a result of vnode operations on
118 * this or other null vnode stacks.
119 *
 120 * New vnode stacks come into existence as a result of
121 * an operation which returns a vnode.
122 * The bypass routine stacks a null-node above the new
123 * vnode before returning it to the caller.
124 *
125 * For example, imagine mounting a null layer with
126 * "mount_nullfs /usr/include /dev/layer/null".
127 * Changing directory to /dev/layer/null will assign
128 * the root null-node (which was created when the null layer was mounted).
129 * Now consider opening "sys". A vop_lookup would be
130 * done on the root null-node. This operation would bypass through
131 * to the lower layer which would return a vnode representing
132 * the UFS "sys". Null_bypass then builds a null-node
133 * aliasing the UFS "sys" and returns this to the caller.
134 * Later operations on the null-node "sys" will repeat this
135 * process when constructing other vnode stacks.
136 *
137 *
138 * CREATING OTHER FILE SYSTEM LAYERS
139 *
140 * One of the easiest ways to construct new filesystem layers is to make
141 * a copy of the null layer, rename all files and variables, and
 142 * then begin modifying the copy. Sed can be used to easily rename
143 * all variables.
144 *
145 * The umap layer is an example of a layer descended from the
146 * null layer.
147 *
148 *
149 * INVOKING OPERATIONS ON LOWER LAYERS
150 *
151 * There are two techniques to invoke operations on a lower layer
152 * when the operation cannot be completely bypassed. Each method
153 * is appropriate in different situations. In both cases,
154 * it is the responsibility of the aliasing layer to make
155 * the operation arguments "correct" for the lower layer
 156 * by mapping vnode arguments to the lower layer.
157 *
158 * The first approach is to call the aliasing layer's bypass routine.
159 * This method is most suitable when you wish to invoke the operation
160 * currently being handled on the lower layer. It has the advantage
161 * that the bypass routine already must do argument mapping.
 162 * An example of this is null_getattr in the null layer.
163 *
164 * A second approach is to directly invoke vnode operations on
165 * the lower layer with the VOP_OPERATIONNAME interface.
166 * The advantage of this method is that it is easy to invoke
167 * arbitrary operations on the lower layer. The disadvantage
 168 * is that vnode arguments must be manually mapped.
169 *
170 */
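/*
 * A sketch of the two techniques described above, under stated
 * assumptions: the example_* names are hypothetical, and the calls
 * simply mirror what null_getattr() and null_lookup() below do.
 */
#if 0
/* Technique 1: reuse the bypass routine and its argument mapping. */
static int
example_getattr(struct vop_getattr_args *ap)
{

	return (null_bypass((struct vop_generic_args *)ap));
}

/* Technique 2: map the vnode by hand and invoke the op directly. */
static int
example_getattr_direct(struct vop_getattr_args *ap)
{

	return (VOP_GETATTR(NULLVPTOLOWERVP(ap->a_vp), ap->a_vap,
	    ap->a_cred, ap->a_td));
}
#endif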
171
172#include <sys/param.h>
173#include <sys/systm.h>
174#include <sys/conf.h>
175#include <sys/kernel.h>
176#include <sys/lock.h>
177#include <sys/malloc.h>
178#include <sys/mount.h>
179#include <sys/mutex.h>
180#include <sys/namei.h>
181#include <sys/sysctl.h>
182#include <sys/vnode.h>
183
184#include <fs/nullfs/null.h>
185
186#include <vm/vm.h>
187#include <vm/vm_extern.h>
188#include <vm/vm_object.h>
189#include <vm/vnode_pager.h>
190
191static int null_bug_bypass = 0; /* for debugging: enables bypass printf'ing */
192SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
193 &null_bug_bypass, 0, "");
194
195static vop_access_t null_access;
196static vop_createvobject_t null_createvobject;
197static vop_destroyvobject_t null_destroyvobject;
198static vop_getattr_t null_getattr;
199static vop_getvobject_t null_getvobject;
200static vop_inactive_t null_inactive;
201static vop_islocked_t null_islocked;
202static vop_lock_t null_lock;
203static vop_lookup_t null_lookup;
204static vop_print_t null_print;
205static vop_reclaim_t null_reclaim;
206static vop_rename_t null_rename;
207static vop_setattr_t null_setattr;
208static vop_unlock_t null_unlock;
209
210/*
211 * This is the 10-Apr-92 bypass routine.
212 * This version has been optimized for speed, throwing away some
213 * safety checks. It should still always work, but it's not as
214 * robust to programmer errors.
215 *
216 * In general, we map all vnodes going down and unmap them on the way back.
217 * As an exception to this, vnodes can be marked "unmapped" by setting
218 * the Nth bit in operation's vdesc_flags.
219 *
220 * Also, some BSD vnode operations have the side effect of vrele'ing
221 * their arguments. With stacking, the reference counts are held
222 * by the upper node, not the lower one, so we must handle these
223 * side-effects here. This is not of concern in Sun-derived systems
224 * since there are no such side-effects.
225 *
226 * This makes the following assumptions:
227 * - only one returned vpp
228 * - no INOUT vpp's (Sun's vop_open has one of these)
229 * - the vnode operation vector of the first vnode should be used
230 * to determine what implementation of the op should be invoked
231 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
232 * problems on rmdir'ing mount points and renaming?)
233 */
234int
235null_bypass(ap)
236 struct vop_generic_args /* {
237 struct vnodeop_desc *a_desc;
238 <other random data follows, presumably>
239 } */ *ap;
240{
241 register struct vnode **this_vp_p;
242 int error;
243 struct vnode *old_vps[VDESC_MAX_VPS];
244 struct vnode **vps_p[VDESC_MAX_VPS];
245 struct vnode ***vppp;
246 struct vnodeop_desc *descp = ap->a_desc;
247 int reles, i;
248
249 if (null_bug_bypass)
250 printf ("null_bypass: %s\n", descp->vdesc_name);
251
252#ifdef DIAGNOSTIC
253 /*
254 * We require at least one vp.
255 */
256 if (descp->vdesc_vp_offsets == NULL ||
257 descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
258 panic ("null_bypass: no vp's in map");
259#endif
260
261 /*
262 * Map the vnodes going in.
263 * Later, we'll invoke the operation based on
264 * the first mapped vnode's operation vector.
265 */
266 reles = descp->vdesc_flags;
267 for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
268 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
269 break; /* bail out at end of list */
270 vps_p[i] = this_vp_p =
271 VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
272 /*
273 * We're not guaranteed that any but the first vnode
274 * are of our type. Check for and don't map any
275 * that aren't. (We must always map first vp or vclean fails.)
276 */
277 if (i && (*this_vp_p == NULLVP ||
278 (*this_vp_p)->v_op != &null_vnodeops)) {
279 old_vps[i] = NULLVP;
280 } else {
281 old_vps[i] = *this_vp_p;
282 *(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
283 /*
284 * XXX - Several operations have the side effect
285 * of vrele'ing their vp's. We must account for
286 * that. (This should go away in the future.)
287 */
288 if (reles & VDESC_VP0_WILLRELE)
289 VREF(*this_vp_p);
290 }
291
292 }
293
294 /*
295 * Call the operation on the lower layer
296 * with the modified argument structure.
297 */
298 if (vps_p[0] && *vps_p[0])
- 299 		error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);
+ 299 		error = VCALL(ap);
300 else {
301 printf("null_bypass: no map for %s\n", descp->vdesc_name);
302 error = EINVAL;
303 }
304
305 /*
306 * Maintain the illusion of call-by-value
307 * by restoring vnodes in the argument structure
308 * to their original value.
309 */
310 reles = descp->vdesc_flags;
311 for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
312 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
313 break; /* bail out at end of list */
314 if (old_vps[i]) {
315 *(vps_p[i]) = old_vps[i];
316#if 0
317 if (reles & VDESC_VP0_WILLUNLOCK)
318 VOP_UNLOCK(*(vps_p[i]), LK_THISLAYER, curthread);
319#endif
320 if (reles & VDESC_VP0_WILLRELE)
321 vrele(*(vps_p[i]));
322 }
323 }
324
325 /*
326 * Map the possible out-going vpp
327 * (Assumes that the lower layer always returns
328 * a VREF'ed vpp unless it gets an error.)
329 */
330 if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
331 !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
332 !error) {
333 /*
334 * XXX - even though some ops have vpp returned vp's,
335 * several ops actually vrele this before returning.
336 * We must avoid these ops.
337 * (This should go away when these ops are regularized.)
338 */
339 if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
340 goto out;
341 vppp = VOPARG_OFFSETTO(struct vnode***,
342 descp->vdesc_vpp_offset,ap);
343 if (*vppp)
344 error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
345 }
346
347 out:
348 return (error);
349}
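/*
 * A note on the VCALL(ap) form above (the editor's reading of the
 * 140165 revision, offered as an assumption): VCALL() now takes only
 * the argument structure, recovers the operation from ap->a_desc, and
 * dispatches it through the first mapped vnode's v_op, making the old
 * explicit vnode and vdesc_offset arguments redundant.
 */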
350
351/*
352 * We have to carry on the locking protocol on the null layer vnodes
353 * as we progress through the tree. We also have to enforce read-only
354 * if this layer is mounted read-only.
355 */
356static int
357null_lookup(ap)
358 struct vop_lookup_args /* {
359 struct vnode * a_dvp;
360 struct vnode ** a_vpp;
361 struct componentname * a_cnp;
362 } */ *ap;
363{
364 struct componentname *cnp = ap->a_cnp;
365 struct vnode *dvp = ap->a_dvp;
366 struct thread *td = cnp->cn_thread;
367 int flags = cnp->cn_flags;
368 struct vnode *vp, *ldvp, *lvp;
369 int error;
370
371 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
372 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
373 return (EROFS);
374 /*
375 * Although it is possible to call null_bypass(), we'll do
376 * a direct call to reduce overhead
377 */
378 ldvp = NULLVPTOLOWERVP(dvp);
379 vp = lvp = NULL;
380 error = VOP_LOOKUP(ldvp, &lvp, cnp);
381 if (error == EJUSTRETURN && (flags & ISLASTCN) &&
382 (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
383 (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
384 error = EROFS;
385
386 /*
387 * Rely only on the PDIRUNLOCK flag which should be carefully
388 * tracked by underlying filesystem.
389 */
390 if ((cnp->cn_flags & PDIRUNLOCK) && dvp->v_vnlock != ldvp->v_vnlock)
391 VOP_UNLOCK(dvp, LK_THISLAYER, td);
392 if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
393 if (ldvp == lvp) {
394 *ap->a_vpp = dvp;
395 VREF(dvp);
396 vrele(lvp);
397 } else {
398 error = null_nodeget(dvp->v_mount, lvp, &vp);
399 if (error) {
400 /* XXX Cleanup needed... */
401 panic("null_nodeget failed");
402 }
403 *ap->a_vpp = vp;
404 }
405 }
406 return (error);
407}
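/*
 * (An aside on the ldvp == lvp case above, an editorial assumption:
 * it covers lookups such as "." where the lower filesystem returns
 * the directory vnode itself; returning dvp with an extra reference
 * keeps the upper and lower layers paired one-to-one instead of
 * allocating a second null-node over the same lower vnode.)
 */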
408
409/*
410 * Setattr call. Disallow write attempts if the layer is mounted read-only.
411 */
412static int
413null_setattr(ap)
414 struct vop_setattr_args /* {
415 struct vnodeop_desc *a_desc;
416 struct vnode *a_vp;
417 struct vattr *a_vap;
418 struct ucred *a_cred;
419 struct thread *a_td;
420 } */ *ap;
421{
422 struct vnode *vp = ap->a_vp;
423 struct vattr *vap = ap->a_vap;
424
425 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
426 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
427 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
428 (vp->v_mount->mnt_flag & MNT_RDONLY))
429 return (EROFS);
430 if (vap->va_size != VNOVAL) {
431 switch (vp->v_type) {
432 case VDIR:
433 return (EISDIR);
434 case VCHR:
435 case VBLK:
436 case VSOCK:
437 case VFIFO:
438 if (vap->va_flags != VNOVAL)
439 return (EOPNOTSUPP);
440 return (0);
441 case VREG:
442 case VLNK:
443 default:
444 /*
445 * Disallow write attempts if the filesystem is
446 * mounted read-only.
447 */
448 if (vp->v_mount->mnt_flag & MNT_RDONLY)
449 return (EROFS);
450 }
451 }
452
453 return (null_bypass((struct vop_generic_args *)ap));
454}
455
456/*
457 * We handle getattr only to change the fsid.
458 */
459static int
460null_getattr(ap)
461 struct vop_getattr_args /* {
462 struct vnode *a_vp;
463 struct vattr *a_vap;
464 struct ucred *a_cred;
465 struct thread *a_td;
466 } */ *ap;
467{
468 int error;
469
470 if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
471 return (error);
472
473 ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
474 return (0);
475}
476
477/*
 478 * We handle this to disallow write access if mounted read-only.
479 */
480static int
481null_access(ap)
482 struct vop_access_args /* {
483 struct vnode *a_vp;
484 int a_mode;
485 struct ucred *a_cred;
486 struct thread *a_td;
487 } */ *ap;
488{
489 struct vnode *vp = ap->a_vp;
490 mode_t mode = ap->a_mode;
491
492 /*
493 * Disallow write attempts on read-only layers;
494 * unless the file is a socket, fifo, or a block or
495 * character device resident on the filesystem.
496 */
497 if (mode & VWRITE) {
498 switch (vp->v_type) {
499 case VDIR:
500 case VLNK:
501 case VREG:
502 if (vp->v_mount->mnt_flag & MNT_RDONLY)
503 return (EROFS);
504 break;
505 default:
506 break;
507 }
508 }
509 return (null_bypass((struct vop_generic_args *)ap));
510}
511
512/*
513 * We handle this to eliminate null FS to lower FS
 514 * file moving. We don't know why we don't allow this;
515 * possibly we should.
516 */
517static int
518null_rename(ap)
519 struct vop_rename_args /* {
520 struct vnode *a_fdvp;
521 struct vnode *a_fvp;
522 struct componentname *a_fcnp;
523 struct vnode *a_tdvp;
524 struct vnode *a_tvp;
525 struct componentname *a_tcnp;
526 } */ *ap;
527{
528 struct vnode *tdvp = ap->a_tdvp;
529 struct vnode *fvp = ap->a_fvp;
530 struct vnode *fdvp = ap->a_fdvp;
531 struct vnode *tvp = ap->a_tvp;
532
533 /* Check for cross-device rename. */
534 if ((fvp->v_mount != tdvp->v_mount) ||
535 (tvp && (fvp->v_mount != tvp->v_mount))) {
536 if (tdvp == tvp)
537 vrele(tdvp);
538 else
539 vput(tdvp);
540 if (tvp)
541 vput(tvp);
542 vrele(fdvp);
543 vrele(fvp);
544 return (EXDEV);
545 }
546
547 return (null_bypass((struct vop_generic_args *)ap));
548}
549
550/*
551 * We need to process our own vnode lock and then clear the
552 * interlock flag as it applies only to our vnode, not the
553 * vnodes below us on the stack.
554 */
555static int
556null_lock(ap)
557 struct vop_lock_args /* {
558 struct vnode *a_vp;
559 int a_flags;
560 struct thread *a_td;
561 } */ *ap;
562{
563 struct vnode *vp = ap->a_vp;
564 int flags = ap->a_flags;
565 struct thread *td = ap->a_td;
566 struct vnode *lvp;
567 int error;
568 struct null_node *nn;
569
570 if (flags & LK_THISLAYER) {
571 if (vp->v_vnlock != NULL) {
572 /* lock is shared across layers */
573 if (flags & LK_INTERLOCK)
574 mtx_unlock(&vp->v_interlock);
575 return 0;
576 }
577 error = lockmgr(&vp->v_lock, flags & ~LK_THISLAYER,
578 &vp->v_interlock, td);
579 return (error);
580 }
581
582 if (vp->v_vnlock != NULL) {
583 /*
584 * The lower level has exported a struct lock to us. Use
585 * it so that all vnodes in the stack lock and unlock
586 * simultaneously. Note: we don't DRAIN the lock as DRAIN
587 * decommissions the lock - just because our vnode is
588 * going away doesn't mean the struct lock below us is.
589 * LK_EXCLUSIVE is fine.
590 */
591 if ((flags & LK_INTERLOCK) == 0) {
592 VI_LOCK(vp);
593 flags |= LK_INTERLOCK;
594 }
595 nn = VTONULL(vp);
596 if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
597 NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
598 /*
599 * Emulate lock draining by waiting for all other
600 * pending locks to complete. Afterwards the
601 * lockmgr call might block, but no other threads
602 * will attempt to use this nullfs vnode due to the
603 * VI_XLOCK flag.
604 */
605 while (nn->null_pending_locks > 0) {
606 nn->null_drain_wakeup = 1;
607 msleep(&nn->null_pending_locks,
608 VI_MTX(vp),
609 PVFS,
610 "nuldr", 0);
611 }
612 error = lockmgr(vp->v_vnlock,
613 (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
614 VI_MTX(vp), td);
615 return error;
616 }
617 nn->null_pending_locks++;
618 error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td);
619 VI_LOCK(vp);
620 /*
621 * If we're called from vrele then v_usecount can have been 0
622 * and another process might have initiated a recycle
623 * operation. When that happens, just back out.
624 */
625 if (error == 0 && (vp->v_iflag & VI_XLOCK) != 0 &&
626 td != vp->v_vxthread) {
627 lockmgr(vp->v_vnlock,
628 (flags & ~LK_TYPE_MASK) | LK_RELEASE,
629 VI_MTX(vp), td);
630 VI_LOCK(vp);
631 error = ENOENT;
632 }
633 nn->null_pending_locks--;
634 /*
 635 * Wake up the process draining the vnode after all
 636 * pending lock attempts have failed.
637 */
638 if (nn->null_pending_locks == 0 &&
639 nn->null_drain_wakeup != 0) {
640 nn->null_drain_wakeup = 0;
641 wakeup(&nn->null_pending_locks);
642 }
643 if (error == ENOENT && (vp->v_iflag & VI_XLOCK) != 0 &&
644 vp->v_vxthread != curthread) {
645 vp->v_iflag |= VI_XWANT;
646 msleep(vp, VI_MTX(vp), PINOD, "nulbo", 0);
647 }
648 VI_UNLOCK(vp);
649 return error;
650 } else {
651 /*
652 * To prevent race conditions involving doing a lookup
653 * on "..", we have to lock the lower node, then lock our
654 * node. Most of the time it won't matter that we lock our
655 * node (as any locking would need the lower one locked
656 * first). But we can LK_DRAIN the upper lock as a step
 657 * towards decommissioning it.
658 */
659 lvp = NULLVPTOLOWERVP(vp);
660 if (lvp == NULL)
661 return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, td));
662 if (flags & LK_INTERLOCK) {
663 mtx_unlock(&vp->v_interlock);
664 flags &= ~LK_INTERLOCK;
665 }
666 if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
667 error = VOP_LOCK(lvp,
668 (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, td);
669 } else
670 error = VOP_LOCK(lvp, flags, td);
671 if (error)
672 return (error);
673 error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, td);
674 if (error)
675 VOP_UNLOCK(lvp, 0, td);
676 return (error);
677 }
678}
679
680/*
681 * We need to process our own vnode unlock and then clear the
682 * interlock flag as it applies only to our vnode, not the
683 * vnodes below us on the stack.
684 */
685static int
686null_unlock(ap)
687 struct vop_unlock_args /* {
688 struct vnode *a_vp;
689 int a_flags;
690 struct thread *a_td;
691 } */ *ap;
692{
693 struct vnode *vp = ap->a_vp;
694 int flags = ap->a_flags;
695 struct thread *td = ap->a_td;
696 struct vnode *lvp;
697
698 if (vp->v_vnlock != NULL) {
699 if (flags & LK_THISLAYER)
700 return 0; /* the lock is shared across layers */
701 flags &= ~LK_THISLAYER;
702 return (lockmgr(vp->v_vnlock, flags | LK_RELEASE,
703 &vp->v_interlock, td));
704 }
705 lvp = NULLVPTOLOWERVP(vp);
706 if (lvp == NULL)
707 return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
708 if ((flags & LK_THISLAYER) == 0) {
709 if (flags & LK_INTERLOCK) {
710 mtx_unlock(&vp->v_interlock);
711 flags &= ~LK_INTERLOCK;
712 }
713 VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, td);
714 } else
715 flags &= ~LK_THISLAYER;
716 return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
717}
718
719static int
720null_islocked(ap)
721 struct vop_islocked_args /* {
722 struct vnode *a_vp;
723 struct thread *a_td;
724 } */ *ap;
725{
726 struct vnode *vp = ap->a_vp;
727 struct thread *td = ap->a_td;
728
729 if (vp->v_vnlock != NULL)
730 return (lockstatus(vp->v_vnlock, td));
731 return (lockstatus(&vp->v_lock, td));
732}
733
734/*
 735 * There is no way to tell that someone issued a remove/rmdir operation
 736 * on the underlying filesystem. For now we just have to release lowervp
737 * as soon as possible.
738 *
739 * Note, we can't release any resources nor remove vnode from hash before
 740 * appropriate VXLOCK stuff is done because another process can find this
 741 * vnode in the hash during inactivation and may be sitting in vget() waiting
 742 * for null_inactive to unlock the vnode. Thus we do all of that in VOP_RECLAIM.
743 */
744static int
745null_inactive(ap)
746 struct vop_inactive_args /* {
747 struct vnode *a_vp;
748 struct thread *a_td;
749 } */ *ap;
750{
751 struct vnode *vp = ap->a_vp;
752 struct thread *td = ap->a_td;
753
754 VOP_UNLOCK(vp, 0, td);
755
756 /*
757 * If this is the last reference, then free up the vnode
758 * so as not to tie up the lower vnodes.
759 */
760 vrecycle(vp, NULL, td);
761
762 return (0);
763}
764
765/*
766 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
767 */
768static int
769null_reclaim(ap)
770 struct vop_reclaim_args /* {
771 struct vnode *a_vp;
772 struct thread *a_td;
773 } */ *ap;
774{
775 struct vnode *vp = ap->a_vp;
776 struct null_node *xp = VTONULL(vp);
777 struct vnode *lowervp = xp->null_lowervp;
778
779 if (lowervp) {
780 null_hashrem(xp);
781
782 vrele(lowervp);
783 vrele(lowervp);
784 }
785
786 vp->v_data = NULL;
787 vp->v_vnlock = &vp->v_lock;
788 FREE(xp, M_NULLFSNODE);
789
790 return (0);
791}
792
793static int
794null_print(ap)
795 struct vop_print_args /* {
796 struct vnode *a_vp;
797 } */ *ap;
798{
799 register struct vnode *vp = ap->a_vp;
800 printf("\tvp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
801 return (0);
802}
803
804/*
805 * Let an underlying filesystem do the work
806 */
807static int
808null_createvobject(ap)
809 struct vop_createvobject_args /* {
810 struct vnode *vp;
811 struct ucred *cred;
812 struct thread *td;
813 } */ *ap;
814{
815 struct vnode *vp = ap->a_vp;
816 struct vnode *lowervp = VTONULL(vp) ? NULLVPTOLOWERVP(vp) : NULL;
817 int error;
818
819 if (vp->v_type == VNON || lowervp == NULL)
820 return 0;
821 error = VOP_CREATEVOBJECT(lowervp, ap->a_cred, ap->a_td);
822 if (error)
823 return (error);
824 vp->v_vflag |= VV_OBJBUF;
825 return (0);
826}
827
828/*
829 * We have nothing to destroy and this operation shouldn't be bypassed.
830 */
831static int
832null_destroyvobject(ap)
833 struct vop_destroyvobject_args /* {
834 struct vnode *vp;
835 } */ *ap;
836{
837 struct vnode *vp = ap->a_vp;
838
839 vp->v_vflag &= ~VV_OBJBUF;
840 return (0);
841}
842
843static int
844null_getvobject(ap)
845 struct vop_getvobject_args /* {
846 struct vnode *vp;
847 struct vm_object **objpp;
848 } */ *ap;
849{
850 struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);
851
852 if (lvp == NULL)
853 return EINVAL;
854 return (VOP_GETVOBJECT(lvp, ap->a_objpp));
855}
856
857/*
858 * Global vfs data structures
859 */
860struct vop_vector null_vnodeops = {
861 .vop_bypass = null_bypass,
862
863 .vop_access = null_access,
864 .vop_bmap = VOP_EOPNOTSUPP,
865 .vop_createvobject = null_createvobject,
866 .vop_destroyvobject = null_destroyvobject,
867 .vop_getattr = null_getattr,
868 .vop_getvobject = null_getvobject,
869 .vop_getwritemount = vop_stdgetwritemount,
870 .vop_inactive = null_inactive,
871 .vop_islocked = null_islocked,
872 .vop_lock = null_lock,
873 .vop_lookup = null_lookup,
874 .vop_print = null_print,
875 .vop_reclaim = null_reclaim,
876 .vop_rename = null_rename,
877 .vop_setattr = null_setattr,
878 .vop_strategy = VOP_EOPNOTSUPP,
879 .vop_unlock = null_unlock,
880};
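/*
 * A sketch of how this vector gets attached (an assumption, modeled
 * on what null_nodeget() in null_subr.c is expected to do):
 *
 *	error = getnewvnode("null", mp, &null_vnodeops, &vp);
 *
 * Every VOP_*() on such a vnode then dispatches through the table
 * above, falling back to null_bypass for any operation not listed.
 */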