/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 232303 2012-02-29 15:15:36Z kib $
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * a stackable-layer technique, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
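 *
 * For example (the pathnames here are purely illustrative):
 *
 *	mount_nullfs /usr/include /dev/layer/null
 *
 * after which the tree below /usr/include is also visible
 * under /dev/layer/null.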
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimal filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining the vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
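 *
 * In outline (a sketch of the logic only, not the literal code of
 * null_bypass() below):
 *
 *	map each null-node argument to its lower vnode
 *	invoke the operation on the lower layer (VCALL)
 *	restore the original null-node arguments
 *	if the operation returned a vnode, stack a new
 *	    null-node on it via null_nodeget()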
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed. Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data. Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink). Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them. Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
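 *
 * For instance (an illustrative command, with "newfs" standing
 * in for the new layer's name):
 *
 *	sed -e 's/null/newfs/g' -e 's/NULL/NEWFS/g' \
 *	    null_vnops.c > newfs_vnops.c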
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
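 *
 * For example, a layer routine could invoke the lower layer
 * directly (a sketch only; the locking and error handling a real
 * routine needs are omitted):
 *
 *	lvp = NULLVPTOLOWERVP(ap->a_vp);
 *	error = VOP_GETATTR(lvp, ap->a_vap, ap->a_cred);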
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

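/*
 * Setting debug.nullfs_bug_bypass to a non-zero value with sysctl(8)
 * makes null_bypass() log the name of every vnode operation it
 * forwards to the lower layer.
 */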
static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp or
		 * vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	int error;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call here to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(dvp->v_mount, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0)
		vp->v_object = ldvp->v_object;
	return (retval);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * We handle access checks to disallow write access if the layer is
 * mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS, to do a sillyrename if the file is in
 * use.  Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp;

	if (vrefcnt(ap->a_vp) > 1) {
		lvp = NULLVPTOLOWERVP(ap->a_vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to eliminate null FS to lower FS
 * file moving.  It is not clear why this is disallowed;
 * possibly it should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode lock
		 * from the one in lowervp to v_lock in our own vnode
		 * structure.  Handle this case by reacquiring the correct
		 * lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * There is no way to tell that someone issued a remove/rmdir operation
 * on the underlying filesystem.  For now we just have to release lowervp
 * as soon as possible.
 *
 * Note, we can't release any resources nor remove the vnode from the hash
 * before appropriate VXLOCK stuff is done, because another process can find
 * this vnode in the hash during inactivation and may be sitting in vget()
 * waiting for null_inactive to unlock the vnode.  Thus we will do all of
 * that in VOP_RECLAIM.
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	vp->v_object = NULL;

	/*
	 * If this is the last reference, then free up the vnode
	 * so as not to tie up the lower vnodes.
	 */
	vrecycle(vp, td);

	return (0);
}

/*
 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp;
	struct null_node *xp;
	struct vnode *lowervp;

	vp = ap->a_vp;
	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
	    ("Reclaiming incomplete null vnode %p", vp));

	null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;
	VI_UNLOCK(vp);
	vput(lowervp);
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
	return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return (VOP_VPTOFH(lvp, ap->a_fhp));
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct ucred *cred = ap->a_cred;
	int error, locked;

	if (vp->v_type == VDIR)
		return (vop_stdvptocnp(ap));

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	vhold(lvp);
	VOP_UNLOCK(vp, 0); /* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	vref(lvp);
	error = vn_vptocnp(&ldvp, cred, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}

	/*
	 * An exclusive lock is required by the insmntque1 call in
	 * null_nodeget().
	 */
	error = vn_lock(ldvp, LK_EXCLUSIVE);
	if (error != 0) {
		vrele(ldvp);
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}
	vref(ldvp);
	error = null_nodeget(vp->v_mount, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		VOP_UNLOCK(*dvp, 0); /* keep reference on *dvp */
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
};