/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 116469 2003-06-17 08:52:45Z tjr $
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects:  it is implemented using
 * stackable-layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining the vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed. Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data. Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink). Ideally these operations should not change the
 * lock state; they should be changed to let the caller of the
 * function unlock them. Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
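 *
 * In outline, the bypass routine's job amounts to the following
 * (a simplified sketch; the real code is null_bypass() below):
 *
 *	map null-node vnode arguments to their lower vnodes;
 *	error = VCALL(first lower vnode, same operation, args);
 *	restore the original (null-node) arguments;
 *	if (the operation returned a vnode)
 *		wrap it in a new null-node via null_nodeget();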
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
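 * For example, a hypothetical "newfs" layer might be started with
 * something like (names purely illustrative):
 *
 *	sed -e 's/null/newfs/g' -e 's/NULL/NEWFS/g' \
 *	    null_vnops.c > newfs_vnops.c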
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.  (A minimal
 * sketch of both techniques follows this comment.)
 *
 */
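
/*
 * Purely for illustration, a minimal sketch of the two techniques
 * described above.  The operation ("foo") and its argument structure
 * are hypothetical, so the sketch is not compiled in.
 */
#if 0
static int
null_foo(ap)
	struct vop_foo_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	int error;

	/*
	 * Technique 1: let the bypass routine map a_vp to the lower
	 * vnode, invoke the same operation there, and restore the
	 * argument afterwards (cf. null_getattr() below).
	 */
	error = null_bypass((struct vop_generic_args *)ap);

	/*
	 * Technique 2: map the vnode argument by hand and invoke an
	 * arbitrary operation directly on the lower layer
	 * (cf. null_lookup() below).
	 */
	error = VOP_FOO(NULLVPTOLOWERVP(ap->a_vp));

	return (error);
}
#endif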

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");

static int	null_access(struct vop_access_args *ap);
static int	null_createvobject(struct vop_createvobject_args *ap);
static int	null_destroyvobject(struct vop_destroyvobject_args *ap);
static int	null_getattr(struct vop_getattr_args *ap);
static int	null_getvobject(struct vop_getvobject_args *ap);
static int	null_inactive(struct vop_inactive_args *ap);
static int	null_islocked(struct vop_islocked_args *ap);
static int	null_lock(struct vop_lock_args *ap);
static int	null_lookup(struct vop_lookup_args *ap);
static int	null_open(struct vop_open_args *ap);
static int	null_print(struct vop_print_args *ap);
static int	null_reclaim(struct vop_reclaim_args *ap);
static int	null_rename(struct vop_rename_args *ap);
static int	null_setattr(struct vop_setattr_args *ap);
static int	null_unlock(struct vop_unlock_args *ap);

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp
		 * or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != null_vnodeop_p)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), LK_THISLAYER, curthread);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}
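
/*
 * Note that the vnodeop table at the end of this file installs
 * null_bypass() as the vop_default_desc entry, so any operation
 * without an explicit handler in this file is routed through it.
 */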

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct thread *td = cnp->cn_thread;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	int error;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call here to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * Rely only on the PDIRUNLOCK flag, which should be carefully
	 * tracked by the underlying filesystem.
	 */
	if (cnp->cn_flags & PDIRUNLOCK)
		VOP_UNLOCK(dvp, LK_THISLAYER, td);
	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(dvp->v_mount, lvp, &vp);
			if (error) {
				/* XXX Cleanup needed... */
				panic("null_nodeget failed");
			}
			*ap->a_vpp = vp;
		}
	}
	return (error);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(ap)
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * We handle access to disallow write attempts if the layer is
 * mounted read-only.
 */
static int
null_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 */
static int
null_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);

	if ((vp->v_mount->mnt_flag & MNT_NODEV) &&
	    (lvp->v_type == VBLK || lvp->v_type == VCHR))
		return (ENXIO);

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle this to prevent moving files between the null FS and the
 * lower FS. It is not clear why this is disallowed; possibly it
 * should be permitted.
 */
static int
null_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct thread *td = ap->a_td;
	struct vnode *lvp;
	int error;
	struct null_node *nn;

	if (flags & LK_THISLAYER) {
		if (vp->v_vnlock != NULL) {
			/* lock is shared across layers */
			if (flags & LK_INTERLOCK)
				mtx_unlock(&vp->v_interlock);
			return (0);
		}
		error = lockmgr(&vp->v_lock, flags & ~LK_THISLAYER,
		    &vp->v_interlock, td);
		return (error);
	}

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us. Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously. Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		if ((flags & LK_INTERLOCK) == 0) {
			VI_LOCK(vp);
			flags |= LK_INTERLOCK;
		}
		nn = VTONULL(vp);
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
			/*
			 * Emulate lock draining by waiting for all other
			 * pending locks to complete.  Afterwards the
			 * lockmgr call might block, but no other threads
			 * will attempt to use this nullfs vnode due to the
			 * VI_XLOCK flag.
			 */
			while (nn->null_pending_locks > 0) {
				nn->null_drain_wakeup = 1;
				msleep(&nn->null_pending_locks,
				       VI_MTX(vp),
				       PVFS,
				       "nuldr", 0);
			}
			error = lockmgr(vp->v_vnlock,
					(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
					VI_MTX(vp), td);
			return (error);
		}
		nn->null_pending_locks++;
		error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td);
		VI_LOCK(vp);
		/*
		 * If we're called from vrele then v_usecount may have been 0
		 * and another process might have initiated a recycle
		 * operation.  When that happens, just back out.
		 */
		if (error == 0 && (vp->v_iflag & VI_XLOCK) != 0 &&
		    td != vp->v_vxproc) {
			lockmgr(vp->v_vnlock,
				(flags & ~LK_TYPE_MASK) | LK_RELEASE,
				VI_MTX(vp), td);
			VI_LOCK(vp);
			error = ENOENT;
		}
		nn->null_pending_locks--;
		/*
		 * Wake up the process draining the vnode after all
		 * pending lock attempts have failed.
		 */
		if (nn->null_pending_locks == 0 &&
		    nn->null_drain_wakeup != 0) {
			nn->null_drain_wakeup = 0;
			wakeup(&nn->null_pending_locks);
		}
		if (error == ENOENT && (vp->v_iflag & VI_XLOCK) != 0 &&
		    vp->v_vxproc != curthread) {
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD, "nulbo", 0);
		}
		VI_UNLOCK(vp);
		return (error);
	} else {
		/*
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node. Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first). But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lvp = NULLVPTOLOWERVP(vp);
		if (lvp == NULL)
			return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, td));
		if (flags & LK_INTERLOCK) {
			mtx_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lvp,
				(flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, td);
		} else
			error = VOP_LOCK(lvp, flags, td);
		if (error)
			return (error);
		error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, td);
		if (error)
			VOP_UNLOCK(lvp, 0, td);
		return (error);
	}
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct thread *td = ap->a_td;
	struct vnode *lvp;

	if (vp->v_vnlock != NULL) {
		if (flags & LK_THISLAYER)
			return (0);	/* the lock is shared across layers */
		flags &= ~LK_THISLAYER;
		return (lockmgr(vp->v_vnlock, flags | LK_RELEASE,
			&vp->v_interlock, td));
	}
	lvp = NULLVPTOLOWERVP(vp);
	if (lvp == NULL)
		return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
	if ((flags & LK_THISLAYER) == 0) {
		if (flags & LK_INTERLOCK) {
			mtx_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, td);
	} else
		flags &= ~LK_THISLAYER;
	return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
}

static int
null_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	if (vp->v_vnlock != NULL)
		return (lockstatus(vp->v_vnlock, td));
	return (lockstatus(&vp->v_lock, td));
}

/*
 * There is no way to tell that someone issued a remove/rmdir operation
 * on the underlying filesystem. For now we just have to release lowervp
 * as soon as possible.
 *
 * Note, we can't release any resources nor remove the vnode from the hash
 * before the appropriate VXLOCK stuff is done, because another process can
 * find this vnode in the hash during inactivation and may be sitting in
 * vget() waiting for null_inactive to unlock the vnode. Thus we will do
 * all of that in VOP_RECLAIM.
 */
static int
null_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	VOP_UNLOCK(vp, 0, td);

	/*
	 * If this is the last reference, then free up the vnode
	 * so as not to tie up the lower vnodes.
	 */
	vrecycle(vp, NULL, td);

	return (0);
}

/*
 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
 */
static int
null_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	if (lowervp) {
		null_hashrem(xp);

		vrele(lowervp);
		vrele(lowervp);
	}

	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	FREE(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}

/*
 * Let an underlying filesystem do the work.
 */
static int
null_createvobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lowervp = VTONULL(vp) ? NULLVPTOLOWERVP(vp) : NULL;
	int error;

	if (vp->v_type == VNON || lowervp == NULL)
		return (0);
	error = VOP_CREATEVOBJECT(lowervp, ap->a_cred, ap->a_td);
	if (error)
		return (error);
	vp->v_vflag |= VV_OBJBUF;
	return (0);
}

/*
 * We have nothing to destroy and this operation shouldn't be bypassed.
 */
static int
null_destroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	vp->v_vflag &= ~VV_OBJBUF;
	return (0);
}

static int
null_getvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);

	if (lvp == NULL)
		return (EINVAL);
	return (VOP_GETVOBJECT(lvp, ap->a_objpp));
}

/*
 * Global vfs data structures
 */
vop_t **null_vnodeop_p;
static struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) null_bypass },

	{ &vop_access_desc,		(vop_t *) null_access },
	{ &vop_bmap_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_createvobject_desc,	(vop_t *) null_createvobject },
	{ &vop_destroyvobject_desc,	(vop_t *) null_destroyvobject },
	{ &vop_getattr_desc,		(vop_t *) null_getattr },
	{ &vop_getvobject_desc,		(vop_t *) null_getvobject },
	{ &vop_getwritemount_desc,	(vop_t *) vop_stdgetwritemount },
	{ &vop_inactive_desc,		(vop_t *) null_inactive },
	{ &vop_islocked_desc,		(vop_t *) null_islocked },
	{ &vop_lock_desc,		(vop_t *) null_lock },
	{ &vop_lookup_desc,		(vop_t *) null_lookup },
	{ &vop_open_desc,		(vop_t *) null_open },
	{ &vop_print_desc,		(vop_t *) null_print },
	{ &vop_reclaim_desc,		(vop_t *) null_reclaim },
	{ &vop_rename_desc,		(vop_t *) null_rename },
	{ &vop_setattr_desc,		(vop_t *) null_setattr },
	{ &vop_strategy_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_unlock_desc,		(vop_t *) null_unlock },
	{ NULL, NULL }
};
static struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };

VNODEOP_SET(null_vnodeop_opv_desc);