/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	$Id: null_vnops.c,v 1.11.2000.1 1996/09/17 14:32:31 peter Exp $
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 22521 1997-02-10 02:22:35Z dyson $
 */

/*
 * Null Layer
 *
 * (See mount_null(8) for more information.)
 *
 * The null layer duplicates a portion of the file system
 * name space under a new name.  In this respect, it is
 * similar to the loopback file system.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its null-nodes stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback file system does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new file system layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
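 *
 * For example (pathnames here are only illustrative), the command
 *
 *	mount_null /usr/include /dev/layer/null
 *
 * makes the tree rooted at /usr/include appear a second time
 * under /dev/layer/null.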
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
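 *
 * In outline, the bypass does roughly the following (a simplified
 * sketch; the real routine, null_bypass() below, also handles
 * multiple vnode arguments and reference-count side effects):
 *
 *	replace null-node arguments by their lower vnodes;
 *	error = VCALL(lower_vp, descp->vdesc_offset, ap);
 *	restore the original (upper) arguments;
 *	if (!error && the operation returned a vnode in *vpp)
 *		error = null_node_create(mp, *vpp, vpp);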
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
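 *
 * Schematically, the lookup of "sys" above proceeds roughly as
 * follows (vnode names here are illustrative):
 *
 *	VOP_LOOKUP(null_rootvp, &vp, cnp)
 *	  -> null_bypass() maps null_rootvp to the lower root vnode
 *	  -> VOP_LOOKUP(lower_rootvp, &lowervp, cnp) finds the UFS "sys"
 *	  -> null_node_create(mp, lowervp, &vp) stacks a null-node on it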
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
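 *
 * For instance (an illustrative one-liner only; the output should
 * still be checked by hand):
 *
 *	sed -e 's/null_/newlayer_/g' null_vnops.c > newlayer_vnops.c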
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
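 *
 * As a sketch of the second technique, a layer might fetch the
 * lower vnode's attributes directly (4.4BSD-style VOP_GETATTR
 * arguments; the argument mapping is done by hand):
 *
 *	lowervp = NULLVPTOLOWERVP(vp);
 *	error = VOP_GETATTR(lowervp, vap, cred, p);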
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/nullfs/null.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");
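
/*
 * Bypass tracing can then be toggled at run time with sysctl(8),
 * e.g. "sysctl -w debug.nullfs_bug_bypass=1".
 */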

int		null_bypass __P((struct vop_generic_args *ap));
static int	null_access __P((struct vop_access_args *ap));
static int	null_bwrite __P((struct vop_bwrite_args *ap));
static int	null_getattr __P((struct vop_getattr_args *ap));
static int	null_inactive __P((struct vop_inactive_args *ap));
static int	null_lookup __P((struct vop_lookup_args *ap));
static int	null_print __P((struct vop_print_args *ap));
static int	null_reclaim __P((struct vop_reclaim_args *ap));
static int	null_strategy __P((struct vop_strategy_args *ap));

/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *    Define SAFETY to include some error checking code.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This routine makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	register struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map.");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != null_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & 1)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	int flags = cnp->cn_flags;
	struct vop_lock_args lockargs;
	struct vop_unlock_args unlockargs;
	struct vnode *dvp, *vp;
	int error;

	if ((flags & ISLASTCN) && (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	error = null_bypass((struct vop_generic_args *)ap);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;
	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us.  We could figure this out
	 * based on the error return and the LASTCN, LOCKPARENT, and
	 * LOCKLEAF flags.  However, it is more expedient to just find
	 * out the state of the lower level vnodes and set ours to the
	 * same state.
	 */
	dvp = ap->a_dvp;
	vp = *ap->a_vpp;
	if (dvp == vp)
		return (error);
	if (!VOP_ISLOCKED(dvp)) {
		unlockargs.a_vp = dvp;
		unlockargs.a_flags = 0;
		unlockargs.a_p = p;
		vop_nounlock(&unlockargs);
	}
	if (vp != NULL && VOP_ISLOCKED(vp)) {
		lockargs.a_vp = vp;
		lockargs.a_flags = LK_SHARED;
		lockargs.a_p = p;
		vop_nolock(&lockargs);
	}
	return (error);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
int
null_setattr(ap)
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.ts_sec != VNOVAL ||
	    vap->va_mtime.ts_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
null_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
null_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	vop_nolock(ap);
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
null_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	vop_nounlock(ap);
	ap->a_flags &= ~LK_INTERLOCK;
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our null_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

static int
null_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->null_lowervp = NULL;
	LIST_REMOVE(xp, null_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

static int
null_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	printf("\ttag VT_NULLFS, vp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}

/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
static int
null_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
static int
null_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = NULLVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(bp);

	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
vop_t **null_vnodeop_p;
static struct vnodeopv_entry_desc null_vnodeop_entries[] = {
	{ &vop_default_desc, (vop_t *)null_bypass },

	{ &vop_lookup_desc, (vop_t *)null_lookup },
	{ &vop_setattr_desc, (vop_t *)null_setattr },
	{ &vop_getattr_desc, (vop_t *)null_getattr },
	{ &vop_access_desc, (vop_t *)null_access },
	{ &vop_lock_desc, (vop_t *)null_lock },
	{ &vop_unlock_desc, (vop_t *)null_unlock },
	{ &vop_inactive_desc, (vop_t *)null_inactive },
	{ &vop_reclaim_desc, (vop_t *)null_reclaim },
	{ &vop_print_desc, (vop_t *)null_print },

	{ &vop_strategy_desc, (vop_t *)null_strategy },
	{ &vop_bwrite_desc, (vop_t *)null_bwrite },

	{ NULL, NULL }
};
static struct vnodeopv_desc null_vnodeop_opv_desc =
	{ &null_vnodeop_p, null_vnodeop_entries };

VNODEOP_SET(null_vnodeop_opv_desc);