/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 137043 2004-10-29 10:59:28Z phk $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
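
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * a hypothetical filesystem "foofs" would provide entries for the VOPs it
 * implements and route everything else through vop_defaultop, which
 * dispatches into the default table above.  All foofs_* names are made up.
 */
#if 0
vop_t **foofs_vnodeop_p;
static struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_defaultop },
	{ &vop_lookup_desc,	(vop_t *) foofs_lookup },
	{ &vop_read_desc,	(vop_t *) foofs_read },
	{ NULL, NULL }
};
static struct vnodeopv_desc foofs_vnodeop_opv_desc =
	{ &foofs_vnodeop_p, foofs_vnodeop_entries };
VNODEOP_SET(foofs_vnodeop_opv_desc);
#endif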

/*
 * Helper function used by some filesystems to panic on VOPs that should
 * never be reached.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists, is
 * found in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
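
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * a filesystem with tighter limits overrides the names it knows about and
 * falls back to vop_stdpathconf for the rest.  "foofs" is hypothetical.
 */
#if 0
static int
foofs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		/* Hypothetical: foofs does not support hard links. */
		*ap->a_retval = 1;
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
}
#endif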

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}
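
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * the usual caller-side pattern that lands in vop_stdlock/vop_stdunlock
 * on filesystems using the defaults.
 */
#if 0
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	/* ... operate on the exclusively locked vnode ... */
	VOP_UNLOCK(vp, 0, td);
#endif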

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
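
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * a filesystem using vop_stdpoll is expected to call vn_pollevent() when
 * the polled condition changes, waking threads that vn_pollrecord()
 * recorded above.
 */
#if 0
	/* In the filesystem, once new data becomes readable on vp: */
	vn_pollevent(vp, POLLIN | POLLRDNORM);
#endif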

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (vn_isdisk(vp, NULL)) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	} else {
		VM_OBJECT_LOCK(object);
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			msleep(object, VM_OBJECT_MTX(object), PDROP | PVM,
			    "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
		VM_OBJECT_UNLOCK(object);
	}

	KASSERT(vp->v_object != NULL, ("vop_stdcreatevobject: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * Don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
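
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * with the identity mapping above and a hypothetical f_iosize of 8192
 * (btodb(8192) == 16 with DEV_BSIZE == 512), logical block 5 maps to
 * disk block 80 on the vnode itself.
 */
#if 0
	daddr_t blkno;
	struct vnode *devvp;
	int error;

	error = VOP_BMAP(vp, 5, &devvp, &blkno, NULL, NULL);
	/* Here: devvp == vp and blkno == 5 * 16 == 80. */
#endif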

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, vpp, td)
	struct mount *mp;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{

	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */