/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 137297 2004-11-06 05:33:02Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_eopnotsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
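
/*
 * Illustrative sketch (not code from this file): a filesystem that wants
 * these defaults for every VOP it does not implement can install
 * vop_defaultop as its vop_default_desc entry.  "myfs" and myfs_read
 * below are hypothetical names used only to show the hookup:
 *
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_read_desc,	(vop_t *) myfs_read },
 *		{ NULL, NULL }
 *	};
 */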

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists at
 * all, lives in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup.
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
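
/*
 * For reference, a rough caller-side sketch of the rule stated above
 * (illustrative only, not code from this file): a read path is expected
 * to reset the buffer's error state before handing it to the strategy
 * routine, along the lines of:
 *
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_iocmd = BIO_READ;
 *	VOP_STRATEGY(vp, bp);
 */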

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
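
/*
 * Illustrative override (not code from this file; "myfs" and
 * MYFS_LINK_MAX are hypothetical names): a filesystem with tighter
 * limits handles the names it cares about and punts the rest to the
 * default, e.g.:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *		if (ap->a_name == _PC_LINK_MAX) {
 *			*ap->a_retval = MYFS_LINK_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */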

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}
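
/*
 * For reference (illustrative, not code from this file): callers do not
 * normally invoke these directly; they go through vn_lock()/VOP_UNLOCK(),
 * which dispatch here for filesystems using the defaults, e.g.:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	... operate on the locked vnode ...
 *	VOP_UNLOCK(vp, 0, td);
 */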

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (vn_isdisk(vp, NULL)) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	} else {
		VM_OBJECT_LOCK(object);
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			vm_object_set_flag(object, OBJ_DISCONNECTWNT);
			msleep(object, VM_OBJECT_MTX(object), PDROP | PVM,
			    "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
		VM_OBJECT_UNLOCK(object);
	}

	KASSERT(vp->v_object != NULL, ("vop_stdcreatevobject: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * Don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}
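
/*
 * For reference (illustrative, not code from this file): a caller such
 * as the syncer can cheaply probe for a potentially dirty object along
 * the lines of:
 *
 *	struct vm_object *obj;
 *
 *	if (VOP_GETVOBJECT(vp, &obj) == 0 &&
 *	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0)
 *		... go heavy-weight and flush ...
 */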

/*
 * vop_stdbmap:
 *
 * Minimal VOP_BMAP: report the vnode itself as the backing "device",
 * convert the logical block number to DEV_BSIZE units by scaling with
 * the filesystem's I/O block size, and offer no clustering (zero-length
 * runs).  XXX Still needs a proper VOP_BMAP(9) manpage writeup.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
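
/*
 * Worked example for the conversion above (illustrative numbers): with
 * f_iosize = 8192 and DEV_BSIZE = 512, btodb(8192) == 16, so logical
 * block 5 maps to *ap->a_bnp == 80.
 */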

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/*
 * Default VOP_GETPAGES: delegate to the generic vnode-pager input
 * routine.  XXX More detail belongs in the VOP_GETPAGES(9) manpage.
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/*
 * Default VOP_PUTPAGES: delegate to the generic vnode-pager output
 * routine.  XXX More detail belongs in the VOP_PUTPAGES(9) manpage.
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

/*
 * VFS default ops: used to fill a filesystem's vfsops table with
 * reasonable default return values.
 */
int
vfs_stdroot(mp, vpp, td)
	struct mount *mp;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs(mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstart(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{

	return (0);
}

int
vfs_stdquotactl(mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit(vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit(vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

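/*
 * Illustrative hookup (not code from this file; "myfs" and myfs_mount
 * are hypothetical names): a filesystem picks up these defaults by
 * pointing the entries of its struct vfsops at them, e.g.:
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_root =	vfs_stdroot,
 *		.vfs_statfs =	vfs_stdstatfs,
 *		.vfs_sync =	vfs_stdsync,
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */
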
/* end of vfs default ops */