vfs_default.c revision 137677
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 137677 2004-11-13 22:59:52Z phk $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_getwritemount_desc,	(vop_t *) vop_stdgetwritemount },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_panic },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
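
/*
 * Illustrative sketch, not part of the original file: a filesystem hooks
 * into this machinery by declaring its own vnodeopv_entry_desc table and
 * registering it with VNODEOP_SET().  Operations it does not implement
 * are routed to the defaults above through vop_defaultop.  The "myfs"
 * names below are hypothetical.
 */
#if 0	/* example only */
vop_t **myfs_vnodeop_p;
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_lookup_desc,		(vop_t *) myfs_lookup },
	{ &vop_getattr_desc,		(vop_t *) myfs_getattr },
	{ NULL, NULL }
};
static struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };
VNODEOP_SET(myfs_vnodeop_opv_desc);
#endif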

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need a reasonable default implementation for a
 * particular operation.
 *
 * Where documentation for an operation exists, it is in the corresponding
 * VOP_<SOMETHING>(9) manual page (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, returning information about
 * filesystem limits.  Filesystems with smaller limits should override this
 * on a per-filesystem basis.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
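
/*
 * Illustrative sketch, not part of the original file: a filesystem with
 * its own limits can answer just the queries it cares about and let
 * vop_stdpathconf() handle the rest.  The "myfs" name and the 255-byte
 * name limit are hypothetical.
 */
#if 0	/* example only */
static int
myfs_pathconf(struct vop_pathconf_args *ap)
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = 255;	/* assumed on-disk name limit */
		return (0);
	default:
		return (vop_stdpathconf(ap));
	}
}
#endif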

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (vn_isdisk(vp, NULL)) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	} else {
		VM_OBJECT_LOCK(object);
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			vm_object_set_flag(object, OBJ_DISCONNECTWNT);
			msleep(object, VM_OBJECT_MTX(object), PDROP | PVM,
			    "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
		VM_OBJECT_UNLOCK(object);
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects are
 * zone-allocated (stable storage), so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * Default bmap: report a one-to-one mapping onto the vnode itself,
 * converting the logical block number from filesystem block-size units
 * into DEV_BSIZE units, with no read-ahead or read-behind runs.
 * XXX Still needs a VOP_BMAP(9) manpage.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
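
/*
 * Worked example for the identity mapping above, using assumed numbers:
 * with f_iosize == 8192 and DEV_BSIZE == 512, btodb(8192) == 16, so
 * logical block 5 is reported as block 5 * 16 == 80 in DEV_BSIZE units
 * on the same vnode, with no read-ahead or read-behind runs
 * (*a_runp == *a_runb == 0).
 */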

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous, the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * VFS default ops, used to fill the VFS function table with reasonable
 * default return values.
 */
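
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * typically fills its vfsops table with its own handlers where it has
 * them and with the vfs_std*() defaults below where it does not.  The
 * "myfs" names are hypothetical, and the C99-style initialization is
 * assumed for this era of the tree.
 */
#if 0	/* example only */
static struct vfsops myfs_vfsops = {
	.vfs_mount =	myfs_mount,
	.vfs_unmount =	myfs_unmount,
	.vfs_root =	myfs_root,
	.vfs_statfs =	myfs_statfs,
	.vfs_sync =	vfs_stdsync,
	.vfs_vget =	vfs_stdvget,
	.vfs_fhtovp =	vfs_stdfhtovp,
	.vfs_vptofh =	vfs_stdvptofh,
	.vfs_init =	vfs_stdinit,
	.vfs_uninit =	vfs_stduninit,
};
VFS_SET(myfs_vfsops, myfs, 0);
#endif
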
int
vfs_stdroot (mp, vpp, td)
	struct mount *mp;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{

	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */