vfs_default.c revision 65770
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 65770 2000-09-12 09:49:08Z bp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

static int vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

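/*
 * Example (a sketch, not part of this file; the "foo" names are
 * hypothetical): a filesystem reaches this table by listing
 * vop_defaultop as its own default entry, so any VOP it does not
 * implement falls through to the handlers above:
 *
 *	vop_t **foo_vnodeop_p;
 *	static struct vnodeopv_entry_desc foo_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_lookup_desc,	(vop_t *) foo_lookup },
 *		{ &vop_read_desc,	(vop_t *) foo_read },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc foo_vnodeop_opv_desc =
 *		{ &foo_vnodeop_p, foo_vnodeop_entries };
 *	VNODEOP_SET(foo_vnodeop_opv_desc);
 */
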
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

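/*
 * Example (a sketch, not part of this file): per the flag rules above,
 * a read issued through VOP_STRATEGY typically clears the error and
 * invalid bits first, roughly:
 *
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_iocmd = BIO_READ;
 *	VOP_STRATEGY(vp, bp);
 *	error = bufwait(bp);
 */
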
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

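/*
 * Example (a sketch, not part of this file): a pathconf(2) call on a
 * vnode whose filesystem points vop_pathconf at this routine reduces
 * to something like:
 *
 *	int linkmax;
 *	error = VOP_PATHCONF(vp, _PC_LINK_MAX, &linkmax);
 */
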
/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e., vp->v_data points to the lock!
 */
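/*
 * Layout assumed by the vop_std* lock routines below (a sketch; the
 * "foonode" name is hypothetical):
 *
 *	struct foonode {
 *		struct lock f_lock;	<- must be the first field
 *		...
 *	};
 *
 * so that the cast of vp->v_data to a struct lock pointer is valid.
 */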
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return (0);

	return (lockstatus(l, ap->a_p));
}

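/*
 * Example (a sketch, not part of this file): a filesystem whose node
 * starts with a struct lock can take the whole standard set in its
 * vnodeop table:
 *
 *	{ &vop_lock_desc,	(vop_t *) vop_stdlock },
 *	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },
 *	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },
 */
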
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

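/*
 * Note (an editorial addition, not in the original): vn_pollrecord()
 * only queues the poller on the vnode; a filesystem using vop_stdpoll
 * is expected to call vn_pollevent() on the vnode when the polled
 * state changes, so that sleeping selectors are woken up.
 */
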
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
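	/*
	 * Without DEBUG_VFS_LOCKS, LK_EXCLUSIVE deliberately falls
	 * through and is granted as a shared lock.
	 */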
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.  For now the
 * lock-manager path below is compiled out ("notyet"), so this routine
 * only releases the interlock when asked to.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
		&ap->a_vp->v_interlock, ap->a_p));
}

/*
 * Return the lock status of the node.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock, ap->a_p));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

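/*
 * Note (an editorial addition, not in the original): stacking
 * filesystems such as nullfs are expected to override this and return
 * the mount point of the underlying vnode instead, so that writes are
 * accounted against the filesystem that actually performs them.
 */
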
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, p);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vop_stdcreatevobject: NULL object"));
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}

int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	if (obj == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 */
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
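/*
 * Example (a sketch, not part of this file; the "foo" names are
 * hypothetical): a filesystem plugs these defaults straight into its
 * vfsops table for the operations it does not care about, with the
 * fields in the order struct vfsops declares them in <sys/mount.h>:
 *
 *	static struct vfsops foo_vfsops = {
 *		foo_mount,
 *		vfs_stdstart,
 *		foo_unmount,
 *		vfs_stdroot,
 *		vfs_stdquotactl,
 *		foo_statfs,
 *		vfs_stdsync,
 *		vfs_stdvget,
 *		vfs_stdfhtovp,
 *		vfs_stdcheckexp,
 *		vfs_stdvptofh,
 *		vfs_stdinit,
 *		vfs_stduninit,
 *		vfs_stdextattrctl,
 *	};
 *	VFS_SET(foo_vfsops, foo, 0);
 */
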
int
vfs_stdmount (mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget (mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
	struct mount *mp;
	int cmd;
	const char *attrname;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

/* end of vfs default ops */