vfs_default.c revision 110584
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 110584 2003-02-09 11:28:35Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	vop_nospecstrategy(struct vop_specstrategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_specstrategy_desc,	(vop_t *) vop_nospecstrategy },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
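
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * normally lists only the operations it implements and lets everything
 * else fall through to this table (directly, or via vop_defaultop
 * below).  All "myfs" names here are hypothetical.
 *
 *	static vop_t **myfs_vnodeop_p;
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_lookup_desc,	(vop_t *) myfs_lookup },
 *		{ &vop_read_desc,	(vop_t *) myfs_read },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
 *		{ &myfs_vnodeop_p, myfs_vnodeop_entries };
 *	VNODEOP_SET(myfs_vnodeop_opv_desc);
 */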

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
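
/*
 * Illustrative sketch (hypothetical table entry, not from the original
 * file): a filesystem with its own table can also route a single
 * operation through the defaults explicitly, e.g.
 *
 *	{ &vop_bmap_desc,	(vop_t *) vop_defaultop },
 *
 * which re-dispatches the call through default_vnodeop_p and, in this
 * case, ends up in vop_stdbmap.
 */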

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for these operations, where it exists, is found in
 * the VOP_<SOMETHING>(9) manpages (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup.
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	B_INVAL is assumed to already be clear prior to a write and should
 *	not be cleared manually unless you just made the buffer invalid.
 *	BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
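
/*
 * Caller-side sketch of the flag protocol described above (illustrative
 * only; vp and bp stand for a vnode and a locked buffer set up by the
 * caller): before issuing a read through VOP_STRATEGY() the error and
 * invalid state are expected to be clear.
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_flags &= ~B_INVAL;
 *	VOP_STRATEGY(vp, bp);
 */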

/*
 *	vop_nospecstrategy:
 *
 *	This shouldn't happen.  VOP_SPECSTRATEGY should always have a VCHR
 *	argument vnode, and those have a method for specstrategy over in
 *	specfs, so we only ever get here if somebody botched it.
 *	Pass the call to VOP_STRATEGY() and get on with life.
 *	The first time through we print some information useful for debugging.
 */

static int
vop_nospecstrategy (struct vop_specstrategy_args *ap)
{
	static int once;

	if (!once) {
		vprint("\nVOP_SPECSTRATEGY on non-VCHR\n", ap->a_vp);
		backtrace();
		once++;
	}
	return VOP_STRATEGY(ap->a_vp, ap->a_bp);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
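
/*
 * Illustrative override (hypothetical "myfs" and MYFS_LINK_MAX, not
 * from the original file): a filesystem with a smaller link limit can
 * answer that one name itself and delegate the rest:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_LINK_MAX) {
 *			*ap->a_retval = MYFS_LINK_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */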

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}
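
/*
 * The three functions above are normally used as a set; a filesystem
 * whose vnodes are protected by v_vnlock can simply list them in its
 * table, exactly as the default table at the top of this file does:
 *
 *	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },
 *	{ &vop_lock_desc,	(vop_t *) vop_stdlock },
 *	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },
 */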

/* Mark the vnode inactive: the default has nothing to do but unlock. */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
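
/*
 * Userland-side sketch of the POLLNVAL convention above (illustrative
 * only; "extended_ok" is a hypothetical variable): a client can probe
 * for extended poll support by requesting a non-standard event such as
 * POLLATTRIB and checking for POLLNVAL in the returned events.
 *
 *	struct pollfd pfd;
 *	int extended_ok = 1;
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLATTRIB;
 *	if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLNVAL))
 *		extended_ok = 0;
 */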

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Return whether or not the node is locked; with no locking done on
 * the underlying object, it never is.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/*
 * vop_stdbmap:
 *
 * Minimal VOP_BMAP(9) for filesystems without a useful logical-to-physical
 * block translation: report the vnode itself as the backing vnode, convert
 * the logical block number into DEV_BSIZE units using the filesystem's
 * I/O size, and report no read-ahead or read-behind.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
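
/*
 * Worked example of the mapping above (illustrative figures): with an
 * f_iosize of 16384 bytes and DEV_BSIZE (512-byte) units, btodb(16384)
 * is 32, so logical block 5 is reported as block 5 * 32 = 160 on the
 * vnode itself, with zero read-ahead and read-behind.
 */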

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with the vnode.
	 */
loop2:
	s = splbio();
	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vop_stdfsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			BUF_UNLOCK(bp);
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
			    PRIBIO + 1, "spfsyn", 0);
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/*
 * vop_stdgetpages: fall back to the generic vnode pager to read the
 * requested pages.  See VOP_GETPAGES(9).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/*
 * vop_stdputpages: fall back to the generic vnode pager to write out
 * the given pages.  See VOP_PUTPAGES(9).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return(0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{
	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return(EOPNOTSUPP);
}

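/*
 * Illustrative sketch (hypothetical "myfs"; slot order as in struct
 * vfsops of this vintage): a filesystem plugs these defaults into the
 * entry points it does not implement itself.
 *
 *	static struct vfsops myfs_vfsops = {
 *		myfs_mount,
 *		vfs_stdstart,
 *		myfs_unmount,
 *		vfs_stdroot,
 *		vfs_stdquotactl,
 *		myfs_statfs,
 *		vfs_stdsync,
 *		vfs_stdvget,
 *		vfs_stdfhtovp,
 *		myfs_checkexp,
 *		vfs_stdvptofh,
 *		vfs_stdinit,
 *		vfs_stduninit,
 *		vfs_stdextattrctl,
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */
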
/* end of vfs default ops */