/* vfs_default.c — FreeBSD revision 83366 */
130489Sphk/*
230489Sphk * Copyright (c) 1989, 1993
330489Sphk *	The Regents of the University of California.  All rights reserved.
430489Sphk *
530489Sphk * This code is derived from software contributed
630489Sphk * to Berkeley by John Heidemann of the UCLA Ficus project.
730489Sphk *
830489Sphk * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
930489Sphk *
1030489Sphk * Redistribution and use in source and binary forms, with or without
1130489Sphk * modification, are permitted provided that the following conditions
1230489Sphk * are met:
1330489Sphk * 1. Redistributions of source code must retain the above copyright
1430489Sphk *    notice, this list of conditions and the following disclaimer.
1530489Sphk * 2. Redistributions in binary form must reproduce the above copyright
1630489Sphk *    notice, this list of conditions and the following disclaimer in the
1730489Sphk *    documentation and/or other materials provided with the distribution.
1830489Sphk * 3. All advertising materials mentioning features or use of this software
1930489Sphk *    must display the following acknowledgement:
2030489Sphk *	This product includes software developed by the University of
2130489Sphk *	California, Berkeley and its contributors.
2230489Sphk * 4. Neither the name of the University nor the names of its contributors
2330489Sphk *    may be used to endorse or promote products derived from this software
2430489Sphk *    without specific prior written permission.
2530489Sphk *
2630489Sphk * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2730489Sphk * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2830489Sphk * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2930489Sphk * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
3030489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3130489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3230489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3330489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3430489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3530489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3630489Sphk * SUCH DAMAGE.
3730489Sphk *
3847989Sgpalmer *
3950477Speter * $FreeBSD: head/sys/kern/vfs_default.c 83366 2001-09-12 08:38:13Z julian $
4030489Sphk */
4130489Sphk
4230489Sphk#include <sys/param.h>
4330489Sphk#include <sys/systm.h>
4460041Sphk#include <sys/bio.h>
4544272Sbde#include <sys/buf.h>
4665770Sbp#include <sys/conf.h>
4730489Sphk#include <sys/kernel.h>
4831561Sbde#include <sys/lock.h>
4930743Sphk#include <sys/malloc.h>
5051068Salfred#include <sys/mount.h>
5167365Sjhb#include <sys/mutex.h>
5230492Sphk#include <sys/unistd.h>
5330489Sphk#include <sys/vnode.h>
5430743Sphk#include <sys/poll.h>
5530489Sphk
5665770Sbp#include <machine/limits.h>
5765770Sbp
5865770Sbp#include <vm/vm.h>
5965770Sbp#include <vm/vm_object.h>
6065770Sbp#include <vm/vm_extern.h>
6165770Sbp#include <vm/pmap.h>
6265770Sbp#include <vm/vm_map.h>
6365770Sbp#include <vm/vm_page.h>
6465770Sbp#include <vm/vm_pager.h>
6565770Sbp#include <vm/vnode_pager.h>
6665770Sbp#include <vm/vm_zone.h>
6765770Sbp
/* Prototypes for the file-local default implementations defined below. */
static int	vop_nolookup __P((struct vop_lookup_args *));
static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

/*
 * Operations vector pointer; set up when default_vnodeop_opv_desc is
 * registered via VNODEOP_SET below.
 */
vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	/* Catch-all: unlisted operations fail with EOPNOTSUPP. */
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
11130489Sphk
/*
 * Generic catch-all: fail any vnode operation with EOPNOTSUPP.
 * The argument is ignored; it is accepted only to match the generic
 * VOP calling convention.
 */
int
vop_eopnotsupp(ap)
	struct vop_generic_args *ap;
{

	/* printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); */
	return (EOPNOTSUPP);
}
12130489Sphk
/*
 * Default VOP that fails with EBADF; the argument is ignored.
 */
int
vop_ebadf(ap)
	struct vop_generic_args *ap;
{

	return (EBADF);
}
12830492Sphk
/*
 * Default VOP that fails with ENOTTY; the argument is ignored.
 */
int
vop_enotty(ap)
	struct vop_generic_args *ap;
{

	return (ENOTTY);
}
13530492Sphk
/*
 * Default VOP that fails with EINVAL; the argument is ignored.
 */
int
vop_einval(ap)
	struct vop_generic_args *ap;
{

	return (EINVAL);
}
14230492Sphk
/*
 * Default VOP that succeeds without doing anything; argument ignored.
 */
int
vop_null(ap)
	struct vop_generic_args *ap;
{

	return (0);
}
14930492Sphk
15030492Sphkint
15130492Sphkvop_defaultop(struct vop_generic_args *ap)
15230492Sphk{
15330492Sphk
15430489Sphk	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
15530489Sphk}
15630489Sphk
15741056Speterint
15841056Spetervop_panic(struct vop_generic_args *ap)
15941056Speter{
16041056Speter
16172594Sbde	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
16241056Speter}
16341056Speter
16472594Sbdestatic int
16572594Sbdevop_nolookup(ap)
16672594Sbde	struct vop_lookup_args /* {
16772594Sbde		struct vnode *a_dvp;
16872594Sbde		struct vnode **a_vpp;
16972594Sbde		struct componentname *a_cnp;
17072594Sbde	} */ *ap;
17172594Sbde{
17272594Sbde
17372594Sbde	*ap->a_vpp = NULL;
17472594Sbde	return (ENOTDIR);
17572594Sbde}
17672594Sbde
17746349Salc/*
17846349Salc *	vop_nostrategy:
17946349Salc *
18046349Salc *	Strategy routine for VFS devices that have none.
18146349Salc *
18258934Sphk *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
18358345Sphk *	routine.  Typically this is done for a BIO_READ strategy call.
18458345Sphk *	Typically B_INVAL is assumed to already be clear prior to a write
18558345Sphk *	and should not be cleared manually unless you just made the buffer
18658934Sphk *	invalid.  BIO_ERROR should be cleared either way.
18746349Salc */
18846349Salc
18930489Sphkstatic int
19030489Sphkvop_nostrategy (struct vop_strategy_args *ap)
19130489Sphk{
19230489Sphk	printf("No strategy for buffer at %p\n", ap->a_bp);
19337384Sjulian	vprint("", ap->a_vp);
19430489Sphk	vprint("", ap->a_bp->b_vp);
19558934Sphk	ap->a_bp->b_ioflags |= BIO_ERROR;
19630489Sphk	ap->a_bp->b_error = EOPNOTSUPP;
19759249Sphk	bufdone(ap->a_bp);
19830489Sphk	return (EOPNOTSUPP);
19930489Sphk}
20030492Sphk
20130492Sphkint
20230492Sphkvop_stdpathconf(ap)
20330492Sphk	struct vop_pathconf_args /* {
20430492Sphk	struct vnode *a_vp;
20530492Sphk	int a_name;
20630492Sphk	int *a_retval;
20730492Sphk	} */ *ap;
20830492Sphk{
20930492Sphk
21030492Sphk	switch (ap->a_name) {
21130492Sphk		case _PC_LINK_MAX:
21230492Sphk			*ap->a_retval = LINK_MAX;
21330492Sphk			return (0);
21430492Sphk		case _PC_MAX_CANON:
21530492Sphk			*ap->a_retval = MAX_CANON;
21630492Sphk			return (0);
21730492Sphk		case _PC_MAX_INPUT:
21830492Sphk			*ap->a_retval = MAX_INPUT;
21930492Sphk			return (0);
22030492Sphk		case _PC_PIPE_BUF:
22130492Sphk			*ap->a_retval = PIPE_BUF;
22230492Sphk			return (0);
22330492Sphk		case _PC_CHOWN_RESTRICTED:
22430492Sphk			*ap->a_retval = 1;
22530492Sphk			return (0);
22630492Sphk		case _PC_VDISABLE:
22730492Sphk			*ap->a_retval = _POSIX_VDISABLE;
22830492Sphk			return (0);
22930492Sphk		default:
23030492Sphk			return (EINVAL);
23130492Sphk	}
23230492Sphk	/* NOTREACHED */
23330492Sphk}
23430513Sphk
/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, ie: vp->v_data points to the lock!
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	/* Delegate to the generic lock manager using the vnode's own lock. */
	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_td));
#else
	/*
	 * Debug variant additionally records the lock site (filename and
	 * line stashed in the vnode) for lock diagnostics.
	 */
	return (debuglockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock,
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}
25830513Sphk
25930513Sphkint
26030513Sphkvop_stdunlock(ap)
26130513Sphk	struct vop_unlock_args /* {
26230513Sphk		struct vnode *a_vp;
26330513Sphk		int a_flags;
26483366Sjulian		struct thread *a_td;
26530513Sphk	} */ *ap;
26630513Sphk{
26766355Sbp	struct vnode *vp = ap->a_vp;
26830513Sphk
26966355Sbp	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock,
27083366Sjulian	    ap->a_td));
27130513Sphk}
27230513Sphk
27330513Sphkint
27430513Sphkvop_stdislocked(ap)
27530513Sphk	struct vop_islocked_args /* {
27630513Sphk		struct vnode *a_vp;
27783366Sjulian		struct thread *a_td;
27830513Sphk	} */ *ap;
27930513Sphk{
28030513Sphk
28183366Sjulian	return (lockstatus(&ap->a_vp->v_lock, ap->a_td));
28230513Sphk}
28330513Sphk
28464819Sphkint
28564819Sphkvop_stdinactive(ap)
28664819Sphk	struct vop_inactive_args /* {
28764819Sphk		struct vnode *a_vp;
28883366Sjulian		struct thread *a_td;
28964819Sphk	} */ *ap;
29064819Sphk{
29164819Sphk
29283366Sjulian	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
29364819Sphk	return (0);
29464819Sphk}
29564819Sphk
29630743Sphk/*
29730743Sphk * Return true for select/poll.
29830743Sphk */
29930743Sphkint
30030743Sphkvop_nopoll(ap)
30130743Sphk	struct vop_poll_args /* {
30230743Sphk		struct vnode *a_vp;
30330743Sphk		int  a_events;
30430743Sphk		struct ucred *a_cred;
30583366Sjulian		struct thread *a_td;
30630743Sphk	} */ *ap;
30730743Sphk{
30830743Sphk	/*
30931727Swollman	 * Return true for read/write.  If the user asked for something
31031727Swollman	 * special, return POLLNVAL, so that clients have a way of
31131727Swollman	 * determining reliably whether or not the extended
31231727Swollman	 * functionality is present without hard-coding knowledge
31331727Swollman	 * of specific filesystem implementations.
31430743Sphk	 */
31531727Swollman	if (ap->a_events & ~POLLSTANDARD)
31631727Swollman		return (POLLNVAL);
31731727Swollman
31830743Sphk	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
31930743Sphk}
32030743Sphk
32131727Swollman/*
32231727Swollman * Implement poll for local filesystems that support it.
32331727Swollman */
32430743Sphkint
32531727Swollmanvop_stdpoll(ap)
32631727Swollman	struct vop_poll_args /* {
32731727Swollman		struct vnode *a_vp;
32831727Swollman		int  a_events;
32931727Swollman		struct ucred *a_cred;
33083366Sjulian		struct thread *a_td;
33131727Swollman	} */ *ap;
33231727Swollman{
33376578Sjlemon	if (ap->a_events & ~POLLSTANDARD)
33483366Sjulian		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
33576578Sjlemon	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
33631727Swollman}
33731727Swollman
/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/*
		 * FALLTHROUGH (when DEBUG_VFS_LOCKS is not defined): an
		 * exclusive request is granted as the minimal shared lock.
		 */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		/* Nothing to convert between for a shared-only lock. */
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td));
#else
	/* Debug variant records the lock site stored in the vnode. */
	return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}
40230743Sphk
/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 *
 * NOTE: only the #else branch below is currently compiled; the lockmgr
 * based variant is disabled under "#ifdef notyet".
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		/* Both map onto the minimal shared lock. */
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		mtx_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}
46430743Sphk
46530743Sphk/*
46630743Sphk * Do the inverse of vop_nolock, handling the interlock in a compatible way.
46730743Sphk */
46830743Sphkint
46930743Sphkvop_nounlock(ap)
47030743Sphk	struct vop_unlock_args /* {
47130743Sphk		struct vnode *a_vp;
47230743Sphk		int a_flags;
47383366Sjulian		struct thread *a_td;
47430743Sphk	} */ *ap;
47530743Sphk{
47630743Sphk
47766355Sbp	/*
47866355Sbp	 * Since we are not using the lock manager, we must clear
47966355Sbp	 * the interlock here.
48066355Sbp	 */
48166355Sbp	if (ap->a_flags & LK_INTERLOCK)
48272200Sbmilekic		mtx_unlock(&ap->a_vp->v_interlock);
48366355Sbp	return (0);
48430743Sphk}
48530743Sphk
/*
 * Lock-state query for vnodes that do no locking: always reports
 * "not locked".
 */
int
vop_noislocked(struct vop_islocked_args *ap)
{

	return (0);
}
49930743Sphk
50062976Smckusick/*
50162976Smckusick * Return our mount point, as we will take charge of the writes.
50262976Smckusick */
50362976Smckusickint
50462976Smckusickvop_stdgetwritemount(ap)
50562976Smckusick	struct vop_getwritemount_args /* {
50662976Smckusick		struct vnode *a_vp;
50762976Smckusick		struct mount **a_mpp;
50862976Smckusick	} */ *ap;
50962976Smckusick{
51062976Smckusick
51162976Smckusick	*(ap->a_mpp) = ap->a_vp->v_mount;
51262976Smckusick	return (0);
51362976Smckusick}
51462976Smckusick
/*
 * Create the VM object backing this vnode so its data can be cached and
 * mapped through the VM system.  Returns 0 without doing anything for
 * vnodes that are neither disks nor VMIO-capable.
 */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	/* Nothing to do for vnodes that cannot be paged through VM. */
	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			/* Size the new object from the file's attributes. */
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vp->v_usecount--;
	} else {
		if (object->flags & OBJ_DEAD) {
			/*
			 * An existing object is going away; drop the vnode
			 * lock, wait for it to die, then start over.
			 */
			VOP_UNLOCK(vp, 0, td);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	/* Mark the vnode as using VM-backed buffers. */
	vp->v_flag |= VOBJBUF;

retn:
	return (error);
}
57265770Sbp
/*
 * Tear down the VM object associated with this vnode (the counterpart
 * of vop_stdcreatevobject).  Safe to call when no object exists.
 */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	/* No object was ever created; nothing to destroy. */
	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 */
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}
60365770Sbp
60465770Sbpint
60565770Sbpvop_stdgetvobject(ap)
60665770Sbp	struct vop_getvobject_args /* {
60765770Sbp		struct vnode *vp;
60865770Sbp		struct vm_object **objpp;
60965770Sbp	} */ *ap;
61065770Sbp{
61165770Sbp	struct vnode *vp = ap->a_vp;
61265770Sbp	struct vm_object **objpp = ap->a_objpp;
61365770Sbp
61465770Sbp	if (objpp)
61565770Sbp		*objpp = vp->v_object;
61665770Sbp	return (vp->v_object ? 0 : EINVAL);
61765770Sbp}
61865770Sbp
61976131Sphkint
62076131Sphkvop_stdbmap(ap)
62176131Sphk	struct vop_bmap_args /* {
62276131Sphk		struct vnode *a_vp;
62376131Sphk		daddr_t  a_bn;
62476131Sphk		struct vnode **a_vpp;
62576131Sphk		daddr_t *a_bnp;
62676131Sphk		int *a_runp;
62776131Sphk		int *a_runb;
62876131Sphk	} */ *ap;
62976131Sphk{
63076131Sphk
63176131Sphk	if (ap->a_vpp != NULL)
63276131Sphk		*ap->a_vpp = ap->a_vp;
63376131Sphk	if (ap->a_bnp != NULL)
63476131Sphk		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
63576131Sphk	if (ap->a_runp != NULL)
63676131Sphk		*ap->a_runp = 0;
63776131Sphk	if (ap->a_runb != NULL)
63876131Sphk		*ap->a_runb = 0;
63976131Sphk	return (0);
64076131Sphk}
64176131Sphk
64276167Sphkint
64376167Sphkvop_stdgetpages(ap)
64476167Sphk	struct vop_getpages_args /* {
64576167Sphk		struct vnode *a_vp;
64676167Sphk		vm_page_t *a_m;
64776167Sphk		int a_count;
64876167Sphk		int a_reqpage;
64976167Sphk		vm_ooffset_t a_offset;
65076167Sphk	} */ *ap;
65176167Sphk{
65276131Sphk
65376167Sphk	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
65476167Sphk	    ap->a_count, ap->a_reqpage);
65576167Sphk}
65676167Sphk
65776319Sphkint
65876167Sphkvop_stdputpages(ap)
65976167Sphk	struct vop_putpages_args /* {
66076167Sphk		struct vnode *a_vp;
66176167Sphk		vm_page_t *a_m;
66276167Sphk		int a_count;
66376167Sphk		int a_sync;
66476167Sphk		int *a_rtvals;
66576167Sphk		vm_ooffset_t a_offset;
66676167Sphk	} */ *ap;
66776167Sphk{
66876167Sphk
66976319Sphk	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
67076167Sphk	     ap->a_sync, ap->a_rtvals);
67176167Sphk}
67276167Sphk
67376167Sphk
67476167Sphk
67551068Salfred/*
67651068Salfred * vfs default ops
67751068Salfred * used to fill the vfs fucntion table to get reasonable default return values.
67851068Salfred */
67951068Salfredint
68083366Sjulianvfs_stdmount (mp, path, data, ndp, td)
68151068Salfred	struct mount *mp;
68251068Salfred	char *path;
68351068Salfred	caddr_t data;
68451068Salfred	struct nameidata *ndp;
68583366Sjulian	struct thread *td;
68651068Salfred{
68751068Salfred	return (0);
68851068Salfred}
68951068Salfred
/* Default unmount: nothing to tear down, always succeeds. */
int
vfs_stdunmount(struct mount *mp, int mntflags, struct thread *td)
{

	return (0);
}
69851068Salfred
/* Default root: no root vnode can be produced. */
int
vfs_stdroot(struct mount *mp, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}
70651068Salfred
/* Default statfs: no statistics available. */
int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp, struct thread *td)
{

	return (EOPNOTSUPP);
}
71551068Salfred
/* Default vptofh: file handles are not supported. */
int
vfs_stdvptofh(struct vnode *vp, struct fid *fhp)
{

	return (EOPNOTSUPP);
}
72351068Salfred
/* Default start: nothing to kick off, always succeeds. */
int
vfs_stdstart(struct mount *mp, int flags, struct thread *td)
{

	return (0);
}
73251068Salfred
73351068Salfredint
73483366Sjulianvfs_stdquotactl (mp, cmds, uid, arg, td)
73551068Salfred	struct mount *mp;
73651068Salfred	int cmds;
73751068Salfred	uid_t uid;
73851068Salfred	caddr_t arg;
73983366Sjulian	struct thread *td;
74051068Salfred{
74151068Salfred	return (EOPNOTSUPP);
74251068Salfred}
74351068Salfred
/* Default sync: nothing dirty to flush, always succeeds. */
int
vfs_stdsync(struct mount *mp, int waitfor, struct ucred *cred,
    struct thread *td)
{

	return (0);
}
75351068Salfred
/* Default vget: inode-number lookup is not supported. */
int
vfs_stdvget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}
76251068Salfred
/* Default fhtovp: file handles are not supported. */
int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{

	return (EOPNOTSUPP);
}
77151138Salfred
/* Default init: no per-filesystem initialization needed. */
int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}
77851068Salfred
/* Default uninit: no per-filesystem teardown needed. */
int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}
78551068Salfred
/* Default extattrctl: extended attributes are not supported. */
int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname, struct thread *td)
{

	return (EOPNOTSUPP);
}
79754803Srwatson
79851068Salfred/* end of vfs default ops */
799