vfs_default.c revision 140048
1139804Simp/*-
230489Sphk * Copyright (c) 1989, 1993
330489Sphk *	The Regents of the University of California.  All rights reserved.
430489Sphk *
530489Sphk * This code is derived from software contributed
630489Sphk * to Berkeley by John Heidemann of the UCLA Ficus project.
730489Sphk *
830489Sphk * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
930489Sphk *
1030489Sphk * Redistribution and use in source and binary forms, with or without
1130489Sphk * modification, are permitted provided that the following conditions
1230489Sphk * are met:
1330489Sphk * 1. Redistributions of source code must retain the above copyright
1430489Sphk *    notice, this list of conditions and the following disclaimer.
1530489Sphk * 2. Redistributions in binary form must reproduce the above copyright
1630489Sphk *    notice, this list of conditions and the following disclaimer in the
1730489Sphk *    documentation and/or other materials provided with the distribution.
1830489Sphk * 4. Neither the name of the University nor the names of its contributors
1930489Sphk *    may be used to endorse or promote products derived from this software
2030489Sphk *    without specific prior written permission.
2130489Sphk *
2230489Sphk * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2330489Sphk * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2430489Sphk * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2530489Sphk * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
2630489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2730489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2830489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2930489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3030489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3130489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3230489Sphk * SUCH DAMAGE.
3330489Sphk */
3430489Sphk
35116182Sobrien#include <sys/cdefs.h>
36116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 140048 2005-01-11 07:36:22Z phk $");
37116182Sobrien
3830489Sphk#include <sys/param.h>
3930489Sphk#include <sys/systm.h>
4060041Sphk#include <sys/bio.h>
4144272Sbde#include <sys/buf.h>
4265770Sbp#include <sys/conf.h>
4330489Sphk#include <sys/kernel.h>
44114216Skan#include <sys/limits.h>
4531561Sbde#include <sys/lock.h>
4630743Sphk#include <sys/malloc.h>
4751068Salfred#include <sys/mount.h>
4867365Sjhb#include <sys/mutex.h>
4930492Sphk#include <sys/unistd.h>
5030489Sphk#include <sys/vnode.h>
5130743Sphk#include <sys/poll.h>
5230489Sphk
5365770Sbp#include <vm/vm.h>
5465770Sbp#include <vm/vm_object.h>
5565770Sbp#include <vm/vm_extern.h>
5665770Sbp#include <vm/pmap.h>
5765770Sbp#include <vm/vm_map.h>
5865770Sbp#include <vm/vm_page.h>
5965770Sbp#include <vm/vm_pager.h>
6065770Sbp#include <vm/vnode_pager.h>
6165770Sbp
6292723Salfredstatic int	vop_nolookup(struct vop_lookup_args *);
6392723Salfredstatic int	vop_nostrategy(struct vop_strategy_args *);
6430489Sphk
6530489Sphk/*
6630489Sphk * This vnode table stores what we want to do if the filesystem doesn't
6730489Sphk * implement a particular VOP.
6830489Sphk *
6930489Sphk * If there is no specific entry here, we will return EOPNOTSUPP.
7030489Sphk *
7130489Sphk */
7230489Sphk
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,		/* end of the fallback chain */
	.vop_bypass =		VOP_EOPNOTSUPP,	/* any unlisted VOP fails */

	.vop_advlock =		VOP_EINVAL,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_createvobject =	vop_stdcreatevobject,
	.vop_destroyvobject =	vop_stddestroyvobject,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getvobject =	vop_stdgetvobject,
	.vop_getwritemount = 	vop_stdgetwritemount,
	.vop_inactive =		vop_stdinactive,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	.vop_lock =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,	/* must be provided by the fs */
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
};
10130489Sphk
10291690Seivind/*
10391690Seivind * Series of placeholder functions for various error returns for
10491690Seivind * VOPs.
10591690Seivind */
10691690Seivind
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	/* Generic "operation not supported" placeholder. */
	return (EOPNOTSUPP);
}
11630489Sphk
int
vop_ebadf(struct vop_generic_args *ap)
{

	/* Generic "bad file descriptor" placeholder. */
	return (EBADF);
}
12330492Sphk
int
vop_enotty(struct vop_generic_args *ap)
{

	/* Generic "inappropriate ioctl" placeholder. */
	return (ENOTTY);
}
13030492Sphk
int
vop_einval(struct vop_generic_args *ap)
{

	/* Generic "invalid argument" placeholder. */
	return (EINVAL);
}
13730492Sphk
int
vop_null(struct vop_generic_args *ap)
{

	/* Generic no-op: succeed without doing anything. */
	return (0);
}
14430492Sphk
14591690Seivind/*
14691690Seivind * Helper function to panic on some bad VOPs in some filesystems.
14791690Seivind */
int
vop_panic(struct vop_generic_args *ap)
{

	/* Does not return; names the offending VOP in the panic message. */
	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
15441056Speter
15591690Seivind/*
15691690Seivind * vop_std<something> and vop_no<something> are default functions for use by
15791690Seivind * filesystems that need the "default reasonable" implementation for a
15891690Seivind * particular operation.
15991690Seivind *
16091690Seivind * The documentation for the operations they implement exists (if it exists)
16191690Seivind * in the VOP_<SOMETHING>(9) manpage (all uppercase).
16291690Seivind */
16391690Seivind
16491690Seivind/*
16591690Seivind * Default vop for filesystems that do not support name lookup
16691690Seivind */
16772594Sbdestatic int
16872594Sbdevop_nolookup(ap)
16972594Sbde	struct vop_lookup_args /* {
17072594Sbde		struct vnode *a_dvp;
17172594Sbde		struct vnode **a_vpp;
17272594Sbde		struct componentname *a_cnp;
17372594Sbde	} */ *ap;
17472594Sbde{
17572594Sbde
17672594Sbde	*ap->a_vpp = NULL;
17772594Sbde	return (ENOTDIR);
17872594Sbde}
17972594Sbde
18046349Salc/*
18146349Salc *	vop_nostrategy:
18246349Salc *
18346349Salc *	Strategy routine for VFS devices that have none.
18446349Salc *
18558934Sphk *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
18658345Sphk *	routine.  Typically this is done for a BIO_READ strategy call.
187112067Skan *	Typically B_INVAL is assumed to already be clear prior to a write
18858345Sphk *	and should not be cleared manually unless you just made the buffer
18958934Sphk *	invalid.  BIO_ERROR should be cleared either way.
19046349Salc */
19146349Salc
static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	/* Complain loudly; a strategy call reached a vnode without one. */
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;	/* mark the buffer failed */
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);			/* complete the I/O anyway */
	return (EOPNOTSUPP);
}
20230492Sphk
20391690Seivind/*
20491690Seivind * vop_stdpathconf:
205112067Skan *
20691690Seivind * Standard implementation of POSIX pathconf, to get information about limits
20791690Seivind * for a filesystem.
20891690Seivind * Override per filesystem for the case where the filesystem has smaller
20991690Seivind * limits.
21091690Seivind */
21130492Sphkint
21230492Sphkvop_stdpathconf(ap)
21330492Sphk	struct vop_pathconf_args /* {
21430492Sphk	struct vnode *a_vp;
21530492Sphk	int a_name;
21630492Sphk	int *a_retval;
21730492Sphk	} */ *ap;
21830492Sphk{
21930492Sphk
22030492Sphk	switch (ap->a_name) {
22130492Sphk		case _PC_LINK_MAX:
22230492Sphk			*ap->a_retval = LINK_MAX;
22330492Sphk			return (0);
22430492Sphk		case _PC_MAX_CANON:
22530492Sphk			*ap->a_retval = MAX_CANON;
22630492Sphk			return (0);
22730492Sphk		case _PC_MAX_INPUT:
22830492Sphk			*ap->a_retval = MAX_INPUT;
22930492Sphk			return (0);
23030492Sphk		case _PC_PIPE_BUF:
23130492Sphk			*ap->a_retval = PIPE_BUF;
23230492Sphk			return (0);
23330492Sphk		case _PC_CHOWN_RESTRICTED:
23430492Sphk			*ap->a_retval = 1;
23530492Sphk			return (0);
23630492Sphk		case _PC_VDISABLE:
23730492Sphk			*ap->a_retval = _POSIX_VDISABLE;
23830492Sphk			return (0);
23930492Sphk		default:
24030492Sphk			return (EINVAL);
24130492Sphk	}
24230492Sphk	/* NOTREACHED */
24330492Sphk}
24430513Sphk
24530513Sphk/*
24630513Sphk * Standard lock, unlock and islocked functions.
24730513Sphk */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	/* Delegate to lockmgr on v_vnlock, interlocked with the vnode mutex. */
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	/* Debug build: pass the file/line recorded in the vnode as well. */
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}
26530513Sphk
26691690Seivind/* See above. */
26730513Sphkint
26830513Sphkvop_stdunlock(ap)
26930513Sphk	struct vop_unlock_args /* {
27030513Sphk		struct vnode *a_vp;
27130513Sphk		int a_flags;
27283366Sjulian		struct thread *a_td;
27330513Sphk	} */ *ap;
27430513Sphk{
27566355Sbp	struct vnode *vp = ap->a_vp;
27630513Sphk
277105077Smckusick	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
27883366Sjulian	    ap->a_td));
27930513Sphk}
28030513Sphk
28191690Seivind/* See above. */
28230513Sphkint
28330513Sphkvop_stdislocked(ap)
28430513Sphk	struct vop_islocked_args /* {
28530513Sphk		struct vnode *a_vp;
28683366Sjulian		struct thread *a_td;
28730513Sphk	} */ *ap;
28830513Sphk{
28930513Sphk
290105077Smckusick	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
29130513Sphk}
29230513Sphk
29391690Seivind/* Mark the vnode inactive */
29464819Sphkint
29564819Sphkvop_stdinactive(ap)
29664819Sphk	struct vop_inactive_args /* {
29764819Sphk		struct vnode *a_vp;
29883366Sjulian		struct thread *a_td;
29964819Sphk	} */ *ap;
30064819Sphk{
30164819Sphk
30283366Sjulian	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
30364819Sphk	return (0);
30464819Sphk}
30564819Sphk
30630743Sphk/*
30730743Sphk * Return true for select/poll.
30830743Sphk */
30930743Sphkint
31030743Sphkvop_nopoll(ap)
31130743Sphk	struct vop_poll_args /* {
31230743Sphk		struct vnode *a_vp;
31330743Sphk		int  a_events;
31430743Sphk		struct ucred *a_cred;
31583366Sjulian		struct thread *a_td;
31630743Sphk	} */ *ap;
31730743Sphk{
31830743Sphk	/*
31931727Swollman	 * Return true for read/write.  If the user asked for something
32031727Swollman	 * special, return POLLNVAL, so that clients have a way of
32131727Swollman	 * determining reliably whether or not the extended
32231727Swollman	 * functionality is present without hard-coding knowledge
32331727Swollman	 * of specific filesystem implementations.
324120514Sphk	 * Stay in sync with kern_conf.c::no_poll().
32530743Sphk	 */
32631727Swollman	if (ap->a_events & ~POLLSTANDARD)
32731727Swollman		return (POLLNVAL);
32831727Swollman
32930743Sphk	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
33030743Sphk}
33130743Sphk
33231727Swollman/*
33331727Swollman * Implement poll for local filesystems that support it.
33431727Swollman */
33530743Sphkint
33631727Swollmanvop_stdpoll(ap)
33731727Swollman	struct vop_poll_args /* {
33831727Swollman		struct vnode *a_vp;
33931727Swollman		int  a_events;
34031727Swollman		struct ucred *a_cred;
34183366Sjulian		struct thread *a_td;
34231727Swollman	} */ *ap;
34331727Swollman{
34476578Sjlemon	if (ap->a_events & ~POLLSTANDARD)
34583366Sjulian		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
34676578Sjlemon	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
34731727Swollman}
34831727Swollman
34930743Sphk/*
35062976Smckusick * Return our mount point, as we will take charge of the writes.
35162976Smckusick */
35262976Smckusickint
35362976Smckusickvop_stdgetwritemount(ap)
35462976Smckusick	struct vop_getwritemount_args /* {
35562976Smckusick		struct vnode *a_vp;
35662976Smckusick		struct mount **a_mpp;
35762976Smckusick	} */ *ap;
35862976Smckusick{
35962976Smckusick
36062976Smckusick	*(ap->a_mpp) = ap->a_vp->v_mount;
36162976Smckusick	return (0);
36262976Smckusick}
36362976Smckusick
36491690Seivind/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;
	vm_ooffset_t size;

	GIANT_REQUIRED;

	/* Nothing to do unless the vnode can back VM I/O (or is a disk). */
	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	/*
	 * If a dead object is still attached, drop the vnode lock and
	 * sleep until its teardown completes, then re-lock and re-check.
	 * A live object terminates the loop with the object lock dropped.
	 */
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(object);
			break;
		}
		VOP_UNLOCK(vp, 0, td);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		/* PDROP releases the object mutex while we sleep. */
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	if (object == NULL) {
		if (vn_isdisk(vp, NULL)) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			size = IDX_TO_OFF(INT_MAX);
		} else {
			/* Size the object from the file's attributes. */
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				return (error);
			size = vat.va_size;
		}

		object = vnode_pager_alloc(vp, size, 0, 0);
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

	return (error);
}
42865770Sbp
42991690Seivind/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	/*
	 * NOTE: vm_object_terminate() consumes the object lock taken
	 * here; the explicit unlocks below cover the other paths.
	 */
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	return (0);
}
46665770Sbp
46785340Sdillon/*
46885340Sdillon * Return the underlying VM object.  This routine may be called with or
46985340Sdillon * without the vnode interlock held.  If called without, the returned
47085340Sdillon * object is not guarenteed to be valid.  The syncer typically gets the
47185340Sdillon * object without holding the interlock in order to quickly test whether
47285340Sdillon * it might be dirty before going heavy-weight.  vm_object's use zalloc
47385340Sdillon * and thus stable-storage, so this is safe.
47485340Sdillon */
47565770Sbpint
47665770Sbpvop_stdgetvobject(ap)
47765770Sbp	struct vop_getvobject_args /* {
47865770Sbp		struct vnode *vp;
47965770Sbp		struct vm_object **objpp;
48065770Sbp	} */ *ap;
48165770Sbp{
48265770Sbp	struct vnode *vp = ap->a_vp;
48365770Sbp	struct vm_object **objpp = ap->a_objpp;
48465770Sbp
48565770Sbp	if (objpp)
48665770Sbp		*objpp = vp->v_object;
48765770Sbp	return (vp->v_object ? 0 : EINVAL);
48865770Sbp}
48965770Sbp
49091690Seivind/* XXX Needs good comment and VOP_BMAP(9) manpage */
49176131Sphkint
49276131Sphkvop_stdbmap(ap)
493112067Skan	struct vop_bmap_args /* {
49476131Sphk		struct vnode *a_vp;
49576131Sphk		daddr_t  a_bn;
496137726Sphk		struct bufobj **a_bop;
49776131Sphk		daddr_t *a_bnp;
49876131Sphk		int *a_runp;
49976131Sphk		int *a_runb;
50076131Sphk	} */ *ap;
50176131Sphk{
50276131Sphk
503137726Sphk	if (ap->a_bop != NULL)
504137726Sphk		*ap->a_bop = &ap->a_vp->v_bufobj;
50576131Sphk	if (ap->a_bnp != NULL)
50676131Sphk		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
50776131Sphk	if (ap->a_runp != NULL)
50876131Sphk		*ap->a_runp = 0;
50976131Sphk	if (ap->a_runb != NULL)
51076131Sphk		*ap->a_runb = 0;
51176131Sphk	return (0);
51276131Sphk}
51376131Sphk
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;     /* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		/* Skip buffers someone else holds locked. */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		/* The dirty list may have changed; restart the scan. */
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}
599112067Skan
60091690Seivind/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
60176167Sphkint
60276167Sphkvop_stdgetpages(ap)
60376167Sphk	struct vop_getpages_args /* {
60476167Sphk		struct vnode *a_vp;
60576167Sphk		vm_page_t *a_m;
60676167Sphk		int a_count;
60776167Sphk		int a_reqpage;
60876167Sphk		vm_ooffset_t a_offset;
60976167Sphk	} */ *ap;
61076167Sphk{
61176131Sphk
61276167Sphk	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
61376167Sphk	    ap->a_count, ap->a_reqpage);
61476167Sphk}
61576167Sphk
61691690Seivind/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
61776319Sphkint
61876167Sphkvop_stdputpages(ap)
61976167Sphk	struct vop_putpages_args /* {
62076167Sphk		struct vnode *a_vp;
62176167Sphk		vm_page_t *a_m;
62276167Sphk		int a_count;
62376167Sphk		int a_sync;
62476167Sphk		int *a_rtvals;
62576167Sphk		vm_ooffset_t a_offset;
62676167Sphk	} */ *ap;
62776167Sphk{
62876167Sphk
62976319Sphk	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
63076167Sphk	     ap->a_sync, ap->a_rtvals);
63176167Sphk}
63276167Sphk
633112067Skan/*
63451068Salfred * vfs default ops
63591690Seivind * used to fill the vfs function table to get reasonable default return values.
63651068Salfred */
63791690Seivindint
638132023Salfredvfs_stdroot (mp, vpp, td)
63951068Salfred	struct mount *mp;
64051068Salfred	struct vnode **vpp;
641132023Salfred	struct thread *td;
64251068Salfred{
643131734Salfred
64451068Salfred	return (EOPNOTSUPP);
64551068Salfred}
64651068Salfred
64791690Seivindint
64883366Sjulianvfs_stdstatfs (mp, sbp, td)
64951068Salfred	struct mount *mp;
65051068Salfred	struct statfs *sbp;
65183366Sjulian	struct thread *td;
65251068Salfred{
653131734Salfred
65451068Salfred	return (EOPNOTSUPP);
65551068Salfred}
65651068Salfred
65751068Salfredint
65851068Salfredvfs_stdvptofh (vp, fhp)
65951068Salfred	struct vnode *vp;
66051068Salfred	struct fid *fhp;
66151068Salfred{
662131734Salfred
66351068Salfred	return (EOPNOTSUPP);
66451068Salfred}
66551068Salfred
/*
 * Default VFS start: nothing to do, succeed.
 */
int
vfs_stdstart(struct mount *mp, int flags, struct thread *td)
{

	return (0);
}
67551068Salfred
676112067Skanint
67783366Sjulianvfs_stdquotactl (mp, cmds, uid, arg, td)
67851068Salfred	struct mount *mp;
67951068Salfred	int cmds;
68051068Salfred	uid_t uid;
68151068Salfred	caddr_t arg;
68283366Sjulian	struct thread *td;
68351068Salfred{
684131734Salfred
68551068Salfred	return (EOPNOTSUPP);
68651068Salfred}
68751068Salfred
/*
 * Default VFS sync: walk the mount's vnode list and VOP_FSYNC each
 * vnode that has dirty buffers.  Returns the last fsync error seen,
 * or 0.
 */
int
vfs_stdsync(mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	/* Only block on the vnode lock for a synchronous sync. */
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			/* ENOENT: the vnode was reclaimed; rescan the list. */
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}
731112119Skan
/*
 * Sync stub for filesystems with nothing to flush: always succeeds.
 */
int
vfs_stdnosync(struct mount *mp, int waitfor, struct thread *td)
{

	return (0);
}
74151068Salfred
742112067Skanint
74392462Smckusickvfs_stdvget (mp, ino, flags, vpp)
74451068Salfred	struct mount *mp;
74551068Salfred	ino_t ino;
74692462Smckusick	int flags;
74751068Salfred	struct vnode **vpp;
74851068Salfred{
749131734Salfred
75051068Salfred	return (EOPNOTSUPP);
75151068Salfred}
75251068Salfred
753112067Skanint
75451138Salfredvfs_stdfhtovp (mp, fhp, vpp)
75551068Salfred	struct mount *mp;
75651068Salfred	struct fid *fhp;
75751138Salfred	struct vnode **vpp;
75851138Salfred{
759131734Salfred
76051138Salfred	return (EOPNOTSUPP);
76151138Salfred}
76251138Salfred
/*
 * Default VFS init: no per-filesystem initialization needed.
 */
int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}
77051068Salfred
/*
 * Default VFS uninit: no per-filesystem teardown needed.
 */
int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}
77851068Salfred
77954803Srwatsonint
78083366Sjulianvfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
78154803Srwatson	struct mount *mp;
78254803Srwatson	int cmd;
78374273Srwatson	struct vnode *filename_vp;
78474437Srwatson	int attrnamespace;
78556272Srwatson	const char *attrname;
78683366Sjulian	struct thread *td;
78754803Srwatson{
788131734Salfred
789101786Sphk	if (filename_vp != NULL)
790101786Sphk		VOP_UNLOCK(filename_vp, 0, td);
791131734Salfred	return (EOPNOTSUPP);
79254803Srwatson}
79354803Srwatson
794131733Salfredint
795131733Salfredvfs_stdsysctl(mp, op, req)
796131733Salfred	struct mount *mp;
797131733Salfred	fsctlop_t op;
798131733Salfred	struct sysctl_req *req;
799131733Salfred{
800131733Salfred
801131733Salfred	return (EOPNOTSUPP);
802131733Salfred}
803131733Salfred
80451068Salfred/* end of vfs default ops */
805