/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 194601 2009-06-21 19:21:01Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_accessx =		vop_stdaccessx,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
};
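
/*
 * Illustrative sketch (not part of this file): a filesystem normally
 * provides its own vop_vector and points vop_default at default_vnodeops,
 * so that any operation it does not implement falls back to the entries
 * above.  The examplefs_* names below are hypothetical.
 *
 *	static struct vop_vector examplefs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	examplefs_lookup,
 *		.vop_read =	examplefs_read,
 *		.vop_reclaim =	examplefs_reclaim,
 *	};
 */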

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists, is
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

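/*
 * Illustrative sketch (not part of this file's interface): callers that hand
 * a buffer to a strategy routine are expected to prepare it as described
 * above.  For a read this typically looks roughly like the following; the
 * exact preparation depends on the caller (see e.g. bread() in vfs_bio.c):
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bp->b_iooffset = dbtob(bp->b_blkno);
 *	bstrategy(bp);
 */
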
static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

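/*
 * Fetch the next directory entry from a locked directory vnode.  When the
 * caller's buffer is exhausted (*len == 0) it is refilled with a single
 * VOP_READDIR at *off; otherwise the entry at *cpos is handed back and the
 * cursor advanced past it.  A malformed (undersized) record yields EINVAL.
 */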
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

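/*
 * Default accessx: translate the extended access bits into ones the plain
 * VOP_ACCESS interface understands and hand the check off to VOP_ACCESS.
 */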
int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override this per filesystem when the filesystem has smaller limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_NAME_MAX:
			*ap->a_retval = NAME_MAX;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
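
/*
 * Illustrative sketch (not part of this file): a filesystem with tighter
 * limits can answer the names it cares about itself and defer everything
 * else to vop_stdpathconf().  The examplefs name and its 12-character name
 * limit are hypothetical.
 *
 *	static int
 *	examplefs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = 12;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */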

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * vop_stdbmap:
 *
 * Default bmap: map a logical block onto the vnode's own buffer object,
 * convert the block number into DEV_BSIZE units using the filesystem's
 * I/O size, and report no read-ahead or read-behind runs.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

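/*
 * Default fsync: write out and, for MNT_WAIT, wait for all dirty buffers
 * hanging off the vnode's buffer object, retrying a bounded number of times
 * before giving up with EAGAIN.
 */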
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/*
 * Default getpages: hand the request to the generic vnode pager.
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/*
 * Default putpages: let the generic vnode pager write the pages out.
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

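/*
 * Default vptocnp: recover the name of a directory vnode by opening its
 * parent ("..") and scanning the parent with VOP_READDIR for an entry whose
 * inode number matches the child's.  The name is copied to the tail end of
 * the caller-supplied buffer and *buflen is moved back to its first byte.
 */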
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			bcopy(dp->d_name, buf + i, dp->d_namlen);
			error = 0;
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vhold(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
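
/*
 * Illustrative sketch (not part of this file): a minimal filesystem can wire
 * the vfs_std* entries below directly into its vfsops table for the pieces
 * it does not implement itself.  The examplefs names are hypothetical.
 *
 *	static struct vfsops examplefs_vfsops = {
 *		.vfs_mount =	examplefs_mount,
 *		.vfs_unmount =	examplefs_unmount,
 *		.vfs_root =	examplefs_root,
 *		.vfs_statfs =	vfs_stdstatfs,
 *		.vfs_sync =	vfs_stdnosync,
 *		.vfs_vget =	vfs_stdvget,
 *	};
 *	VFS_SET(examplefs_vfsops, examplefs, 0);
 */
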
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

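/*
 * Default sync: walk every vnode on the mount and VOP_FSYNC the ones that
 * still have dirty buffers, remembering the last error seen.
 */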
int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		/* bv_cnt is an acceptable race here. */
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
			continue;
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */