/* vfs_default.c revision 292373 */
1139804Simp/*-
230489Sphk * Copyright (c) 1989, 1993
330489Sphk *	The Regents of the University of California.  All rights reserved.
430489Sphk *
530489Sphk * This code is derived from software contributed
630489Sphk * to Berkeley by John Heidemann of the UCLA Ficus project.
730489Sphk *
830489Sphk * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
930489Sphk *
1030489Sphk * Redistribution and use in source and binary forms, with or without
1130489Sphk * modification, are permitted provided that the following conditions
1230489Sphk * are met:
1330489Sphk * 1. Redistributions of source code must retain the above copyright
1430489Sphk *    notice, this list of conditions and the following disclaimer.
1530489Sphk * 2. Redistributions in binary form must reproduce the above copyright
1630489Sphk *    notice, this list of conditions and the following disclaimer in the
1730489Sphk *    documentation and/or other materials provided with the distribution.
1830489Sphk * 4. Neither the name of the University nor the names of its contributors
1930489Sphk *    may be used to endorse or promote products derived from this software
2030489Sphk *    without specific prior written permission.
2130489Sphk *
2230489Sphk * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2330489Sphk * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2430489Sphk * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2530489Sphk * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
2630489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2730489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2830489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2930489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3030489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3130489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3230489Sphk * SUCH DAMAGE.
3330489Sphk */
3430489Sphk
35116182Sobrien#include <sys/cdefs.h>
36116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 292373 2015-12-16 21:30:45Z glebius $");
37116182Sobrien
3830489Sphk#include <sys/param.h>
3930489Sphk#include <sys/systm.h>
4060041Sphk#include <sys/bio.h>
4144272Sbde#include <sys/buf.h>
4265770Sbp#include <sys/conf.h>
43147198Sssouhlal#include <sys/event.h>
4430489Sphk#include <sys/kernel.h>
45114216Skan#include <sys/limits.h>
4631561Sbde#include <sys/lock.h>
47178243Skib#include <sys/lockf.h>
4830743Sphk#include <sys/malloc.h>
4951068Salfred#include <sys/mount.h>
50189539Smarcus#include <sys/namei.h>
51248084Sattilio#include <sys/rwlock.h>
52189539Smarcus#include <sys/fcntl.h>
5330492Sphk#include <sys/unistd.h>
5430489Sphk#include <sys/vnode.h>
55189539Smarcus#include <sys/dirent.h>
5630743Sphk#include <sys/poll.h>
5730489Sphk
58193508Srwatson#include <security/mac/mac_framework.h>
59193508Srwatson
6065770Sbp#include <vm/vm.h>
6165770Sbp#include <vm/vm_object.h>
6265770Sbp#include <vm/vm_extern.h>
6365770Sbp#include <vm/pmap.h>
6465770Sbp#include <vm/vm_map.h>
6565770Sbp#include <vm/vm_page.h>
6665770Sbp#include <vm/vm_pager.h>
6765770Sbp#include <vm/vnode_pager.h>
6865770Sbp
6992723Salfredstatic int	vop_nolookup(struct vop_lookup_args *);
70206094Skibstatic int	vop_norename(struct vop_rename_args *);
7192723Salfredstatic int	vop_nostrategy(struct vop_strategy_args *);
72189539Smarcusstatic int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
73189539Smarcus				char *dirbuf, int dirbuflen, off_t *off,
74189539Smarcus				char **cpos, int *len, int *eofflag,
75189539Smarcus				struct thread *td);
76189539Smarcusstatic int	dirent_exists(struct vnode *vp, const char *dirname,
77189539Smarcus			      struct thread *td);
7830489Sphk
79189539Smarcus#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
80189539Smarcus
81241025Skibstatic int vop_stdis_text(struct vop_is_text_args *ap);
82241025Skibstatic int vop_stdset_text(struct vop_set_text_args *ap);
83241025Skibstatic int vop_stdunset_text(struct vop_unset_text_args *ap);
84242476Skibstatic int vop_stdget_writecount(struct vop_get_writecount_args *ap);
85242476Skibstatic int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
86274914Sglebiusstatic int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
87241025Skib
8830489Sphk/*
8930489Sphk * This vnode table stores what we want to do if the filesystem doesn't
9030489Sphk * implement a particular VOP.
9130489Sphk *
9230489Sphk * If there is no specific entry here, we will return EOPNOTSUPP.
9330489Sphk *
94197680Strasz * Note that every filesystem has to implement either vop_access
95197680Strasz * or vop_accessx; failing to do so will result in immediate crash
96197680Strasz * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
97197680Strasz * which calls vop_stdaccess() etc.
9830489Sphk */
9930489Sphk
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,	/* end of the resolution chain */
	.vop_bypass =		VOP_EOPNOTSUPP,	/* catch-all for unlisted VOPs */

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount = 	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_get_writecount =	vop_stdget_writecount,
	.vop_add_writecount =	vop_stdadd_writecount,
};
14330489Sphk
14491690Seivind/*
14591690Seivind * Series of placeholder functions for various error returns for
14691690Seivind * VOPs.
14791690Seivind */
14891690Seivind
14930489Sphkint
15030492Sphkvop_eopnotsupp(struct vop_generic_args *ap)
15130489Sphk{
15230489Sphk	/*
15330492Sphk	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
15430489Sphk	*/
15530489Sphk
15630489Sphk	return (EOPNOTSUPP);
15730489Sphk}
15830489Sphk
/* Placeholder VOP returning EBADF ("bad file descriptor"). */
int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}
16530492Sphk
/* Placeholder VOP returning ENOTTY ("inappropriate ioctl"). */
int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}
17230492Sphk
/* Placeholder VOP returning EINVAL ("invalid argument"). */
int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}
17930492Sphk
/* Placeholder VOP returning ENOENT ("no such file or directory"). */
int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}
186185956Smarcus
/* Placeholder VOP that succeeds without doing anything. */
int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}
19330492Sphk
19491690Seivind/*
19591690Seivind * Helper function to panic on some bad VOPs in some filesystems.
19691690Seivind */
int
vop_panic(struct vop_generic_args *ap)
{

	/* Unconditional: reaching this VOP at all indicates a filesystem bug. */
	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
20341056Speter
20491690Seivind/*
20591690Seivind * vop_std<something> and vop_no<something> are default functions for use by
20691690Seivind * filesystems that need the "default reasonable" implementation for a
20791690Seivind * particular operation.
20891690Seivind *
20991690Seivind * The documentation for the operations they implement exists (if it exists)
21091690Seivind * in the VOP_<SOMETHING>(9) manpage (all uppercase).
21191690Seivind */
21291690Seivind
21391690Seivind/*
21491690Seivind * Default vop for filesystems that do not support name lookup
21591690Seivind */
21672594Sbdestatic int
21772594Sbdevop_nolookup(ap)
21872594Sbde	struct vop_lookup_args /* {
21972594Sbde		struct vnode *a_dvp;
22072594Sbde		struct vnode **a_vpp;
22172594Sbde		struct componentname *a_cnp;
22272594Sbde	} */ *ap;
22372594Sbde{
22472594Sbde
22572594Sbde	*ap->a_vpp = NULL;
22672594Sbde	return (ENOTDIR);
22772594Sbde}
22872594Sbde
22946349Salc/*
230206094Skib * vop_norename:
231206094Skib *
232206094Skib * Handle unlock and reference counting for arguments of vop_rename
233206094Skib * for filesystems that do not implement rename operation.
234206094Skib */
static int
vop_norename(struct vop_rename_args *ap)
{

	/* Drop the locks/references handed to us by the VOP_RENAME() protocol. */
	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}
242206094Skib
243206094Skib/*
24446349Salc *	vop_nostrategy:
24546349Salc *
24646349Salc *	Strategy routine for VFS devices that have none.
24746349Salc *
24858934Sphk *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
24958345Sphk *	routine.  Typically this is done for a BIO_READ strategy call.
250112067Skan *	Typically B_INVAL is assumed to already be clear prior to a write
25158345Sphk *	and should not be cleared manually unless you just made the buffer
25258934Sphk *	invalid.  BIO_ERROR should be cleared either way.
25346349Salc */
25446349Salc
static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	/* Complain loudly: a strategy call on such a vnode is unexpected. */
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	/* Fail the buffer with EOPNOTSUPP and complete the I/O. */
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
26530492Sphk
/*
 * Fetch the next directory entry from a locked directory vnode.
 *
 * The caller supplies a scratch buffer (dirbuf/dirbuflen) and cursor state
 * (*off, *cpos, *len) that must start out zeroed.  When the cached data is
 * drained (*len == 0) a fresh VOP_READDIR() is issued; otherwise the next
 * cached entry is returned through *dpp and the cursor is advanced.
 * Returns ENOENT when a read produces no data (end of directory), EINVAL
 * on a malformed (too-short) record, or an error from VOP_READDIR()/MAC.
 */
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
		    		NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}
324189539Smarcus
32591690Seivind/*
326189539Smarcus * Check if a named file exists in a given directory vnode.
327189539Smarcus */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	/*
	 * Returns a boolean (1 = exists, 0 = not found).  Note that any
	 * error from VOP_GETATTR() or the readdir loop is also reported
	 * as "not found".
	 */
	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	/* Read in units of at least the filesystem's preferred block size. */
	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		/* Ignore whiteouts and deleted (d_fileno == 0) slots. */
		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}
370189539Smarcus
/*
 * Implement VOP_ACCESS() in terms of VOP_ACCESSX().  Per the warning at
 * the top of this file, a filesystem must override at least one of the
 * two or the mutual delegation recurses until the stack overflows.
 */
int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}
380197680Strasz
/*
 * Implement VOP_ACCESSX() in terms of VOP_ACCESS() by first reducing the
 * extended access mode via vfs_unixify_accmode().
 */
int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	/* Nothing left to check after unixification: grant. */
	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
396193092Strasz
397189539Smarcus/*
398178243Skib * Advisory record locking support
399178243Skib */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		/* The file size is only consulted for SEEK_END offsets. */
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}
425178243Skib
/* Asynchronous advisory record locking; same SEEK_END size logic as above. */
int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}
446178243Skib
/* Purge all advisory locks recorded on the vnode. */
int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}
456208003Szml
457178243Skib/*
45891690Seivind * vop_stdpathconf:
459112067Skan *
46091690Seivind * Standard implementation of POSIX pathconf, to get information about limits
46191690Seivind * for a filesystem.
46291690Seivind * Override per filesystem for the case where the filesystem has smaller
46391690Seivind * limits.
46491690Seivind */
46530492Sphkint
46630492Sphkvop_stdpathconf(ap)
46730492Sphk	struct vop_pathconf_args /* {
46830492Sphk	struct vnode *a_vp;
46930492Sphk	int a_name;
47030492Sphk	int *a_retval;
47130492Sphk	} */ *ap;
47230492Sphk{
47330492Sphk
47430492Sphk	switch (ap->a_name) {
475149175Sphk		case _PC_NAME_MAX:
476149175Sphk			*ap->a_retval = NAME_MAX;
477149175Sphk			return (0);
478149175Sphk		case _PC_PATH_MAX:
479149175Sphk			*ap->a_retval = PATH_MAX;
480149175Sphk			return (0);
48130492Sphk		case _PC_LINK_MAX:
48230492Sphk			*ap->a_retval = LINK_MAX;
48330492Sphk			return (0);
48430492Sphk		case _PC_MAX_CANON:
48530492Sphk			*ap->a_retval = MAX_CANON;
48630492Sphk			return (0);
48730492Sphk		case _PC_MAX_INPUT:
48830492Sphk			*ap->a_retval = MAX_INPUT;
48930492Sphk			return (0);
49030492Sphk		case _PC_PIPE_BUF:
49130492Sphk			*ap->a_retval = PIPE_BUF;
49230492Sphk			return (0);
49330492Sphk		case _PC_CHOWN_RESTRICTED:
49430492Sphk			*ap->a_retval = 1;
49530492Sphk			return (0);
49630492Sphk		case _PC_VDISABLE:
49730492Sphk			*ap->a_retval = _POSIX_VDISABLE;
49830492Sphk			return (0);
49930492Sphk		default:
50030492Sphk			return (EINVAL);
50130492Sphk	}
50230492Sphk	/* NOTREACHED */
50330492Sphk}
50430513Sphk
50530513Sphk/*
50630513Sphk * Standard lock, unlock and islocked functions.
50730513Sphk */
50830513Sphkint
50930513Sphkvop_stdlock(ap)
510169671Skib	struct vop_lock1_args /* {
51130513Sphk		struct vnode *a_vp;
51230513Sphk		int a_flags;
513164248Skmacy		char *file;
514164248Skmacy		int line;
51530513Sphk	} */ *ap;
516112067Skan{
51766355Sbp	struct vnode *vp = ap->a_vp;
51830513Sphk
519176320Sattilio	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
520176320Sattilio	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
521175635Sattilio	    ap->a_line));
52230513Sphk}
52330513Sphk
52491690Seivind/* See above. */
52530513Sphkint
52630513Sphkvop_stdunlock(ap)
52730513Sphk	struct vop_unlock_args /* {
52830513Sphk		struct vnode *a_vp;
52930513Sphk		int a_flags;
53030513Sphk	} */ *ap;
53130513Sphk{
53266355Sbp	struct vnode *vp = ap->a_vp;
53330513Sphk
534175635Sattilio	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
53530513Sphk}
53630513Sphk
53791690Seivind/* See above. */
53830513Sphkint
53930513Sphkvop_stdislocked(ap)
54030513Sphk	struct vop_islocked_args /* {
54130513Sphk		struct vnode *a_vp;
54230513Sphk	} */ *ap;
54330513Sphk{
54430513Sphk
545176559Sattilio	return (lockstatus(ap->a_vp->v_vnlock));
54630513Sphk}
54730513Sphk
54830743Sphk/*
54930743Sphk * Return true for select/poll.
55030743Sphk */
55130743Sphkint
55230743Sphkvop_nopoll(ap)
55330743Sphk	struct vop_poll_args /* {
55430743Sphk		struct vnode *a_vp;
55530743Sphk		int  a_events;
55630743Sphk		struct ucred *a_cred;
55783366Sjulian		struct thread *a_td;
55830743Sphk	} */ *ap;
55930743Sphk{
56031727Swollman
561189450Skib	return (poll_no_poll(ap->a_events));
56230743Sphk}
56330743Sphk
56431727Swollman/*
56531727Swollman * Implement poll for local filesystems that support it.
56631727Swollman */
56730743Sphkint
56831727Swollmanvop_stdpoll(ap)
56931727Swollman	struct vop_poll_args /* {
57031727Swollman		struct vnode *a_vp;
57131727Swollman		int  a_events;
57231727Swollman		struct ucred *a_cred;
57383366Sjulian		struct thread *a_td;
57431727Swollman	} */ *ap;
57531727Swollman{
57676578Sjlemon	if (ap->a_events & ~POLLSTANDARD)
57783366Sjulian		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
57876578Sjlemon	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
57931727Swollman}
58031727Swollman
58130743Sphk/*
58262976Smckusick * Return our mount point, as we will take charge of the writes.
58362976Smckusick */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case or mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		/* Re-check after taking the ref; back out if v_mount moved. */
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}
61162976Smckusick
61291690Seivind/* XXX Needs good comment and VOP_BMAP(9) manpage */
61376131Sphkint
61476131Sphkvop_stdbmap(ap)
615112067Skan	struct vop_bmap_args /* {
61676131Sphk		struct vnode *a_vp;
61776131Sphk		daddr_t  a_bn;
618137726Sphk		struct bufobj **a_bop;
61976131Sphk		daddr_t *a_bnp;
62076131Sphk		int *a_runp;
62176131Sphk		int *a_runb;
62276131Sphk	} */ *ap;
62376131Sphk{
62476131Sphk
625137726Sphk	if (ap->a_bop != NULL)
626137726Sphk		*ap->a_bop = &ap->a_vp->v_bufobj;
62776131Sphk	if (ap->a_bnp != NULL)
62876131Sphk		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
62976131Sphk	if (ap->a_runp != NULL)
63076131Sphk		*ap->a_runp = 0;
63176131Sphk	if (ap->a_runb != NULL)
63276131Sphk		*ap->a_runb = 0;
63376131Sphk	return (0);
63476131Sphk}
63576131Sphk
/*
 * vop_stdfsync:
 *
 * Write out the vnode's dirty buffers.  For MNT_WAIT the function also
 * waits for in-flight I/O and retries (up to maxretry passes) until the
 * dirty list drains, returning EAGAIN if it never does.
 */
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
                bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (ap->a_waitfor != MNT_WAIT)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
			    BO_LOCKPTR(bo)) != 0) {
				/* The bufobj lock was dropped; rescan. */
				BO_LOCK(bo);
				goto loop1;
			}
			BO_LOCK(bo);
		}
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		/* Writing may have changed the dirty list; restart the scan. */
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}
726112067Skan
72791690Seivind/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
72876167Sphkint
72976167Sphkvop_stdgetpages(ap)
73076167Sphk	struct vop_getpages_args /* {
73176167Sphk		struct vnode *a_vp;
73276167Sphk		vm_page_t *a_m;
73376167Sphk		int a_count;
734292373Sglebius		int *a_rbehind;
735292373Sglebius		int *a_rahead;
73676167Sphk	} */ *ap;
73776167Sphk{
73876131Sphk
73976167Sphk	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
740292373Sglebius	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
74176167Sphk}
74276167Sphk
743274914Sglebiusstatic int
744274914Sglebiusvop_stdgetpages_async(struct vop_getpages_async_args *ap)
745274914Sglebius{
746274914Sglebius	int error;
747274914Sglebius
748292373Sglebius	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
749292373Sglebius	    ap->a_rahead);
750292373Sglebius	ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
751274914Sglebius	return (error);
752274914Sglebius}
753274914Sglebius
/* Standard kqueue filter attach: delegate to the generic VFS handler. */
int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{

	return (vfs_kqfilter(ap));
}
759147198Sssouhlal
76091690Seivind/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
76176319Sphkint
76276167Sphkvop_stdputpages(ap)
76376167Sphk	struct vop_putpages_args /* {
76476167Sphk		struct vnode *a_vp;
76576167Sphk		vm_page_t *a_m;
76676167Sphk		int a_count;
76776167Sphk		int a_sync;
76876167Sphk		int *a_rtvals;
76976167Sphk	} */ *ap;
77076167Sphk{
77176167Sphk
77276319Sphk	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
77376167Sphk	     ap->a_sync, ap->a_rtvals);
77476167Sphk}
77576167Sphk
776166774Spjdint
777166774Spjdvop_stdvptofh(struct vop_vptofh_args *ap)
778166774Spjd{
779166795Spjd	return (EOPNOTSUPP);
780166774Spjd}
781166774Spjd
/*
 * vop_stdvptocnp:
 *
 * Default vnode-to-component-name translation: open the parent directory
 * via "..", scan its entries with VOP_READDIR() for one whose inode
 * number matches vp, and copy that name into the tail of the caller's
 * buffer (the name is stored ending at buf[*buflen]; on success *buflen
 * is updated to the starting index and the parent is returned referenced
 * in *a_vpp).  The union-mount case is handled by also checking that the
 * name is not shadowed in the covered directory.
 */
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	/* Drop our lock while opening ".."; restore the old state on exit. */
	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	/*
	 * If ".." crossed into the root of a union mount, search the
	 * covered (lower) directory instead.
	 */
	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				/*
				 * A same-named entry in the upper layer
				 * shadows this one; report ENOENT.
				 */
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				/* Caller's buffer is too small for the name. */
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}
907189539Smarcus
/*
 * Default implementation of VOP_ALLOCATE() (backing posix_fallocate(2)).
 *
 * There is no generic way to ask a filesystem to allocate backing store,
 * so emulate it: for each chunk of [offset, offset + len), read whatever
 * is there (zero-filling anything the read did not cover or that lies
 * beyond the old EOF) and write it back, forcing the filesystem to
 * allocate blocks for the range.
 *
 * On return, *ap->a_len holds the number of bytes still to be allocated
 * and *ap->a_offset the position where work stopped, so the caller can
 * restart after a partial pass (the loop breaks out on should_yield()).
 */
int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs sfs;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	/* Transfer in filesystem-block-sized chunks, capped at MAXPHYS. */
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	error = VFS_STATFS(vp->v_mount, &sfs, td);
	if (error != 0)
		goto out;
	if (sfs.f_maxfilesize) {
		if (offset > sfs.f_maxfilesize || len > sfs.f_maxfilesize ||
		    offset + len > sfs.f_maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize
		 * by temporarily growing the file to the requested end and
		 * then restoring the original size; if the range is not
		 * representable the first VOP_SETATTR fails and we bail.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		/* Trim the first chunk so later ones are iosize-aligned. */
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				/* Zero whatever the short read left unfilled. */
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			/* Chunk lies entirely beyond the old EOF. */
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		/* Yield the CPU; the caller restarts with updated len/offset. */
		if (should_yield())
			break;
	}

 out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}
1035220791Smdf
/*
 * Default implementation of VOP_ADVISE() (backing posix_fadvise(2)).
 *
 * POSIX_FADV_WILLNEED is accepted but is currently a no-op; filesystems
 * wanting read-ahead must supply their own method.  POSIX_FADV_DONTNEED
 * pushes the range's pages out of the backing VM object and releases the
 * corresponding clean and dirty buffers.  All other advice values return
 * EINVAL.
 */
int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			/* Vnode is being reclaimed; nothing to do. */
			VOP_UNLOCK(vp, 0);
			break;
		}

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(ap->a_start);
			end = round_page(ap->a_end);
			VM_OBJECT_WLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_WUNLOCK(vp->v_object);
		}

		/*
		 * Release the buffers covering the range.  An EAGAIN from
		 * bnoreuselist() restarts the scan of that pass from the
		 * top — NOTE(review): presumably because the list changed
		 * while the bufobj lock was dropped; confirm against
		 * bnoreuselist() in vfs_bio.c.
		 */
		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		bsize = vp->v_bufobj.bo_bsize;
		startn = ap->a_start / bsize;
		endn = ap->a_end / bsize;
		for (;;) {
			error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
			if (error == EAGAIN)
				continue;
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
			if (error == EAGAIN)
				continue;
			break;
		}
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
1101227070Sjhb
1102232317Strocinyint
1103232317Strocinyvop_stdunp_bind(struct vop_unp_bind_args *ap)
1104232317Strociny{
1105232317Strociny
1106232317Strociny	ap->a_vp->v_socket = ap->a_socket;
1107232317Strociny	return (0);
1108232317Strociny}
1109232317Strociny
1110232317Strocinyint
1111232317Strocinyvop_stdunp_connect(struct vop_unp_connect_args *ap)
1112232317Strociny{
1113232317Strociny
1114232317Strociny	*ap->a_socket = ap->a_vp->v_socket;
1115232317Strociny	return (0);
1116232317Strociny}
1117232317Strociny
1118232317Strocinyint
1119232317Strocinyvop_stdunp_detach(struct vop_unp_detach_args *ap)
1120232317Strociny{
1121232317Strociny
1122232317Strociny	ap->a_vp->v_socket = NULL;
1123232317Strociny	return (0);
1124232317Strociny}
1125232317Strociny
1126241025Skibstatic int
1127241025Skibvop_stdis_text(struct vop_is_text_args *ap)
1128241025Skib{
1129241025Skib
1130241025Skib	return ((ap->a_vp->v_vflag & VV_TEXT) != 0);
1131241025Skib}
1132241025Skib
1133241025Skibstatic int
1134241025Skibvop_stdset_text(struct vop_set_text_args *ap)
1135241025Skib{
1136241025Skib
1137241025Skib	ap->a_vp->v_vflag |= VV_TEXT;
1138241025Skib	return (0);
1139241025Skib}
1140241025Skib
1141241025Skibstatic int
1142241025Skibvop_stdunset_text(struct vop_unset_text_args *ap)
1143241025Skib{
1144241025Skib
1145241025Skib	ap->a_vp->v_vflag &= ~VV_TEXT;
1146241025Skib	return (0);
1147241025Skib}
1148241025Skib
1149242476Skibstatic int
1150242476Skibvop_stdget_writecount(struct vop_get_writecount_args *ap)
1151242476Skib{
1152242476Skib
1153242476Skib	*ap->a_writecount = ap->a_vp->v_writecount;
1154242476Skib	return (0);
1155242476Skib}
1156242476Skib
1157242476Skibstatic int
1158242476Skibvop_stdadd_writecount(struct vop_add_writecount_args *ap)
1159242476Skib{
1160242476Skib
1161242476Skib	ap->a_vp->v_writecount += ap->a_inc;
1162242476Skib	return (0);
1163242476Skib}
1164242476Skib
/*
 * VFS default ops, used to fill the VFS function table so that
 * unimplemented operations return reasonable default values.
 */
116991690Seivindint
1170191990Sattiliovfs_stdroot (mp, flags, vpp)
117151068Salfred	struct mount *mp;
1172144054Sjeff	int flags;
117351068Salfred	struct vnode **vpp;
117451068Salfred{
1175131734Salfred
117651068Salfred	return (EOPNOTSUPP);
117751068Salfred}
117851068Salfred
117991690Seivindint
1180191990Sattiliovfs_stdstatfs (mp, sbp)
118151068Salfred	struct mount *mp;
118251068Salfred	struct statfs *sbp;
118351068Salfred{
1184131734Salfred
118551068Salfred	return (EOPNOTSUPP);
118651068Salfred}
118751068Salfred
118851068Salfredint
1189191990Sattiliovfs_stdquotactl (mp, cmds, uid, arg)
119051068Salfred	struct mount *mp;
119151068Salfred	int cmds;
119251068Salfred	uid_t uid;
1193153400Sdes	void *arg;
119451068Salfred{
1195131734Salfred
119651068Salfred	return (EOPNOTSUPP);
119751068Salfred}
119851068Salfred
1199112067Skanint
1200191990Sattiliovfs_stdsync(mp, waitfor)
120151068Salfred	struct mount *mp;
120251068Salfred	int waitfor;
120351068Salfred{
1204154152Stegge	struct vnode *vp, *mvp;
1205191990Sattilio	struct thread *td;
1206112119Skan	int error, lockreq, allerror = 0;
1207112119Skan
1208191990Sattilio	td = curthread;
1209112119Skan	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
1210112119Skan	if (waitfor != MNT_WAIT)
1211112119Skan		lockreq |= LK_NOWAIT;
1212112119Skan	/*
1213112119Skan	 * Force stale buffer cache information to be flushed.
1214112119Skan	 */
1215112119Skanloop:
1216234386Smckusick	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1217234386Smckusick		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1218234386Smckusick			VI_UNLOCK(vp);
1219177493Sjeff			continue;
1220234386Smckusick		}
1221112119Skan		if ((error = vget(vp, lockreq, td)) != 0) {
1222154152Stegge			if (error == ENOENT) {
1223234386Smckusick				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1224112119Skan				goto loop;
1225154152Stegge			}
1226112119Skan			continue;
1227112119Skan		}
1228140048Sphk		error = VOP_FSYNC(vp, waitfor, td);
1229112119Skan		if (error)
1230112119Skan			allerror = error;
1231204065Spjd		vput(vp);
1232112119Skan	}
1233112119Skan	return (allerror);
1234112119Skan}
1235112119Skan
/*
 * Default VFS_SYNC() for filesystems with nothing to flush: report
 * success without doing any work.
 */
int
vfs_stdnosync(struct mount *mp, int waitfor)
{

	return (0);
}
124451068Salfred
1245112067Skanint
124692462Smckusickvfs_stdvget (mp, ino, flags, vpp)
124751068Salfred	struct mount *mp;
124851068Salfred	ino_t ino;
124992462Smckusick	int flags;
125051068Salfred	struct vnode **vpp;
125151068Salfred{
1252131734Salfred
125351068Salfred	return (EOPNOTSUPP);
125451068Salfred}
125551068Salfred
1256112067Skanint
1257222167Srmacklemvfs_stdfhtovp (mp, fhp, flags, vpp)
125851068Salfred	struct mount *mp;
125951068Salfred	struct fid *fhp;
1260222167Srmacklem	int flags;
126151138Salfred	struct vnode **vpp;
126251138Salfred{
1263131734Salfred
126451138Salfred	return (EOPNOTSUPP);
126551138Salfred}
126651138Salfred
/*
 * Default VFS_INIT(): no per-filesystem initialization needed.
 */
int
vfs_stdinit(struct vfsconf *vfsp)
{

	return (0);
}
127451068Salfred
/*
 * Default VFS_UNINIT(): no per-filesystem teardown needed.
 */
int
vfs_stduninit(struct vfsconf *vfsp)
{

	return (0);
}
128251068Salfred
128354803Srwatsonint
1284191990Sattiliovfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
128554803Srwatson	struct mount *mp;
128654803Srwatson	int cmd;
128774273Srwatson	struct vnode *filename_vp;
128874437Srwatson	int attrnamespace;
128956272Srwatson	const char *attrname;
129054803Srwatson{
1291131734Salfred
1292101786Sphk	if (filename_vp != NULL)
1293175294Sattilio		VOP_UNLOCK(filename_vp, 0);
1294131734Salfred	return (EOPNOTSUPP);
129554803Srwatson}
129654803Srwatson
1297131733Salfredint
1298131733Salfredvfs_stdsysctl(mp, op, req)
1299131733Salfred	struct mount *mp;
1300131733Salfred	fsctlop_t op;
1301131733Salfred	struct sysctl_req *req;
1302131733Salfred{
1303131733Salfred
1304131733Salfred	return (EOPNOTSUPP);
1305131733Salfred}
1306131733Salfred
130751068Salfred/* end of vfs default ops */
1308