vfs_default.c revision 315475
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/vfs_default.c 315475 2017-03-18 05:53:09Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdset_text(struct vop_set_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdget_writecount(struct vop_get_writecount_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess(), and so on.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_get_writecount =	vop_stdget_writecount,
	.vop_add_writecount =	vop_stdadd_writecount,
};
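
/*
 * Illustrative sketch (not part of this file): a filesystem normally points
 * .vop_default at this table from its own vop_vector and overrides only the
 * operations it actually implements.  The "myfs" names below are
 * hypothetical:
 *
 *	struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_readdir =	myfs_readdir,
 *	};
 */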

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

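/*
 * Read the next directory entry from the locked directory vnode "vp".
 * The caller supplies a scratch buffer ("dirbuf" of "dirbuflen" bytes);
 * when the previously fetched chunk (*cpos/*len) is exhausted, it is
 * refilled via VOP_READDIR() at offset *off.  On success *dpp points at
 * the next entry; *eofflag is set once the end of the directory is reached.
 */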
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode; returns 1 if an
 * entry with that name is found and 0 otherwise (including on error).
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

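/*
 * Default access checks: vop_stdaccess() expresses the request in terms of
 * VOP_ACCESSX(), and vop_stdaccessx() maps the extended access bits back
 * onto VOP_ACCESS().  A filesystem must supply at least one of the two
 * itself, otherwise the pair recurses into each other (see the note above
 * default_vnodeops).
 */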
int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_ASYNC_IO:
			*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
			return (0);
		case _PC_NAME_MAX:
			*ap->a_retval = NAME_MAX;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
	    (ilk != NULL) ? &ilk->lock_object : NULL, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
	    (ilk != NULL) ? &ilk->lock_object : NULL));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (ap->a_waitfor != MNT_WAIT)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
			    BO_LOCKPTR(bo)) != 0) {
				BO_LOCK(bo);
				goto loop1;
			}
			BO_LOCK(bo);
		}
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vn_printf(vp, "fsync: giving up on dirty ");

	return (error);
}

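/*
 * Default fdatasync: fall back to a full VOP_FSYNC(), which is at least as
 * strong as what fdatasync(2) requires.
 */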
static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

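/*
 * fdatasync for filesystems that keep their data in the buffer cache:
 * reuse the generic buffer-flushing vop_stdfsync() above.
 */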
int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{
	struct vop_fsync_args apf;

	apf.a_vp = ap->a_vp;
	apf.a_waitfor = MNT_WAIT;
	apf.a_td = ap->a_td;
	return (vop_stdfsync(&apf));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

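/*
 * Default asynchronous getpages: perform the read synchronously through
 * VOP_GETPAGES() and then invoke the caller's completion callback.
 */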
static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

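/* Default kqueue filter: hand the request to the generic VFS knote code. */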
int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

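/*
 * Reverse lookup: determine the name of the directory vnode "vp" within its
 * parent.  The parent is found by opening ".." relative to vp; on success it
 * is returned referenced in *a_vpp, the component name is copied to the tail
 * of the caller's buffer, and *a_buflen is updated to its starting offset.
 */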
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

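/*
 * Default VOP_ALLOCATE() (posix_fallocate(2)): there is no generic way to
 * reserve blocks, so force allocation by reading each block in the range
 * back and rewriting it, zero-filling anything beyond the current EOF.
 * The loop may stop early to let the caller yield; *a_len and *a_offset
 * are updated to reflect the work that remains.
 */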
int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

 out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

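/*
 * Default posix_fadvise(2) handler.  POSIX_FADV_WILLNEED is a no-op here and
 * is left to filesystems that can start an asynchronous read of the range;
 * POSIX_FADV_DONTNEED deactivates the pages backing the range in the VM
 * object and tells the buffer cache that buffers covering it are unlikely to
 * be reused soon.
 */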
int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			break;
		}

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(ap->a_start);
			end = round_page(ap->a_end);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		bsize = vp->v_bufobj.bo_bsize;
		startn = ap->a_start / bsize;
		endn = ap->a_end / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

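/*
 * Default VOP_UNP_BIND/CONNECT/DETACH: store, look up and clear the
 * unix-domain socket bound to this vnode through the v_socket field.
 */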
int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_socket = ap->a_socket;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_socket = ap->a_vp->v_socket;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_socket = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return ((ap->a_vp->v_vflag & VV_TEXT) != 0);
}

static int
vop_stdset_text(struct vop_set_text_args *ap)
{

	ap->a_vp->v_vflag |= VV_TEXT;
	return (0);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{

	ap->a_vp->v_vflag &= ~VV_TEXT;
	return (0);
}

static int
vop_stdget_writecount(struct vop_get_writecount_args *ap)
{

	*ap->a_writecount = ap->a_vp->v_writecount;
	return (0);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{

	ap->a_vp->v_writecount += ap->a_inc;
	return (0);
}

/*
 * vfs default ops
 * used to fill the vfs function table with reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

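/*
 * Generic VFS_SYNC(): walk every vnode on the mount and flush its dirty
 * buffers with VOP_FSYNC(), restarting the scan if the vnode list changes
 * underneath us.  An error from any vnode is reported, but the walk
 * continues.
 */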
int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */