vfs_default.c revision 206094
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 206094 2010-04-02 14:03:43Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

/*
 * Smallest acceptable dirent record length: the fixed dirent header plus
 * 4 bytes of name space (d_name itself is declared MAXNAMLEN + 1 bytes wide).
 */
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
};

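/*
 * Usage note (illustrative sketch, not part of the original file): a
 * filesystem normally chains to this table through the .vop_default member
 * of its own vop_vector, so that any VOP it does not implement falls through
 * to the defaults above.  A hypothetical filesystem "myfs" might declare:
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_read =	myfs_read,
 *		.vop_write =	myfs_write,
 *	};
 *
 * Entries left unset are then resolved by the VOP dispatch code by walking
 * vop_default, ending up at vop_bypass/VOP_EOPNOTSUPP here.
 */
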
/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, can be
 * found in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

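/*
 * Fetch the next directory entry from a locked directory vnode.  The caller
 * supplies a scratch buffer (dirbuf/dirbuflen) and cursor state (*off, *cpos,
 * *len); when the buffer is exhausted (*len == 0) it is refilled with a
 * VOP_READDIR() call, updating *off and *eofflag.  The next entry is returned
 * through *dpp; EINVAL is returned if a record shorter than DIRENT_MINSIZE
 * is encountered.
 */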
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

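/*
 * Default VOP_ACCESS(9): expressed in terms of VOP_ACCESSX(9), just as the
 * default VOP_ACCESSX(9) below is expressed in terms of VOP_ACCESS(9); a
 * filesystem must therefore supply at least one of the two (see the comment
 * above default_vnodeops).
 */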
int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_NAME_MAX:
			*ap->a_retval = NAME_MAX;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}

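/*
 * Illustrative sketch (assumed, not part of the original file): a filesystem
 * with tighter limits typically overrides only the names it cares about and
 * falls back to vop_stdpathconf() for everything else.  For a hypothetical
 * "myfs" with 255-byte names:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = 255;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */
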
/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case, our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * Default bmap: assume a linear, 1:1 mapping of logical blocks onto the
 * vnode's own buffer object (scaled to DEV_BSIZE units), and report no
 * read-ahead/read-behind runs.  See VOP_BMAP(9).
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

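/*
 * Default fsync: walk the vnode's dirty buffer list and start a write for
 * every dirty buffer, using the MARK/SCAN technique below to avoid looping
 * forever while the list is being modified.  For MNT_WAIT requests, wait for
 * the writes to drain and retry a bounded number of times before giving up
 * with EAGAIN.
 */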
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/*
 * Default getpages: delegate to the generic vnode pager.  See VOP_GETPAGES(9).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/*
 * Default putpages: delegate to the generic vnode pager.  See VOP_PUTPAGES(9).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

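/*
 * Default vptocnp: determine the name of a directory vnode within its parent.
 * Opens ".." relative to the vnode and scans it with VOP_READDIR() (via
 * get_next_dirent()) for an entry whose d_fileno matches the vnode's fileid,
 * copying that name into the tail of the caller's buffer.  Only works for
 * directories, since only they have a ".." entry to follow.
 */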
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			bcopy(dp->d_name, buf + i, dp->d_namlen);
			error = 0;
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vhold(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
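/*
 * Usage note (illustrative sketch, with a hypothetical filesystem "myfs"):
 * vfsops entries a filesystem does not care about can simply point at the
 * vfs_std*() functions below, e.g.:
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_unmount =	myfs_unmount,
 *		.vfs_root =	vfs_stdroot,
 *		.vfs_statfs =	vfs_stdstatfs,
 *		.vfs_sync =	vfs_stdnosync,
 *	};
 */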
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		/* bv_cnt is an acceptable race here. */
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
			continue;
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */