/* vfs_cache.c revision 60938 */
1/*
2 * Copyright (c) 1989, 1993, 1995
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Poul-Henning Kamp of the FreeBSD Project.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
37 * $FreeBSD: head/sys/kern/vfs_cache.c 60938 2000-05-26 02:09:24Z jake $
38 */
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/sysctl.h>
44#include <sys/mount.h>
45#include <sys/vnode.h>
46#include <sys/namei.h>
47#include <sys/malloc.h>
48#include <sys/sysproto.h>
49#include <sys/proc.h>
50#include <sys/filedesc.h>
51
/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to; NULL for a negative entry */
	u_char	nc_flag;		/* flag bits (NCF_*) */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name, allocated inline after the struct */
};
67
68/*
69 * Name caching works as follows:
70 *
71 * Names found by directory scans are retained in a cache
72 * for future reference.  It is managed LRU, so frequently
73 * used names will hang around.  Cache is indexed by hash value
74 * obtained from (vp, name) where vp refers to the directory
75 * containing name.
76 *
77 * If it is a "negative" entry, (i.e. for a name that is known NOT to
78 * exist) the vnode pointer will be NULL.
79 *
80 * Upon reaching the last segment of a path, if the reference
81 * is for DELETE, or NOCACHE is set (rewrite), and the
82 * name is located in the cache, it will be dropped.
83 */
84
/*
 * Structures associated with name cacheing.
 */
/*
 * Hash bucket for a (directory, name) pair: the directory's capability
 * number (v_id) is mixed into the additive name hash.
 */
#define NCHHASH(dvp, hash) \
	(&nchashtbl[((dvp)->v_id + (hash)) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* LRU list of negative entries */
static u_long	nchash;			/* size of hash table */
SYSCTL_INT(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;		/* number of negative cache entries */
SYSCTL_INT(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_INT(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
/*
 * NOTE(review): the counters above are u_long but exported via
 * SYSCTL_INT; the widths differ on LP64 platforms - confirm, and
 * consider SYSCTL_ULONG if available in this tree.
 */
struct	nchstats nchstats;		/* cache effectiveness statistics */

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
/* Export one statistics counter under the vfs.cache sysctl node. */
#define STATNODE(mode, name, var) \
	SYSCTL_INT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);


static void cache_zap __P((struct namecache *ncp));

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1
135/*
136 * Delete an entry from its hash list and move it to the front
137 * of the LRU list for immediate reuse.
138 */
139static void
140cache_zap(ncp)
141	struct namecache *ncp;
142{
143	LIST_REMOVE(ncp, nc_hash);
144	LIST_REMOVE(ncp, nc_src);
145	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src))
146		vdrop(ncp->nc_dvp);
147	if (ncp->nc_vp) {
148		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
149	} else {
150		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
151		numneg--;
152	}
153	numcache--;
154	free(ncp, M_VFSCACHE);
155}
156
/*
 * Lookup an entry in the cache
 *
 * We don't do this if the segment name is long, simply so the cache
 * can avoid holding long names (which would either waste space, or
 * add greatly to the complexity).
 * NOTE(review): no explicit length check appears below; presumably
 * namei's NAME_MAX limit keeps component names short - confirm.
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative cacheing), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.
 */

int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_long hash;
	u_char *cp;
	int len;

	/* Cache disabled: force the caller to do a real lookup. */
	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	numcalls++;

	/* "." and ".." are resolved specially, never via the hash table. */
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			dothits++;
			return (-1);
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			/*
			 * v_dd is only a soft reference; it is valid only
			 * while v_ddid still matches its capability number.
			 */
			if (dvp->v_dd->v_id != dvp->v_ddid ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				dvp->v_ddid = 0;
				return (0);
			}
			*vpp = dvp->v_dd;
			return (-1);
		}
	}

	/* Additive byte hash over the name; NCHHASH mixes in dvp->v_id. */
	hash = 0;
	len = cnp->cn_namelen;
	for (cp = cnp->cn_nameptr; len; len--, cp++)
		hash += *cp;
	LIST_FOREACH(ncp, (NCHHASH(dvp, hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == 0) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
        if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		return (-1);
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match, ENOENT notifies client of this match.
	 * The nc_vpid field records whether this is a whiteout.
	 * Requeue the entry at the tail of the negative LRU so it is the
	 * last candidate recycled by cache_enter()'s trimming.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}
266
/*
 * Add an entry to the cache.
 * A NULL vp records a negative entry (name known not to exist).
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_long hash;
	u_char *cp, *dp;
	int len;

	if (!doingcache)
		return;

	/* "." is never entered; ".." only updates dvp's soft v_dd link. */
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			if (vp) {
				dvp->v_dd = vp;
				dvp->v_ddid = vp->v_id;
			} else {
				dvp->v_dd = dvp;
				dvp->v_ddid = 0;
			}
			return;
		}
	}

	/*
	 * The name is stored inline after the structure.  nc_nlen is a
	 * u_char; NOTE(review): no length check here, presumably namei's
	 * NAME_MAX limit guarantees cn_namelen fits - confirm.
	 */
	ncp = (struct namecache *)
		malloc(sizeof *ncp + cnp->cn_namelen, M_VFSCACHE, M_WAITOK);
	bzero((char *)ncp, sizeof *ncp);
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		/* Keep the child directory's ".." soft reference current. */
		vp->v_dd = dvp;
		vp->v_ddid = dvp->v_id;
	}

	/*
	 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
	 * For negative entries, we have to record whether it is a whiteout.
	 * the whiteout flag is stored in the nc_vpid field which is
	 * otherwise unused.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	/* Copy the name and compute its additive hash in a single pass. */
	hash = 0;
	dp = ncp->nc_name;
	for (cp = cnp->cn_nameptr; len; len--, cp++, dp++)
		hash += (*dp = *cp);
	ncpp = NCHHASH(dvp, hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	/* The first entry hanging off dvp takes a vhold() on it. */
	if (LIST_EMPTY(&dvp->v_cache_src))
		vhold(dvp);
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
	/* Trim the oldest negative entry if they dominate the cache. */
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		cache_zap(ncp);
	}
}
341
342/*
343 * Name cache initialization, from vfs_init() when we are booting
344 */
345void
346nchinit()
347{
348
349	TAILQ_INIT(&ncneg);
350	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
351}
352
353/*
354 * Invalidate all entries to a particular vnode.
355 *
356 * Remove all entries in the namecache relating to this vnode and
357 * change the v_id.  We take the v_id from a global counter, since
358 * it becomes a handy sequence number in crash-dumps that way.
359 * No valid vnode will ever have (v_id == 0).
360 *
361 * XXX: Only time and the size of v_id prevents this from failing:
362 * XXX: In theory we should hunt down all (struct vnode*, v_id)
363 * XXX: soft references and nuke them, at least on the global
364 * XXX: v_id wraparound.  The period of resistance can be extended
365 * XXX: by incrementing each vnodes v_id individually instead of
366 * XXX: using the global v_id.
367 */
368
369void
370cache_purge(vp)
371	struct vnode *vp;
372{
373	static u_long nextid;
374
375	while (!LIST_EMPTY(&vp->v_cache_src))
376		cache_zap(LIST_FIRST(&vp->v_cache_src));
377	while (!TAILQ_EMPTY(&vp->v_cache_dst))
378		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
379
380	do
381		nextid++;
382	while (nextid == vp->v_id || !nextid);
383	vp->v_id = nextid;
384	vp->v_dd = vp;
385	vp->v_ddid = 0;
386}
387
388/*
389 * Flush all entries referencing a particular filesystem.
390 *
391 * Since we need to check it anyway, we will flush all the invalid
392 * entries at the same time.
393 */
394void
395cache_purgevfs(mp)
396	struct mount *mp;
397{
398	struct nchashhead *ncpp;
399	struct namecache *ncp, *nnp;
400
401	/* Scan hash tables for applicable entries */
402	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
403		for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
404			nnp = LIST_NEXT(ncp, nc_hash);
405			if (ncp->nc_dvp->v_mount == mp) {
406				cache_zap(ncp);
407			}
408		}
409	}
410}
411
/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 * Returns a locked, referenced vnode in *ap->a_vpp on a cache hit.
 */

int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *vdp;
	struct vnode *pdp;
	int lockparent;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct proc *p = cnp->cn_proc;
	u_long vpid;	/* capability number of vnode */

	*vpp = NULL;
	vdp = ap->a_dvp;
	lockparent = flags & LOCKPARENT;

	if (vdp->v_type != VDIR)
                return (ENOTDIR);

	/* Refuse destructive ops on the last component of a read-only fs. */
	if ((flags & ISLASTCN) && (vdp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(vdp, VEXEC, cred, cnp->cn_proc);

	if (error)
		return (error);

	error = cache_lookup(vdp, vpp, cnp);

	/* Miss (0): let the filesystem do the real lookup. */
	if (!error)
		return (VOP_CACHEDLOOKUP(ap->a_dvp, ap->a_vpp, ap->a_cnp));

	/* Negative hit: report ENOENT without calling the filesystem. */
	if (error == ENOENT)
		return (error);

	/* Positive hit (-1): lock the result per the namei protocol. */
	pdp = vdp;
	vdp = *vpp;
	vpid = vdp->v_id;
	if (pdp == vdp) {   /* lookup on "." */
		VREF(vdp);
		error = 0;
	} else if (flags & ISDOTDOT) {
		/* For "..", unlock the child before locking the parent. */
		VOP_UNLOCK(pdp, 0, p);
		error = vget(vdp, LK_EXCLUSIVE, p);
		if (!error && lockparent && (flags & ISLASTCN))
			error = vn_lock(pdp, LK_EXCLUSIVE, p);
	} else {
		error = vget(vdp, LK_EXCLUSIVE, p);
		if (!lockparent || error || !(flags & ISLASTCN))
			VOP_UNLOCK(pdp, 0, p);
	}
	/*
	 * Check that the capability number did not change
	 * while we were waiting for the lock.
	 */
	if (!error) {
		if (vpid == vdp->v_id)
			return (0);
		/* Stale entry: drop it and fall back to a real lookup. */
		vput(vdp);
		if (lockparent && pdp != vdp && (flags & ISLASTCN))
			VOP_UNLOCK(pdp, 0, p);
	}
	/* Relock the directory and retry through the filesystem. */
	error = vn_lock(pdp, LK_EXCLUSIVE, p);
	if (error)
		return (error);
	return (VOP_CACHEDLOOKUP(ap->a_dvp, ap->a_vpp, ap->a_cnp));
}
492
493
#ifndef _SYS_SYSPROTO_H_
/* Argument layout for the __getcwd(2) system call. */
struct  __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

/*
 * NOTE(review): this re-#define of STATNODE is byte-identical to the
 * earlier one, so it is legal but redundant.
 */
#define STATNODE(mode, name, var) \
	SYSCTL_INT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

/* __getcwd() outcome counters, exported under vfs.cache. */
static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
513int
514__getcwd(p, uap)
515	struct proc *p;
516	struct __getcwd_args *uap;
517{
518	char *bp, *buf;
519	int error, i, slash_prefixed;
520	struct filedesc *fdp;
521	struct namecache *ncp;
522	struct vnode *vp;
523
524	numcwdcalls++;
525	if (disablecwd)
526		return (ENODEV);
527	if (uap->buflen < 2)
528		return (EINVAL);
529	if (uap->buflen > MAXPATHLEN)
530		uap->buflen = MAXPATHLEN;
531	buf = bp = malloc(uap->buflen, M_TEMP, M_WAITOK);
532	bp += uap->buflen - 1;
533	*bp = '\0';
534	fdp = p->p_fd;
535	slash_prefixed = 0;
536	for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
537		if (vp->v_flag & VROOT) {
538			if (vp->v_mount == NULL)	/* forced unmount */
539				return (EBADF);
540			vp = vp->v_mount->mnt_vnodecovered;
541			continue;
542		}
543		if (vp->v_dd->v_id != vp->v_ddid) {
544			numcwdfail1++;
545			free(buf, M_TEMP);
546			return (ENOTDIR);
547		}
548		ncp = TAILQ_FIRST(&vp->v_cache_dst);
549		if (!ncp) {
550			numcwdfail2++;
551			free(buf, M_TEMP);
552			return (ENOENT);
553		}
554		if (ncp->nc_dvp != vp->v_dd) {
555			numcwdfail3++;
556			free(buf, M_TEMP);
557			return (EBADF);
558		}
559		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
560			if (bp == buf) {
561				numcwdfail4++;
562				free(buf, M_TEMP);
563				return (ENOMEM);
564			}
565			*--bp = ncp->nc_name[i];
566		}
567		if (bp == buf) {
568			numcwdfail4++;
569			free(buf, M_TEMP);
570			return (ENOMEM);
571		}
572		*--bp = '/';
573		slash_prefixed = 1;
574		vp = vp->v_dd;
575	}
576	if (!slash_prefixed) {
577		if (bp == buf) {
578			numcwdfail4++;
579			free(buf, M_TEMP);
580			return (ENOMEM);
581		}
582		*--bp = '/';
583	}
584	numcwdfound++;
585	error = copyout(bp, uap->buf, strlen(bp) + 1);
586	free(buf, M_TEMP);
587	return (error);
588}
589
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
/* Redefined: now declares the u_int counter AND exports it. */
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_INT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

/* textvp_fullpath() outcome counters, exported under vfs.cache. */
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);
609
/*
 * Build the pathname of the process's text (executable) vnode from the
 * name cache alone.  On success *retbuf points at the path inside a
 * MAXPATHLEN buffer returned via *retfreebuf, which the caller must
 * free with free(*retfreebuf, M_TEMP).
 */
int
textvp_fullpath(struct proc *p, char **retbuf, char **retfreebuf) {
	char *bp, *buf;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp, *textvp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);
	textvp = p->p_textvp;
	if (textvp == NULL)
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	/* Build the path from the tail of the buffer toward the front. */
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;
	for (vp = textvp; vp != fdp->fd_rdir && vp != rootvnode;) {
		if (vp->v_flag & VROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				free(buf, M_TEMP);
				return (EBADF);
			}
			/* Hop from a mount root to the covered vnode. */
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		/*
		 * Validate the ".." soft reference; the check is skipped
		 * for textvp itself - presumably because textvp is not a
		 * directory and has no meaningful v_dd (confirm).
		 */
		if (vp != textvp && vp->v_dd->v_id != vp->v_ddid) {
			numfullpathfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (vp != textvp && ncp->nc_dvp != vp->v_dd) {
			numfullpathfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		/* Prepend this component's name. */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		/* Unlike __getcwd(), ascend via the cache entry's parent. */
		vp = ncp->nc_dvp;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*retfreebuf = buf;
	return (0);
}
684