vfs_cache.c revision 65665
1/*
2 * Copyright (c) 1989, 1993, 1995
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Poul-Henning Kamp of the FreeBSD Project.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
37 * $FreeBSD: head/sys/kern/vfs_cache.c 65665 2000-09-10 03:46:12Z bp $
38 */
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/sysctl.h>
44#include <sys/mount.h>
45#include <sys/vnode.h>
46#include <sys/namei.h>
47#include <sys/malloc.h>
48#include <sys/sysproto.h>
49#include <sys/proc.h>
50#include <sys/filedesc.h>
51
52/*
53 * This structure describes the elements in the cache of recent
54 * names looked up by namei.
55 */
56
struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list (dvp->v_cache_src) */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list, or ncneg LRU */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to (NULL => negative entry) */
	u_char	nc_flag;		/* flag bits (NCF_WHITE) */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name, allocated inline past the struct */
};
67
68/*
69 * Name caching works as follows:
70 *
71 * Names found by directory scans are retained in a cache
72 * for future reference.  It is managed LRU, so frequently
73 * used names will hang around.  Cache is indexed by hash value
74 * obtained from (vp, name) where vp refers to the directory
75 * containing name.
76 *
77 * If it is a "negative" entry, (i.e. for a name that is known NOT to
78 * exist) the vnode pointer will be NULL.
79 *
80 * Upon reaching the last segment of a path, if the reference
81 * is for DELETE, or NOCACHE is set (rewrite), and the
82 * name is located in the cache, it will be dropped.
83 */
84
/*
 * Structures associated with name caching.
 */
/* Bucket selection: mix the parent's capability number with the name hash. */
#define NCHHASH(dvp, hash) \
	(&nchashtbl[((dvp)->v_id + (hash)) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* LRU list of negative entries */
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;		/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
struct	nchstats nchstats;		/* cache effectiveness statistics */

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics, exported under vfs.cache.*
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);


static void cache_zap __P((struct namecache *ncp));

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1	/* name represents a whiteout */
/*
 * Remove an entry from the cache: unlink it from all lists, maintain
 * the counters, and free it.  (The old comment about moving it to the
 * front of an LRU list was stale; the entry is freed outright.)
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	/* Drop the hold on the parent once it has no cached children left. */
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src))
		vdrop(ncp->nc_dvp);
	if (ncp->nc_vp) {
		/* Positive entry: remove from the target vnode's list. */
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
	} else {
		/* Negative entry: remove from the negative-entry LRU. */
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	free(ncp, M_VFSCACHE);
}
156
157/*
158 * Lookup an entry in the cache
159 *
160 * We don't do this if the segment name is long, simply so the cache
161 * can avoid holding long names (which would either waste space, or
162 * add greatly to the complexity).
163 *
164 * Lookup is called with dvp pointing to the directory to search,
165 * cnp pointing to the name of the entry being sought. If the lookup
166 * succeeds, the vnode is returned in *vpp, and a status of -1 is
167 * returned. If the lookup determines that the name does not exist
168 * (negative cacheing), a status of ENOENT is returned. If the lookup
169 * fails, a status of zero is returned.
170 */
171
int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_long hash;
	u_char *cp;
	int len;

	/* The cache can be disabled via the debug.vfscache sysctl. */
	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	numcalls++;

	/* "." and ".." are resolved specially, without a hash lookup. */
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			/* "." always resolves to the directory itself. */
			*vpp = dvp;
			dothits++;
			return (-1);
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			/*
			 * ".." is cached in dvp->v_dd; v_ddid is the
			 * capability number recorded when v_dd was set,
			 * and a mismatch means v_dd has gone stale.
			 */
			if (dvp->v_dd->v_id != dvp->v_ddid ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				dvp->v_ddid = 0;
				return (0);
			}
			*vpp = dvp->v_dd;
			return (-1);
		}
	}

	/* Simple additive hash of the name; NCHHASH mixes in dvp's v_id. */
	hash = 0;
	len = cnp->cn_namelen;
	for (cp = cnp->cn_nameptr; len; len--, cp++)
		hash += *cp;
	LIST_FOREACH(ncp, (NCHHASH(dvp, hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == 0) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
        if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		return (-1);
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match, ENOENT notifies client of this match.
	 * The NCF_WHITE bit in nc_flag records whether this is a whiteout.
	 * Requeue the entry at the tail of the negative-entry LRU so it
	 * becomes the last candidate for reclamation in cache_enter().
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}
266
267/*
268 * Add an entry to the cache.
269 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_long hash;
	u_char *cp, *dp;
	int len;

	if (!doingcache)
		return;

	/* "." is never entered; ".." is kept in dvp->v_dd / v_ddid instead. */
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			if (vp) {
				dvp->v_dd = vp;
				dvp->v_ddid = vp->v_id;
			} else {
				/* Negative "..": point at self with id 0. */
				dvp->v_dd = dvp;
				dvp->v_ddid = 0;
			}
			return;
		}
	}

	/* The name is stored inline after the struct (nc_name[0]). */
	ncp = (struct namecache *)
		malloc(sizeof *ncp + cnp->cn_namelen, M_VFSCACHE, M_WAITOK);
	bzero((char *)ncp, sizeof *ncp);
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		/* Record the ".." shortcut for the child directory. */
		vp->v_dd = dvp;
		vp->v_ddid = dvp->v_id;
	}

	/*
	 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
	 * For negative entries, we have to record whether it is a whiteout.
	 * The whiteout indication is the NCF_WHITE bit in nc_flag, set above.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	/* Copy the name and compute its hash in a single pass. */
	hash = 0;
	dp = ncp->nc_name;
	for (cp = cnp->cn_nameptr; len; len--, cp++, dp++)
		hash += (*dp = *cp);
	ncpp = NCHHASH(dvp, hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	/* The first cached child puts a hold on the parent (see cache_zap). */
	if (LIST_EMPTY(&dvp->v_cache_src))
		vhold(dvp);
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
	/* Keep negative entries below 1/ncnegfactor of the whole cache. */
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		cache_zap(ncp);
	}
}
341
342/*
343 * Name cache initialization, from vfs_init() when we are booting
344 */
void
nchinit()
{

	/* LRU queue of negative entries, trimmed in cache_enter(). */
	TAILQ_INIT(&ncneg);
	/*
	 * Size the hash table relative to the vnode limit; hashinit()
	 * stores the bucket mask in nchash (used by NCHHASH).
	 */
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
}
352
353/*
354 * Invalidate all entries to a particular vnode.
355 *
356 * Remove all entries in the namecache relating to this vnode and
357 * change the v_id.  We take the v_id from a global counter, since
358 * it becomes a handy sequence number in crash-dumps that way.
359 * No valid vnode will ever have (v_id == 0).
360 *
361 * XXX: Only time and the size of v_id prevents this from failing:
362 * XXX: In theory we should hunt down all (struct vnode*, v_id)
363 * XXX: soft references and nuke them, at least on the global
364 * XXX: v_id wraparound.  The period of resistance can be extended
365 * XXX: by incrementing each vnodes v_id individually instead of
366 * XXX: using the global v_id.
367 */
368
void
cache_purge(vp)
	struct vnode *vp;
{
	static u_long nextid;

	/* Zap every entry naming a child of vp, then every entry naming vp. */
	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));

	/*
	 * Assign a fresh capability number from the global counter,
	 * skipping 0 and the value the vnode already has, so stale
	 * (vnode, v_id) soft references no longer match.
	 */
	do
		nextid++;
	while (nextid == vp->v_id || !nextid);
	vp->v_id = nextid;
	vp->v_dd = vp;
	vp->v_ddid = 0;
}
387
388/*
389 * Flush all entries referencing a particular filesystem.
390 *
391 * Since we need to check it anyway, we will flush all the invalid
392 * entries at the same time.
393 */
394void
395cache_purgevfs(mp)
396	struct mount *mp;
397{
398	struct nchashhead *ncpp;
399	struct namecache *ncp, *nnp;
400
401	/* Scan hash tables for applicable entries */
402	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
403		for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
404			nnp = LIST_NEXT(ncp, nc_hash);
405			if (ncp->nc_dvp->v_mount == mp) {
406				cache_zap(ncp);
407			}
408		}
409	}
410}
411
412/*
413 * Perform canonical checks and cache lookup and pass on to filesystem
414 * through the vop_cachedlookup only if needed.
415 */
416
int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp, *vp;
	int lockparent;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct proc *p = cnp->cn_proc;
	u_long vpid;	/* capability number of vnode */

	*vpp = NULL;
	dvp = ap->a_dvp;
	lockparent = flags & LOCKPARENT;

	if (dvp->v_type != VDIR)
                return (ENOTDIR);

	/* Modifying lookups on a read-only filesystem fail up front. */
	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	/* The caller must be able to search the directory. */
	error = VOP_ACCESS(dvp, VEXEC, cred, p);

	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);

	/* Cache miss: fall through to the filesystem's own lookup. */
	if (!error)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));

	/* Negative cache hit: the name is known not to exist. */
	if (error == ENOENT)
		return (error);

	/*
	 * Positive hit.  Acquire a lock/reference on vp, with the usual
	 * special handling of "." and "..".
	 */
	vp = *vpp;
	vpid = vp->v_id;
	if (dvp == vp) {   /* lookup on "." */
		VREF(vp);
		error = 0;
	} else if (flags & ISDOTDOT) {
		/* For "..", unlock the child dir before locking the parent. */
		VOP_UNLOCK(dvp, 0, p);
		error = vget(vp, LK_EXCLUSIVE, p);
		if (!error && lockparent && (flags & ISLASTCN))
			error = vn_lock(dvp, LK_EXCLUSIVE, p);
	} else {
		error = vget(vp, LK_EXCLUSIVE, p);
		if (!lockparent || error || !(flags & ISLASTCN))
			VOP_UNLOCK(dvp, 0, p);
	}
	/*
	 * Check that the capability number did not change
	 * while we were waiting for the lock.
	 */
	if (!error) {
		if (vpid == vp->v_id)
			return (0);
		/* The entry went stale under us; release and retry below. */
		vput(vp);
		if (lockparent && dvp != vp && (flags & ISLASTCN))
			VOP_UNLOCK(dvp, 0, p);
	}
	/* Relock the directory and do an uncached lookup. */
	error = vn_lock(dvp, LK_EXCLUSIVE, p);
	if (error)
		return (error);
	return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
}
490
491
#ifndef _SYS_SYSPROTO_H_
struct  __getcwd_args {
	u_char	*buf;		/* user buffer receiving the path */
	u_int	buflen;		/* size of that buffer */
};
#endif

/* Nonzero (debug.disablecwd) makes __getcwd() fail with ENODEV. */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

/* __getcwd() statistics, exported under vfs.cache.* */
static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);	/* stale v_dd */
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);	/* no cache entry */
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);	/* entry/v_dd mismatch */
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);	/* buffer exhausted */
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
508int
509__getcwd(p, uap)
510	struct proc *p;
511	struct __getcwd_args *uap;
512{
513	char *bp, *buf;
514	int error, i, slash_prefixed;
515	struct filedesc *fdp;
516	struct namecache *ncp;
517	struct vnode *vp;
518
519	numcwdcalls++;
520	if (disablecwd)
521		return (ENODEV);
522	if (uap->buflen < 2)
523		return (EINVAL);
524	if (uap->buflen > MAXPATHLEN)
525		uap->buflen = MAXPATHLEN;
526	buf = bp = malloc(uap->buflen, M_TEMP, M_WAITOK);
527	bp += uap->buflen - 1;
528	*bp = '\0';
529	fdp = p->p_fd;
530	slash_prefixed = 0;
531	for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
532		if (vp->v_flag & VROOT) {
533			if (vp->v_mount == NULL)	/* forced unmount */
534				return (EBADF);
535			vp = vp->v_mount->mnt_vnodecovered;
536			continue;
537		}
538		if (vp->v_dd->v_id != vp->v_ddid) {
539			numcwdfail1++;
540			free(buf, M_TEMP);
541			return (ENOTDIR);
542		}
543		ncp = TAILQ_FIRST(&vp->v_cache_dst);
544		if (!ncp) {
545			numcwdfail2++;
546			free(buf, M_TEMP);
547			return (ENOENT);
548		}
549		if (ncp->nc_dvp != vp->v_dd) {
550			numcwdfail3++;
551			free(buf, M_TEMP);
552			return (EBADF);
553		}
554		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
555			if (bp == buf) {
556				numcwdfail4++;
557				free(buf, M_TEMP);
558				return (ENOMEM);
559			}
560			*--bp = ncp->nc_name[i];
561		}
562		if (bp == buf) {
563			numcwdfail4++;
564			free(buf, M_TEMP);
565			return (ENOMEM);
566		}
567		*--bp = '/';
568		slash_prefixed = 1;
569		vp = vp->v_dd;
570	}
571	if (!slash_prefixed) {
572		if (bp == buf) {
573			numcwdfail4++;
574			free(buf, M_TEMP);
575			return (ENOMEM);
576		}
577		*--bp = '/';
578	}
579	numcwdfound++;
580	error = copyout(bp, uap->buf, strlen(bp) + 1);
581	free(buf, M_TEMP);
582	return (error);
583}
584
585/*
586 * Thus begins the fullpath magic.
587 */
588
#undef STATNODE
/* Redefined: declare the u_int counter and export it in one macro. */
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

/* Nonzero (debug.disablefullpath) makes textvp_fullpath() fail with ENODEV. */
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);	/* stale v_dd */
STATNODE(numfullpathfail2);	/* no cache entry */
STATNODE(numfullpathfail3);	/* entry/v_dd mismatch */
STATNODE(numfullpathfail4);	/* buffer exhausted */
STATNODE(numfullpathfound);
604
/*
 * Reconstruct the path of a process' text (executable) vnode from name
 * cache entries, analogously to __getcwd() above.  On success *retbuf
 * points at the path within the MAXPATHLEN buffer returned via
 * *retfreebuf; the caller is expected to free it (M_TEMP).
 */
int
textvp_fullpath(struct proc *p, char **retbuf, char **retfreebuf) {
	char *bp, *buf;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp, *textvp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);
	textvp = p->p_textvp;
	if (textvp == NULL)
		return (EINVAL);
	/* Build the path backwards from the end of the buffer. */
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;
	for (vp = textvp; vp != fdp->fd_rdir && vp != rootvnode;) {
		if (vp->v_flag & VROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				free(buf, M_TEMP);
				return (EBADF);
			}
			/* Cross the mount point downwards. */
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		/*
		 * The v_dd consistency checks are skipped for textvp
		 * itself; only its cache entry's nc_dvp is followed.
		 */
		if (vp != textvp && vp->v_dd->v_id != vp->v_ddid) {
			numfullpathfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (vp != textvp && ncp->nc_dvp != vp->v_dd) {
			numfullpathfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		/* Prepend this component, then a '/', checking for room. */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	/* The text vnode was the root itself: the path is just "/". */
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*retfreebuf = buf;
	return (0);
}
679