vfs_cache.c revision 74384
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: head/sys/kern/vfs_cache.c 74384 2001-03-17 09:31:06Z peter $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name */
};
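/*
 * Note: nc_name is variable-length storage.  cache_enter() allocates each
 * entry with malloc(sizeof(struct namecache) + namelen), so the name bytes
 * live inline at the end of the structure rather than behind a pointer.
 */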

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
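/*
 * Illustrative sketch (not part of this file): a filesystem's lookup path
 * typically consults the cache first and seeds it after doing the real
 * directory scan.  fs_scan_directory() below is a hypothetical stand-in
 * for the filesystem's own on-disk lookup; the rest is this interface.
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == -1)		(positive hit, *vpp is valid)
 *		return (0);
 *	if (error == ENOENT)		(negative hit, name known absent)
 *		return (ENOENT);
 *	error = fs_scan_directory(dvp, vpp, cnp);
 *	if (cnp->cn_flags & MAKEENTRY) {
 *		if (error == 0)
 *			cache_enter(dvp, *vpp, cnp);
 *		else if (error == ENOENT)
 *			cache_enter(dvp, NULL, cnp);	(negative entry)
 *	}
 *	return (error);
 */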

/*
 * Structures associated with name caching.
 */
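/*
 * A hash chain is picked by mixing the directory vnode's capability number
 * (v_id) with the FNV hash of the component name and masking with nchash,
 * so the same name under different directories lands on different chains.
 */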
#define NCHHASH(dvp, hash) \
	(&nchashtbl[((dvp)->v_id + (hash)) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* LRU list of negative entries */
static u_long	nchash;			/* size of hash table - 1 (mask) */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
struct	nchstats nchstats;		/* cache effectiveness statistics */

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
        sizeof(nchstats), "LU", "VFS cache effectiveness statistics");

static void cache_zap __P((struct namecache *ncp));

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1
/*
 * Delete an entry from its hash list, remove it from its source and
 * destination vnode lists, and free it.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src))
		vdrop(ncp->nc_dvp);
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	free(ncp, M_VFSCACHE);
}

/*
 * Lookup an entry in the cache
 *
 * We don't do this if the segment name is long, simply so the cache
 * can avoid holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.
 */

int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_int32_t hash;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	numcalls++;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			dothits++;
			return (-1);
		}
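		/*
		 * ".." is resolved from the v_dd/v_ddid fields cached in
		 * the directory vnode: v_dd points at the last known parent
		 * and v_ddid remembers that parent's v_id at the time.  If
		 * the ids no longer match, the cached parent is stale and
		 * we fall back to the filesystem.
		 */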
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_dd->v_id != dvp->v_ddid ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				dvp->v_ddid = 0;
				return (0);
			}
			*vpp = dvp->v_dd;
			return (-1);
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	LIST_FOREACH(ncp, (NCHHASH(dvp, hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == 0) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		return (-1);
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match; ENOENT notifies the client of this
	 * match.  The NCF_WHITE bit in nc_flag records whether the entry
	 * is a whiteout.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_int32_t hash;
	int len;

	if (!doingcache)
		return;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			if (vp) {
				dvp->v_dd = vp;
				dvp->v_ddid = vp->v_id;
			} else {
				dvp->v_dd = dvp;
				dvp->v_ddid = 0;
			}
			return;
		}
	}

	ncp = (struct namecache *)
		malloc(sizeof *ncp + cnp->cn_namelen, M_VFSCACHE, M_WAITOK);
	bzero((char *)ncp, sizeof *ncp);
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		vp->v_dd = dvp;
		vp->v_ddid = dvp->v_id;
	}
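	/*
	 * Note: the VDIR case above records the parent in the child
	 * directory's v_dd/v_ddid so that ".." lookups and the getcwd/
	 * fullpath code below can walk toward the root without consulting
	 * the hash table.
	 */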

	/*
	 * Fill in cache info.  If vp is NULL this is a "negative" cache
	 * entry.  For negative entries we have to record whether it is a
	 * whiteout; that is done with the NCF_WHITE bit in nc_flag (set
	 * above).
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	bcopy(cnp->cn_nameptr, ncp->nc_name, len);
	ncpp = NCHHASH(dvp, hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (LIST_EMPTY(&dvp->v_cache_src))
		vhold(dvp);
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
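	/*
	 * Keep negative entries from taking over: once they exceed
	 * numcache / ncnegfactor, recycle the oldest one (the head of the
	 * ncneg LRU, since hits reinsert entries at the tail).
	 */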
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		cache_zap(ncp);
	}
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);
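	/*
	 * hashinit() allocates a power-of-two number of buckets derived
	 * from desiredvnodes * 2 and stores the bucket mask in nchash,
	 * which is what NCHHASH() masks with.
	 */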
	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL)


/*
 * Invalidate all entries to a particular vnode.
 *
 * Remove all entries in the namecache relating to this vnode and
 * change the v_id.  We take the v_id from a global counter, since
 * it becomes a handy sequence number in crash-dumps that way.
 * No valid vnode will ever have (v_id == 0).
 *
 * XXX: Only time and the size of v_id prevent this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnode's v_id individually instead of
 * XXX: using the global v_id.
 */

void
cache_purge(vp)
	struct vnode *vp;
{
	static u_long nextid;

	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));

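	/*
	 * Pick a fresh capability number for the vnode, skipping 0 and the
	 * value it already has, so stale (vnode, v_id) soft references such
	 * as v_dd/v_ddid can be detected by the id mismatch.
	 */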
	do
		nextid++;
	while (nextid == vp->v_id || !nextid);
	vp->v_id = nextid;
	vp->v_dd = vp;
	vp->v_ddid = 0;
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (ncp->nc_dvp->v_mount == mp) {
				cache_zap(ncp);
			}
		}
	}
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp, *vp;
	int lockparent;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct proc *p = cnp->cn_proc;
	u_long vpid;	/* capability number of vnode */

	*vpp = NULL;
	dvp = ap->a_dvp;
	lockparent = flags & LOCKPARENT;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, p);

	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);

	if (!error)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));

	if (error == ENOENT)
		return (error);

	vp = *vpp;
	vpid = vp->v_id;
	cnp->cn_flags &= ~PDIRUNLOCK;
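	/*
	 * We have a cache hit; now get a reference and the right locks.
	 * Three cases: "." just gains a reference; ".." drops the parent
	 * lock before locking the child (which is really the parent in the
	 * tree, so locking it while holding dvp risks a lock-order
	 * reversal) and relocks dvp afterwards if LOCKPARENT was requested;
	 * any other name is locked normally, with dvp released unless the
	 * caller asked to keep the parent locked for the last component.
	 */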
	if (dvp == vp) {   /* lookup on "." */
		VREF(vp);
		error = 0;
	} else if (flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0, p);
		cnp->cn_flags |= PDIRUNLOCK;
		error = vget(vp, LK_EXCLUSIVE, p);
		if (!error && lockparent && (flags & ISLASTCN)) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) == 0)
				cnp->cn_flags &= ~PDIRUNLOCK;
		}
	} else {
		error = vget(vp, LK_EXCLUSIVE, p);
		if (!lockparent || error || !(flags & ISLASTCN)) {
			VOP_UNLOCK(dvp, 0, p);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}
	/*
	 * Check that the capability number did not change
	 * while we were waiting for the lock.
	 */
	if (!error) {
		if (vpid == vp->v_id)
			return (0);
		vput(vp);
		if (lockparent && dvp != vp && (flags & ISLASTCN)) {
			VOP_UNLOCK(dvp, 0, p);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}
	if (cnp->cn_flags & PDIRUNLOCK) {
		error = vn_lock(dvp, LK_EXCLUSIVE, p);
		if (error)
			return (error);
		cnp->cn_flags &= ~PDIRUNLOCK;
	}
	return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
}


#ifndef _SYS_SYSPROTO_H_
struct  __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
int
__getcwd(p, uap)
	struct proc *p;
	struct __getcwd_args *uap;
{
	char *bp, *buf;
	int error, i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp;

	numcwdcalls++;
	if (disablecwd)
		return (ENODEV);
	if (uap->buflen < 2)
		return (EINVAL);
	if (uap->buflen > MAXPATHLEN)
		uap->buflen = MAXPATHLEN;
	buf = bp = malloc(uap->buflen, M_TEMP, M_WAITOK);
	bp += uap->buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;
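	/*
	 * Build the path backwards: walk from the current directory up
	 * toward the root via the cached parent (v_dd), prepending each
	 * component name into the tail of the buffer.  Mount points are
	 * crossed by stepping to the covered vnode.
	 */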
	for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
		if (vp->v_flag & VROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				free(buf, M_TEMP);
				return (EBADF);
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_dd->v_id != vp->v_ddid) {
			numcwdfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numcwdfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (ncp->nc_dvp != vp->v_dd) {
			numcwdfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = vp->v_dd;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numcwdfound++;
	error = copyout(bp, uap->buf, strlen(bp) + 1);
	free(buf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
textvp_fullpath(struct proc *p, char **retbuf, char **retfreebuf) {
	char *bp, *buf;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp, *textvp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);
	textvp = p->p_textvp;
	if (textvp == NULL)
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;
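	/*
	 * Same backwards walk as __getcwd(), but it starts from the vnode
	 * of the process's text file and steps upward through each entry's
	 * nc_dvp, since a regular file has no meaningful v_dd of its own.
	 */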
	for (vp = textvp; vp != fdp->fd_rdir && vp != rootvnode;) {
		if (vp->v_flag & VROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				free(buf, M_TEMP);
				return (EBADF);
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp != textvp && vp->v_dd->v_id != vp->v_ddid) {
			numfullpathfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (vp != textvp && ncp->nc_dvp != vp->v_dd) {
			numfullpathfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*retfreebuf = buf;
	return (0);
}