vfs_cache.c revision 170000
/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_cache.c 170000 2007-05-25 22:23:38Z pjd $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>

#include <vm/uma.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	struct vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name */
};

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
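/*
 * An illustrative sketch of the keying scheme (matching cache_lookup() and
 * cache_enter() below): an entry for "name" under directory vnode dvp is
 * keyed by hashing the name bytes and then the directory vnode pointer,
 *
 *	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *
 * and the entry lives on the chain at NCHHASH(hash), where it is matched by
 * comparing (nc_dvp, nc_nlen, nc_name).
 */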
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* Negative entry LRU list */
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
#if 0
static u_long	numcachepl;		/* number of cache purge for leaf entries */
SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
#endif
struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct mtx cache_lock;
MTX_SYSINIT(vfscache, &cache_lock, "Name Cache", MTX_DEF);

#define	CACHE_LOCK()	mtx_lock(&cache_lock)
#define	CACHE_UNLOCK()	mtx_unlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_large;

#define	CACHE_PATH_CUTOFF	32
#define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF)
#define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX)

#define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
	cache_zone_small : cache_zone_large, M_WAITOK)
#define cache_free(ncp)		do { \
	if (ncp != NULL) \
		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
		    cache_zone_small : cache_zone_large, (ncp)); \
} while (0)
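/*
 * For example, with CACHE_PATH_CUTOFF at 32, a component name of up to 32
 * bytes is carved from cache_zone_small (sizeof(struct namecache) + 32 bytes
 * per entry); anything longer, up to NAME_MAX, comes from cache_zone_large.
 */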
static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
	sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
	sizeof(nchstats), "LU", "VFS cache effectiveness statistics");


static void cache_zap(struct namecache *ncp);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1

/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count;

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		error = SYSCTL_OUT(req, &count, sizeof(count));
		if (error)
			return (error);
	}
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	pct = (used * 100 * 100) / n_nchash;
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_nchash, "I", "nchash chain lengths");

/*
 * cache_zap():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or if it is just a negative cache entry.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	struct vnode *vp;

	mtx_assert(&cache_lock, MA_OWNED);
	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
	vp = NULL;
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
		vp = ncp->nc_dvp;
		numcachehv--;
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		ncp->nc_vp->v_dd = NULL;
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	cache_free(ncp);
	if (vp)
		vdrop(vp);
}
/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up . an extra ref is taken, but the lock is
 * not recursively acquired.
 */

int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_int32_t hash;
	int error, ltype;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
retry:
	CACHE_LOCK();
	numcalls++;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
			    dvp, cnp->cn_nameptr);
			dothits++;
			goto success;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_dd == NULL ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				CACHE_UNLOCK();
				return (0);
			}
			*vpp = dvp->v_dd;
			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
			    dvp, cnp->cn_nameptr, *vpp);
			goto success;
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == 0) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		CACHE_UNLOCK();
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		goto success;
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match, so we shift it to the end of
	 * the "negative" cache entries queue to satisfy LRU.  Also,
	 * check to see if the entry is a whiteout; indicate this to
	 * the componentname, if so.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	CACHE_UNLOCK();
	return (ENOENT);

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	if (dvp == *vpp) {	/* lookup on "." */
		VREF(*vpp);
		CACHE_UNLOCK();
		return (-1);
	}
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp, cnp->cn_thread);
		VOP_UNLOCK(dvp, 0, cnp->cn_thread);
	}
	VI_LOCK(*vpp);
	CACHE_UNLOCK();
	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT)
		vn_lock(dvp, ltype | LK_RETRY, cnp->cn_thread);
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	return (-1);
}
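/*
 * A sketch of how a caller consumes the return protocol above
 * (vfs_cache_lookup() below is the canonical consumer):
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)		miss: fall back to a real directory scan
 *	if (error == -1)	hit: *vpp is returned locked and referenced
 *	if (error == ENOENT)	cached negative entry: the name does not exist
 */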
/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_int32_t hash;
	int hold;
	int zap;
	int len;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
	    ("cache_enter: Adding a doomed vnode"));

	if (!doingcache)
		return;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		/*
		 * For dotdot lookups only cache the v_dd pointer if the
		 * directory has a link back to its parent via v_cache_dst.
		 * Without this an unlinked directory would keep a soft
		 * reference to its parent which could not be NULLd at
		 * cache_purge() time.
		 */
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			CACHE_LOCK();
			if (!TAILQ_EMPTY(&dvp->v_cache_dst))
				dvp->v_dd = vp;
			CACHE_UNLOCK();
			return;
		}
	}

	hold = 0;
	zap = 0;
	ncp = cache_alloc(cnp->cn_namelen);
	CACHE_LOCK();
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		vp->v_dd = dvp;
	} else {
		vp->v_dd = NULL;
	}

	/*
	 * Set the rest of the namecache entry elements, calculate its
	 * hash key and insert it into the appropriate chain within
	 * the cache entries table.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	bcopy(cnp->cn_nameptr, ncp->nc_name, len);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	ncpp = NCHHASH(hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (LIST_EMPTY(&dvp->v_cache_src)) {
		hold = 1;
		numcachehv++;
	}
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		zap = 1;
	}
	if (hold)
		vhold(dvp);
	if (zap)
		cache_zap(ncp);
	CACHE_UNLOCK();
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);

	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL)


/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(vp)
	struct vnode *vp;
{

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	CACHE_LOCK();
	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
	vp->v_dd = NULL;
	CACHE_UNLOCK();
}
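/*
 * A typical filesystem feeds the cache from its lookup path and invalidates
 * on removal or reclaim; roughly (a sketch, not taken from any particular
 * filesystem):
 *
 *	if (cnp->cn_flags & MAKEENTRY)
 *		cache_enter(dvp, *vpp, cnp);	after resolving the name
 *	...
 *	cache_purge(vp);			when vp is removed or reclaimed
 */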
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	CACHE_LOCK();
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
			if (ncp->nc_dvp->v_mount == mp)
				cache_zap(ncp);
		}
	}
	CACHE_UNLOCK();
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == ENOENT)
		return (error);
	return (0);
}


#ifndef _SYS_SYSPROTO_H_
struct __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
   "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
__getcwd(td, uap)
	struct thread *td;
	struct __getcwd_args *uap;
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	int error;

	if (disablecwd)
		return (ENODEV);
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	mtx_lock(&Giant);
	FILEDESC_SLOCK(fdp);
	error = vn_fullpath1(td, fdp->fd_cdir, fdp->fd_rdir, tmpbuf,
	    &bp, buflen);
	FILEDESC_SUNLOCK(fdp);
	mtx_unlock(&Giant);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name) \
	static u_int name; \
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
	"Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the name
 * cache (if available)
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	int error;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	error = vn_fullpath1(td, vn, fdp->fd_rdir, buf, retbuf, MAXPATHLEN);
	FILEDESC_SUNLOCK(fdp);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}
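/*
 * Example consumer of vn_fullpath() (a sketch; vp and td are assumed to be
 * supplied by the caller):
 *
 *	char *fullpath, *freepath;
 *
 *	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
 *		... use fullpath ...
 *		free(freepath, M_TEMP);
 *	}
 */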
/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
	char *buf, char **retbuf, u_int buflen)
{
	char *bp;
	int error, i, slash_prefixed;
	struct namecache *ncp;

	bp = buf + buflen - 1;
	*bp = '\0';
	error = 0;
	slash_prefixed = 0;

	CACHE_LOCK();
	numfullpathcalls++;
	if (vp->v_type != VDIR) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			CACHE_UNLOCK();
			return (ENOENT);
		}
		for (i = ncp->nc_nlen - 1; i >= 0 && bp > buf; i--)
			*--bp = ncp->nc_name[i];
		if (bp == buf) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	while (vp != rdir && vp != rootvnode) {
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
				error = EBADF;
				break;
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_dd == NULL) {
			numfullpathfail1++;
			error = ENOTDIR;
			break;
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			error = ENOENT;
			break;
		}
		MPASS(ncp->nc_dvp == vp->v_dd);
		for (i = ncp->nc_nlen - 1; i >= 0 && bp != buf; i--)
			*--bp = ncp->nc_name[i];
		if (bp == buf) {
			numfullpathfail4++;
			error = ENOMEM;
			break;
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	if (error) {
		CACHE_UNLOCK();
		return (error);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		} else {
			*--bp = '/';
		}
	}
	numfullpathfound++;
	CACHE_UNLOCK();

	*retbuf = bp;
	return (0);
}