vfs_cache.c revision 187658
/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_cache.c 187658 2009-01-23 22:49:23Z jhb $");

#include <sys/param.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <vm/uma.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	struct vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name */
};

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
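/*
 * Illustrative sketch of the cache key, mirroring the computation done
 * in cache_lookup() and cache_enter() below: an FNV-1 hash of the name
 * component, chained with a hash of the parent directory's vnode
 * pointer, then masked into a bucket by NCHHASH() (defined below):
 *
 *	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *	ncpp = NCHHASH(hash);
 */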
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* Negative entry LRU queue */
static u_long	nchash;			/* size of hash table - 1 (mask) */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
#if 0
static u_long	numcachepl;		/* number of cache purges for leaf entries */
SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
#endif
struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct mtx cache_lock;
MTX_SYSINIT(vfscache, &cache_lock, "Name Cache", MTX_DEF);

#define	CACHE_LOCK()	mtx_lock(&cache_lock)
#define	CACHE_UNLOCK()	mtx_unlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_large;

#define	CACHE_PATH_CUTOFF	32
#define	CACHE_ZONE_SMALL	(sizeof(struct namecache) + CACHE_PATH_CUTOFF)
#define	CACHE_ZONE_LARGE	(sizeof(struct namecache) + NAME_MAX)

#define cache_alloc(len)	uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
	cache_zone_small : cache_zone_large, M_WAITOK)
#define cache_free(ncp)		do { \
	if (ncp != NULL) \
		uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
		    cache_zone_small : cache_zone_large, (ncp)); \
} while (0)
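/*
 * Illustrative note: because nc_name is a zero-length array, the name
 * bytes live directly behind the fixed part of the structure, so one
 * zone allocation covers both.  For example, entering the 6-byte name
 * "kernel" takes one item from cache_zone_small, since
 * 6 <= CACHE_PATH_CUTOFF (this mirrors what cache_enter() does below):
 *
 *	ncp = cache_alloc(6);
 *	ncp->nc_nlen = 6;
 *	bcopy("kernel", ncp->nc_name, 6);
 */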
static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
    sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &nchstats, sizeof(nchstats), "LU", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
static int vn_vptocnp(struct vnode **vp, char **bp, char *buf, u_int *buflen);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1

/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count;

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		CACHE_LOCK();
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		CACHE_UNLOCK();
		error = SYSCTL_OUT(req, &count, sizeof(count));
		if (error)
			return (error);
	}
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");
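/*
 * Usage note (illustrative): the handler above, and the summary handler
 * that follows, surface as sysctl(8) OIDs that can be read from
 * userland, e.g.:
 *
 *	$ sysctl debug.hashstat.nchash
 *	$ sysctl debug.hashstat.rawnchash
 */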
static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		CACHE_LOCK();
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		CACHE_UNLOCK();
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	pct = (used * 100 * 100) / n_nchash;
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash chain lengths");

/*
 * cache_zap():
 *
 *   Removes a namecache entry from the cache, whether it contains an
 *   actual pointer to a vnode or is just a negative cache entry.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	struct vnode *vp;

	mtx_assert(&cache_lock, MA_OWNED);
	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
	vp = NULL;
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
		vp = ncp->nc_dvp;
		numcachehv--;
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		ncp->nc_vp->v_dd = NULL;
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	cache_free(ncp);
	if (vp)
		vdrop(vp);
}

/*
 * Lookup an entry in the cache.
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought.  If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned.  If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.  If the directory vnode is
 * recycled out from under us due to a forced unmount, a status of
 * EBADF is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up "." an extra ref is taken, but the lock
 * is not recursively acquired.
 */
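/*
 * Illustrative caller, compressed from vfs_cache_lookup() further down
 * in this file; the unusual return protocol is easiest to read at the
 * call site:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)			(miss: ask the filesystem)
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
 *	if (error == -1)		(hit: *vpp is locked and ref'd)
 *		return (0);
 *	return (error);			(ENOENT negative hit, or EBADF)
 */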
int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_int32_t hash;
	int error, ltype;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
retry:
	CACHE_LOCK();
	numcalls++;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
			    dvp, cnp->cn_nameptr);
			dothits++;
			goto success;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_dd == NULL ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				CACHE_UNLOCK();
				return (0);
			}
			*vpp = dvp->v_dd;
			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
			    dvp, cnp->cn_nameptr, *vpp);
			goto success;
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		CACHE_UNLOCK();
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		goto success;
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		CACHE_UNLOCK();
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match, so we shift it to the end of
	 * the "negative" cache entries queue to satisfy LRU.  Also,
	 * check to see if the entry is a whiteout; indicate this to
	 * the componentname, if so.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	CACHE_UNLOCK();
	return (ENOENT);
success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	if (dvp == *vpp) {	/* lookup on "." */
		VREF(*vpp);
		CACHE_UNLOCK();
		/*
		 * When we lookup "." we still can be asked to lock it
		 * differently...
		 */
		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
		if (ltype != VOP_ISLOCKED(*vpp)) {
			if (ltype == LK_EXCLUSIVE) {
				vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
				if ((*vpp)->v_iflag & VI_DOOMED) {
					/* forced unmount */
					vrele(*vpp);
					*vpp = NULL;
					return (EBADF);
				}
			} else
				vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
		}
		return (-1);
	}
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
	}
	VI_LOCK(*vpp);
	CACHE_UNLOCK();
	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT)
		vn_lock(dvp, ltype | LK_RETRY);
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
	}
	return (-1);
}
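/*
 * Illustrative note on how entries get created (a sketch of the pattern
 * used by filesystem lookup routines such as UFS, not code from this
 * file): once the real lookup has resolved, the filesystem records the
 * result, including failed lookups as negative entries:
 *
 *	if (found)
 *		cache_enter(dvp, vp, cnp);	(positive entry)
 *	else
 *		cache_enter(dvp, NULL, cnp);	(negative entry)
 */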
/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp, *n2;
	struct nchashhead *ncpp;
	u_int32_t hash;
	int hold;
	int zap;
	int len;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
	    ("cache_enter: Adding a doomed vnode"));

	if (!doingcache)
		return;

	/*
	 * Avoid blowout in namecache entries.
	 */
	if (numcache >= desiredvnodes * 2)
		return;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		/*
		 * For dotdot lookups only cache the v_dd pointer if the
		 * directory has a link back to its parent via v_cache_dst.
		 * Without this an unlinked directory would keep a soft
		 * reference to its parent which could not be NULLd at
		 * cache_purge() time.
		 */
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			CACHE_LOCK();
			if (!TAILQ_EMPTY(&dvp->v_cache_dst))
				dvp->v_dd = vp;
			CACHE_UNLOCK();
			return;
		}
	}

	hold = 0;
	zap = 0;

	/*
	 * Calculate the hash key and setup as much of the new
	 * namecache entry as possible before acquiring the lock.
	 */
	ncp = cache_alloc(cnp->cn_namelen);
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	bcopy(cnp->cn_nameptr, ncp->nc_name, len);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	CACHE_LOCK();

	/*
	 * See if this vnode or negative entry is already in the cache
	 * with this name.  This can happen with concurrent lookups of
	 * the same path name.
	 */
	ncpp = NCHHASH(hash);
	LIST_FOREACH(n2, ncpp, nc_hash) {
		if (n2->nc_dvp == dvp &&
		    n2->nc_nlen == cnp->cn_namelen &&
		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
			CACHE_UNLOCK();
			cache_free(ncp);
			return;
		}
	}

	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		vp->v_dd = dvp;
	} else {
		vp->v_dd = NULL;
	}

	/*
	 * Insert the new namecache entry into the appropriate chain
	 * within the cache entries table.
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (LIST_EMPTY(&dvp->v_cache_src)) {
		hold = 1;
		numcachehv++;
	}
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		zap = 1;
	}
	if (hold)
		vhold(dvp);
	if (zap)
		cache_zap(ncp);
	CACHE_UNLOCK();
}

/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);

	cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(vp)
	struct vnode *vp;
{

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	CACHE_LOCK();
	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
	vp->v_dd = NULL;
	CACHE_UNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	CACHE_LOCK();
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
			if (ncp->nc_dvp->v_mount == mp)
				cache_zap(ncp);
		}
	}
	CACHE_UNLOCK();
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)
		return (0);
	return (error);
}
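/*
 * Illustrative wiring, roughly how UFS does it (names here are
 * hypothetical): a filesystem opts into the name cache by using
 * vfs_cache_lookup() as its vop_lookup and supplying the real
 * directory scan through vop_cachedlookup:
 *
 *	struct vop_vector foo_vnodeops = {
 *		.vop_lookup =		vfs_cache_lookup,
 *		.vop_cachedlookup =	foo_lookup,
 *		...
 *	};
 */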
#ifndef _SYS_SYSPROTO_H_
struct __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
__getcwd(td, uap)
	struct thread *td;
	struct __getcwd_args *uap;
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	struct vnode *cdir, *rdir;
	int error, vfslocked;

	if (disablecwd)
		return (ENODEV);
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	cdir = fdp->fd_cdir;
	VREF(cdir);
	rdir = fdp->fd_rdir;
	VREF(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
	vrele(rdir);
	VFS_UNLOCK_GIANT(vfslocked);
	vfslocked = VFS_LOCK_GIANT(cdir->v_mount);
	vrele(cdir);
	VFS_UNLOCK_GIANT(vfslocked);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name) \
	static u_int name; \
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the
 * name cache (if available).
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	struct vnode *rdir;
	int error, vfslocked;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	rdir = fdp->fd_rdir;
	VREF(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
	vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
	vrele(rdir);
	VFS_UNLOCK_GIANT(vfslocked);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}
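/*
 * Usage sketch (variable names are illustrative): on success the caller
 * consumes *retbuf, which points into the M_TEMP buffer returned via
 * *freebuf, and must free the latter; on failure *freebuf is not set:
 *
 *	char *fullpath, *freepath;
 *
 *	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
 *		log(LOG_INFO, "path is %s\n", fullpath);
 *		free(freepath, M_TEMP);
 *	}
 */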
/*
 * This function is similar to vn_fullpath, but it attempts to lookup the
 * pathname relative to the global root mount point.  This is required for
 * the auditing sub-system, as audited pathnames must be absolute, relative
 * to the global root mount point.
 */
int
vn_fullpath_global(struct thread *td, struct vnode *vn,
    char **retbuf, char **freebuf)
{
	char *buf;
	int error;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

static int
vn_vptocnp(struct vnode **vp, char **bp, char *buf, u_int *buflen)
{
	struct vnode *dvp;
	int error, vfslocked;

	vhold(*vp);
	CACHE_UNLOCK();
	vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
	vn_lock(*vp, LK_SHARED | LK_RETRY);
	error = VOP_VPTOCNP(*vp, &dvp, buf, buflen);
	VOP_UNLOCK(*vp, 0);
	vdrop(*vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (error) {
		numfullpathfail2++;
		return (error);
	}
	*bp = buf + *buflen;
	*vp = dvp;
	CACHE_LOCK();
	if ((*vp)->v_iflag & VI_DOOMED) {
		/* forced unmount */
		CACHE_UNLOCK();
		vdrop(*vp);
		return (ENOENT);
	}
	vdrop(*vp);

	return (0);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	char *bp;
	int error, i, slash_prefixed;
	struct namecache *ncp;

	buflen--;
	bp = buf + buflen;
	*bp = '\0';
	error = 0;
	slash_prefixed = 0;

	CACHE_LOCK();
	numfullpathcalls++;
	if (vp->v_type != VDIR) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (ncp != NULL) {
			for (i = ncp->nc_nlen - 1; i >= 0 && bp > buf; i--)
				*--bp = ncp->nc_name[i];
			if (bp == buf) {
				numfullpathfail4++;
				CACHE_UNLOCK();
				return (ENOMEM);
			}
			vp = ncp->nc_dvp;
		} else {
			error = vn_vptocnp(&vp, &bp, buf, &buflen);
			if (error) {
				return (error);
			}
		}
		*--bp = '/';
		buflen--;
		if (buflen < 0) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		}
		slash_prefixed = 1;
	}
	while (vp != rdir && vp != rootvnode) {
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
				CACHE_UNLOCK();
				error = EBADF;
				break;
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_type != VDIR) {
			numfullpathfail1++;
			CACHE_UNLOCK();
			error = ENOTDIR;
			break;
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (ncp != NULL) {
			MPASS(vp->v_dd == NULL || ncp->nc_dvp == vp->v_dd);
			buflen -= ncp->nc_nlen - 1;
			for (i = ncp->nc_nlen - 1; i >= 0 && bp != buf; i--)
				*--bp = ncp->nc_name[i];
			if (bp == buf) {
				numfullpathfail4++;
				CACHE_UNLOCK();
				error = ENOMEM;
				break;
			}
			vp = ncp->nc_dvp;
		} else {
			error = vn_vptocnp(&vp, &bp, buf, &buflen);
			if (error) {
				break;
			}
		}
		*--bp = '/';
		buflen--;
		if (buflen < 0) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			error = ENOMEM;
			break;
		}
		slash_prefixed = 1;
	}
	if (error)
		return (error);
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			CACHE_UNLOCK();
			return (ENOMEM);
		} else {
			*--bp = '/';
		}
	}
	numfullpathfound++;
	CACHE_UNLOCK();

	*retbuf = bp;
	return (0);
}
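/*
 * Worked example (illustrative) of the backward construction above:
 * resolving a vnode named "src" under "usr" fills the buffer right to
 * left, bp sliding toward buf as each component is prepended:
 *
 *	[...............\0]	initial, bp at the terminating NUL
 *	[............src\0]	name bytes copied in reverse
 *	[.........../src\0]	slash prepended
 *	[........usr/src\0]	parent's name
 *	[......./usr/src\0]	final slash; *retbuf = bp
 */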
/*
 * Return a NUL-terminated copy of the most recently entered cache name
 * for vp (the head of its v_cache_dst list), or ENOENT if vp has no
 * cache entries.
 */
int
vn_commname(struct vnode *vp, char *buf, u_int buflen)
{
	struct namecache *ncp;
	int l;

	CACHE_LOCK();
	ncp = TAILQ_FIRST(&vp->v_cache_dst);
	if (!ncp) {
		CACHE_UNLOCK();
		return (ENOENT);
	}
	l = min(ncp->nc_nlen, buflen - 1);
	memcpy(buf, ncp->nc_name, l);
	CACHE_UNLOCK();
	buf[l] = '\0';
	return (0);
}