43 44#include "opt_compat.h" 45#include "opt_ddb.h" 46#include "opt_watchdog.h" 47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/bio.h> 51#include <sys/buf.h> 52#include <sys/condvar.h> 53#include <sys/conf.h> 54#include <sys/dirent.h> 55#include <sys/event.h> 56#include <sys/eventhandler.h> 57#include <sys/extattr.h> 58#include <sys/file.h> 59#include <sys/fcntl.h> 60#include <sys/jail.h> 61#include <sys/kdb.h> 62#include <sys/kernel.h> 63#include <sys/kthread.h> 64#include <sys/lockf.h> 65#include <sys/malloc.h> 66#include <sys/mount.h> 67#include <sys/namei.h> 68#include <sys/pctrie.h> 69#include <sys/priv.h> 70#include <sys/reboot.h> 71#include <sys/rwlock.h> 72#include <sys/sched.h> 73#include <sys/sleepqueue.h> 74#include <sys/smp.h> 75#include <sys/stat.h> 76#include <sys/sysctl.h> 77#include <sys/syslog.h> 78#include <sys/vmmeter.h> 79#include <sys/vnode.h> 80#include <sys/watchdog.h> 81 82#include <machine/stdarg.h> 83 84#include <security/mac/mac_framework.h> 85 86#include <vm/vm.h> 87#include <vm/vm_object.h> 88#include <vm/vm_extern.h> 89#include <vm/pmap.h> 90#include <vm/vm_map.h> 91#include <vm/vm_page.h> 92#include <vm/vm_kern.h> 93#include <vm/uma.h> 94 95#ifdef DDB 96#include <ddb/ddb.h> 97#endif 98 99static void delmntque(struct vnode *vp); 100static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 101 int slpflag, int slptimeo); 102static void syncer_shutdown(void *arg, int howto); 103static int vtryrecycle(struct vnode *vp); 104static void v_incr_usecount(struct vnode *); 105static void v_decr_usecount(struct vnode *); 106static void v_decr_useonly(struct vnode *); 107static void v_upgrade_usecount(struct vnode *); 108static void vnlru_free(int); 109static void vgonel(struct vnode *); 110static void vfs_knllock(void *arg); 111static void vfs_knlunlock(void *arg); 112static void vfs_knl_assert_locked(void *arg); 113static void vfs_knl_assert_unlocked(void *arg); 114static void destroy_vpollinfo(struct vpollinfo *vi); 115 116/* 117 * Number of vnodes in existence. Increased whenever getnewvnode() 118 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. 119 */ 120static unsigned long numvnodes; 121 122SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 123 "Number of vnodes in existence"); 124 125static u_long vnodes_created; 126SYSCTL_ULONG(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 127 0, "Number of vnodes created by getnewvnode"); 128 129/* 130 * Conversion tables for conversion from vnode types to inode formats 131 * and back. 132 */ 133enum vtype iftovt_tab[16] = { 134 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 135 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 136}; 137int vttoif_tab[10] = { 138 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 139 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 140}; 141 142/* 143 * List of vnodes that are ready for recycling. 144 */ 145static TAILQ_HEAD(freelst, vnode) vnode_free_list; 146 147/* 148 * Free vnode target. Free vnodes may simply be files which have been stat'd 149 * but not read. This is somewhat common, and a small cache of such files 150 * should be kept to avoid recreation costs. 151 */ 152static u_long wantfreevnodes; 153SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, ""); 154/* Number of vnodes in the free list. 
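 *
 * As a minimal illustration of the conversion tables above (iftovt_tab and
 * vttoif_tab), the standalone sketch below shows how a table of that shape is
 * indexed with the file-type bits of an inode mode; the kernel's own
 * conversions go through macros that index these tables.  All demo_* names
 * are hypothetical.
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>
#include <sys/stat.h>

/* Hypothetical mirror of iftovt_tab, indexed by (mode & S_IFMT) >> 12. */
enum demo_vtype { DNON, DREG, DDIR, DBLK, DCHR, DLNK, DSOCK, DFIFO, DBAD };

static const enum demo_vtype demo_iftovt[16] = {
	DNON, DFIFO, DCHR, DNON, DDIR, DNON, DBLK, DNON,
	DREG, DNON, DLNK, DNON, DSOCK, DNON, DNON, DBAD,
};

static const char *demo_names[] = {
	"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
};

int
main(void)
{
	mode_t samples[] = { S_IFREG | 0644, S_IFDIR | 0755, S_IFLNK | 0777 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("mode %#o -> %s\n", (unsigned)samples[i],
		    demo_names[demo_iftovt[(samples[i] & S_IFMT) >> 12]]);
	return (0);
}
#endif
/*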
*/ 155static u_long freevnodes; 156SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, 157 "Number of vnodes in the free list"); 158 159static int vlru_allow_cache_src; 160SYSCTL_INT(_vfs, OID_AUTO, vlru_allow_cache_src, CTLFLAG_RW, 161 &vlru_allow_cache_src, 0, "Allow vlru to reclaim source vnode"); 162 163static u_long recycles_count; 164SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 0, 165 "Number of vnodes recycled to avoid exceding kern.maxvnodes"); 166 167/* 168 * Various variables used for debugging the new implementation of 169 * reassignbuf(). 170 * XXX these are probably of (very) limited utility now. 171 */ 172static int reassignbufcalls; 173SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, 174 "Number of calls to reassignbuf"); 175 176static u_long free_owe_inact; 177SYSCTL_ULONG(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact, 0, 178 "Number of times free vnodes kept on active list due to VFS " 179 "owing inactivation"); 180 181/* 182 * Cache for the mount type id assigned to NFS. This is used for 183 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c. 184 */ 185int nfs_mount_type = -1; 186 187/* To keep more than one thread at a time from running vfs_getnewfsid */ 188static struct mtx mntid_mtx; 189 190/* 191 * Lock for any access to the following: 192 * vnode_free_list 193 * numvnodes 194 * freevnodes 195 */ 196static struct mtx vnode_free_list_mtx; 197 198/* Publicly exported FS */ 199struct nfs_public nfs_pub; 200 201static uma_zone_t buf_trie_zone; 202 203/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 204static uma_zone_t vnode_zone; 205static uma_zone_t vnodepoll_zone; 206 207/* 208 * The workitem queue. 209 * 210 * It is useful to delay writes of file data and filesystem metadata 211 * for tens of seconds so that quickly created and deleted files need 212 * not waste disk bandwidth being created and removed. To realize this, 213 * we append vnodes to a "workitem" queue. When running with a soft 214 * updates implementation, most pending metadata dependencies should 215 * not wait for more than a few seconds. Thus, mounted on block devices 216 * are delayed only about a half the time that file data is delayed. 217 * Similarly, directory updates are more critical, so are only delayed 218 * about a third the time that file data is delayed. Thus, there are 219 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 220 * one each second (driven off the filesystem syncer process). The 221 * syncer_delayno variable indicates the next queue that is to be processed. 
222 * Items that need to be processed soon are placed in this queue: 223 * 224 * syncer_workitem_pending[syncer_delayno] 225 * 226 * A delay of fifteen seconds is done by placing the request fifteen 227 * entries later in the queue: 228 * 229 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 230 * 231 */ 232static int syncer_delayno; 233static long syncer_mask; 234LIST_HEAD(synclist, bufobj); 235static struct synclist *syncer_workitem_pending; 236/* 237 * The sync_mtx protects: 238 * bo->bo_synclist 239 * sync_vnode_count 240 * syncer_delayno 241 * syncer_state 242 * syncer_workitem_pending 243 * syncer_worklist_len 244 * rushjob 245 */ 246static struct mtx sync_mtx; 247static struct cv sync_wakeup; 248 249#define SYNCER_MAXDELAY 32 250static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 251static int syncdelay = 30; /* max time to delay syncing data */ 252static int filedelay = 30; /* time to delay syncing files */ 253SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 254 "Time to delay syncing files (in seconds)"); 255static int dirdelay = 29; /* time to delay syncing directories */ 256SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 257 "Time to delay syncing directories (in seconds)"); 258static int metadelay = 28; /* time to delay syncing metadata */ 259SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 260 "Time to delay syncing metadata (in seconds)"); 261static int rushjob; /* number of slots to run ASAP */ 262static int stat_rush_requests; /* number of times I/O speeded up */ 263SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 264 "Number of times I/O speeded up (rush requests)"); 265 266/* 267 * When shutting down the syncer, run it at four times normal speed. 268 */ 269#define SYNCER_SHUTDOWN_SPEEDUP 4 270static int sync_vnode_count; 271static int syncer_worklist_len; 272static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 273 syncer_state; 274 275/* 276 * Number of vnodes we want to exist at any one time. This is mostly used 277 * to size hash tables in vnode-related code. It is normally not used in 278 * getnewvnode(), as wantfreevnodes is normally nonzero.) 279 * 280 * XXX desiredvnodes is historical cruft and should not exist. 281 */ 282int desiredvnodes; 283 284static int 285sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS) 286{ 287 int error, old_desiredvnodes; 288 289 old_desiredvnodes = desiredvnodes; 290 if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0) 291 return (error); 292 if (old_desiredvnodes != desiredvnodes) { 293 vfs_hash_changesize(desiredvnodes); 294 cache_changesize(desiredvnodes); 295 } 296 return (0); 297} 298 299SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 300 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0, 301 sysctl_update_desiredvnodes, "I", "Maximum number of vnodes"); 302SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 303 &wantfreevnodes, 0, "Minimum number of vnodes (legacy)"); 304static int vnlru_nowhere; 305SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 306 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 307 308/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 309static int vnsz2log; 310 311/* 312 * Support for the bufobj clean & dirty pctrie. 
313 */ 314static void * 315buf_trie_alloc(struct pctrie *ptree) 316{ 317 318 return uma_zalloc(buf_trie_zone, M_NOWAIT); 319} 320 321static void 322buf_trie_free(struct pctrie *ptree, void *node) 323{ 324 325 uma_zfree(buf_trie_zone, node); 326} 327PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); 328 329/* 330 * Initialize the vnode management data structures. 331 * 332 * Reevaluate the following cap on the number of vnodes after the physical 333 * memory size exceeds 512GB. In the limit, as the physical memory size 334 * grows, the ratio of physical pages to vnodes approaches sixteen to one. 335 */ 336#ifndef MAXVNODES_MAX 337#define MAXVNODES_MAX (512 * (1024 * 1024 * 1024 / (int)PAGE_SIZE / 16)) 338#endif 339 340/* 341 * Initialize a vnode as it first enters the zone. 342 */ 343static int 344vnode_init(void *mem, int size, int flags) 345{ 346 struct vnode *vp; 347 struct bufobj *bo; 348 349 vp = mem; 350 bzero(vp, size); 351 /* 352 * Setup locks. 353 */ 354 vp->v_vnlock = &vp->v_lock; 355 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 356 /* 357 * By default, don't allow shared locks unless filesystems opt-in. 358 */ 359 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 360 LK_NOSHARE | LK_IS_VNODE); 361 /* 362 * Initialize bufobj. 363 */ 364 bo = &vp->v_bufobj; 365 bo->__bo_vnode = vp; 366 rw_init(BO_LOCKPTR(bo), "bufobj interlock"); 367 bo->bo_private = vp; 368 TAILQ_INIT(&bo->bo_clean.bv_hd); 369 TAILQ_INIT(&bo->bo_dirty.bv_hd); 370 /* 371 * Initialize namecache. 372 */ 373 LIST_INIT(&vp->v_cache_src); 374 TAILQ_INIT(&vp->v_cache_dst); 375 /* 376 * Initialize rangelocks. 377 */ 378 rangelock_init(&vp->v_rl); 379 return (0); 380} 381 382/* 383 * Free a vnode when it is cleared from the zone. 384 */ 385static void 386vnode_fini(void *mem, int size) 387{ 388 struct vnode *vp; 389 struct bufobj *bo; 390 391 vp = mem; 392 rangelock_destroy(&vp->v_rl); 393 lockdestroy(vp->v_vnlock); 394 mtx_destroy(&vp->v_interlock); 395 bo = &vp->v_bufobj; 396 rw_destroy(BO_LOCKPTR(bo)); 397} 398 399static void 400vntblinit(void *dummy __unused) 401{ 402 u_int i; 403 int physvnodes, virtvnodes; 404 405 /* 406 * Desiredvnodes is a function of the physical memory size and the 407 * kernel's heap size. Generally speaking, it scales with the 408 * physical memory size. The ratio of desiredvnodes to physical pages 409 * is one to four until desiredvnodes exceeds 98,304. Thereafter, the 410 * marginal ratio of desiredvnodes to physical pages is one to 411 * sixteen. However, desiredvnodes is limited by the kernel's heap 412 * size. The memory required by desiredvnodes vnodes and vm objects 413 * may not exceed one seventh of the kernel's heap size. 
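 *
 * A minimal sketch of the sizing rule described above, using the same
 * arithmetic as vntblinit() below but with assumed figures (maxproc, page
 * count, kmem size and structure sizes are all made up for illustration).
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>

static long
demo_min(long a, long b)
{
	return (a < b ? a : b);
}

int
main(void)
{
	long maxproc = 6164;			/* assumed */
	long v_page_count = 1048576;		/* ~4GB of 4KB pages, assumed */
	long vm_kmem_size = 1L << 30;		/* assumed 1GB kernel heap */
	long obj_sz = 232, vnode_sz = 480;	/* illustrative structure sizes */
	long physvnodes, virtvnodes;

	/* One vnode per four pages up to 98,304 vnodes, one per sixteen after. */
	physvnodes = maxproc + v_page_count / 16 +
	    3 * demo_min(98304 * 4, v_page_count) / 16;
	/* No more than one seventh of the kernel heap. */
	virtvnodes = vm_kmem_size / (7 * (obj_sz + vnode_sz));
	printf("desiredvnodes = min(%ld, %ld) = %ld\n", physvnodes, virtvnodes,
	    demo_min(physvnodes, virtvnodes));
	return (0);
}
#endif
/*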
414 */ 415 physvnodes = maxproc + cnt.v_page_count / 16 + 3 * min(98304 * 4, 416 cnt.v_page_count) / 16; 417 virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) + 418 sizeof(struct vnode))); 419 desiredvnodes = min(physvnodes, virtvnodes); 420 if (desiredvnodes > MAXVNODES_MAX) { 421 if (bootverbose) 422 printf("Reducing kern.maxvnodes %d -> %d\n", 423 desiredvnodes, MAXVNODES_MAX); 424 desiredvnodes = MAXVNODES_MAX; 425 } 426 wantfreevnodes = desiredvnodes / 4; 427 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 428 TAILQ_INIT(&vnode_free_list); 429 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 430 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 431 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 432 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 433 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 434 /* 435 * Preallocate enough nodes to support one-per buf so that 436 * we can not fail an insert. reassignbuf() callers can not 437 * tolerate the insertion failure. 438 */ 439 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 440 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 441 UMA_ZONE_NOFREE | UMA_ZONE_VM); 442 uma_prealloc(buf_trie_zone, nbuf); 443 /* 444 * Initialize the filesystem syncer. 445 */ 446 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 447 &syncer_mask); 448 syncer_maxdelay = syncer_mask + 1; 449 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 450 cv_init(&sync_wakeup, "syncer"); 451 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 452 vnsz2log++; 453 vnsz2log--; 454} 455SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 456 457 458/* 459 * Mark a mount point as busy. Used to synchronize access and to delay 460 * unmounting. Eventually, mountlist_mtx is not released on failure. 461 * 462 * vfs_busy() is a custom lock, it can block the caller. 463 * vfs_busy() only sleeps if the unmount is active on the mount point. 464 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 465 * vnode belonging to mp. 466 * 467 * Lookup uses vfs_busy() to traverse mount points. 468 * root fs var fs 469 * / vnode lock A / vnode lock (/var) D 470 * /var vnode lock B /log vnode lock(/var/log) E 471 * vfs_busy lock C vfs_busy lock F 472 * 473 * Within each file system, the lock order is C->A->B and F->D->E. 474 * 475 * When traversing across mounts, the system follows that lock order: 476 * 477 * C->A->B 478 * | 479 * +->F->D->E 480 * 481 * The lookup() process for namei("/var") illustrates the process: 482 * VOP_LOOKUP() obtains B while A is held 483 * vfs_busy() obtains a shared lock on F while A and B are held 484 * vput() releases lock on B 485 * vput() releases lock on A 486 * VFS_ROOT() obtains lock on D while shared lock on F is held 487 * vfs_unbusy() releases shared lock on F 488 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 489 * Attempt to lock A (instead of vp_crossmp) while D is held would 490 * violate the global order, causing deadlocks. 491 * 492 * dounmount() locks B while F is drained. 493 */ 494int 495vfs_busy(struct mount *mp, int flags) 496{ 497 498 MPASS((flags & ~MBF_MASK) == 0); 499 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 500 501 MNT_ILOCK(mp); 502 MNT_REF(mp); 503 /* 504 * If mount point is currenly being unmounted, sleep until the 505 * mount point fate is decided. 
If the thread doing the unmounting fails, 506 * it will clear the MNTK_UNMOUNT flag before waking us up, indicating 507 * that this mount point has survived the unmount attempt and vfs_busy 508 * should retry. Otherwise the unmounter thread will set the MNTK_REFEXPIRE 509 * flag in addition to MNTK_UNMOUNT, indicating that the mount point is 510 * about to be really destroyed. vfs_busy needs to release its 511 * reference on the mount point in this case and return with ENOENT, 512 * telling the caller that the mount point it tried to busy is no longer 513 * valid. 514 */ 515 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 516 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 517 MNT_REL(mp); 518 MNT_IUNLOCK(mp); 519 CTR1(KTR_VFS, "%s: failed busying before sleeping", 520 __func__); 521 return (ENOENT); 522 } 523 if (flags & MBF_MNTLSTLOCK) 524 mtx_unlock(&mountlist_mtx); 525 mp->mnt_kern_flag |= MNTK_MWAIT; 526 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 527 if (flags & MBF_MNTLSTLOCK) 528 mtx_lock(&mountlist_mtx); 529 MNT_ILOCK(mp); 530 } 531 if (flags & MBF_MNTLSTLOCK) 532 mtx_unlock(&mountlist_mtx); 533 mp->mnt_lockref++; 534 MNT_IUNLOCK(mp); 535 return (0); 536 } 537 538 /* 539 * Free a busy filesystem. 540 */ 541 void 542 vfs_unbusy(struct mount *mp) 543 { 544 545 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 546 MNT_ILOCK(mp); 547 MNT_REL(mp); 548 KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref")); 549 mp->mnt_lockref--; 550 if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 551 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 552 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 553 mp->mnt_kern_flag &= ~MNTK_DRAINING; 554 wakeup(&mp->mnt_lockref); 555 } 556 MNT_IUNLOCK(mp); 557 } 558 559 /* 560 * Lookup a mount point by filesystem identifier. 561 */ 562 struct mount * 563 vfs_getvfs(fsid_t *fsid) 564 { 565 struct mount *mp; 566 567 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 568 mtx_lock(&mountlist_mtx); 569 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 570 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 571 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 572 vfs_ref(mp); 573 mtx_unlock(&mountlist_mtx); 574 return (mp); 575 } 576 } 577 mtx_unlock(&mountlist_mtx); 578 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 579 return ((struct mount *) 0); 580 } 581 582 /* 583 * Lookup a mount point by filesystem identifier, busying it before 584 * returning. 585 * 586 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped 587 * cache for popular filesystem identifiers. The cache is lockless, using 588 * the fact that struct mounts are never freed. In the worst case we may 589 * get a pointer to an unmounted or even a different filesystem, so we have 590 * to check what we got and take the slow path if so.
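 *
 * A minimal sketch of the hash used to index that direct-mapped cache; it
 * mirrors the folding done in vfs_busyfs() below.  The demo_* names and the
 * sample fsid values are hypothetical.
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_FSID_CACHE_SIZE	256	/* power of two, so the mask works */

/* Fold the two fsid words into a small cache index. */
static unsigned
demo_fsid_slot(uint32_t val0, uint32_t val1)
{
	uint32_t hash;

	hash = val0 ^ val1;
	hash = ((hash >> 16) ^ hash) & (DEMO_FSID_CACHE_SIZE - 1);
	return (hash);
}

int
main(void)
{
	printf("slot %u\n", demo_fsid_slot(0x12345678, 0x9abcdef0));
	printf("slot %u\n", demo_fsid_slot(0x00000001, 0x00000002));
	return (0);
}
#endif
/*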
591 */ 592struct mount * 593vfs_busyfs(fsid_t *fsid) 594{ 595#define FSID_CACHE_SIZE 256 596 typedef struct mount * volatile vmp_t; 597 static vmp_t cache[FSID_CACHE_SIZE]; 598 struct mount *mp; 599 int error; 600 uint32_t hash; 601 602 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 603 hash = fsid->val[0] ^ fsid->val[1]; 604 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 605 mp = cache[hash]; 606 if (mp == NULL || 607 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || 608 mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) 609 goto slow; 610 if (vfs_busy(mp, 0) != 0) { 611 cache[hash] = NULL; 612 goto slow; 613 } 614 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 615 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) 616 return (mp); 617 else 618 vfs_unbusy(mp); 619 620slow: 621 mtx_lock(&mountlist_mtx); 622 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 623 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 624 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 625 error = vfs_busy(mp, MBF_MNTLSTLOCK); 626 if (error) { 627 cache[hash] = NULL; 628 mtx_unlock(&mountlist_mtx); 629 return (NULL); 630 } 631 cache[hash] = mp; 632 return (mp); 633 } 634 } 635 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 636 mtx_unlock(&mountlist_mtx); 637 return ((struct mount *) 0); 638} 639 640/* 641 * Check if a user can access privileged mount options. 642 */ 643int 644vfs_suser(struct mount *mp, struct thread *td) 645{ 646 int error; 647 648 /* 649 * If the thread is jailed, but this is not a jail-friendly file 650 * system, deny immediately. 651 */ 652 if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred)) 653 return (EPERM); 654 655 /* 656 * If the file system was mounted outside the jail of the calling 657 * thread, deny immediately. 658 */ 659 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 660 return (EPERM); 661 662 /* 663 * If file system supports delegated administration, we don't check 664 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 665 * by the file system itself. 666 * If this is not the user that did original mount, we check for 667 * the PRIV_VFS_MOUNT_OWNER privilege. 668 */ 669 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 670 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 671 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 672 return (error); 673 } 674 return (0); 675} 676 677/* 678 * Get a new unique fsid. Try to make its val[0] unique, since this value 679 * will be used to create fake device numbers for stat(). Also try (but 680 * not so hard) make its val[0] unique mod 2^16, since some emulators only 681 * support 16-bit device numbers. We end up with unique val[0]'s for the 682 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 683 * 684 * Keep in mind that several mounts may be running in parallel. Starting 685 * the search one past where the previous search terminated is both a 686 * micro-optimization and a defense against returning the same fsid to 687 * different mounts. 
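 *
 * A minimal sketch of how val[0] is composed: the filesystem type number in
 * the top byte and the two halves of a rolling 16-bit counter, packed through
 * makedev().  The stand-in demo_makedev() below simply assumes the classic
 * "major in bits 8..15" encoding and is not a statement about the kernel's
 * actual dev_t layout; demo_* names and the sample type number are made up.
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>
#include <stdint.h>

static uint32_t
demo_makedev(uint32_t major, uint32_t minor)
{
	return (major << 8 | minor);	/* assumed classic encoding */
}

int
main(void)
{
	uint32_t mtype = (5 & 0xFF) << 24;	/* hypothetical vfc_typenum 5 */
	uint16_t mntid_base = 0;
	int i;

	/* Same packing expression as the loop in vfs_getnewfsid() below. */
	for (i = 0; i < 3; i++, mntid_base++)
		printf("val[0] = %#010x (mntid_base = %u)\n",
		    demo_makedev(255, mtype |
		    ((uint32_t)(mntid_base & 0xFF00) << 8) |
		    (mntid_base & 0xFF)), mntid_base);
	return (0);
}
#endif
/*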
688 */ 689void 690vfs_getnewfsid(struct mount *mp) 691{ 692 static uint16_t mntid_base; 693 struct mount *nmp; 694 fsid_t tfsid; 695 int mtype; 696 697 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 698 mtx_lock(&mntid_mtx); 699 mtype = mp->mnt_vfc->vfc_typenum; 700 tfsid.val[1] = mtype; 701 mtype = (mtype & 0xFF) << 24; 702 for (;;) { 703 tfsid.val[0] = makedev(255, 704 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 705 mntid_base++; 706 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 707 break; 708 vfs_rel(nmp); 709 } 710 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 711 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 712 mtx_unlock(&mntid_mtx); 713} 714 715/* 716 * Knob to control the precision of file timestamps: 717 * 718 * 0 = seconds only; nanoseconds zeroed. 719 * 1 = seconds and nanoseconds, accurate within 1/HZ. 720 * 2 = seconds and nanoseconds, truncated to microseconds. 721 * >=3 = seconds and nanoseconds, maximum precision. 722 */ 723enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 724 725static int timestamp_precision = TSP_USEC; 726SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 727 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 728 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to ms, " 729 "3+: sec + ns (max. precision))"); 730 731/* 732 * Get a current timestamp. 733 */ 734void 735vfs_timestamp(struct timespec *tsp) 736{ 737 struct timeval tv; 738 739 switch (timestamp_precision) { 740 case TSP_SEC: 741 tsp->tv_sec = time_second; 742 tsp->tv_nsec = 0; 743 break; 744 case TSP_HZ: 745 getnanotime(tsp); 746 break; 747 case TSP_USEC: 748 microtime(&tv); 749 TIMEVAL_TO_TIMESPEC(&tv, tsp); 750 break; 751 case TSP_NSEC: 752 default: 753 nanotime(tsp); 754 break; 755 } 756} 757 758/* 759 * Set vnode attributes to VNOVAL 760 */ 761void 762vattr_null(struct vattr *vap) 763{ 764 765 vap->va_type = VNON; 766 vap->va_size = VNOVAL; 767 vap->va_bytes = VNOVAL; 768 vap->va_mode = VNOVAL; 769 vap->va_nlink = VNOVAL; 770 vap->va_uid = VNOVAL; 771 vap->va_gid = VNOVAL; 772 vap->va_fsid = VNOVAL; 773 vap->va_fileid = VNOVAL; 774 vap->va_blocksize = VNOVAL; 775 vap->va_rdev = VNOVAL; 776 vap->va_atime.tv_sec = VNOVAL; 777 vap->va_atime.tv_nsec = VNOVAL; 778 vap->va_mtime.tv_sec = VNOVAL; 779 vap->va_mtime.tv_nsec = VNOVAL; 780 vap->va_ctime.tv_sec = VNOVAL; 781 vap->va_ctime.tv_nsec = VNOVAL; 782 vap->va_birthtime.tv_sec = VNOVAL; 783 vap->va_birthtime.tv_nsec = VNOVAL; 784 vap->va_flags = VNOVAL; 785 vap->va_gen = VNOVAL; 786 vap->va_vaflags = 0; 787} 788 789/* 790 * This routine is called when we have too many vnodes. It attempts 791 * to free <count> vnodes and will potentially free vnodes that still 792 * have VM backing store (VM backing store is typically the cause 793 * of a vnode blowout so we want to do this). Therefore, this operation 794 * is not considered cheap. 795 * 796 * A number of conditions may prevent a vnode from being reclaimed. 797 * the buffer cache may have references on the vnode, a directory 798 * vnode may still have references due to the namei cache representing 799 * underlying files, or the vnode may be in active use. It is not 800 * desireable to reuse such vnodes. These conditions may cause the 801 * number of vnodes to reach some minimum value regardless of what 802 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 
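 *
 * A minimal sketch of the two figures vlrureclaim() below derives before it
 * starts scanning: the resident-page trigger above which a vnode is skipped,
 * and the slice (a tenth, plus one) of the mount's vnode list examined per
 * pass.  The input numbers are assumptions, not measurements.
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>

int
main(void)
{
	long v_page_count = 1048576;		/* assumed pages of RAM */
	long desiredvnodes = 145428;		/* assumed kern.maxvnodes */
	long mnt_nvnodelistsize = 90000;	/* assumed vnodes on the mount */
	long usevnodes, trigger, count;

	usevnodes = desiredvnodes > 0 ? desiredvnodes : 1;
	trigger = v_page_count * 2 / usevnodes;
	count = mnt_nvnodelistsize / 10 + 1;
	printf("skip vnodes holding more than %ld resident pages; "
	    "visit at most %ld vnodes this pass\n", trigger, count);
	return (0);
}
#endif
/*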
803 */ 804static int 805vlrureclaim(struct mount *mp) 806{ 807 struct vnode *vp; 808 int done; 809 int trigger; 810 int usevnodes; 811 int count; 812 813 /* 814 * Calculate the trigger point, don't allow user 815 * screwups to blow us up. This prevents us from 816 * recycling vnodes with lots of resident pages. We 817 * aren't trying to free memory, we are trying to 818 * free vnodes. 819 */ 820 usevnodes = desiredvnodes; 821 if (usevnodes <= 0) 822 usevnodes = 1; 823 trigger = cnt.v_page_count * 2 / usevnodes; 824 done = 0; 825 vn_start_write(NULL, &mp, V_WAIT); 826 MNT_ILOCK(mp); 827 count = mp->mnt_nvnodelistsize / 10 + 1; 828 while (count != 0) { 829 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 830 while (vp != NULL && vp->v_type == VMARKER) 831 vp = TAILQ_NEXT(vp, v_nmntvnodes); 832 if (vp == NULL) 833 break; 834 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 835 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 836 --count; 837 if (!VI_TRYLOCK(vp)) 838 goto next_iter; 839 /* 840 * If it's been deconstructed already, it's still 841 * referenced, or it exceeds the trigger, skip it. 842 */ 843 if (vp->v_usecount || 844 (!vlru_allow_cache_src && 845 !LIST_EMPTY(&(vp)->v_cache_src)) || 846 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && 847 vp->v_object->resident_page_count > trigger)) { 848 VI_UNLOCK(vp); 849 goto next_iter; 850 } 851 MNT_IUNLOCK(mp); 852 vholdl(vp); 853 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { 854 vdrop(vp); 855 goto next_iter_mntunlocked; 856 } 857 VI_LOCK(vp); 858 /* 859 * v_usecount may have been bumped after VOP_LOCK() dropped 860 * the vnode interlock and before it was locked again. 861 * 862 * It is not necessary to recheck VI_DOOMED because it can 863 * only be set by another thread that holds both the vnode 864 * lock and vnode interlock. If another thread has the 865 * vnode lock before we get to VOP_LOCK() and obtains the 866 * vnode interlock after VOP_LOCK() drops the vnode 867 * interlock, the other thread will be unable to drop the 868 * vnode lock before our VOP_LOCK() call fails. 869 */ 870 if (vp->v_usecount || 871 (!vlru_allow_cache_src && 872 !LIST_EMPTY(&(vp)->v_cache_src)) || 873 (vp->v_object != NULL && 874 vp->v_object->resident_page_count > trigger)) { 875 VOP_UNLOCK(vp, LK_INTERLOCK); 876 vdrop(vp); 877 goto next_iter_mntunlocked; 878 } 879 KASSERT((vp->v_iflag & VI_DOOMED) == 0, 880 ("VI_DOOMED unexpectedly detected in vlrureclaim()")); 881 atomic_add_long(&recycles_count, 1); 882 vgonel(vp); 883 VOP_UNLOCK(vp, 0); 884 vdropl(vp); 885 done++; 886next_iter_mntunlocked: 887 if (!should_yield()) 888 goto relock_mnt; 889 goto yield; 890next_iter: 891 if (!should_yield()) 892 continue; 893 MNT_IUNLOCK(mp); 894yield: 895 kern_yield(PRI_USER); 896relock_mnt: 897 MNT_ILOCK(mp); 898 } 899 MNT_IUNLOCK(mp); 900 vn_finished_write(mp); 901 return done; 902} 903 904/* 905 * Attempt to keep the free list at wantfreevnodes length. 906 */ 907static void 908vnlru_free(int count) 909{ 910 struct vnode *vp; 911 912 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 913 for (; count > 0; count--) { 914 vp = TAILQ_FIRST(&vnode_free_list); 915 /* 916 * The list can be modified while the free_list_mtx 917 * has been dropped and vp could be NULL here. 
918 */ 919 if (!vp) 920 break; 921 VNASSERT(vp->v_op != NULL, vp, 922 ("vnlru_free: vnode already reclaimed.")); 923 KASSERT((vp->v_iflag & VI_FREE) != 0, 924 ("Removing vnode not on freelist")); 925 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 926 ("Mangling active vnode")); 927 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 928 /* 929 * Don't recycle if we can't get the interlock. 930 */ 931 if (!VI_TRYLOCK(vp)) { 932 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 933 continue; 934 } 935 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, 936 vp, ("vp inconsistent on freelist")); 937 938 /* 939 * The clear of VI_FREE prevents activation of the 940 * vnode. There is no sense in putting the vnode on 941 * the mount point active list, only to remove it 942 * later during recycling. Inline the relevant part 943 * of vholdl(), to avoid triggering assertions or 944 * activating. 945 */ 946 freevnodes--; 947 vp->v_iflag &= ~VI_FREE; 948 vp->v_holdcnt++; 949 950 mtx_unlock(&vnode_free_list_mtx); 951 VI_UNLOCK(vp); 952 vtryrecycle(vp); 953 /* 954 * If the recycled succeeded this vdrop will actually free 955 * the vnode. If not it will simply place it back on 956 * the free list. 957 */ 958 vdrop(vp); 959 mtx_lock(&vnode_free_list_mtx); 960 } 961} 962/* 963 * Attempt to recycle vnodes in a context that is always safe to block. 964 * Calling vlrurecycle() from the bowels of filesystem code has some 965 * interesting deadlock problems. 966 */ 967static struct proc *vnlruproc; 968static int vnlruproc_sig; 969 970static void 971vnlru_proc(void) 972{ 973 struct mount *mp, *nmp; 974 int done; 975 struct proc *p = vnlruproc; 976 977 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p, 978 SHUTDOWN_PRI_FIRST); 979 980 for (;;) { 981 kproc_suspend_check(p); 982 mtx_lock(&vnode_free_list_mtx); 983 if (freevnodes > wantfreevnodes) 984 vnlru_free(freevnodes - wantfreevnodes); 985 if (numvnodes <= desiredvnodes * 9 / 10) { 986 vnlruproc_sig = 0; 987 wakeup(&vnlruproc_sig); 988 msleep(vnlruproc, &vnode_free_list_mtx, 989 PVFS|PDROP, "vlruwt", hz); 990 continue; 991 } 992 mtx_unlock(&vnode_free_list_mtx); 993 done = 0; 994 mtx_lock(&mountlist_mtx); 995 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 996 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { 997 nmp = TAILQ_NEXT(mp, mnt_list); 998 continue; 999 } 1000 done += vlrureclaim(mp); 1001 mtx_lock(&mountlist_mtx); 1002 nmp = TAILQ_NEXT(mp, mnt_list); 1003 vfs_unbusy(mp); 1004 } 1005 mtx_unlock(&mountlist_mtx); 1006 if (done == 0) { 1007#if 0 1008 /* These messages are temporary debugging aids */ 1009 if (vnlru_nowhere < 5) 1010 printf("vnlru process getting nowhere..\n"); 1011 else if (vnlru_nowhere == 5) 1012 printf("vnlru process messages stopped.\n"); 1013#endif 1014 vnlru_nowhere++; 1015 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1016 } else 1017 kern_yield(PRI_USER); 1018 } 1019} 1020 1021static struct kproc_desc vnlru_kp = { 1022 "vnlru", 1023 vnlru_proc, 1024 &vnlruproc 1025}; 1026SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1027 &vnlru_kp); 1028 1029/* 1030 * Routines having to do with the management of the vnode table. 1031 */ 1032 1033/* 1034 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1035 * before we actually vgone(). This function must be called with the vnode 1036 * held to prevent the vnode from being returned to the free list midway 1037 * through vgone(). 
1038 */ 1039 static int 1040 vtryrecycle(struct vnode *vp) 1041 { 1042 struct mount *vnmp; 1043 1044 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1045 VNASSERT(vp->v_holdcnt, vp, 1046 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1047 /* 1048 * This vnode may be found and locked via some other list; if so, we 1049 * can't recycle it yet. 1050 */ 1051 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1052 CTR2(KTR_VFS, 1053 "%s: impossible to recycle, vp %p lock is already held", 1054 __func__, vp); 1055 return (EWOULDBLOCK); 1056 } 1057 /* 1058 * Don't recycle if its filesystem is being suspended. 1059 */ 1060 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1061 VOP_UNLOCK(vp, 0); 1062 CTR2(KTR_VFS, 1063 "%s: impossible to recycle, cannot start the write for %p", 1064 __func__, vp); 1065 return (EBUSY); 1066 } 1067 /* 1068 * If we got this far, we need to acquire the interlock and see if 1069 * anyone picked up this vnode from another list. If not, we will 1070 * mark it with DOOMED via vgonel() so that anyone who does find it 1071 * will skip over it. 1072 */ 1073 VI_LOCK(vp); 1074 if (vp->v_usecount) { 1075 VOP_UNLOCK(vp, LK_INTERLOCK); 1076 vn_finished_write(vnmp); 1077 CTR2(KTR_VFS, 1078 "%s: impossible to recycle, %p is already referenced", 1079 __func__, vp); 1080 return (EBUSY); 1081 } 1082 if ((vp->v_iflag & VI_DOOMED) == 0) { 1083 atomic_add_long(&recycles_count, 1); 1084 vgonel(vp); 1085 } 1086 VOP_UNLOCK(vp, LK_INTERLOCK); 1087 vn_finished_write(vnmp); 1088 return (0); 1089 } 1090 1091 /* 1092 * Wait for available vnodes. 1093 */ 1094 static int 1095 getnewvnode_wait(int suspended) 1096 { 1097 1098 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1099 if (numvnodes > desiredvnodes) { 1100 if (suspended) { 1101 /* 1102 * The file system is being suspended; we cannot risk a 1103 * deadlock here, so allocate a new vnode anyway. 1104 */ 1105 if (freevnodes > wantfreevnodes) 1106 vnlru_free(freevnodes - wantfreevnodes); 1107 return (0); 1108 } 1109 if (vnlruproc_sig == 0) { 1110 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1111 wakeup(vnlruproc); 1112 } 1113 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1114 "vlruwk", hz); 1115 } 1116 return (numvnodes > desiredvnodes ? ENFILE : 0); 1117 } 1118 1119 void 1120 getnewvnode_reserve(u_int count) 1121 { 1122 struct thread *td; 1123 1124 td = curthread; 1125 /* First try to be quick and racy. */ 1126 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1127 td->td_vp_reserv += count; 1128 return; 1129 } else 1130 atomic_subtract_long(&numvnodes, count); 1131 1132 mtx_lock(&vnode_free_list_mtx); 1133 while (count > 0) { 1134 if (getnewvnode_wait(0) == 0) { 1135 count--; 1136 td->td_vp_reserv++; 1137 atomic_add_long(&numvnodes, 1); 1138 } 1139 } 1140 mtx_unlock(&vnode_free_list_mtx); 1141 } 1142 1143 void 1144 getnewvnode_drop_reserve(void) 1145 { 1146 struct thread *td; 1147 1148 td = curthread; 1149 atomic_subtract_long(&numvnodes, td->td_vp_reserv); 1150 td->td_vp_reserv = 0; 1151 } 1152 1153 /* 1154 * Return the next vnode from the free list.
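 *
 * getnewvnode_reserve() above first tries a racy atomic bump of numvnodes and
 * backs the bump out if that would overshoot the limit.  A minimal userland
 * sketch of that optimistic pattern follows, using C11 atomics in place of
 * the kernel's atomic(9) primitives; all demo_* names and numbers are made up.
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>
#include <stdatomic.h>

static _Atomic long demo_numvnodes;
static const long demo_limit = 1000;

/* Returns 1 when the fast, racy reservation succeeds, 0 when it backs out. */
static int
demo_reserve_fast(long count)
{

	if (atomic_fetch_add(&demo_numvnodes, count) + count <= demo_limit)
		return (1);
	atomic_fetch_sub(&demo_numvnodes, count);	/* undo and go slow */
	return (0);
}

int
main(void)
{

	atomic_store(&demo_numvnodes, 990);
	printf("reserve 5:  %s\n", demo_reserve_fast(5) ? "fast" : "slow path");
	printf("reserve 50: %s\n", demo_reserve_fast(50) ? "fast" : "slow path");
	return (0);
}
#endif
/*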
1155 */ 1156int 1157getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1158 struct vnode **vpp) 1159{ 1160 struct vnode *vp; 1161 struct thread *td; 1162 struct lock_object *lo; 1163 int error; 1164 1165 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1166 vp = NULL; 1167 td = curthread; 1168 if (td->td_vp_reserv > 0) { 1169 td->td_vp_reserv -= 1; 1170 goto alloc; 1171 } 1172 mtx_lock(&vnode_free_list_mtx); 1173 /* 1174 * Lend our context to reclaim vnodes if they've exceeded the max. 1175 */ 1176 if (freevnodes > wantfreevnodes) 1177 vnlru_free(1); 1178 error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & 1179 MNTK_SUSPEND)); 1180#if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */ 1181 if (error != 0) { 1182 mtx_unlock(&vnode_free_list_mtx); 1183 return (error); 1184 } 1185#endif 1186 atomic_add_long(&numvnodes, 1); 1187 mtx_unlock(&vnode_free_list_mtx); 1188alloc: 1189 atomic_add_long(&vnodes_created, 1); 1190 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK); 1191 /* 1192 * Locks are given the generic name "vnode" when created. 1193 * Follow the historic practice of using the filesystem 1194 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1195 * 1196 * Locks live in a witness group keyed on their name. Thus, 1197 * when a lock is renamed, it must also move from the witness 1198 * group of its old name to the witness group of its new name. 1199 * 1200 * The change only needs to be made when the vnode moves 1201 * from one filesystem type to another. We ensure that each 1202 * filesystem use a single static name pointer for its tag so 1203 * that we can compare pointers rather than doing a strcmp(). 1204 */ 1205 lo = &vp->v_vnlock->lock_object; 1206 if (lo->lo_name != tag) { 1207 lo->lo_name = tag; 1208 WITNESS_DESTROY(lo); 1209 WITNESS_INIT(lo, tag); 1210 } 1211 /* 1212 * By default, don't allow shared locks unless filesystems opt-in. 1213 */ 1214 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1215 /* 1216 * Finalize various vnode identity bits. 1217 */ 1218 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1219 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1220 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1221 vp->v_type = VNON; 1222 vp->v_tag = tag; 1223 vp->v_op = vops; 1224 v_incr_usecount(vp); 1225 vp->v_bufobj.bo_ops = &buf_ops_bio; 1226#ifdef MAC 1227 mac_vnode_init(vp); 1228 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1229 mac_vnode_associate_singlelabel(mp, vp); 1230 else if (mp == NULL && vops != &dead_vnodeops) 1231 printf("NULL mp in getnewvnode()\n"); 1232#endif 1233 if (mp != NULL) { 1234 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1235 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1236 vp->v_vflag |= VV_NOKNOTE; 1237 } 1238 1239 /* 1240 * For the filesystems which do not use vfs_hash_insert(), 1241 * still initialize v_hash to have vfs_hash_index() useful. 1242 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1243 * its own hashing. 1244 */ 1245 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1246 1247 *vpp = vp; 1248 return (0); 1249} 1250 1251/* 1252 * Delete from old mount point vnode list, if on one. 
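 *
 * getnewvnode() above seeds v_hash from the vnode's own address shifted right
 * by vnsz2log, the shift that vntblinit() computed as the floor of
 * log2(sizeof(struct vnode)).  A minimal sketch of both steps, using an
 * illustrative structure size rather than the real one:
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* floor(log2(size)), computed the same way as the loop in vntblinit(). */
static int
demo_sz2log(size_t size)
{
	size_t i;
	int log2v = 0;

	for (i = 1; i <= size; i <<= 1)
		log2v++;
	return (log2v - 1);
}

int
main(void)
{
	size_t vnode_sz = 480;	/* illustrative, not sizeof(struct vnode) */
	int shift = demo_sz2log(vnode_sz);
	void *p = malloc(vnode_sz);

	/* Dropping the low, mostly-constant address bits spreads the hash. */
	printf("shift %d, hash %#jx\n", shift,
	    (uintmax_t)((uintptr_t)p >> shift));
	free(p);
	return (0);
}
#endif
/*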
1253 */ 1254static void 1255delmntque(struct vnode *vp) 1256{ 1257 struct mount *mp; 1258 int active; 1259 1260 mp = vp->v_mount; 1261 if (mp == NULL) 1262 return; 1263 MNT_ILOCK(mp); 1264 VI_LOCK(vp); 1265 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1266 ("Active vnode list size %d > Vnode list size %d", 1267 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1268 active = vp->v_iflag & VI_ACTIVE; 1269 vp->v_iflag &= ~VI_ACTIVE; 1270 if (active) { 1271 mtx_lock(&vnode_free_list_mtx); 1272 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1273 mp->mnt_activevnodelistsize--; 1274 mtx_unlock(&vnode_free_list_mtx); 1275 } 1276 vp->v_mount = NULL; 1277 VI_UNLOCK(vp); 1278 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1279 ("bad mount point vnode list size")); 1280 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1281 mp->mnt_nvnodelistsize--; 1282 MNT_REL(mp); 1283 MNT_IUNLOCK(mp); 1284} 1285 1286static void 1287insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1288{ 1289 1290 vp->v_data = NULL; 1291 vp->v_op = &dead_vnodeops; 1292 vgone(vp); 1293 vput(vp); 1294} 1295 1296/* 1297 * Insert into list of vnodes for the new mount point, if available. 1298 */ 1299int 1300insmntque1(struct vnode *vp, struct mount *mp, 1301 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1302{ 1303 1304 KASSERT(vp->v_mount == NULL, 1305 ("insmntque: vnode already on per mount vnode list")); 1306 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1307 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1308 1309 /* 1310 * We acquire the vnode interlock early to ensure that the 1311 * vnode cannot be recycled by another process releasing a 1312 * holdcnt on it before we get it on both the vnode list 1313 * and the active vnode list. The mount mutex protects only 1314 * manipulation of the vnode list and the vnode freelist 1315 * mutex protects only manipulation of the active vnode list. 1316 * Hence the need to hold the vnode interlock throughout. 1317 */ 1318 MNT_ILOCK(mp); 1319 VI_LOCK(vp); 1320 if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 && 1321 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1322 mp->mnt_nvnodelistsize == 0)) && 1323 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1324 VI_UNLOCK(vp); 1325 MNT_IUNLOCK(mp); 1326 if (dtr != NULL) 1327 dtr(vp, dtr_arg); 1328 return (EBUSY); 1329 } 1330 vp->v_mount = mp; 1331 MNT_REF(mp); 1332 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1333 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1334 ("neg mount point vnode list size")); 1335 mp->mnt_nvnodelistsize++; 1336 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1337 ("Activating already active vnode")); 1338 vp->v_iflag |= VI_ACTIVE; 1339 mtx_lock(&vnode_free_list_mtx); 1340 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1341 mp->mnt_activevnodelistsize++; 1342 mtx_unlock(&vnode_free_list_mtx); 1343 VI_UNLOCK(vp); 1344 MNT_IUNLOCK(mp); 1345 return (0); 1346} 1347 1348int 1349insmntque(struct vnode *vp, struct mount *mp) 1350{ 1351 1352 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1353} 1354 1355/* 1356 * Flush out and invalidate all buffers associated with a bufobj 1357 * Called with the underlying object locked. 
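 *
 * bufobj_invalbuf() below repeatedly walks the clean and dirty lists because
 * flushbuflist() returns EAGAIN whenever it may have left work behind (the
 * lock is dropped while flushing, so the list can change underneath it); the
 * caller keeps rescanning until a pass returns 0 and aborts on any other
 * error.  A minimal sketch of that rescan-until-stable shape, with a fake
 * flush step standing in for flushbuflist():
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>
#include <errno.h>

/* Pretend the list changes twice before a pass finally completes cleanly. */
static int
demo_flush_pass(int *rescans)
{

	if (*rescans > 0) {
		(*rescans)--;
		return (EAGAIN);
	}
	return (0);
}

int
main(void)
{
	int rescans = 2, passes = 0, error;

	do {
		passes++;
		error = demo_flush_pass(&rescans);
		if (error != 0 && error != EAGAIN)
			return (1);	/* hard failure, give up */
	} while (error != 0);
	printf("lists stable after %d pass(es)\n", passes);
	return (0);
}
#endif
/*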
1358 */ 1359int 1360bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1361{ 1362 int error; 1363 1364 BO_LOCK(bo); 1365 if (flags & V_SAVE) { 1366 error = bufobj_wwait(bo, slpflag, slptimeo); 1367 if (error) { 1368 BO_UNLOCK(bo); 1369 return (error); 1370 } 1371 if (bo->bo_dirty.bv_cnt > 0) { 1372 BO_UNLOCK(bo); 1373 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1374 return (error); 1375 /* 1376 * XXX We could save a lock/unlock if this was only 1377 * enabled under INVARIANTS 1378 */ 1379 BO_LOCK(bo); 1380 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1381 panic("vinvalbuf: dirty bufs"); 1382 } 1383 } 1384 /* 1385 * If you alter this loop please notice that interlock is dropped and 1386 * reacquired in flushbuflist. Special care is needed to ensure that 1387 * no race conditions occur from this. 1388 */ 1389 do { 1390 error = flushbuflist(&bo->bo_clean, 1391 flags, bo, slpflag, slptimeo); 1392 if (error == 0 && !(flags & V_CLEANONLY)) 1393 error = flushbuflist(&bo->bo_dirty, 1394 flags, bo, slpflag, slptimeo); 1395 if (error != 0 && error != EAGAIN) { 1396 BO_UNLOCK(bo); 1397 return (error); 1398 } 1399 } while (error != 0); 1400 1401 /* 1402 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1403 * have write I/O in-progress but if there is a VM object then the 1404 * VM object can also have read-I/O in-progress. 1405 */ 1406 do { 1407 bufobj_wwait(bo, 0, 0); 1408 BO_UNLOCK(bo); 1409 if (bo->bo_object != NULL) { 1410 VM_OBJECT_WLOCK(bo->bo_object); 1411 vm_object_pip_wait(bo->bo_object, "bovlbx"); 1412 VM_OBJECT_WUNLOCK(bo->bo_object); 1413 } 1414 BO_LOCK(bo); 1415 } while (bo->bo_numoutput > 0); 1416 BO_UNLOCK(bo); 1417 1418 /* 1419 * Destroy the copy in the VM cache, too. 1420 */ 1421 if (bo->bo_object != NULL && 1422 (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) { 1423 VM_OBJECT_WLOCK(bo->bo_object); 1424 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1425 OBJPR_CLEANONLY : 0); 1426 VM_OBJECT_WUNLOCK(bo->bo_object); 1427 } 1428 1429#ifdef INVARIANTS 1430 BO_LOCK(bo); 1431 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 && 1432 (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) 1433 panic("vinvalbuf: flush failed"); 1434 BO_UNLOCK(bo); 1435#endif 1436 return (0); 1437} 1438 1439/* 1440 * Flush out and invalidate all buffers associated with a vnode. 1441 * Called with the underlying object locked. 1442 */ 1443int 1444vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1445{ 1446 1447 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1448 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1449 if (vp->v_object != NULL && vp->v_object->handle != vp) 1450 return (0); 1451 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1452} 1453 1454/* 1455 * Flush out buffers on the specified list. 
1456 * 1457 */ 1458static int 1459flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1460 int slptimeo) 1461{ 1462 struct buf *bp, *nbp; 1463 int retval, error; 1464 daddr_t lblkno; 1465 b_xflags_t xflags; 1466 1467 ASSERT_BO_WLOCKED(bo); 1468 1469 retval = 0; 1470 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1471 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) || 1472 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) { 1473 continue; 1474 } 1475 lblkno = 0; 1476 xflags = 0; 1477 if (nbp != NULL) { 1478 lblkno = nbp->b_lblkno; 1479 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1480 } 1481 retval = EAGAIN; 1482 error = BUF_TIMELOCK(bp, 1483 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1484 "flushbuf", slpflag, slptimeo); 1485 if (error) { 1486 BO_LOCK(bo); 1487 return (error != ENOLCK ? error : EAGAIN); 1488 } 1489 KASSERT(bp->b_bufobj == bo, 1490 ("bp %p wrong b_bufobj %p should be %p", 1491 bp, bp->b_bufobj, bo)); 1492 if (bp->b_bufobj != bo) { /* XXX: necessary ? */ 1493 BUF_UNLOCK(bp); 1494 BO_LOCK(bo); 1495 return (EAGAIN); 1496 } 1497 /* 1498 * XXX Since there are no node locks for NFS, I 1499 * believe there is a slight chance that a delayed 1500 * write will occur while sleeping just above, so 1501 * check for it. 1502 */ 1503 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1504 (flags & V_SAVE)) { 1505 bremfree(bp); 1506 bp->b_flags |= B_ASYNC; 1507 bwrite(bp); 1508 BO_LOCK(bo); 1509 return (EAGAIN); /* XXX: why not loop ? */ 1510 } 1511 bremfree(bp); 1512 bp->b_flags |= (B_INVAL | B_RELBUF); 1513 bp->b_flags &= ~B_ASYNC; 1514 brelse(bp); 1515 BO_LOCK(bo); 1516 if (nbp != NULL && 1517 (nbp->b_bufobj != bo || 1518 nbp->b_lblkno != lblkno || 1519 (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) != xflags)) 1520 break; /* nbp invalid */ 1521 } 1522 return (retval); 1523} 1524 1525/* 1526 * Truncate a file's buffer and pages to a specified length. This 1527 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1528 * sync activity. 1529 */ 1530int 1531vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) 1532{ 1533 struct buf *bp, *nbp; 1534 int anyfreed; 1535 int trunclbn; 1536 struct bufobj *bo; 1537 1538 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, 1539 vp, cred, blksize, (uintmax_t)length); 1540 1541 /* 1542 * Round up to the *next* lbn. 
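 *
 * The computation just below converts the new length in bytes into the first
 * logical block number to throw away, rounding up so that a block still
 * partially covered by the file is kept.  A minimal sketch of that rounding,
 * with an assumed block size:
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>

int
main(void)
{
	long blksize = 16384;	/* assumed filesystem block size */
	long lengths[] = { 0, 1, 16384, 16385, 40000 };
	int i;

	for (i = 0; i < 5; i++) {
		long trunclbn = (lengths[i] + blksize - 1) / blksize;
		printf("length %6ld -> discard buffers from lbn %ld\n",
		    lengths[i], trunclbn);
	}
	return (0);
}
#endif
/*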
1543 */ 1544 trunclbn = (length + blksize - 1) / blksize; 1545 1546 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1547restart: 1548 bo = &vp->v_bufobj; 1549 BO_LOCK(bo); 1550 anyfreed = 1; 1551 for (;anyfreed;) { 1552 anyfreed = 0; 1553 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1554 if (bp->b_lblkno < trunclbn) 1555 continue; 1556 if (BUF_LOCK(bp, 1557 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1558 BO_LOCKPTR(bo)) == ENOLCK) 1559 goto restart; 1560 1561 bremfree(bp); 1562 bp->b_flags |= (B_INVAL | B_RELBUF); 1563 bp->b_flags &= ~B_ASYNC; 1564 brelse(bp); 1565 anyfreed = 1; 1566 1567 BO_LOCK(bo); 1568 if (nbp != NULL && 1569 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1570 (nbp->b_vp != vp) || 1571 (nbp->b_flags & B_DELWRI))) { 1572 BO_UNLOCK(bo); 1573 goto restart; 1574 } 1575 } 1576 1577 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1578 if (bp->b_lblkno < trunclbn) 1579 continue; 1580 if (BUF_LOCK(bp, 1581 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1582 BO_LOCKPTR(bo)) == ENOLCK) 1583 goto restart; 1584 bremfree(bp); 1585 bp->b_flags |= (B_INVAL | B_RELBUF); 1586 bp->b_flags &= ~B_ASYNC; 1587 brelse(bp); 1588 anyfreed = 1; 1589 1590 BO_LOCK(bo); 1591 if (nbp != NULL && 1592 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1593 (nbp->b_vp != vp) || 1594 (nbp->b_flags & B_DELWRI) == 0)) { 1595 BO_UNLOCK(bo); 1596 goto restart; 1597 } 1598 } 1599 } 1600 1601 if (length > 0) { 1602restartsync: 1603 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1604 if (bp->b_lblkno > 0) 1605 continue; 1606 /* 1607 * Since we hold the vnode lock this should only 1608 * fail if we're racing with the buf daemon. 1609 */ 1610 if (BUF_LOCK(bp, 1611 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1612 BO_LOCKPTR(bo)) == ENOLCK) { 1613 goto restart; 1614 } 1615 VNASSERT((bp->b_flags & B_DELWRI), vp, 1616 ("buf(%p) on dirty queue without DELWRI", bp)); 1617 1618 bremfree(bp); 1619 bawrite(bp); 1620 BO_LOCK(bo); 1621 goto restartsync; 1622 } 1623 } 1624 1625 bufobj_wwait(bo, 0, 0); 1626 BO_UNLOCK(bo); 1627 vnode_pager_setsize(vp, length); 1628 1629 return (0); 1630} 1631 1632static void 1633buf_vlist_remove(struct buf *bp) 1634{ 1635 struct bufv *bv; 1636 1637 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1638 ASSERT_BO_WLOCKED(bp->b_bufobj); 1639 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1640 (BX_VNDIRTY|BX_VNCLEAN), 1641 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1642 if (bp->b_xflags & BX_VNDIRTY) 1643 bv = &bp->b_bufobj->bo_dirty; 1644 else 1645 bv = &bp->b_bufobj->bo_clean; 1646 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 1647 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1648 bv->bv_cnt--; 1649 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1650} 1651 1652/* 1653 * Add the buffer to the sorted clean or dirty block list. 1654 * 1655 * NOTE: xflags is passed as a constant, optimizing this inline function! 1656 */ 1657static void 1658buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1659{ 1660 struct bufv *bv; 1661 struct buf *n; 1662 int error; 1663 1664 ASSERT_BO_WLOCKED(bo); 1665 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 1666 ("dead bo %p", bo)); 1667 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 1668 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 1669 bp->b_xflags |= xflags; 1670 if (xflags & BX_VNDIRTY) 1671 bv = &bo->bo_dirty; 1672 else 1673 bv = &bo->bo_clean; 1674 1675 /* 1676 * Keep the list ordered. Optimize empty list insertion. 
Assume 1677 * we tend to grow at the tail so lookup_le should usually be cheaper 1678 * than _ge. 1679 */ 1680 if (bv->bv_cnt == 0 || 1681 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 1682 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 1683 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 1684 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 1685 else 1686 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 1687 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 1688 if (error) 1689 panic("buf_vlist_add: Preallocated nodes insufficient."); 1690 bv->bv_cnt++; 1691} 1692 1693/* 1694 * Lookup a buffer using the splay tree. Note that we specifically avoid 1695 * shadow buffers used in background bitmap writes. 1696 * 1697 * This code isn't quite efficient as it could be because we are maintaining 1698 * two sorted lists and do not know which list the block resides in. 1699 * 1700 * During a "make buildworld" the desired buffer is found at one of 1701 * the roots more than 60% of the time. Thus, checking both roots 1702 * before performing either splay eliminates unnecessary splays on the 1703 * first tree splayed. 1704 */ 1705struct buf * 1706gbincore(struct bufobj *bo, daddr_t lblkno) 1707{ 1708 struct buf *bp; 1709 1710 ASSERT_BO_LOCKED(bo); 1711 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 1712 if (bp != NULL) 1713 return (bp); 1714 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 1715} 1716 1717/* 1718 * Associate a buffer with a vnode. 1719 */ 1720void 1721bgetvp(struct vnode *vp, struct buf *bp) 1722{ 1723 struct bufobj *bo; 1724 1725 bo = &vp->v_bufobj; 1726 ASSERT_BO_WLOCKED(bo); 1727 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 1728 1729 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 1730 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 1731 ("bgetvp: bp already attached! %p", bp)); 1732 1733 vhold(vp); 1734 bp->b_vp = vp; 1735 bp->b_bufobj = bo; 1736 /* 1737 * Insert onto list for new vnode. 1738 */ 1739 buf_vlist_add(bp, bo, BX_VNCLEAN); 1740} 1741 1742/* 1743 * Disassociate a buffer from a vnode. 1744 */ 1745void 1746brelvp(struct buf *bp) 1747{ 1748 struct bufobj *bo; 1749 struct vnode *vp; 1750 1751 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1752 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1753 1754 /* 1755 * Delete from old vnode list, if on one. 1756 */ 1757 vp = bp->b_vp; /* XXX */ 1758 bo = bp->b_bufobj; 1759 BO_LOCK(bo); 1760 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1761 buf_vlist_remove(bp); 1762 else 1763 panic("brelvp: Buffer %p not on queue.", bp); 1764 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 1765 bo->bo_flag &= ~BO_ONWORKLST; 1766 mtx_lock(&sync_mtx); 1767 LIST_REMOVE(bo, bo_synclist); 1768 syncer_worklist_len--; 1769 mtx_unlock(&sync_mtx); 1770 } 1771 bp->b_vp = NULL; 1772 bp->b_bufobj = NULL; 1773 BO_UNLOCK(bo); 1774 vdrop(vp); 1775} 1776 1777/* 1778 * Add an item to the syncer work queue. 
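 *
 * vn_syncer_add_to_worklist() below clamps the requested delay and folds it
 * into one of the SYNCER_MAXDELAY buckets relative to the bucket currently
 * being drained; the syncer then visits one bucket per second.  A minimal
 * sketch of that slot computation, assuming the same power-of-two bucket
 * count; demo_* names and the sample "current bucket" are made up.
 */
#if 0	/* Illustrative userland sketch, not part of this file's build. */
#include <stdio.h>

#define DEMO_MAXDELAY	32			/* matches SYNCER_MAXDELAY */
#define DEMO_MASK	(DEMO_MAXDELAY - 1)

static int
demo_syncer_slot(int delayno, int delay)
{

	if (delay > DEMO_MAXDELAY - 2)
		delay = DEMO_MAXDELAY - 2;
	return ((delayno + delay) & DEMO_MASK);
}

int
main(void)
{
	int now = 20;	/* bucket currently being processed, assumed */

	printf("filedelay 30 -> slot %d\n", demo_syncer_slot(now, 30));
	printf("dirdelay  29 -> slot %d\n", demo_syncer_slot(now, 29));
	printf("metadelay 28 -> slot %d\n", demo_syncer_slot(now, 28));
	return (0);
}
#endif
/*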
1779 */ 1780static void 1781vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 1782{ 1783 int slot; 1784 1785 ASSERT_BO_WLOCKED(bo); 1786 1787 mtx_lock(&sync_mtx); 1788 if (bo->bo_flag & BO_ONWORKLST) 1789 LIST_REMOVE(bo, bo_synclist); 1790 else { 1791 bo->bo_flag |= BO_ONWORKLST; 1792 syncer_worklist_len++; 1793 } 1794 1795 if (delay > syncer_maxdelay - 2) 1796 delay = syncer_maxdelay - 2; 1797 slot = (syncer_delayno + delay) & syncer_mask; 1798 1799 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 1800 mtx_unlock(&sync_mtx); 1801} 1802 1803static int 1804sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 1805{ 1806 int error, len; 1807 1808 mtx_lock(&sync_mtx); 1809 len = syncer_worklist_len - sync_vnode_count; 1810 mtx_unlock(&sync_mtx); 1811 error = SYSCTL_OUT(req, &len, sizeof(len)); 1812 return (error); 1813} 1814 1815SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 1816 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 1817 1818static struct proc *updateproc; 1819static void sched_sync(void); 1820static struct kproc_desc up_kp = { 1821 "syncer", 1822 sched_sync, 1823 &updateproc 1824}; 1825SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 1826 1827static int 1828sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 1829{ 1830 struct vnode *vp; 1831 struct mount *mp; 1832 1833 *bo = LIST_FIRST(slp); 1834 if (*bo == NULL) 1835 return (0); 1836 vp = (*bo)->__bo_vnode; /* XXX */ 1837 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 1838 return (1); 1839 /* 1840 * We use vhold in case the vnode does not 1841 * successfully sync. vhold prevents the vnode from 1842 * going away when we unlock the sync_mtx so that 1843 * we can acquire the vnode interlock. 1844 */ 1845 vholdl(vp); 1846 mtx_unlock(&sync_mtx); 1847 VI_UNLOCK(vp); 1848 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1849 vdrop(vp); 1850 mtx_lock(&sync_mtx); 1851 return (*bo == LIST_FIRST(slp)); 1852 } 1853 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1854 (void) VOP_FSYNC(vp, MNT_LAZY, td); 1855 VOP_UNLOCK(vp, 0); 1856 vn_finished_write(mp); 1857 BO_LOCK(*bo); 1858 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 1859 /* 1860 * Put us back on the worklist. The worklist 1861 * routine will remove us from our current 1862 * position and then add us back in at a later 1863 * position. 1864 */ 1865 vn_syncer_add_to_worklist(*bo, syncdelay); 1866 } 1867 BO_UNLOCK(*bo); 1868 vdrop(vp); 1869 mtx_lock(&sync_mtx); 1870 return (0); 1871} 1872 1873static int first_printf = 1; 1874 1875/* 1876 * System filesystem synchronizer daemon. 
1877 */ 1878static void 1879sched_sync(void) 1880{ 1881 struct synclist *next, *slp; 1882 struct bufobj *bo; 1883 long starttime; 1884 struct thread *td = curthread; 1885 int last_work_seen; 1886 int net_worklist_len; 1887 int syncer_final_iter; 1888 int error; 1889 1890 last_work_seen = 0; 1891 syncer_final_iter = 0; 1892 syncer_state = SYNCER_RUNNING; 1893 starttime = time_uptime; 1894 td->td_pflags |= TDP_NORUNNINGBUF; 1895 1896 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 1897 SHUTDOWN_PRI_LAST); 1898 1899 mtx_lock(&sync_mtx); 1900 for (;;) { 1901 if (syncer_state == SYNCER_FINAL_DELAY && 1902 syncer_final_iter == 0) { 1903 mtx_unlock(&sync_mtx); 1904 kproc_suspend_check(td->td_proc); 1905 mtx_lock(&sync_mtx); 1906 } 1907 net_worklist_len = syncer_worklist_len - sync_vnode_count; 1908 if (syncer_state != SYNCER_RUNNING && 1909 starttime != time_uptime) { 1910 if (first_printf) { 1911 printf("\nSyncing disks, vnodes remaining..."); 1912 first_printf = 0; 1913 } 1914 printf("%d ", net_worklist_len); 1915 } 1916 starttime = time_uptime; 1917 1918 /* 1919 * Push files whose dirty time has expired. Be careful 1920 * of interrupt race on slp queue. 1921 * 1922 * Skip over empty worklist slots when shutting down. 1923 */ 1924 do { 1925 slp = &syncer_workitem_pending[syncer_delayno]; 1926 syncer_delayno += 1; 1927 if (syncer_delayno == syncer_maxdelay) 1928 syncer_delayno = 0; 1929 next = &syncer_workitem_pending[syncer_delayno]; 1930 /* 1931 * If the worklist has wrapped since the 1932 * it was emptied of all but syncer vnodes, 1933 * switch to the FINAL_DELAY state and run 1934 * for one more second. 1935 */ 1936 if (syncer_state == SYNCER_SHUTTING_DOWN && 1937 net_worklist_len == 0 && 1938 last_work_seen == syncer_delayno) { 1939 syncer_state = SYNCER_FINAL_DELAY; 1940 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 1941 } 1942 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 1943 syncer_worklist_len > 0); 1944 1945 /* 1946 * Keep track of the last time there was anything 1947 * on the worklist other than syncer vnodes. 1948 * Return to the SHUTTING_DOWN state if any 1949 * new work appears. 1950 */ 1951 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 1952 last_work_seen = syncer_delayno; 1953 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 1954 syncer_state = SYNCER_SHUTTING_DOWN; 1955 while (!LIST_EMPTY(slp)) { 1956 error = sync_vnode(slp, &bo, td); 1957 if (error == 1) { 1958 LIST_REMOVE(bo, bo_synclist); 1959 LIST_INSERT_HEAD(next, bo, bo_synclist); 1960 continue; 1961 } 1962 1963 if (first_printf == 0) 1964 wdog_kern_pat(WD_LASTVAL); 1965 1966 } 1967 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 1968 syncer_final_iter--; 1969 /* 1970 * The variable rushjob allows the kernel to speed up the 1971 * processing of the filesystem syncer process. A rushjob 1972 * value of N tells the filesystem syncer to process the next 1973 * N seconds worth of work on its queue ASAP. Currently rushjob 1974 * is used by the soft update code to speed up the filesystem 1975 * syncer process when the incore state is getting so far 1976 * ahead of the disk that the kernel memory pool is being 1977 * threatened with exhaustion. 1978 */ 1979 if (rushjob > 0) { 1980 rushjob -= 1; 1981 continue; 1982 } 1983 /* 1984 * Just sleep for a short period of time between 1985 * iterations when shutting down to allow some I/O 1986 * to happen. 1987 * 1988 * If it has taken us less than a second to process the 1989 * current work, then wait. 
Otherwise start right over 1990 * again. We can still lose time if any single round 1991 * takes more than two seconds, but it does not really 1992 * matter as we are just trying to generally pace the 1993 * filesystem activity. 1994 */ 1995 if (syncer_state != SYNCER_RUNNING || 1996 time_uptime == starttime) { 1997 thread_lock(td); 1998 sched_prio(td, PPAUSE); 1999 thread_unlock(td); 2000 } 2001 if (syncer_state != SYNCER_RUNNING) 2002 cv_timedwait(&sync_wakeup, &sync_mtx, 2003 hz / SYNCER_SHUTDOWN_SPEEDUP); 2004 else if (time_uptime == starttime) 2005 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2006 } 2007} 2008 2009/* 2010 * Request the syncer daemon to speed up its work. 2011 * We never push it to speed up more than half of its 2012 * normal turn time, otherwise it could take over the cpu. 2013 */ 2014int 2015speedup_syncer(void) 2016{ 2017 int ret = 0; 2018 2019 mtx_lock(&sync_mtx); 2020 if (rushjob < syncdelay / 2) { 2021 rushjob += 1; 2022 stat_rush_requests += 1; 2023 ret = 1; 2024 } 2025 mtx_unlock(&sync_mtx); 2026 cv_broadcast(&sync_wakeup); 2027 return (ret); 2028} 2029 2030/* 2031 * Tell the syncer to speed up its work and run though its work 2032 * list several times, then tell it to shut down. 2033 */ 2034static void 2035syncer_shutdown(void *arg, int howto) 2036{ 2037 2038 if (howto & RB_NOSYNC) 2039 return; 2040 mtx_lock(&sync_mtx); 2041 syncer_state = SYNCER_SHUTTING_DOWN; 2042 rushjob = 0; 2043 mtx_unlock(&sync_mtx); 2044 cv_broadcast(&sync_wakeup); 2045 kproc_shutdown(arg, howto); 2046} 2047 2048void 2049syncer_suspend(void) 2050{ 2051 2052 syncer_shutdown(updateproc, 0); 2053} 2054 2055void 2056syncer_resume(void) 2057{ 2058 2059 mtx_lock(&sync_mtx); 2060 first_printf = 1; 2061 syncer_state = SYNCER_RUNNING; 2062 mtx_unlock(&sync_mtx); 2063 cv_broadcast(&sync_wakeup); 2064 kproc_resume(updateproc); 2065} 2066 2067/* 2068 * Reassign a buffer from one vnode to another. 2069 * Used to assign file specific control information 2070 * (indirect blocks) to the vnode to which they belong. 2071 */ 2072void 2073reassignbuf(struct buf *bp) 2074{ 2075 struct vnode *vp; 2076 struct bufobj *bo; 2077 int delay; 2078#ifdef INVARIANTS 2079 struct bufv *bv; 2080#endif 2081 2082 vp = bp->b_vp; 2083 bo = bp->b_bufobj; 2084 ++reassignbufcalls; 2085 2086 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2087 bp, bp->b_vp, bp->b_flags); 2088 /* 2089 * B_PAGING flagged buffers cannot be reassigned because their vp 2090 * is not fully linked in. 2091 */ 2092 if (bp->b_flags & B_PAGING) 2093 panic("cannot reassign paging buffer"); 2094 2095 /* 2096 * Delete from old vnode list, if on one. 2097 */ 2098 BO_LOCK(bo); 2099 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2100 buf_vlist_remove(bp); 2101 else 2102 panic("reassignbuf: Buffer %p not on queue.", bp); 2103 /* 2104 * If dirty, put on list of dirty buffers; otherwise insert onto list 2105 * of clean buffers. 
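 * A delayed-write (B_DELWRI) buffer also places the vnode's bufobj on
 * the syncer worklist, choosing dirdelay, metadelay or filedelay below
 * depending on whether the vnode is a directory, a character device
 * carrying metadata, or an ordinary file; a bufobj whose last dirty
 * buffer moves to the clean list is taken off the worklist again.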
2106 */ 2107 if (bp->b_flags & B_DELWRI) { 2108 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2109 switch (vp->v_type) { 2110 case VDIR: 2111 delay = dirdelay; 2112 break; 2113 case VCHR: 2114 delay = metadelay; 2115 break; 2116 default: 2117 delay = filedelay; 2118 } 2119 vn_syncer_add_to_worklist(bo, delay); 2120 } 2121 buf_vlist_add(bp, bo, BX_VNDIRTY); 2122 } else { 2123 buf_vlist_add(bp, bo, BX_VNCLEAN); 2124 2125 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2126 mtx_lock(&sync_mtx); 2127 LIST_REMOVE(bo, bo_synclist); 2128 syncer_worklist_len--; 2129 mtx_unlock(&sync_mtx); 2130 bo->bo_flag &= ~BO_ONWORKLST; 2131 } 2132 } 2133#ifdef INVARIANTS 2134 bv = &bo->bo_clean; 2135 bp = TAILQ_FIRST(&bv->bv_hd); 2136 KASSERT(bp == NULL || bp->b_bufobj == bo, 2137 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2138 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2139 KASSERT(bp == NULL || bp->b_bufobj == bo, 2140 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2141 bv = &bo->bo_dirty; 2142 bp = TAILQ_FIRST(&bv->bv_hd); 2143 KASSERT(bp == NULL || bp->b_bufobj == bo, 2144 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2145 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2146 KASSERT(bp == NULL || bp->b_bufobj == bo, 2147 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2148#endif 2149 BO_UNLOCK(bo); 2150} 2151 2152/* 2153 * Increment the use and hold counts on the vnode, taking care to reference 2154 * the driver's usecount if this is a chardev. The vholdl() will remove 2155 * the vnode from the free list if it is presently free. Requires the 2156 * vnode interlock and returns with it held. 2157 */ 2158static void 2159v_incr_usecount(struct vnode *vp) 2160{ 2161 2162 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2163 vholdl(vp); 2164 vp->v_usecount++; 2165 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2166 dev_lock(); 2167 vp->v_rdev->si_usecount++; 2168 dev_unlock(); 2169 } 2170} 2171 2172/* 2173 * Turn a holdcnt into a use+holdcnt such that only one call to 2174 * v_decr_usecount is needed. 2175 */ 2176static void 2177v_upgrade_usecount(struct vnode *vp) 2178{ 2179 2180 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2181 vp->v_usecount++; 2182 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2183 dev_lock(); 2184 vp->v_rdev->si_usecount++; 2185 dev_unlock(); 2186 } 2187} 2188 2189/* 2190 * Decrement the vnode use and hold count along with the driver's usecount 2191 * if this is a chardev. The vdropl() below releases the vnode interlock 2192 * as it may free the vnode. 2193 */ 2194static void 2195v_decr_usecount(struct vnode *vp) 2196{ 2197 2198 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2199 VNASSERT(vp->v_usecount > 0, vp, 2200 ("v_decr_usecount: negative usecount")); 2201 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2202 vp->v_usecount--; 2203 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2204 dev_lock(); 2205 vp->v_rdev->si_usecount--; 2206 dev_unlock(); 2207 } 2208 vdropl(vp); 2209} 2210 2211/* 2212 * Decrement only the use count and driver use count. This is intended to 2213 * be paired with a follow on vdropl() to release the remaining hold count. 2214 * In this way we may vgone() a vnode with a 0 usecount without risk of 2215 * having it end up on a free list because the hold count is kept above 0. 
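 * Callers are expected to follow the pattern used by vputx() below:
 * drop the use count with v_decr_useonly(), perform any pending
 * VOP_INACTIVE() processing while the hold count still protects the
 * vnode, and finally release the hold with vdropl().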
2216 */ 2217static void 2218v_decr_useonly(struct vnode *vp) 2219{ 2220 2221 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2222 VNASSERT(vp->v_usecount > 0, vp, 2223 ("v_decr_useonly: negative usecount")); 2224 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2225 vp->v_usecount--; 2226 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2227 dev_lock(); 2228 vp->v_rdev->si_usecount--; 2229 dev_unlock(); 2230 } 2231} 2232 2233/* 2234 * Grab a particular vnode from the free list, increment its 2235 * reference count and lock it. VI_DOOMED is set if the vnode 2236 * is being destroyed. Only callers who specify LK_RETRY will 2237 * see doomed vnodes. If inactive processing was delayed in 2238 * vput try to do it here. 2239 */ 2240int 2241vget(struct vnode *vp, int flags, struct thread *td) 2242{ 2243 int error; 2244 2245 error = 0; 2246 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2247 ("vget: invalid lock operation")); 2248 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2249 2250 if ((flags & LK_INTERLOCK) == 0) 2251 VI_LOCK(vp); 2252 vholdl(vp); 2253 if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) { 2254 vdrop(vp); 2255 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2256 vp); 2257 return (error); 2258 } 2259 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2260 panic("vget: vn_lock failed to return ENOENT\n"); 2261 VI_LOCK(vp); 2262 /* Upgrade our holdcnt to a usecount. */ 2263 v_upgrade_usecount(vp); 2264 /* 2265 * We don't guarantee that any particular close will 2266 * trigger inactive processing so just make a best effort 2267 * here at preventing a reference to a removed file. If 2268 * we don't succeed no harm is done. 2269 */ 2270 if (vp->v_iflag & VI_OWEINACT) { 2271 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2272 (flags & LK_NOWAIT) == 0) 2273 vinactive(vp, td); 2274 vp->v_iflag &= ~VI_OWEINACT; 2275 } 2276 VI_UNLOCK(vp); 2277 return (0); 2278} 2279 2280/* 2281 * Increase the reference count of a vnode. 2282 */ 2283void 2284vref(struct vnode *vp) 2285{ 2286 2287 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2288 VI_LOCK(vp); 2289 v_incr_usecount(vp); 2290 VI_UNLOCK(vp); 2291} 2292 2293/* 2294 * Return reference count of a vnode. 2295 * 2296 * The results of this call are only guaranteed when some mechanism other 2297 * than the VI lock is used to stop other processes from gaining references 2298 * to the vnode. This may be the case if the caller holds the only reference. 2299 * This is also useful when stale data is acceptable as race conditions may 2300 * be accounted for by some other means. 2301 */ 2302int 2303vrefcnt(struct vnode *vp) 2304{ 2305 int usecnt; 2306 2307 VI_LOCK(vp); 2308 usecnt = vp->v_usecount; 2309 VI_UNLOCK(vp); 2310 2311 return (usecnt); 2312} 2313 2314#define VPUTX_VRELE 1 2315#define VPUTX_VPUT 2 2316#define VPUTX_VUNREF 3 2317 2318static void 2319vputx(struct vnode *vp, int func) 2320{ 2321 int error; 2322 2323 KASSERT(vp != NULL, ("vputx: null vp")); 2324 if (func == VPUTX_VUNREF) 2325 ASSERT_VOP_LOCKED(vp, "vunref"); 2326 else if (func == VPUTX_VPUT) 2327 ASSERT_VOP_LOCKED(vp, "vput"); 2328 else 2329 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2330 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2331 VI_LOCK(vp); 2332 2333 /* Skip this v_writecount check if we're going to panic below. 
*/ 2334 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2335 ("vputx: missed vn_close")); 2336 error = 0; 2337 2338 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2339 vp->v_usecount == 1)) { 2340 if (func == VPUTX_VPUT) 2341 VOP_UNLOCK(vp, 0); 2342 v_decr_usecount(vp); 2343 return; 2344 } 2345 2346 if (vp->v_usecount != 1) { 2347 vprint("vputx: negative ref count", vp); 2348 panic("vputx: negative ref cnt"); 2349 } 2350 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2351 /* 2352 * We want to hold the vnode until the inactive finishes to 2353 * prevent vgone() races. We drop the use count here and the 2354 * hold count below when we're done. 2355 */ 2356 v_decr_useonly(vp); 2357 /* 2358 * We must call VOP_INACTIVE with the node locked. Mark 2359 * as VI_DOINGINACT to avoid recursion. 2360 */ 2361 vp->v_iflag |= VI_OWEINACT; 2362 switch (func) { 2363 case VPUTX_VRELE: 2364 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2365 VI_LOCK(vp); 2366 break; 2367 case VPUTX_VPUT: 2368 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2369 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2370 LK_NOWAIT); 2371 VI_LOCK(vp); 2372 } 2373 break; 2374 case VPUTX_VUNREF: 2375 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2376 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2377 VI_LOCK(vp); 2378 } 2379 break; 2380 } 2381 if (vp->v_usecount > 0) 2382 vp->v_iflag &= ~VI_OWEINACT; 2383 if (error == 0) { 2384 if (vp->v_iflag & VI_OWEINACT) 2385 vinactive(vp, curthread); 2386 if (func != VPUTX_VUNREF) 2387 VOP_UNLOCK(vp, 0); 2388 } 2389 vdropl(vp); 2390} 2391 2392/* 2393 * Vnode put/release. 2394 * If count drops to zero, call inactive routine and return to freelist. 2395 */ 2396void 2397vrele(struct vnode *vp) 2398{ 2399 2400 vputx(vp, VPUTX_VRELE); 2401} 2402 2403/* 2404 * Release an already locked vnode. This give the same effects as 2405 * unlock+vrele(), but takes less time and avoids releasing and 2406 * re-aquiring the lock (as vrele() acquires the lock internally.) 2407 */ 2408void 2409vput(struct vnode *vp) 2410{ 2411 2412 vputx(vp, VPUTX_VPUT); 2413} 2414 2415/* 2416 * Release an exclusively locked vnode. Do not unlock the vnode lock. 2417 */ 2418void 2419vunref(struct vnode *vp) 2420{ 2421 2422 vputx(vp, VPUTX_VUNREF); 2423} 2424 2425/* 2426 * Somebody doesn't want the vnode recycled. 2427 */ 2428void 2429vhold(struct vnode *vp) 2430{ 2431 2432 VI_LOCK(vp); 2433 vholdl(vp); 2434 VI_UNLOCK(vp); 2435} 2436 2437/* 2438 * Increase the hold count and activate if this is the first reference. 2439 */ 2440void 2441vholdl(struct vnode *vp) 2442{ 2443 struct mount *mp; 2444 2445 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2446#ifdef INVARIANTS 2447 /* getnewvnode() calls v_incr_usecount() without holding interlock. */ 2448 if (vp->v_type != VNON || vp->v_data != NULL) 2449 ASSERT_VI_LOCKED(vp, "vholdl"); 2450#endif 2451 vp->v_holdcnt++; 2452 if ((vp->v_iflag & VI_FREE) == 0) 2453 return; 2454 VNASSERT(vp->v_holdcnt == 1, vp, ("vholdl: wrong hold count")); 2455 VNASSERT(vp->v_op != NULL, vp, ("vholdl: vnode already reclaimed.")); 2456 /* 2457 * Remove a vnode from the free list, mark it as in use, 2458 * and put it on the active list. 
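 * The reverse transition, from the per-mount active list back onto the
 * global free list, is performed by vdropl() once the last hold
 * reference is released.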
2459 */ 2460 mtx_lock(&vnode_free_list_mtx); 2461 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2462 freevnodes--; 2463 vp->v_iflag &= ~VI_FREE; 2464 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2465 ("Activating already active vnode")); 2466 vp->v_iflag |= VI_ACTIVE; 2467 mp = vp->v_mount; 2468 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2469 mp->mnt_activevnodelistsize++; 2470 mtx_unlock(&vnode_free_list_mtx); 2471} 2472 2473/* 2474 * Note that there is one less who cares about this vnode. 2475 * vdrop() is the opposite of vhold(). 2476 */ 2477void 2478vdrop(struct vnode *vp) 2479{ 2480 2481 VI_LOCK(vp); 2482 vdropl(vp); 2483} 2484 2485/* 2486 * Drop the hold count of the vnode. If this is the last reference to 2487 * the vnode we place it on the free list unless it has been vgone'd 2488 * (marked VI_DOOMED) in which case we will free it. 2489 * 2490 * Because the vnode vm object keeps a hold reference on the vnode if 2491 * there is at least one resident non-cached page, the vnode cannot 2492 * leave the active list without the page cleanup done. 2493 */ 2494void 2495vdropl(struct vnode *vp) 2496{ 2497 struct bufobj *bo; 2498 struct mount *mp; 2499 int active; 2500 2501 ASSERT_VI_LOCKED(vp, "vdropl"); 2502 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2503 if (vp->v_holdcnt <= 0) 2504 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2505 vp->v_holdcnt--; 2506 if (vp->v_holdcnt > 0) { 2507 VI_UNLOCK(vp); 2508 return; 2509 } 2510 if ((vp->v_iflag & VI_DOOMED) == 0) { 2511 /* 2512 * Mark a vnode as free: remove it from its active list 2513 * and put it up for recycling on the freelist. 2514 */ 2515 VNASSERT(vp->v_op != NULL, vp, 2516 ("vdropl: vnode already reclaimed.")); 2517 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2518 ("vnode already free")); 2519 VNASSERT(vp->v_holdcnt == 0, vp, 2520 ("vdropl: freeing when we shouldn't")); 2521 active = vp->v_iflag & VI_ACTIVE; 2522 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2523 vp->v_iflag &= ~VI_ACTIVE; 2524 mp = vp->v_mount; 2525 mtx_lock(&vnode_free_list_mtx); 2526 if (active) { 2527 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, 2528 v_actfreelist); 2529 mp->mnt_activevnodelistsize--; 2530 } 2531 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 2532 v_actfreelist); 2533 freevnodes++; 2534 vp->v_iflag |= VI_FREE; 2535 mtx_unlock(&vnode_free_list_mtx); 2536 } else { 2537 atomic_add_long(&free_owe_inact, 1); 2538 } 2539 VI_UNLOCK(vp); 2540 return; 2541 } 2542 /* 2543 * The vnode has been marked for destruction, so free it. 2544 * 2545 * The vnode will be returned to the zone where it will 2546 * normally remain until it is needed for another vnode. We 2547 * need to cleanup (or verify that the cleanup has already 2548 * been done) any residual data left from its current use 2549 * so as not to contaminate the freshly allocated vnode. 
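 * The assertions below check that reclamation left no residue behind:
 * no dirty or clean buffers, no pending I/O, no namecache entries and
 * no rangelock waiters may remain on a vnode being freed.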
2550 */ 2551 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2552 atomic_subtract_long(&numvnodes, 1); 2553 bo = &vp->v_bufobj; 2554 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2555 ("cleaned vnode still on the free list.")); 2556 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2557 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2558 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2559 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2560 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2561 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2562 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2563 ("clean blk trie not empty")); 2564 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2565 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2566 ("dirty blk trie not empty")); 2567 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2568 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2569 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2570 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2571 ("Dangling rangelock waiters")); 2572 VI_UNLOCK(vp); 2573#ifdef MAC 2574 mac_vnode_destroy(vp); 2575#endif 2576 if (vp->v_pollinfo != NULL) { 2577 destroy_vpollinfo(vp->v_pollinfo); 2578 vp->v_pollinfo = NULL; 2579 } 2580#ifdef INVARIANTS 2581 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 2582 vp->v_op = NULL; 2583#endif 2584 bzero(&vp->v_un, sizeof(vp->v_un)); 2585 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 2586 vp->v_iflag = 0; 2587 vp->v_vflag = 0; 2588 bo->bo_flag = 0; 2589 uma_zfree(vnode_zone, vp); 2590} 2591 2592/* 2593 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2594 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2595 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2596 * failed lock upgrade. 2597 */ 2598void 2599vinactive(struct vnode *vp, struct thread *td) 2600{ 2601 struct vm_object *obj; 2602 2603 ASSERT_VOP_ELOCKED(vp, "vinactive"); 2604 ASSERT_VI_LOCKED(vp, "vinactive"); 2605 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2606 ("vinactive: recursed on VI_DOINGINACT")); 2607 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2608 vp->v_iflag |= VI_DOINGINACT; 2609 vp->v_iflag &= ~VI_OWEINACT; 2610 VI_UNLOCK(vp); 2611 /* 2612 * Before moving off the active list, we must be sure that any 2613 * modified pages are converted into the vnode's dirty 2614 * buffers, since these will no longer be checked once the 2615 * vnode is on the inactive list. 2616 * 2617 * The write-out of the dirty pages is asynchronous. At the 2618 * point that VOP_INACTIVE() is called, there could still be 2619 * pending I/O and dirty pages in the object. 2620 */ 2621 obj = vp->v_object; 2622 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 2623 VM_OBJECT_WLOCK(obj); 2624 vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC); 2625 VM_OBJECT_WUNLOCK(obj); 2626 } 2627 VOP_INACTIVE(vp, td); 2628 VI_LOCK(vp); 2629 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2630 ("vinactive: lost VI_DOINGINACT")); 2631 vp->v_iflag &= ~VI_DOINGINACT; 2632} 2633 2634/* 2635 * Remove any vnodes in the vnode table belonging to mount point mp. 2636 * 2637 * If FORCECLOSE is not specified, there should not be any active ones, 2638 * return error if any are found (nb: this is a user error, not a 2639 * system error). 
If FORCECLOSE is specified, detach any active vnodes 2640 * that are found. 2641 * 2642 * If WRITECLOSE is set, only flush out regular file vnodes open for 2643 * writing. 2644 * 2645 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 2646 * 2647 * `rootrefs' specifies the base reference count for the root vnode 2648 * of this filesystem. The root vnode is considered busy if its 2649 * v_usecount exceeds this value. On a successful return, vflush(, td) 2650 * will call vrele() on the root vnode exactly rootrefs times. 2651 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2652 * be zero. 2653 */ 2654#ifdef DIAGNOSTIC 2655static int busyprt = 0; /* print out busy vnodes */ 2656SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 2657#endif 2658 2659int 2660vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 2661{ 2662 struct vnode *vp, *mvp, *rootvp = NULL; 2663 struct vattr vattr; 2664 int busy = 0, error; 2665 2666 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 2667 rootrefs, flags); 2668 if (rootrefs > 0) { 2669 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2670 ("vflush: bad args")); 2671 /* 2672 * Get the filesystem root vnode. We can vput() it 2673 * immediately, since with rootrefs > 0, it won't go away. 2674 */ 2675 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 2676 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 2677 __func__, error); 2678 return (error); 2679 } 2680 vput(rootvp); 2681 } 2682loop: 2683 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 2684 vholdl(vp); 2685 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 2686 if (error) { 2687 vdrop(vp); 2688 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2689 goto loop; 2690 } 2691 /* 2692 * Skip over a vnodes marked VV_SYSTEM. 2693 */ 2694 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2695 VOP_UNLOCK(vp, 0); 2696 vdrop(vp); 2697 continue; 2698 } 2699 /* 2700 * If WRITECLOSE is set, flush out unlinked but still open 2701 * files (even if open only for reading) and regular file 2702 * vnodes open for writing. 2703 */ 2704 if (flags & WRITECLOSE) { 2705 if (vp->v_object != NULL) { 2706 VM_OBJECT_WLOCK(vp->v_object); 2707 vm_object_page_clean(vp->v_object, 0, 0, 0); 2708 VM_OBJECT_WUNLOCK(vp->v_object); 2709 } 2710 error = VOP_FSYNC(vp, MNT_WAIT, td); 2711 if (error != 0) { 2712 VOP_UNLOCK(vp, 0); 2713 vdrop(vp); 2714 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2715 return (error); 2716 } 2717 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 2718 VI_LOCK(vp); 2719 2720 if ((vp->v_type == VNON || 2721 (error == 0 && vattr.va_nlink > 0)) && 2722 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2723 VOP_UNLOCK(vp, 0); 2724 vdropl(vp); 2725 continue; 2726 } 2727 } else 2728 VI_LOCK(vp); 2729 /* 2730 * With v_usecount == 0, all we need to do is clear out the 2731 * vnode data structures and we are done. 2732 * 2733 * If FORCECLOSE is set, forcibly close the vnode. 2734 */ 2735 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2736 vgonel(vp); 2737 } else { 2738 busy++; 2739#ifdef DIAGNOSTIC 2740 if (busyprt) 2741 vprint("vflush: busy vnode", vp); 2742#endif 2743 } 2744 VOP_UNLOCK(vp, 0); 2745 vdropl(vp); 2746 } 2747 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2748 /* 2749 * If just the root vnode is busy, and if its refcount 2750 * is equal to `rootrefs', then go ahead and kill it. 
2751 */ 2752 VI_LOCK(rootvp); 2753 KASSERT(busy > 0, ("vflush: not busy")); 2754 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2755 ("vflush: usecount %d < rootrefs %d", 2756 rootvp->v_usecount, rootrefs)); 2757 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2758 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 2759 vgone(rootvp); 2760 VOP_UNLOCK(rootvp, 0); 2761 busy = 0; 2762 } else 2763 VI_UNLOCK(rootvp); 2764 } 2765 if (busy) { 2766 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 2767 busy); 2768 return (EBUSY); 2769 } 2770 for (; rootrefs > 0; rootrefs--) 2771 vrele(rootvp); 2772 return (0); 2773} 2774 2775/* 2776 * Recycle an unused vnode to the front of the free list. 2777 */ 2778int 2779vrecycle(struct vnode *vp) 2780{ 2781 int recycled; 2782 2783 ASSERT_VOP_ELOCKED(vp, "vrecycle"); 2784 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2785 recycled = 0; 2786 VI_LOCK(vp); 2787 if (vp->v_usecount == 0) { 2788 recycled = 1; 2789 vgonel(vp); 2790 } 2791 VI_UNLOCK(vp); 2792 return (recycled); 2793} 2794 2795/* 2796 * Eliminate all activity associated with a vnode 2797 * in preparation for reuse. 2798 */ 2799void 2800vgone(struct vnode *vp) 2801{ 2802 VI_LOCK(vp); 2803 vgonel(vp); 2804 VI_UNLOCK(vp); 2805} 2806 2807static void 2808notify_lowervp_vfs_dummy(struct mount *mp __unused, 2809 struct vnode *lowervp __unused) 2810{ 2811} 2812 2813/* 2814 * Notify upper mounts about reclaimed or unlinked vnode. 2815 */ 2816void 2817vfs_notify_upper(struct vnode *vp, int event) 2818{ 2819 static struct vfsops vgonel_vfsops = { 2820 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 2821 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 2822 }; 2823 struct mount *mp, *ump, *mmp; 2824 2825 mp = vp->v_mount; 2826 if (mp == NULL) 2827 return; 2828 2829 MNT_ILOCK(mp); 2830 if (TAILQ_EMPTY(&mp->mnt_uppers)) 2831 goto unlock; 2832 MNT_IUNLOCK(mp); 2833 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 2834 mmp->mnt_op = &vgonel_vfsops; 2835 mmp->mnt_kern_flag |= MNTK_MARKER; 2836 MNT_ILOCK(mp); 2837 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 2838 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 2839 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 2840 ump = TAILQ_NEXT(ump, mnt_upper_link); 2841 continue; 2842 } 2843 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 2844 MNT_IUNLOCK(mp); 2845 switch (event) { 2846 case VFS_NOTIFY_UPPER_RECLAIM: 2847 VFS_RECLAIM_LOWERVP(ump, vp); 2848 break; 2849 case VFS_NOTIFY_UPPER_UNLINK: 2850 VFS_UNLINK_LOWERVP(ump, vp); 2851 break; 2852 default: 2853 KASSERT(0, ("invalid event %d", event)); 2854 break; 2855 } 2856 MNT_ILOCK(mp); 2857 ump = TAILQ_NEXT(mmp, mnt_upper_link); 2858 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 2859 } 2860 free(mmp, M_TEMP); 2861 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 2862 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 2863 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 2864 wakeup(&mp->mnt_uppers); 2865 } 2866unlock: 2867 MNT_IUNLOCK(mp); 2868} 2869 2870/* 2871 * vgone, with the vp interlock held. 2872 */ 2873static void 2874vgonel(struct vnode *vp) 2875{ 2876 struct thread *td; 2877 int oweinact; 2878 int active; 2879 struct mount *mp; 2880 2881 ASSERT_VOP_ELOCKED(vp, "vgonel"); 2882 ASSERT_VI_LOCKED(vp, "vgonel"); 2883 VNASSERT(vp->v_holdcnt, vp, 2884 ("vgonel: vp %p has no reference.", vp)); 2885 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2886 td = curthread; 2887 2888 /* 2889 * Don't vgonel if we're already doomed. 
2890 */ 2891 if (vp->v_iflag & VI_DOOMED) 2892 return; 2893 vp->v_iflag |= VI_DOOMED; 2894 2895 /* 2896 * Check to see if the vnode is in use. If so, we have to call 2897 * VOP_CLOSE() and VOP_INACTIVE(). 2898 */ 2899 active = vp->v_usecount; 2900 oweinact = (vp->v_iflag & VI_OWEINACT); 2901 VI_UNLOCK(vp); 2902 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 2903 2904 /* 2905 * If purging an active vnode, it must be closed and 2906 * deactivated before being reclaimed. 2907 */ 2908 if (active) 2909 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2910 if (oweinact || active) { 2911 VI_LOCK(vp); 2912 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2913 vinactive(vp, td); 2914 VI_UNLOCK(vp); 2915 } 2916 if (vp->v_type == VSOCK) 2917 vfs_unp_reclaim(vp); 2918 2919 /* 2920 * Clean out any buffers associated with the vnode. 2921 * If the flush fails, just toss the buffers. 2922 */ 2923 mp = NULL; 2924 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2925 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2926 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 2927 while (vinvalbuf(vp, 0, 0, 0) != 0) 2928 ; 2929 } 2930 2931 BO_LOCK(&vp->v_bufobj); 2932 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 2933 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 2934 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 2935 vp->v_bufobj.bo_clean.bv_cnt == 0, 2936 ("vp %p bufobj not invalidated", vp));
222 * Items that need to be processed soon are placed in this queue: 223 * 224 * syncer_workitem_pending[syncer_delayno] 225 * 226 * A delay of fifteen seconds is done by placing the request fifteen 227 * entries later in the queue: 228 * 229 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 230 * 231 */ 232static int syncer_delayno; 233static long syncer_mask; 234LIST_HEAD(synclist, bufobj); 235static struct synclist *syncer_workitem_pending; 236/* 237 * The sync_mtx protects: 238 * bo->bo_synclist 239 * sync_vnode_count 240 * syncer_delayno 241 * syncer_state 242 * syncer_workitem_pending 243 * syncer_worklist_len 244 * rushjob 245 */ 246static struct mtx sync_mtx; 247static struct cv sync_wakeup; 248 249#define SYNCER_MAXDELAY 32 250static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 251static int syncdelay = 30; /* max time to delay syncing data */ 252static int filedelay = 30; /* time to delay syncing files */ 253SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 254 "Time to delay syncing files (in seconds)"); 255static int dirdelay = 29; /* time to delay syncing directories */ 256SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 257 "Time to delay syncing directories (in seconds)"); 258static int metadelay = 28; /* time to delay syncing metadata */ 259SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 260 "Time to delay syncing metadata (in seconds)"); 261static int rushjob; /* number of slots to run ASAP */ 262static int stat_rush_requests; /* number of times I/O speeded up */ 263SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 264 "Number of times I/O speeded up (rush requests)"); 265 266/* 267 * When shutting down the syncer, run it at four times normal speed. 268 */ 269#define SYNCER_SHUTDOWN_SPEEDUP 4 270static int sync_vnode_count; 271static int syncer_worklist_len; 272static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 273 syncer_state; 274 275/* 276 * Number of vnodes we want to exist at any one time. This is mostly used 277 * to size hash tables in vnode-related code. It is normally not used in 278 * getnewvnode(), as wantfreevnodes is normally nonzero.) 279 * 280 * XXX desiredvnodes is historical cruft and should not exist. 281 */ 282int desiredvnodes; 283 284static int 285sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS) 286{ 287 int error, old_desiredvnodes; 288 289 old_desiredvnodes = desiredvnodes; 290 if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0) 291 return (error); 292 if (old_desiredvnodes != desiredvnodes) { 293 vfs_hash_changesize(desiredvnodes); 294 cache_changesize(desiredvnodes); 295 } 296 return (0); 297} 298 299SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 300 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0, 301 sysctl_update_desiredvnodes, "I", "Maximum number of vnodes"); 302SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 303 &wantfreevnodes, 0, "Minimum number of vnodes (legacy)"); 304static int vnlru_nowhere; 305SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 306 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 307 308/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 309static int vnsz2log; 310 311/* 312 * Support for the bufobj clean & dirty pctrie. 
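 * buf_trie_alloc() below uses M_NOWAIT and therefore must not be
 * allowed to fail; vntblinit() preallocates one trie node per buf
 * from buf_trie_zone so that inserts done by reassignbuf() callers
 * always succeed.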
313 */ 314static void * 315buf_trie_alloc(struct pctrie *ptree) 316{ 317 318 return uma_zalloc(buf_trie_zone, M_NOWAIT); 319} 320 321static void 322buf_trie_free(struct pctrie *ptree, void *node) 323{ 324 325 uma_zfree(buf_trie_zone, node); 326} 327PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); 328 329/* 330 * Initialize the vnode management data structures. 331 * 332 * Reevaluate the following cap on the number of vnodes after the physical 333 * memory size exceeds 512GB. In the limit, as the physical memory size 334 * grows, the ratio of physical pages to vnodes approaches sixteen to one. 335 */ 336#ifndef MAXVNODES_MAX 337#define MAXVNODES_MAX (512 * (1024 * 1024 * 1024 / (int)PAGE_SIZE / 16)) 338#endif 339 340/* 341 * Initialize a vnode as it first enters the zone. 342 */ 343static int 344vnode_init(void *mem, int size, int flags) 345{ 346 struct vnode *vp; 347 struct bufobj *bo; 348 349 vp = mem; 350 bzero(vp, size); 351 /* 352 * Setup locks. 353 */ 354 vp->v_vnlock = &vp->v_lock; 355 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 356 /* 357 * By default, don't allow shared locks unless filesystems opt-in. 358 */ 359 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 360 LK_NOSHARE | LK_IS_VNODE); 361 /* 362 * Initialize bufobj. 363 */ 364 bo = &vp->v_bufobj; 365 bo->__bo_vnode = vp; 366 rw_init(BO_LOCKPTR(bo), "bufobj interlock"); 367 bo->bo_private = vp; 368 TAILQ_INIT(&bo->bo_clean.bv_hd); 369 TAILQ_INIT(&bo->bo_dirty.bv_hd); 370 /* 371 * Initialize namecache. 372 */ 373 LIST_INIT(&vp->v_cache_src); 374 TAILQ_INIT(&vp->v_cache_dst); 375 /* 376 * Initialize rangelocks. 377 */ 378 rangelock_init(&vp->v_rl); 379 return (0); 380} 381 382/* 383 * Free a vnode when it is cleared from the zone. 384 */ 385static void 386vnode_fini(void *mem, int size) 387{ 388 struct vnode *vp; 389 struct bufobj *bo; 390 391 vp = mem; 392 rangelock_destroy(&vp->v_rl); 393 lockdestroy(vp->v_vnlock); 394 mtx_destroy(&vp->v_interlock); 395 bo = &vp->v_bufobj; 396 rw_destroy(BO_LOCKPTR(bo)); 397} 398 399static void 400vntblinit(void *dummy __unused) 401{ 402 u_int i; 403 int physvnodes, virtvnodes; 404 405 /* 406 * Desiredvnodes is a function of the physical memory size and the 407 * kernel's heap size. Generally speaking, it scales with the 408 * physical memory size. The ratio of desiredvnodes to physical pages 409 * is one to four until desiredvnodes exceeds 98,304. Thereafter, the 410 * marginal ratio of desiredvnodes to physical pages is one to 411 * sixteen. However, desiredvnodes is limited by the kernel's heap 412 * size. The memory required by desiredvnodes vnodes and vm objects 413 * may not exceed one seventh of the kernel's heap size. 
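 * As a worked example (assuming 4KB pages), a machine with 8GB of RAM
 * has roughly 2,097,152 physical pages, so the formula below gives
 * maxproc + 2,097,152 / 16 + 3 * 393,216 / 16, i.e. about
 * maxproc + 131,072 + 73,728 vnodes from the physical-memory term,
 * before the kernel-heap (virtvnodes) limit is applied.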
414 */ 415 physvnodes = maxproc + cnt.v_page_count / 16 + 3 * min(98304 * 4, 416 cnt.v_page_count) / 16; 417 virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) + 418 sizeof(struct vnode))); 419 desiredvnodes = min(physvnodes, virtvnodes); 420 if (desiredvnodes > MAXVNODES_MAX) { 421 if (bootverbose) 422 printf("Reducing kern.maxvnodes %d -> %d\n", 423 desiredvnodes, MAXVNODES_MAX); 424 desiredvnodes = MAXVNODES_MAX; 425 } 426 wantfreevnodes = desiredvnodes / 4; 427 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 428 TAILQ_INIT(&vnode_free_list); 429 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 430 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 431 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 432 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 433 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 434 /* 435 * Preallocate enough nodes to support one-per buf so that 436 * we can not fail an insert. reassignbuf() callers can not 437 * tolerate the insertion failure. 438 */ 439 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 440 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 441 UMA_ZONE_NOFREE | UMA_ZONE_VM); 442 uma_prealloc(buf_trie_zone, nbuf); 443 /* 444 * Initialize the filesystem syncer. 445 */ 446 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 447 &syncer_mask); 448 syncer_maxdelay = syncer_mask + 1; 449 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 450 cv_init(&sync_wakeup, "syncer"); 451 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 452 vnsz2log++; 453 vnsz2log--; 454} 455SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 456 457 458/* 459 * Mark a mount point as busy. Used to synchronize access and to delay 460 * unmounting. Eventually, mountlist_mtx is not released on failure. 461 * 462 * vfs_busy() is a custom lock, it can block the caller. 463 * vfs_busy() only sleeps if the unmount is active on the mount point. 464 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 465 * vnode belonging to mp. 466 * 467 * Lookup uses vfs_busy() to traverse mount points. 468 * root fs var fs 469 * / vnode lock A / vnode lock (/var) D 470 * /var vnode lock B /log vnode lock(/var/log) E 471 * vfs_busy lock C vfs_busy lock F 472 * 473 * Within each file system, the lock order is C->A->B and F->D->E. 474 * 475 * When traversing across mounts, the system follows that lock order: 476 * 477 * C->A->B 478 * | 479 * +->F->D->E 480 * 481 * The lookup() process for namei("/var") illustrates the process: 482 * VOP_LOOKUP() obtains B while A is held 483 * vfs_busy() obtains a shared lock on F while A and B are held 484 * vput() releases lock on B 485 * vput() releases lock on A 486 * VFS_ROOT() obtains lock on D while shared lock on F is held 487 * vfs_unbusy() releases shared lock on F 488 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 489 * Attempt to lock A (instead of vp_crossmp) while D is held would 490 * violate the global order, causing deadlocks. 491 * 492 * dounmount() locks B while F is drained. 493 */ 494int 495vfs_busy(struct mount *mp, int flags) 496{ 497 498 MPASS((flags & ~MBF_MASK) == 0); 499 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 500 501 MNT_ILOCK(mp); 502 MNT_REF(mp); 503 /* 504 * If mount point is currenly being unmounted, sleep until the 505 * mount point fate is decided. 
If thread doing the unmounting fails, 506 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 507 * that this mount point has survived the unmount attempt and vfs_busy 508 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 509 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 510 * about to be really destroyed. vfs_busy needs to release its 511 * reference on the mount point in this case and return with ENOENT, 512 * telling the caller that mount mount it tried to busy is no longer 513 * valid. 514 */ 515 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 516 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 517 MNT_REL(mp); 518 MNT_IUNLOCK(mp); 519 CTR1(KTR_VFS, "%s: failed busying before sleeping", 520 __func__); 521 return (ENOENT); 522 } 523 if (flags & MBF_MNTLSTLOCK) 524 mtx_unlock(&mountlist_mtx); 525 mp->mnt_kern_flag |= MNTK_MWAIT; 526 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 527 if (flags & MBF_MNTLSTLOCK) 528 mtx_lock(&mountlist_mtx); 529 MNT_ILOCK(mp); 530 } 531 if (flags & MBF_MNTLSTLOCK) 532 mtx_unlock(&mountlist_mtx); 533 mp->mnt_lockref++; 534 MNT_IUNLOCK(mp); 535 return (0); 536} 537 538/* 539 * Free a busy filesystem. 540 */ 541void 542vfs_unbusy(struct mount *mp) 543{ 544 545 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 546 MNT_ILOCK(mp); 547 MNT_REL(mp); 548 KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref")); 549 mp->mnt_lockref--; 550 if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 551 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 552 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 553 mp->mnt_kern_flag &= ~MNTK_DRAINING; 554 wakeup(&mp->mnt_lockref); 555 } 556 MNT_IUNLOCK(mp); 557} 558 559/* 560 * Lookup a mount point by filesystem identifier. 561 */ 562struct mount * 563vfs_getvfs(fsid_t *fsid) 564{ 565 struct mount *mp; 566 567 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 568 mtx_lock(&mountlist_mtx); 569 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 570 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 571 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 572 vfs_ref(mp); 573 mtx_unlock(&mountlist_mtx); 574 return (mp); 575 } 576 } 577 mtx_unlock(&mountlist_mtx); 578 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 579 return ((struct mount *) 0); 580} 581 582/* 583 * Lookup a mount point by filesystem identifier, busying it before 584 * returning. 585 * 586 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 587 * cache for popular filesystem identifiers. The cache is lockess, using 588 * the fact that struct mount's are never freed. In worst case we may 589 * get pointer to unmounted or even different filesystem, so we have to 590 * check what we got, and go slow way if so. 
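 * The fast path below therefore re-checks the fsid after vfs_busy()
 * succeeds, since the cached pointer may by then refer to an unmounted
 * or different filesystem; on any mismatch, or if vfs_busy() fails, it
 * falls back to the locked mountlist walk.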
591 */ 592struct mount * 593vfs_busyfs(fsid_t *fsid) 594{ 595#define FSID_CACHE_SIZE 256 596 typedef struct mount * volatile vmp_t; 597 static vmp_t cache[FSID_CACHE_SIZE]; 598 struct mount *mp; 599 int error; 600 uint32_t hash; 601 602 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 603 hash = fsid->val[0] ^ fsid->val[1]; 604 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 605 mp = cache[hash]; 606 if (mp == NULL || 607 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || 608 mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) 609 goto slow; 610 if (vfs_busy(mp, 0) != 0) { 611 cache[hash] = NULL; 612 goto slow; 613 } 614 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 615 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) 616 return (mp); 617 else 618 vfs_unbusy(mp); 619 620slow: 621 mtx_lock(&mountlist_mtx); 622 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 623 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 624 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 625 error = vfs_busy(mp, MBF_MNTLSTLOCK); 626 if (error) { 627 cache[hash] = NULL; 628 mtx_unlock(&mountlist_mtx); 629 return (NULL); 630 } 631 cache[hash] = mp; 632 return (mp); 633 } 634 } 635 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 636 mtx_unlock(&mountlist_mtx); 637 return ((struct mount *) 0); 638} 639 640/* 641 * Check if a user can access privileged mount options. 642 */ 643int 644vfs_suser(struct mount *mp, struct thread *td) 645{ 646 int error; 647 648 /* 649 * If the thread is jailed, but this is not a jail-friendly file 650 * system, deny immediately. 651 */ 652 if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred)) 653 return (EPERM); 654 655 /* 656 * If the file system was mounted outside the jail of the calling 657 * thread, deny immediately. 658 */ 659 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 660 return (EPERM); 661 662 /* 663 * If file system supports delegated administration, we don't check 664 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 665 * by the file system itself. 666 * If this is not the user that did original mount, we check for 667 * the PRIV_VFS_MOUNT_OWNER privilege. 668 */ 669 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 670 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 671 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 672 return (error); 673 } 674 return (0); 675} 676 677/* 678 * Get a new unique fsid. Try to make its val[0] unique, since this value 679 * will be used to create fake device numbers for stat(). Also try (but 680 * not so hard) make its val[0] unique mod 2^16, since some emulators only 681 * support 16-bit device numbers. We end up with unique val[0]'s for the 682 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 683 * 684 * Keep in mind that several mounts may be running in parallel. Starting 685 * the search one past where the previous search terminated is both a 686 * micro-optimization and a defense against returning the same fsid to 687 * different mounts. 
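 * In the code below val[1] is simply the filesystem type number, while
 * val[0] is built with makedev() from major number 255 and a minor
 * that packs the low byte of the type number together with the two
 * bytes of the rolling 16-bit mntid_base counter.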
688 */ 689void 690vfs_getnewfsid(struct mount *mp) 691{ 692 static uint16_t mntid_base; 693 struct mount *nmp; 694 fsid_t tfsid; 695 int mtype; 696 697 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 698 mtx_lock(&mntid_mtx); 699 mtype = mp->mnt_vfc->vfc_typenum; 700 tfsid.val[1] = mtype; 701 mtype = (mtype & 0xFF) << 24; 702 for (;;) { 703 tfsid.val[0] = makedev(255, 704 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 705 mntid_base++; 706 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 707 break; 708 vfs_rel(nmp); 709 } 710 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 711 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 712 mtx_unlock(&mntid_mtx); 713} 714 715/* 716 * Knob to control the precision of file timestamps: 717 * 718 * 0 = seconds only; nanoseconds zeroed. 719 * 1 = seconds and nanoseconds, accurate within 1/HZ. 720 * 2 = seconds and nanoseconds, truncated to microseconds. 721 * >=3 = seconds and nanoseconds, maximum precision. 722 */ 723enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 724 725static int timestamp_precision = TSP_USEC; 726SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 727 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 728 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to ms, " 729 "3+: sec + ns (max. precision))"); 730 731/* 732 * Get a current timestamp. 733 */ 734void 735vfs_timestamp(struct timespec *tsp) 736{ 737 struct timeval tv; 738 739 switch (timestamp_precision) { 740 case TSP_SEC: 741 tsp->tv_sec = time_second; 742 tsp->tv_nsec = 0; 743 break; 744 case TSP_HZ: 745 getnanotime(tsp); 746 break; 747 case TSP_USEC: 748 microtime(&tv); 749 TIMEVAL_TO_TIMESPEC(&tv, tsp); 750 break; 751 case TSP_NSEC: 752 default: 753 nanotime(tsp); 754 break; 755 } 756} 757 758/* 759 * Set vnode attributes to VNOVAL 760 */ 761void 762vattr_null(struct vattr *vap) 763{ 764 765 vap->va_type = VNON; 766 vap->va_size = VNOVAL; 767 vap->va_bytes = VNOVAL; 768 vap->va_mode = VNOVAL; 769 vap->va_nlink = VNOVAL; 770 vap->va_uid = VNOVAL; 771 vap->va_gid = VNOVAL; 772 vap->va_fsid = VNOVAL; 773 vap->va_fileid = VNOVAL; 774 vap->va_blocksize = VNOVAL; 775 vap->va_rdev = VNOVAL; 776 vap->va_atime.tv_sec = VNOVAL; 777 vap->va_atime.tv_nsec = VNOVAL; 778 vap->va_mtime.tv_sec = VNOVAL; 779 vap->va_mtime.tv_nsec = VNOVAL; 780 vap->va_ctime.tv_sec = VNOVAL; 781 vap->va_ctime.tv_nsec = VNOVAL; 782 vap->va_birthtime.tv_sec = VNOVAL; 783 vap->va_birthtime.tv_nsec = VNOVAL; 784 vap->va_flags = VNOVAL; 785 vap->va_gen = VNOVAL; 786 vap->va_vaflags = 0; 787} 788 789/* 790 * This routine is called when we have too many vnodes. It attempts 791 * to free <count> vnodes and will potentially free vnodes that still 792 * have VM backing store (VM backing store is typically the cause 793 * of a vnode blowout so we want to do this). Therefore, this operation 794 * is not considered cheap. 795 * 796 * A number of conditions may prevent a vnode from being reclaimed. 797 * the buffer cache may have references on the vnode, a directory 798 * vnode may still have references due to the namei cache representing 799 * underlying files, or the vnode may be in active use. It is not 800 * desireable to reuse such vnodes. These conditions may cause the 801 * number of vnodes to reach some minimum value regardless of what 802 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 
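 * The trigger computed below, roughly two resident pages per allowed
 * vnode, is what keeps vnodes backed by many resident pages from being
 * recycled by this routine.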
803 */ 804static int 805vlrureclaim(struct mount *mp) 806{ 807 struct vnode *vp; 808 int done; 809 int trigger; 810 int usevnodes; 811 int count; 812 813 /* 814 * Calculate the trigger point, don't allow user 815 * screwups to blow us up. This prevents us from 816 * recycling vnodes with lots of resident pages. We 817 * aren't trying to free memory, we are trying to 818 * free vnodes. 819 */ 820 usevnodes = desiredvnodes; 821 if (usevnodes <= 0) 822 usevnodes = 1; 823 trigger = cnt.v_page_count * 2 / usevnodes; 824 done = 0; 825 vn_start_write(NULL, &mp, V_WAIT); 826 MNT_ILOCK(mp); 827 count = mp->mnt_nvnodelistsize / 10 + 1; 828 while (count != 0) { 829 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 830 while (vp != NULL && vp->v_type == VMARKER) 831 vp = TAILQ_NEXT(vp, v_nmntvnodes); 832 if (vp == NULL) 833 break; 834 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 835 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 836 --count; 837 if (!VI_TRYLOCK(vp)) 838 goto next_iter; 839 /* 840 * If it's been deconstructed already, it's still 841 * referenced, or it exceeds the trigger, skip it. 842 */ 843 if (vp->v_usecount || 844 (!vlru_allow_cache_src && 845 !LIST_EMPTY(&(vp)->v_cache_src)) || 846 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && 847 vp->v_object->resident_page_count > trigger)) { 848 VI_UNLOCK(vp); 849 goto next_iter; 850 } 851 MNT_IUNLOCK(mp); 852 vholdl(vp); 853 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { 854 vdrop(vp); 855 goto next_iter_mntunlocked; 856 } 857 VI_LOCK(vp); 858 /* 859 * v_usecount may have been bumped after VOP_LOCK() dropped 860 * the vnode interlock and before it was locked again. 861 * 862 * It is not necessary to recheck VI_DOOMED because it can 863 * only be set by another thread that holds both the vnode 864 * lock and vnode interlock. If another thread has the 865 * vnode lock before we get to VOP_LOCK() and obtains the 866 * vnode interlock after VOP_LOCK() drops the vnode 867 * interlock, the other thread will be unable to drop the 868 * vnode lock before our VOP_LOCK() call fails. 869 */ 870 if (vp->v_usecount || 871 (!vlru_allow_cache_src && 872 !LIST_EMPTY(&(vp)->v_cache_src)) || 873 (vp->v_object != NULL && 874 vp->v_object->resident_page_count > trigger)) { 875 VOP_UNLOCK(vp, LK_INTERLOCK); 876 vdrop(vp); 877 goto next_iter_mntunlocked; 878 } 879 KASSERT((vp->v_iflag & VI_DOOMED) == 0, 880 ("VI_DOOMED unexpectedly detected in vlrureclaim()")); 881 atomic_add_long(&recycles_count, 1); 882 vgonel(vp); 883 VOP_UNLOCK(vp, 0); 884 vdropl(vp); 885 done++; 886next_iter_mntunlocked: 887 if (!should_yield()) 888 goto relock_mnt; 889 goto yield; 890next_iter: 891 if (!should_yield()) 892 continue; 893 MNT_IUNLOCK(mp); 894yield: 895 kern_yield(PRI_USER); 896relock_mnt: 897 MNT_ILOCK(mp); 898 } 899 MNT_IUNLOCK(mp); 900 vn_finished_write(mp); 901 return done; 902} 903 904/* 905 * Attempt to keep the free list at wantfreevnodes length. 906 */ 907static void 908vnlru_free(int count) 909{ 910 struct vnode *vp; 911 912 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 913 for (; count > 0; count--) { 914 vp = TAILQ_FIRST(&vnode_free_list); 915 /* 916 * The list can be modified while the free_list_mtx 917 * has been dropped and vp could be NULL here. 
918 */ 919 if (!vp) 920 break; 921 VNASSERT(vp->v_op != NULL, vp, 922 ("vnlru_free: vnode already reclaimed.")); 923 KASSERT((vp->v_iflag & VI_FREE) != 0, 924 ("Removing vnode not on freelist")); 925 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 926 ("Mangling active vnode")); 927 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 928 /* 929 * Don't recycle if we can't get the interlock. 930 */ 931 if (!VI_TRYLOCK(vp)) { 932 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 933 continue; 934 } 935 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, 936 vp, ("vp inconsistent on freelist")); 937 938 /* 939 * The clear of VI_FREE prevents activation of the 940 * vnode. There is no sense in putting the vnode on 941 * the mount point active list, only to remove it 942 * later during recycling. Inline the relevant part 943 * of vholdl(), to avoid triggering assertions or 944 * activating. 945 */ 946 freevnodes--; 947 vp->v_iflag &= ~VI_FREE; 948 vp->v_holdcnt++; 949 950 mtx_unlock(&vnode_free_list_mtx); 951 VI_UNLOCK(vp); 952 vtryrecycle(vp); 953 /* 954 * If the recycled succeeded this vdrop will actually free 955 * the vnode. If not it will simply place it back on 956 * the free list. 957 */ 958 vdrop(vp); 959 mtx_lock(&vnode_free_list_mtx); 960 } 961} 962/* 963 * Attempt to recycle vnodes in a context that is always safe to block. 964 * Calling vlrurecycle() from the bowels of filesystem code has some 965 * interesting deadlock problems. 966 */ 967static struct proc *vnlruproc; 968static int vnlruproc_sig; 969 970static void 971vnlru_proc(void) 972{ 973 struct mount *mp, *nmp; 974 int done; 975 struct proc *p = vnlruproc; 976 977 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p, 978 SHUTDOWN_PRI_FIRST); 979 980 for (;;) { 981 kproc_suspend_check(p); 982 mtx_lock(&vnode_free_list_mtx); 983 if (freevnodes > wantfreevnodes) 984 vnlru_free(freevnodes - wantfreevnodes); 985 if (numvnodes <= desiredvnodes * 9 / 10) { 986 vnlruproc_sig = 0; 987 wakeup(&vnlruproc_sig); 988 msleep(vnlruproc, &vnode_free_list_mtx, 989 PVFS|PDROP, "vlruwt", hz); 990 continue; 991 } 992 mtx_unlock(&vnode_free_list_mtx); 993 done = 0; 994 mtx_lock(&mountlist_mtx); 995 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 996 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { 997 nmp = TAILQ_NEXT(mp, mnt_list); 998 continue; 999 } 1000 done += vlrureclaim(mp); 1001 mtx_lock(&mountlist_mtx); 1002 nmp = TAILQ_NEXT(mp, mnt_list); 1003 vfs_unbusy(mp); 1004 } 1005 mtx_unlock(&mountlist_mtx); 1006 if (done == 0) { 1007#if 0 1008 /* These messages are temporary debugging aids */ 1009 if (vnlru_nowhere < 5) 1010 printf("vnlru process getting nowhere..\n"); 1011 else if (vnlru_nowhere == 5) 1012 printf("vnlru process messages stopped.\n"); 1013#endif 1014 vnlru_nowhere++; 1015 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1016 } else 1017 kern_yield(PRI_USER); 1018 } 1019} 1020 1021static struct kproc_desc vnlru_kp = { 1022 "vnlru", 1023 vnlru_proc, 1024 &vnlruproc 1025}; 1026SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1027 &vnlru_kp); 1028 1029/* 1030 * Routines having to do with the management of the vnode table. 1031 */ 1032 1033/* 1034 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1035 * before we actually vgone(). This function must be called with the vnode 1036 * held to prevent the vnode from being returned to the free list midway 1037 * through vgone(). 
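 * If the vnode lock is already held, the filesystem is suspending, or
 * a new reference has appeared, the routine backs off and returns
 * EWOULDBLOCK or EBUSY; the caller is expected to simply vdrop() the
 * vnode afterwards, as vnlru_free() does.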
1038 */ 1039static int 1040vtryrecycle(struct vnode *vp) 1041{ 1042 struct mount *vnmp; 1043 1044 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1045 VNASSERT(vp->v_holdcnt, vp, 1046 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1047 /* 1048 * This vnode may found and locked via some other list, if so we 1049 * can't recycle it yet. 1050 */ 1051 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1052 CTR2(KTR_VFS, 1053 "%s: impossible to recycle, vp %p lock is already held", 1054 __func__, vp); 1055 return (EWOULDBLOCK); 1056 } 1057 /* 1058 * Don't recycle if its filesystem is being suspended. 1059 */ 1060 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1061 VOP_UNLOCK(vp, 0); 1062 CTR2(KTR_VFS, 1063 "%s: impossible to recycle, cannot start the write for %p", 1064 __func__, vp); 1065 return (EBUSY); 1066 } 1067 /* 1068 * If we got this far, we need to acquire the interlock and see if 1069 * anyone picked up this vnode from another list. If not, we will 1070 * mark it with DOOMED via vgonel() so that anyone who does find it 1071 * will skip over it. 1072 */ 1073 VI_LOCK(vp); 1074 if (vp->v_usecount) { 1075 VOP_UNLOCK(vp, LK_INTERLOCK); 1076 vn_finished_write(vnmp); 1077 CTR2(KTR_VFS, 1078 "%s: impossible to recycle, %p is already referenced", 1079 __func__, vp); 1080 return (EBUSY); 1081 } 1082 if ((vp->v_iflag & VI_DOOMED) == 0) { 1083 atomic_add_long(&recycles_count, 1); 1084 vgonel(vp); 1085 } 1086 VOP_UNLOCK(vp, LK_INTERLOCK); 1087 vn_finished_write(vnmp); 1088 return (0); 1089} 1090 1091/* 1092 * Wait for available vnodes. 1093 */ 1094static int 1095getnewvnode_wait(int suspended) 1096{ 1097 1098 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1099 if (numvnodes > desiredvnodes) { 1100 if (suspended) { 1101 /* 1102 * File system is beeing suspended, we cannot risk a 1103 * deadlock here, so allocate new vnode anyway. 1104 */ 1105 if (freevnodes > wantfreevnodes) 1106 vnlru_free(freevnodes - wantfreevnodes); 1107 return (0); 1108 } 1109 if (vnlruproc_sig == 0) { 1110 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1111 wakeup(vnlruproc); 1112 } 1113 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1114 "vlruwk", hz); 1115 } 1116 return (numvnodes > desiredvnodes ? ENFILE : 0); 1117} 1118 1119void 1120getnewvnode_reserve(u_int count) 1121{ 1122 struct thread *td; 1123 1124 td = curthread; 1125 /* First try to be quick and racy. */ 1126 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1127 td->td_vp_reserv += count; 1128 return; 1129 } else 1130 atomic_subtract_long(&numvnodes, count); 1131 1132 mtx_lock(&vnode_free_list_mtx); 1133 while (count > 0) { 1134 if (getnewvnode_wait(0) == 0) { 1135 count--; 1136 td->td_vp_reserv++; 1137 atomic_add_long(&numvnodes, 1); 1138 } 1139 } 1140 mtx_unlock(&vnode_free_list_mtx); 1141} 1142 1143void 1144getnewvnode_drop_reserve(void) 1145{ 1146 struct thread *td; 1147 1148 td = curthread; 1149 atomic_subtract_long(&numvnodes, td->td_vp_reserv); 1150 td->td_vp_reserv = 0; 1151} 1152 1153/* 1154 * Return the next vnode from the free list. 
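 *
 * A rough sketch of how a filesystem allocation path might pair this
 * with insmntque() below ("foofs", foofs_vnodeops and the data pointer
 * are illustrative only, not taken from this file):
 *
 *	error = getnewvnode("foofs", mp, &foofs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vp->v_data = data;
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *
 * On insmntque() failure the vnode has already been vgone'd and vput'd
 * by insmntque_stddtr(), so the caller only propagates the error.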
1155 */ 1156int 1157getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1158 struct vnode **vpp) 1159{ 1160 struct vnode *vp; 1161 struct thread *td; 1162 struct lock_object *lo; 1163 int error; 1164 1165 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1166 vp = NULL; 1167 td = curthread; 1168 if (td->td_vp_reserv > 0) { 1169 td->td_vp_reserv -= 1; 1170 goto alloc; 1171 } 1172 mtx_lock(&vnode_free_list_mtx); 1173 /* 1174 * Lend our context to reclaim vnodes if they've exceeded the max. 1175 */ 1176 if (freevnodes > wantfreevnodes) 1177 vnlru_free(1); 1178 error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & 1179 MNTK_SUSPEND)); 1180#if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */ 1181 if (error != 0) { 1182 mtx_unlock(&vnode_free_list_mtx); 1183 return (error); 1184 } 1185#endif 1186 atomic_add_long(&numvnodes, 1); 1187 mtx_unlock(&vnode_free_list_mtx); 1188alloc: 1189 atomic_add_long(&vnodes_created, 1); 1190 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK); 1191 /* 1192 * Locks are given the generic name "vnode" when created. 1193 * Follow the historic practice of using the filesystem 1194 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1195 * 1196 * Locks live in a witness group keyed on their name. Thus, 1197 * when a lock is renamed, it must also move from the witness 1198 * group of its old name to the witness group of its new name. 1199 * 1200 * The change only needs to be made when the vnode moves 1201 * from one filesystem type to another. We ensure that each 1202 * filesystem use a single static name pointer for its tag so 1203 * that we can compare pointers rather than doing a strcmp(). 1204 */ 1205 lo = &vp->v_vnlock->lock_object; 1206 if (lo->lo_name != tag) { 1207 lo->lo_name = tag; 1208 WITNESS_DESTROY(lo); 1209 WITNESS_INIT(lo, tag); 1210 } 1211 /* 1212 * By default, don't allow shared locks unless filesystems opt-in. 1213 */ 1214 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1215 /* 1216 * Finalize various vnode identity bits. 1217 */ 1218 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1219 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1220 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1221 vp->v_type = VNON; 1222 vp->v_tag = tag; 1223 vp->v_op = vops; 1224 v_incr_usecount(vp); 1225 vp->v_bufobj.bo_ops = &buf_ops_bio; 1226#ifdef MAC 1227 mac_vnode_init(vp); 1228 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1229 mac_vnode_associate_singlelabel(mp, vp); 1230 else if (mp == NULL && vops != &dead_vnodeops) 1231 printf("NULL mp in getnewvnode()\n"); 1232#endif 1233 if (mp != NULL) { 1234 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1235 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1236 vp->v_vflag |= VV_NOKNOTE; 1237 } 1238 1239 /* 1240 * For the filesystems which do not use vfs_hash_insert(), 1241 * still initialize v_hash to have vfs_hash_index() useful. 1242 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1243 * its own hashing. 1244 */ 1245 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1246 1247 *vpp = vp; 1248 return (0); 1249} 1250 1251/* 1252 * Delete from old mount point vnode list, if on one. 
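 * This also takes the vnode off the per-mount active list and drops the
 * mount reference taken by insmntque1(); it is called from vgonel()
 * once the vnode has been reclaimed.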
1253 */ 1254static void 1255delmntque(struct vnode *vp) 1256{ 1257 struct mount *mp; 1258 int active; 1259 1260 mp = vp->v_mount; 1261 if (mp == NULL) 1262 return; 1263 MNT_ILOCK(mp); 1264 VI_LOCK(vp); 1265 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1266 ("Active vnode list size %d > Vnode list size %d", 1267 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1268 active = vp->v_iflag & VI_ACTIVE; 1269 vp->v_iflag &= ~VI_ACTIVE; 1270 if (active) { 1271 mtx_lock(&vnode_free_list_mtx); 1272 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1273 mp->mnt_activevnodelistsize--; 1274 mtx_unlock(&vnode_free_list_mtx); 1275 } 1276 vp->v_mount = NULL; 1277 VI_UNLOCK(vp); 1278 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1279 ("bad mount point vnode list size")); 1280 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1281 mp->mnt_nvnodelistsize--; 1282 MNT_REL(mp); 1283 MNT_IUNLOCK(mp); 1284} 1285 1286static void 1287insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1288{ 1289 1290 vp->v_data = NULL; 1291 vp->v_op = &dead_vnodeops; 1292 vgone(vp); 1293 vput(vp); 1294} 1295 1296/* 1297 * Insert into list of vnodes for the new mount point, if available. 1298 */ 1299int 1300insmntque1(struct vnode *vp, struct mount *mp, 1301 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1302{ 1303 1304 KASSERT(vp->v_mount == NULL, 1305 ("insmntque: vnode already on per mount vnode list")); 1306 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1307 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1308 1309 /* 1310 * We acquire the vnode interlock early to ensure that the 1311 * vnode cannot be recycled by another process releasing a 1312 * holdcnt on it before we get it on both the vnode list 1313 * and the active vnode list. The mount mutex protects only 1314 * manipulation of the vnode list and the vnode freelist 1315 * mutex protects only manipulation of the active vnode list. 1316 * Hence the need to hold the vnode interlock throughout. 1317 */ 1318 MNT_ILOCK(mp); 1319 VI_LOCK(vp); 1320 if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 && 1321 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1322 mp->mnt_nvnodelistsize == 0)) && 1323 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1324 VI_UNLOCK(vp); 1325 MNT_IUNLOCK(mp); 1326 if (dtr != NULL) 1327 dtr(vp, dtr_arg); 1328 return (EBUSY); 1329 } 1330 vp->v_mount = mp; 1331 MNT_REF(mp); 1332 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1333 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1334 ("neg mount point vnode list size")); 1335 mp->mnt_nvnodelistsize++; 1336 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1337 ("Activating already active vnode")); 1338 vp->v_iflag |= VI_ACTIVE; 1339 mtx_lock(&vnode_free_list_mtx); 1340 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1341 mp->mnt_activevnodelistsize++; 1342 mtx_unlock(&vnode_free_list_mtx); 1343 VI_UNLOCK(vp); 1344 MNT_IUNLOCK(mp); 1345 return (0); 1346} 1347 1348int 1349insmntque(struct vnode *vp, struct mount *mp) 1350{ 1351 1352 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1353} 1354 1355/* 1356 * Flush out and invalidate all buffers associated with a bufobj 1357 * Called with the underlying object locked. 
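 *
 * As used below, V_SAVE writes dirty data out first (and panics if any
 * dirty buffers remain afterwards), V_CLEANONLY restricts the flush to
 * the clean list, and V_NORMAL/V_ALT select buffers by their BX_ALTDATA
 * flag in flushbuflist().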
1358 */ 1359int 1360bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1361{ 1362 int error; 1363 1364 BO_LOCK(bo); 1365 if (flags & V_SAVE) { 1366 error = bufobj_wwait(bo, slpflag, slptimeo); 1367 if (error) { 1368 BO_UNLOCK(bo); 1369 return (error); 1370 } 1371 if (bo->bo_dirty.bv_cnt > 0) { 1372 BO_UNLOCK(bo); 1373 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1374 return (error); 1375 /* 1376 * XXX We could save a lock/unlock if this was only 1377 * enabled under INVARIANTS 1378 */ 1379 BO_LOCK(bo); 1380 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1381 panic("vinvalbuf: dirty bufs"); 1382 } 1383 } 1384 /* 1385 * If you alter this loop please notice that interlock is dropped and 1386 * reacquired in flushbuflist. Special care is needed to ensure that 1387 * no race conditions occur from this. 1388 */ 1389 do { 1390 error = flushbuflist(&bo->bo_clean, 1391 flags, bo, slpflag, slptimeo); 1392 if (error == 0 && !(flags & V_CLEANONLY)) 1393 error = flushbuflist(&bo->bo_dirty, 1394 flags, bo, slpflag, slptimeo); 1395 if (error != 0 && error != EAGAIN) { 1396 BO_UNLOCK(bo); 1397 return (error); 1398 } 1399 } while (error != 0); 1400 1401 /* 1402 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1403 * have write I/O in-progress but if there is a VM object then the 1404 * VM object can also have read-I/O in-progress. 1405 */ 1406 do { 1407 bufobj_wwait(bo, 0, 0); 1408 BO_UNLOCK(bo); 1409 if (bo->bo_object != NULL) { 1410 VM_OBJECT_WLOCK(bo->bo_object); 1411 vm_object_pip_wait(bo->bo_object, "bovlbx"); 1412 VM_OBJECT_WUNLOCK(bo->bo_object); 1413 } 1414 BO_LOCK(bo); 1415 } while (bo->bo_numoutput > 0); 1416 BO_UNLOCK(bo); 1417 1418 /* 1419 * Destroy the copy in the VM cache, too. 1420 */ 1421 if (bo->bo_object != NULL && 1422 (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) { 1423 VM_OBJECT_WLOCK(bo->bo_object); 1424 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1425 OBJPR_CLEANONLY : 0); 1426 VM_OBJECT_WUNLOCK(bo->bo_object); 1427 } 1428 1429#ifdef INVARIANTS 1430 BO_LOCK(bo); 1431 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 && 1432 (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) 1433 panic("vinvalbuf: flush failed"); 1434 BO_UNLOCK(bo); 1435#endif 1436 return (0); 1437} 1438 1439/* 1440 * Flush out and invalidate all buffers associated with a vnode. 1441 * Called with the underlying object locked. 1442 */ 1443int 1444vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1445{ 1446 1447 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1448 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1449 if (vp->v_object != NULL && vp->v_object->handle != vp) 1450 return (0); 1451 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1452} 1453 1454/* 1455 * Flush out buffers on the specified list. 
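 *
 * Returns 0 only when there was nothing left to flush; otherwise it
 * returns EAGAIN (or a hard error), and bufobj_invalbuf() above keeps
 * rescanning until a clean pass is made.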
1456 * 1457 */ 1458static int 1459flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1460 int slptimeo) 1461{ 1462 struct buf *bp, *nbp; 1463 int retval, error; 1464 daddr_t lblkno; 1465 b_xflags_t xflags; 1466 1467 ASSERT_BO_WLOCKED(bo); 1468 1469 retval = 0; 1470 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1471 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) || 1472 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) { 1473 continue; 1474 } 1475 lblkno = 0; 1476 xflags = 0; 1477 if (nbp != NULL) { 1478 lblkno = nbp->b_lblkno; 1479 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1480 } 1481 retval = EAGAIN; 1482 error = BUF_TIMELOCK(bp, 1483 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1484 "flushbuf", slpflag, slptimeo); 1485 if (error) { 1486 BO_LOCK(bo); 1487 return (error != ENOLCK ? error : EAGAIN); 1488 } 1489 KASSERT(bp->b_bufobj == bo, 1490 ("bp %p wrong b_bufobj %p should be %p", 1491 bp, bp->b_bufobj, bo)); 1492 if (bp->b_bufobj != bo) { /* XXX: necessary ? */ 1493 BUF_UNLOCK(bp); 1494 BO_LOCK(bo); 1495 return (EAGAIN); 1496 } 1497 /* 1498 * XXX Since there are no node locks for NFS, I 1499 * believe there is a slight chance that a delayed 1500 * write will occur while sleeping just above, so 1501 * check for it. 1502 */ 1503 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1504 (flags & V_SAVE)) { 1505 bremfree(bp); 1506 bp->b_flags |= B_ASYNC; 1507 bwrite(bp); 1508 BO_LOCK(bo); 1509 return (EAGAIN); /* XXX: why not loop ? */ 1510 } 1511 bremfree(bp); 1512 bp->b_flags |= (B_INVAL | B_RELBUF); 1513 bp->b_flags &= ~B_ASYNC; 1514 brelse(bp); 1515 BO_LOCK(bo); 1516 if (nbp != NULL && 1517 (nbp->b_bufobj != bo || 1518 nbp->b_lblkno != lblkno || 1519 (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) != xflags)) 1520 break; /* nbp invalid */ 1521 } 1522 return (retval); 1523} 1524 1525/* 1526 * Truncate a file's buffer and pages to a specified length. This 1527 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1528 * sync activity. 1529 */ 1530int 1531vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) 1532{ 1533 struct buf *bp, *nbp; 1534 int anyfreed; 1535 int trunclbn; 1536 struct bufobj *bo; 1537 1538 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, 1539 vp, cred, blksize, (uintmax_t)length); 1540 1541 /* 1542 * Round up to the *next* lbn. 
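 * For example, with a 512 byte blksize, a length of 1 through 512 keeps
 * only logical block 0 (trunclbn = 1), a length of 513 keeps blocks 0
 * and 1 (trunclbn = 2), and a length of 0 removes every buffer
 * (trunclbn = 0), since the loops below toss all buffers whose
 * b_lblkno is at or beyond trunclbn.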
1543 */ 1544 trunclbn = (length + blksize - 1) / blksize; 1545 1546 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1547restart: 1548 bo = &vp->v_bufobj; 1549 BO_LOCK(bo); 1550 anyfreed = 1; 1551 for (;anyfreed;) { 1552 anyfreed = 0; 1553 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1554 if (bp->b_lblkno < trunclbn) 1555 continue; 1556 if (BUF_LOCK(bp, 1557 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1558 BO_LOCKPTR(bo)) == ENOLCK) 1559 goto restart; 1560 1561 bremfree(bp); 1562 bp->b_flags |= (B_INVAL | B_RELBUF); 1563 bp->b_flags &= ~B_ASYNC; 1564 brelse(bp); 1565 anyfreed = 1; 1566 1567 BO_LOCK(bo); 1568 if (nbp != NULL && 1569 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1570 (nbp->b_vp != vp) || 1571 (nbp->b_flags & B_DELWRI))) { 1572 BO_UNLOCK(bo); 1573 goto restart; 1574 } 1575 } 1576 1577 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1578 if (bp->b_lblkno < trunclbn) 1579 continue; 1580 if (BUF_LOCK(bp, 1581 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1582 BO_LOCKPTR(bo)) == ENOLCK) 1583 goto restart; 1584 bremfree(bp); 1585 bp->b_flags |= (B_INVAL | B_RELBUF); 1586 bp->b_flags &= ~B_ASYNC; 1587 brelse(bp); 1588 anyfreed = 1; 1589 1590 BO_LOCK(bo); 1591 if (nbp != NULL && 1592 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1593 (nbp->b_vp != vp) || 1594 (nbp->b_flags & B_DELWRI) == 0)) { 1595 BO_UNLOCK(bo); 1596 goto restart; 1597 } 1598 } 1599 } 1600 1601 if (length > 0) { 1602restartsync: 1603 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1604 if (bp->b_lblkno > 0) 1605 continue; 1606 /* 1607 * Since we hold the vnode lock this should only 1608 * fail if we're racing with the buf daemon. 1609 */ 1610 if (BUF_LOCK(bp, 1611 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1612 BO_LOCKPTR(bo)) == ENOLCK) { 1613 goto restart; 1614 } 1615 VNASSERT((bp->b_flags & B_DELWRI), vp, 1616 ("buf(%p) on dirty queue without DELWRI", bp)); 1617 1618 bremfree(bp); 1619 bawrite(bp); 1620 BO_LOCK(bo); 1621 goto restartsync; 1622 } 1623 } 1624 1625 bufobj_wwait(bo, 0, 0); 1626 BO_UNLOCK(bo); 1627 vnode_pager_setsize(vp, length); 1628 1629 return (0); 1630} 1631 1632static void 1633buf_vlist_remove(struct buf *bp) 1634{ 1635 struct bufv *bv; 1636 1637 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1638 ASSERT_BO_WLOCKED(bp->b_bufobj); 1639 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1640 (BX_VNDIRTY|BX_VNCLEAN), 1641 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1642 if (bp->b_xflags & BX_VNDIRTY) 1643 bv = &bp->b_bufobj->bo_dirty; 1644 else 1645 bv = &bp->b_bufobj->bo_clean; 1646 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 1647 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1648 bv->bv_cnt--; 1649 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1650} 1651 1652/* 1653 * Add the buffer to the sorted clean or dirty block list. 1654 * 1655 * NOTE: xflags is passed as a constant, optimizing this inline function! 1656 */ 1657static void 1658buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1659{ 1660 struct bufv *bv; 1661 struct buf *n; 1662 int error; 1663 1664 ASSERT_BO_WLOCKED(bo); 1665 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 1666 ("dead bo %p", bo)); 1667 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 1668 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 1669 bp->b_xflags |= xflags; 1670 if (xflags & BX_VNDIRTY) 1671 bv = &bo->bo_dirty; 1672 else 1673 bv = &bo->bo_clean; 1674 1675 /* 1676 * Keep the list ordered. Optimize empty list insertion. 
Assume 1677 * we tend to grow at the tail so lookup_le should usually be cheaper 1678 * than _ge. 1679 */ 1680 if (bv->bv_cnt == 0 || 1681 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 1682 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 1683 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 1684 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 1685 else 1686 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 1687 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 1688 if (error) 1689 panic("buf_vlist_add: Preallocated nodes insufficient."); 1690 bv->bv_cnt++; 1691} 1692 1693/* 1694 * Lookup a buffer using the splay tree. Note that we specifically avoid 1695 * shadow buffers used in background bitmap writes. 1696 * 1697 * This code isn't quite efficient as it could be because we are maintaining 1698 * two sorted lists and do not know which list the block resides in. 1699 * 1700 * During a "make buildworld" the desired buffer is found at one of 1701 * the roots more than 60% of the time. Thus, checking both roots 1702 * before performing either splay eliminates unnecessary splays on the 1703 * first tree splayed. 1704 */ 1705struct buf * 1706gbincore(struct bufobj *bo, daddr_t lblkno) 1707{ 1708 struct buf *bp; 1709 1710 ASSERT_BO_LOCKED(bo); 1711 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 1712 if (bp != NULL) 1713 return (bp); 1714 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 1715} 1716 1717/* 1718 * Associate a buffer with a vnode. 1719 */ 1720void 1721bgetvp(struct vnode *vp, struct buf *bp) 1722{ 1723 struct bufobj *bo; 1724 1725 bo = &vp->v_bufobj; 1726 ASSERT_BO_WLOCKED(bo); 1727 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 1728 1729 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 1730 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 1731 ("bgetvp: bp already attached! %p", bp)); 1732 1733 vhold(vp); 1734 bp->b_vp = vp; 1735 bp->b_bufobj = bo; 1736 /* 1737 * Insert onto list for new vnode. 1738 */ 1739 buf_vlist_add(bp, bo, BX_VNCLEAN); 1740} 1741 1742/* 1743 * Disassociate a buffer from a vnode. 1744 */ 1745void 1746brelvp(struct buf *bp) 1747{ 1748 struct bufobj *bo; 1749 struct vnode *vp; 1750 1751 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1752 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1753 1754 /* 1755 * Delete from old vnode list, if on one. 1756 */ 1757 vp = bp->b_vp; /* XXX */ 1758 bo = bp->b_bufobj; 1759 BO_LOCK(bo); 1760 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1761 buf_vlist_remove(bp); 1762 else 1763 panic("brelvp: Buffer %p not on queue.", bp); 1764 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 1765 bo->bo_flag &= ~BO_ONWORKLST; 1766 mtx_lock(&sync_mtx); 1767 LIST_REMOVE(bo, bo_synclist); 1768 syncer_worklist_len--; 1769 mtx_unlock(&sync_mtx); 1770 } 1771 bp->b_vp = NULL; 1772 bp->b_bufobj = NULL; 1773 BO_UNLOCK(bo); 1774 vdrop(vp); 1775} 1776 1777/* 1778 * Add an item to the syncer work queue. 
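 *
 * The bufobj is placed "delay" seconds ahead of the bucket the syncer
 * is currently draining: slot = (syncer_delayno + delay) & syncer_mask,
 * with delay clamped to syncer_maxdelay - 2.  For instance, if the
 * syncer is on bucket 10 and delay is 6, the bufobj lands in bucket 16
 * (modulo the number of buckets) and is visited roughly 6 seconds from
 * now.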
1779 */ 1780static void 1781vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 1782{ 1783 int slot; 1784 1785 ASSERT_BO_WLOCKED(bo); 1786 1787 mtx_lock(&sync_mtx); 1788 if (bo->bo_flag & BO_ONWORKLST) 1789 LIST_REMOVE(bo, bo_synclist); 1790 else { 1791 bo->bo_flag |= BO_ONWORKLST; 1792 syncer_worklist_len++; 1793 } 1794 1795 if (delay > syncer_maxdelay - 2) 1796 delay = syncer_maxdelay - 2; 1797 slot = (syncer_delayno + delay) & syncer_mask; 1798 1799 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 1800 mtx_unlock(&sync_mtx); 1801} 1802 1803static int 1804sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 1805{ 1806 int error, len; 1807 1808 mtx_lock(&sync_mtx); 1809 len = syncer_worklist_len - sync_vnode_count; 1810 mtx_unlock(&sync_mtx); 1811 error = SYSCTL_OUT(req, &len, sizeof(len)); 1812 return (error); 1813} 1814 1815SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 1816 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 1817 1818static struct proc *updateproc; 1819static void sched_sync(void); 1820static struct kproc_desc up_kp = { 1821 "syncer", 1822 sched_sync, 1823 &updateproc 1824}; 1825SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 1826 1827static int 1828sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 1829{ 1830 struct vnode *vp; 1831 struct mount *mp; 1832 1833 *bo = LIST_FIRST(slp); 1834 if (*bo == NULL) 1835 return (0); 1836 vp = (*bo)->__bo_vnode; /* XXX */ 1837 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 1838 return (1); 1839 /* 1840 * We use vhold in case the vnode does not 1841 * successfully sync. vhold prevents the vnode from 1842 * going away when we unlock the sync_mtx so that 1843 * we can acquire the vnode interlock. 1844 */ 1845 vholdl(vp); 1846 mtx_unlock(&sync_mtx); 1847 VI_UNLOCK(vp); 1848 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1849 vdrop(vp); 1850 mtx_lock(&sync_mtx); 1851 return (*bo == LIST_FIRST(slp)); 1852 } 1853 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1854 (void) VOP_FSYNC(vp, MNT_LAZY, td); 1855 VOP_UNLOCK(vp, 0); 1856 vn_finished_write(mp); 1857 BO_LOCK(*bo); 1858 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 1859 /* 1860 * Put us back on the worklist. The worklist 1861 * routine will remove us from our current 1862 * position and then add us back in at a later 1863 * position. 1864 */ 1865 vn_syncer_add_to_worklist(*bo, syncdelay); 1866 } 1867 BO_UNLOCK(*bo); 1868 vdrop(vp); 1869 mtx_lock(&sync_mtx); 1870 return (0); 1871} 1872 1873static int first_printf = 1; 1874 1875/* 1876 * System filesystem synchronizer daemon. 
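 *
 * Once a second this daemon advances to the next worklist bucket and
 * fsyncs every vnode found there with MNT_LAZY, re-queueing any bufobj
 * that is still dirty.  During shutdown it keeps draining buckets,
 * printing the number of vnodes still outstanding, until the worklist
 * wraps with no work left other than the syncer vnodes themselves.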
1877 */ 1878static void 1879sched_sync(void) 1880{ 1881 struct synclist *next, *slp; 1882 struct bufobj *bo; 1883 long starttime; 1884 struct thread *td = curthread; 1885 int last_work_seen; 1886 int net_worklist_len; 1887 int syncer_final_iter; 1888 int error; 1889 1890 last_work_seen = 0; 1891 syncer_final_iter = 0; 1892 syncer_state = SYNCER_RUNNING; 1893 starttime = time_uptime; 1894 td->td_pflags |= TDP_NORUNNINGBUF; 1895 1896 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 1897 SHUTDOWN_PRI_LAST); 1898 1899 mtx_lock(&sync_mtx); 1900 for (;;) { 1901 if (syncer_state == SYNCER_FINAL_DELAY && 1902 syncer_final_iter == 0) { 1903 mtx_unlock(&sync_mtx); 1904 kproc_suspend_check(td->td_proc); 1905 mtx_lock(&sync_mtx); 1906 } 1907 net_worklist_len = syncer_worklist_len - sync_vnode_count; 1908 if (syncer_state != SYNCER_RUNNING && 1909 starttime != time_uptime) { 1910 if (first_printf) { 1911 printf("\nSyncing disks, vnodes remaining..."); 1912 first_printf = 0; 1913 } 1914 printf("%d ", net_worklist_len); 1915 } 1916 starttime = time_uptime; 1917 1918 /* 1919 * Push files whose dirty time has expired. Be careful 1920 * of interrupt race on slp queue. 1921 * 1922 * Skip over empty worklist slots when shutting down. 1923 */ 1924 do { 1925 slp = &syncer_workitem_pending[syncer_delayno]; 1926 syncer_delayno += 1; 1927 if (syncer_delayno == syncer_maxdelay) 1928 syncer_delayno = 0; 1929 next = &syncer_workitem_pending[syncer_delayno]; 1930 /* 1931 * If the worklist has wrapped since the 1932 * it was emptied of all but syncer vnodes, 1933 * switch to the FINAL_DELAY state and run 1934 * for one more second. 1935 */ 1936 if (syncer_state == SYNCER_SHUTTING_DOWN && 1937 net_worklist_len == 0 && 1938 last_work_seen == syncer_delayno) { 1939 syncer_state = SYNCER_FINAL_DELAY; 1940 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 1941 } 1942 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 1943 syncer_worklist_len > 0); 1944 1945 /* 1946 * Keep track of the last time there was anything 1947 * on the worklist other than syncer vnodes. 1948 * Return to the SHUTTING_DOWN state if any 1949 * new work appears. 1950 */ 1951 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 1952 last_work_seen = syncer_delayno; 1953 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 1954 syncer_state = SYNCER_SHUTTING_DOWN; 1955 while (!LIST_EMPTY(slp)) { 1956 error = sync_vnode(slp, &bo, td); 1957 if (error == 1) { 1958 LIST_REMOVE(bo, bo_synclist); 1959 LIST_INSERT_HEAD(next, bo, bo_synclist); 1960 continue; 1961 } 1962 1963 if (first_printf == 0) 1964 wdog_kern_pat(WD_LASTVAL); 1965 1966 } 1967 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 1968 syncer_final_iter--; 1969 /* 1970 * The variable rushjob allows the kernel to speed up the 1971 * processing of the filesystem syncer process. A rushjob 1972 * value of N tells the filesystem syncer to process the next 1973 * N seconds worth of work on its queue ASAP. Currently rushjob 1974 * is used by the soft update code to speed up the filesystem 1975 * syncer process when the incore state is getting so far 1976 * ahead of the disk that the kernel memory pool is being 1977 * threatened with exhaustion. 1978 */ 1979 if (rushjob > 0) { 1980 rushjob -= 1; 1981 continue; 1982 } 1983 /* 1984 * Just sleep for a short period of time between 1985 * iterations when shutting down to allow some I/O 1986 * to happen. 1987 * 1988 * If it has taken us less than a second to process the 1989 * current work, then wait. 
Otherwise start right over 1990 * again. We can still lose time if any single round 1991 * takes more than two seconds, but it does not really 1992 * matter as we are just trying to generally pace the 1993 * filesystem activity. 1994 */ 1995 if (syncer_state != SYNCER_RUNNING || 1996 time_uptime == starttime) { 1997 thread_lock(td); 1998 sched_prio(td, PPAUSE); 1999 thread_unlock(td); 2000 } 2001 if (syncer_state != SYNCER_RUNNING) 2002 cv_timedwait(&sync_wakeup, &sync_mtx, 2003 hz / SYNCER_SHUTDOWN_SPEEDUP); 2004 else if (time_uptime == starttime) 2005 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2006 } 2007} 2008 2009/* 2010 * Request the syncer daemon to speed up its work. 2011 * We never push it to speed up more than half of its 2012 * normal turn time, otherwise it could take over the cpu. 2013 */ 2014int 2015speedup_syncer(void) 2016{ 2017 int ret = 0; 2018 2019 mtx_lock(&sync_mtx); 2020 if (rushjob < syncdelay / 2) { 2021 rushjob += 1; 2022 stat_rush_requests += 1; 2023 ret = 1; 2024 } 2025 mtx_unlock(&sync_mtx); 2026 cv_broadcast(&sync_wakeup); 2027 return (ret); 2028} 2029 2030/* 2031 * Tell the syncer to speed up its work and run though its work 2032 * list several times, then tell it to shut down. 2033 */ 2034static void 2035syncer_shutdown(void *arg, int howto) 2036{ 2037 2038 if (howto & RB_NOSYNC) 2039 return; 2040 mtx_lock(&sync_mtx); 2041 syncer_state = SYNCER_SHUTTING_DOWN; 2042 rushjob = 0; 2043 mtx_unlock(&sync_mtx); 2044 cv_broadcast(&sync_wakeup); 2045 kproc_shutdown(arg, howto); 2046} 2047 2048void 2049syncer_suspend(void) 2050{ 2051 2052 syncer_shutdown(updateproc, 0); 2053} 2054 2055void 2056syncer_resume(void) 2057{ 2058 2059 mtx_lock(&sync_mtx); 2060 first_printf = 1; 2061 syncer_state = SYNCER_RUNNING; 2062 mtx_unlock(&sync_mtx); 2063 cv_broadcast(&sync_wakeup); 2064 kproc_resume(updateproc); 2065} 2066 2067/* 2068 * Reassign a buffer from one vnode to another. 2069 * Used to assign file specific control information 2070 * (indirect blocks) to the vnode to which they belong. 2071 */ 2072void 2073reassignbuf(struct buf *bp) 2074{ 2075 struct vnode *vp; 2076 struct bufobj *bo; 2077 int delay; 2078#ifdef INVARIANTS 2079 struct bufv *bv; 2080#endif 2081 2082 vp = bp->b_vp; 2083 bo = bp->b_bufobj; 2084 ++reassignbufcalls; 2085 2086 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2087 bp, bp->b_vp, bp->b_flags); 2088 /* 2089 * B_PAGING flagged buffers cannot be reassigned because their vp 2090 * is not fully linked in. 2091 */ 2092 if (bp->b_flags & B_PAGING) 2093 panic("cannot reassign paging buffer"); 2094 2095 /* 2096 * Delete from old vnode list, if on one. 2097 */ 2098 BO_LOCK(bo); 2099 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2100 buf_vlist_remove(bp); 2101 else 2102 panic("reassignbuf: Buffer %p not on queue.", bp); 2103 /* 2104 * If dirty, put on list of dirty buffers; otherwise insert onto list 2105 * of clean buffers. 
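 * A delayed-write buffer also puts the bufobj on the syncer worklist if
 * it is not there yet, using dirdelay for directories, metadelay for
 * devices and filedelay for everything else; when the last dirty buffer
 * moves to the clean list the bufobj is taken off the worklist again.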
2106 */ 2107 if (bp->b_flags & B_DELWRI) { 2108 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2109 switch (vp->v_type) { 2110 case VDIR: 2111 delay = dirdelay; 2112 break; 2113 case VCHR: 2114 delay = metadelay; 2115 break; 2116 default: 2117 delay = filedelay; 2118 } 2119 vn_syncer_add_to_worklist(bo, delay); 2120 } 2121 buf_vlist_add(bp, bo, BX_VNDIRTY); 2122 } else { 2123 buf_vlist_add(bp, bo, BX_VNCLEAN); 2124 2125 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2126 mtx_lock(&sync_mtx); 2127 LIST_REMOVE(bo, bo_synclist); 2128 syncer_worklist_len--; 2129 mtx_unlock(&sync_mtx); 2130 bo->bo_flag &= ~BO_ONWORKLST; 2131 } 2132 } 2133#ifdef INVARIANTS 2134 bv = &bo->bo_clean; 2135 bp = TAILQ_FIRST(&bv->bv_hd); 2136 KASSERT(bp == NULL || bp->b_bufobj == bo, 2137 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2138 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2139 KASSERT(bp == NULL || bp->b_bufobj == bo, 2140 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2141 bv = &bo->bo_dirty; 2142 bp = TAILQ_FIRST(&bv->bv_hd); 2143 KASSERT(bp == NULL || bp->b_bufobj == bo, 2144 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2145 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2146 KASSERT(bp == NULL || bp->b_bufobj == bo, 2147 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2148#endif 2149 BO_UNLOCK(bo); 2150} 2151 2152/* 2153 * Increment the use and hold counts on the vnode, taking care to reference 2154 * the driver's usecount if this is a chardev. The vholdl() will remove 2155 * the vnode from the free list if it is presently free. Requires the 2156 * vnode interlock and returns with it held. 2157 */ 2158static void 2159v_incr_usecount(struct vnode *vp) 2160{ 2161 2162 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2163 vholdl(vp); 2164 vp->v_usecount++; 2165 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2166 dev_lock(); 2167 vp->v_rdev->si_usecount++; 2168 dev_unlock(); 2169 } 2170} 2171 2172/* 2173 * Turn a holdcnt into a use+holdcnt such that only one call to 2174 * v_decr_usecount is needed. 2175 */ 2176static void 2177v_upgrade_usecount(struct vnode *vp) 2178{ 2179 2180 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2181 vp->v_usecount++; 2182 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2183 dev_lock(); 2184 vp->v_rdev->si_usecount++; 2185 dev_unlock(); 2186 } 2187} 2188 2189/* 2190 * Decrement the vnode use and hold count along with the driver's usecount 2191 * if this is a chardev. The vdropl() below releases the vnode interlock 2192 * as it may free the vnode. 2193 */ 2194static void 2195v_decr_usecount(struct vnode *vp) 2196{ 2197 2198 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2199 VNASSERT(vp->v_usecount > 0, vp, 2200 ("v_decr_usecount: negative usecount")); 2201 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2202 vp->v_usecount--; 2203 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2204 dev_lock(); 2205 vp->v_rdev->si_usecount--; 2206 dev_unlock(); 2207 } 2208 vdropl(vp); 2209} 2210 2211/* 2212 * Decrement only the use count and driver use count. This is intended to 2213 * be paired with a follow on vdropl() to release the remaining hold count. 2214 * In this way we may vgone() a vnode with a 0 usecount without risk of 2215 * having it end up on a free list because the hold count is kept above 0. 
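 *
 * vputx() below relies on this: it calls v_decr_useonly(), performs the
 * deferred VOP_INACTIVE() while the hold count still pins the vnode,
 * and then lets the final vdropl() decide whether the vnode goes back
 * on the free list.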
2216 */ 2217static void 2218v_decr_useonly(struct vnode *vp) 2219{ 2220 2221 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2222 VNASSERT(vp->v_usecount > 0, vp, 2223 ("v_decr_useonly: negative usecount")); 2224 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2225 vp->v_usecount--; 2226 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2227 dev_lock(); 2228 vp->v_rdev->si_usecount--; 2229 dev_unlock(); 2230 } 2231} 2232 2233/* 2234 * Grab a particular vnode from the free list, increment its 2235 * reference count and lock it. VI_DOOMED is set if the vnode 2236 * is being destroyed. Only callers who specify LK_RETRY will 2237 * see doomed vnodes. If inactive processing was delayed in 2238 * vput try to do it here. 2239 */ 2240int 2241vget(struct vnode *vp, int flags, struct thread *td) 2242{ 2243 int error; 2244 2245 error = 0; 2246 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2247 ("vget: invalid lock operation")); 2248 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2249 2250 if ((flags & LK_INTERLOCK) == 0) 2251 VI_LOCK(vp); 2252 vholdl(vp); 2253 if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) { 2254 vdrop(vp); 2255 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2256 vp); 2257 return (error); 2258 } 2259 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2260 panic("vget: vn_lock failed to return ENOENT\n"); 2261 VI_LOCK(vp); 2262 /* Upgrade our holdcnt to a usecount. */ 2263 v_upgrade_usecount(vp); 2264 /* 2265 * We don't guarantee that any particular close will 2266 * trigger inactive processing so just make a best effort 2267 * here at preventing a reference to a removed file. If 2268 * we don't succeed no harm is done. 2269 */ 2270 if (vp->v_iflag & VI_OWEINACT) { 2271 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2272 (flags & LK_NOWAIT) == 0) 2273 vinactive(vp, td); 2274 vp->v_iflag &= ~VI_OWEINACT; 2275 } 2276 VI_UNLOCK(vp); 2277 return (0); 2278} 2279 2280/* 2281 * Increase the reference count of a vnode. 2282 */ 2283void 2284vref(struct vnode *vp) 2285{ 2286 2287 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2288 VI_LOCK(vp); 2289 v_incr_usecount(vp); 2290 VI_UNLOCK(vp); 2291} 2292 2293/* 2294 * Return reference count of a vnode. 2295 * 2296 * The results of this call are only guaranteed when some mechanism other 2297 * than the VI lock is used to stop other processes from gaining references 2298 * to the vnode. This may be the case if the caller holds the only reference. 2299 * This is also useful when stale data is acceptable as race conditions may 2300 * be accounted for by some other means. 2301 */ 2302int 2303vrefcnt(struct vnode *vp) 2304{ 2305 int usecnt; 2306 2307 VI_LOCK(vp); 2308 usecnt = vp->v_usecount; 2309 VI_UNLOCK(vp); 2310 2311 return (usecnt); 2312} 2313 2314#define VPUTX_VRELE 1 2315#define VPUTX_VPUT 2 2316#define VPUTX_VUNREF 3 2317 2318static void 2319vputx(struct vnode *vp, int func) 2320{ 2321 int error; 2322 2323 KASSERT(vp != NULL, ("vputx: null vp")); 2324 if (func == VPUTX_VUNREF) 2325 ASSERT_VOP_LOCKED(vp, "vunref"); 2326 else if (func == VPUTX_VPUT) 2327 ASSERT_VOP_LOCKED(vp, "vput"); 2328 else 2329 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2330 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2331 VI_LOCK(vp); 2332 2333 /* Skip this v_writecount check if we're going to panic below. 
*/ 2334 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2335 ("vputx: missed vn_close")); 2336 error = 0; 2337 2338 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2339 vp->v_usecount == 1)) { 2340 if (func == VPUTX_VPUT) 2341 VOP_UNLOCK(vp, 0); 2342 v_decr_usecount(vp); 2343 return; 2344 } 2345 2346 if (vp->v_usecount != 1) { 2347 vprint("vputx: negative ref count", vp); 2348 panic("vputx: negative ref cnt"); 2349 } 2350 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2351 /* 2352 * We want to hold the vnode until the inactive finishes to 2353 * prevent vgone() races. We drop the use count here and the 2354 * hold count below when we're done. 2355 */ 2356 v_decr_useonly(vp); 2357 /* 2358 * We must call VOP_INACTIVE with the node locked. Mark 2359 * as VI_DOINGINACT to avoid recursion. 2360 */ 2361 vp->v_iflag |= VI_OWEINACT; 2362 switch (func) { 2363 case VPUTX_VRELE: 2364 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2365 VI_LOCK(vp); 2366 break; 2367 case VPUTX_VPUT: 2368 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2369 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2370 LK_NOWAIT); 2371 VI_LOCK(vp); 2372 } 2373 break; 2374 case VPUTX_VUNREF: 2375 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2376 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2377 VI_LOCK(vp); 2378 } 2379 break; 2380 } 2381 if (vp->v_usecount > 0) 2382 vp->v_iflag &= ~VI_OWEINACT; 2383 if (error == 0) { 2384 if (vp->v_iflag & VI_OWEINACT) 2385 vinactive(vp, curthread); 2386 if (func != VPUTX_VUNREF) 2387 VOP_UNLOCK(vp, 0); 2388 } 2389 vdropl(vp); 2390} 2391 2392/* 2393 * Vnode put/release. 2394 * If count drops to zero, call inactive routine and return to freelist. 2395 */ 2396void 2397vrele(struct vnode *vp) 2398{ 2399 2400 vputx(vp, VPUTX_VRELE); 2401} 2402 2403/* 2404 * Release an already locked vnode. This give the same effects as 2405 * unlock+vrele(), but takes less time and avoids releasing and 2406 * re-aquiring the lock (as vrele() acquires the lock internally.) 2407 */ 2408void 2409vput(struct vnode *vp) 2410{ 2411 2412 vputx(vp, VPUTX_VPUT); 2413} 2414 2415/* 2416 * Release an exclusively locked vnode. Do not unlock the vnode lock. 2417 */ 2418void 2419vunref(struct vnode *vp) 2420{ 2421 2422 vputx(vp, VPUTX_VUNREF); 2423} 2424 2425/* 2426 * Somebody doesn't want the vnode recycled. 2427 */ 2428void 2429vhold(struct vnode *vp) 2430{ 2431 2432 VI_LOCK(vp); 2433 vholdl(vp); 2434 VI_UNLOCK(vp); 2435} 2436 2437/* 2438 * Increase the hold count and activate if this is the first reference. 2439 */ 2440void 2441vholdl(struct vnode *vp) 2442{ 2443 struct mount *mp; 2444 2445 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2446#ifdef INVARIANTS 2447 /* getnewvnode() calls v_incr_usecount() without holding interlock. */ 2448 if (vp->v_type != VNON || vp->v_data != NULL) 2449 ASSERT_VI_LOCKED(vp, "vholdl"); 2450#endif 2451 vp->v_holdcnt++; 2452 if ((vp->v_iflag & VI_FREE) == 0) 2453 return; 2454 VNASSERT(vp->v_holdcnt == 1, vp, ("vholdl: wrong hold count")); 2455 VNASSERT(vp->v_op != NULL, vp, ("vholdl: vnode already reclaimed.")); 2456 /* 2457 * Remove a vnode from the free list, mark it as in use, 2458 * and put it on the active list. 
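 * This is the inverse of the transition done in vdropl() below, which
 * moves a vnode whose hold count has dropped to zero from the per-mount
 * active list back onto the global free list.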
2459 */ 2460 mtx_lock(&vnode_free_list_mtx); 2461 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2462 freevnodes--; 2463 vp->v_iflag &= ~VI_FREE; 2464 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2465 ("Activating already active vnode")); 2466 vp->v_iflag |= VI_ACTIVE; 2467 mp = vp->v_mount; 2468 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2469 mp->mnt_activevnodelistsize++; 2470 mtx_unlock(&vnode_free_list_mtx); 2471} 2472 2473/* 2474 * Note that there is one less who cares about this vnode. 2475 * vdrop() is the opposite of vhold(). 2476 */ 2477void 2478vdrop(struct vnode *vp) 2479{ 2480 2481 VI_LOCK(vp); 2482 vdropl(vp); 2483} 2484 2485/* 2486 * Drop the hold count of the vnode. If this is the last reference to 2487 * the vnode we place it on the free list unless it has been vgone'd 2488 * (marked VI_DOOMED) in which case we will free it. 2489 * 2490 * Because the vnode vm object keeps a hold reference on the vnode if 2491 * there is at least one resident non-cached page, the vnode cannot 2492 * leave the active list without the page cleanup done. 2493 */ 2494void 2495vdropl(struct vnode *vp) 2496{ 2497 struct bufobj *bo; 2498 struct mount *mp; 2499 int active; 2500 2501 ASSERT_VI_LOCKED(vp, "vdropl"); 2502 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2503 if (vp->v_holdcnt <= 0) 2504 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2505 vp->v_holdcnt--; 2506 if (vp->v_holdcnt > 0) { 2507 VI_UNLOCK(vp); 2508 return; 2509 } 2510 if ((vp->v_iflag & VI_DOOMED) == 0) { 2511 /* 2512 * Mark a vnode as free: remove it from its active list 2513 * and put it up for recycling on the freelist. 2514 */ 2515 VNASSERT(vp->v_op != NULL, vp, 2516 ("vdropl: vnode already reclaimed.")); 2517 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2518 ("vnode already free")); 2519 VNASSERT(vp->v_holdcnt == 0, vp, 2520 ("vdropl: freeing when we shouldn't")); 2521 active = vp->v_iflag & VI_ACTIVE; 2522 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2523 vp->v_iflag &= ~VI_ACTIVE; 2524 mp = vp->v_mount; 2525 mtx_lock(&vnode_free_list_mtx); 2526 if (active) { 2527 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, 2528 v_actfreelist); 2529 mp->mnt_activevnodelistsize--; 2530 } 2531 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 2532 v_actfreelist); 2533 freevnodes++; 2534 vp->v_iflag |= VI_FREE; 2535 mtx_unlock(&vnode_free_list_mtx); 2536 } else { 2537 atomic_add_long(&free_owe_inact, 1); 2538 } 2539 VI_UNLOCK(vp); 2540 return; 2541 } 2542 /* 2543 * The vnode has been marked for destruction, so free it. 2544 * 2545 * The vnode will be returned to the zone where it will 2546 * normally remain until it is needed for another vnode. We 2547 * need to cleanup (or verify that the cleanup has already 2548 * been done) any residual data left from its current use 2549 * so as not to contaminate the freshly allocated vnode. 
2550 */ 2551 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2552 atomic_subtract_long(&numvnodes, 1); 2553 bo = &vp->v_bufobj; 2554 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2555 ("cleaned vnode still on the free list.")); 2556 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2557 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2558 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2559 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2560 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2561 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2562 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2563 ("clean blk trie not empty")); 2564 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2565 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2566 ("dirty blk trie not empty")); 2567 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2568 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2569 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2570 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2571 ("Dangling rangelock waiters")); 2572 VI_UNLOCK(vp); 2573#ifdef MAC 2574 mac_vnode_destroy(vp); 2575#endif 2576 if (vp->v_pollinfo != NULL) { 2577 destroy_vpollinfo(vp->v_pollinfo); 2578 vp->v_pollinfo = NULL; 2579 } 2580#ifdef INVARIANTS 2581 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 2582 vp->v_op = NULL; 2583#endif 2584 bzero(&vp->v_un, sizeof(vp->v_un)); 2585 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 2586 vp->v_iflag = 0; 2587 vp->v_vflag = 0; 2588 bo->bo_flag = 0; 2589 uma_zfree(vnode_zone, vp); 2590} 2591 2592/* 2593 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2594 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2595 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2596 * failed lock upgrade. 2597 */ 2598void 2599vinactive(struct vnode *vp, struct thread *td) 2600{ 2601 struct vm_object *obj; 2602 2603 ASSERT_VOP_ELOCKED(vp, "vinactive"); 2604 ASSERT_VI_LOCKED(vp, "vinactive"); 2605 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2606 ("vinactive: recursed on VI_DOINGINACT")); 2607 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2608 vp->v_iflag |= VI_DOINGINACT; 2609 vp->v_iflag &= ~VI_OWEINACT; 2610 VI_UNLOCK(vp); 2611 /* 2612 * Before moving off the active list, we must be sure that any 2613 * modified pages are converted into the vnode's dirty 2614 * buffers, since these will no longer be checked once the 2615 * vnode is on the inactive list. 2616 * 2617 * The write-out of the dirty pages is asynchronous. At the 2618 * point that VOP_INACTIVE() is called, there could still be 2619 * pending I/O and dirty pages in the object. 2620 */ 2621 obj = vp->v_object; 2622 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 2623 VM_OBJECT_WLOCK(obj); 2624 vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC); 2625 VM_OBJECT_WUNLOCK(obj); 2626 } 2627 VOP_INACTIVE(vp, td); 2628 VI_LOCK(vp); 2629 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2630 ("vinactive: lost VI_DOINGINACT")); 2631 vp->v_iflag &= ~VI_DOINGINACT; 2632} 2633 2634/* 2635 * Remove any vnodes in the vnode table belonging to mount point mp. 2636 * 2637 * If FORCECLOSE is not specified, there should not be any active ones, 2638 * return error if any are found (nb: this is a user error, not a 2639 * system error). 
If FORCECLOSE is specified, detach any active vnodes 2640 * that are found. 2641 * 2642 * If WRITECLOSE is set, only flush out regular file vnodes open for 2643 * writing. 2644 * 2645 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 2646 * 2647 * `rootrefs' specifies the base reference count for the root vnode 2648 * of this filesystem. The root vnode is considered busy if its 2649 * v_usecount exceeds this value. On a successful return, vflush(, td) 2650 * will call vrele() on the root vnode exactly rootrefs times. 2651 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2652 * be zero. 2653 */ 2654#ifdef DIAGNOSTIC 2655static int busyprt = 0; /* print out busy vnodes */ 2656SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 2657#endif 2658 2659int 2660vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 2661{ 2662 struct vnode *vp, *mvp, *rootvp = NULL; 2663 struct vattr vattr; 2664 int busy = 0, error; 2665 2666 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 2667 rootrefs, flags); 2668 if (rootrefs > 0) { 2669 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2670 ("vflush: bad args")); 2671 /* 2672 * Get the filesystem root vnode. We can vput() it 2673 * immediately, since with rootrefs > 0, it won't go away. 2674 */ 2675 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 2676 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 2677 __func__, error); 2678 return (error); 2679 } 2680 vput(rootvp); 2681 } 2682loop: 2683 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 2684 vholdl(vp); 2685 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 2686 if (error) { 2687 vdrop(vp); 2688 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2689 goto loop; 2690 } 2691 /* 2692 * Skip over a vnodes marked VV_SYSTEM. 2693 */ 2694 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2695 VOP_UNLOCK(vp, 0); 2696 vdrop(vp); 2697 continue; 2698 } 2699 /* 2700 * If WRITECLOSE is set, flush out unlinked but still open 2701 * files (even if open only for reading) and regular file 2702 * vnodes open for writing. 2703 */ 2704 if (flags & WRITECLOSE) { 2705 if (vp->v_object != NULL) { 2706 VM_OBJECT_WLOCK(vp->v_object); 2707 vm_object_page_clean(vp->v_object, 0, 0, 0); 2708 VM_OBJECT_WUNLOCK(vp->v_object); 2709 } 2710 error = VOP_FSYNC(vp, MNT_WAIT, td); 2711 if (error != 0) { 2712 VOP_UNLOCK(vp, 0); 2713 vdrop(vp); 2714 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2715 return (error); 2716 } 2717 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 2718 VI_LOCK(vp); 2719 2720 if ((vp->v_type == VNON || 2721 (error == 0 && vattr.va_nlink > 0)) && 2722 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2723 VOP_UNLOCK(vp, 0); 2724 vdropl(vp); 2725 continue; 2726 } 2727 } else 2728 VI_LOCK(vp); 2729 /* 2730 * With v_usecount == 0, all we need to do is clear out the 2731 * vnode data structures and we are done. 2732 * 2733 * If FORCECLOSE is set, forcibly close the vnode. 2734 */ 2735 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2736 vgonel(vp); 2737 } else { 2738 busy++; 2739#ifdef DIAGNOSTIC 2740 if (busyprt) 2741 vprint("vflush: busy vnode", vp); 2742#endif 2743 } 2744 VOP_UNLOCK(vp, 0); 2745 vdropl(vp); 2746 } 2747 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2748 /* 2749 * If just the root vnode is busy, and if its refcount 2750 * is equal to `rootrefs', then go ahead and kill it. 
2751 */ 2752 VI_LOCK(rootvp); 2753 KASSERT(busy > 0, ("vflush: not busy")); 2754 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2755 ("vflush: usecount %d < rootrefs %d", 2756 rootvp->v_usecount, rootrefs)); 2757 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2758 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 2759 vgone(rootvp); 2760 VOP_UNLOCK(rootvp, 0); 2761 busy = 0; 2762 } else 2763 VI_UNLOCK(rootvp); 2764 } 2765 if (busy) { 2766 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 2767 busy); 2768 return (EBUSY); 2769 } 2770 for (; rootrefs > 0; rootrefs--) 2771 vrele(rootvp); 2772 return (0); 2773} 2774 2775/* 2776 * Recycle an unused vnode to the front of the free list. 2777 */ 2778int 2779vrecycle(struct vnode *vp) 2780{ 2781 int recycled; 2782 2783 ASSERT_VOP_ELOCKED(vp, "vrecycle"); 2784 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2785 recycled = 0; 2786 VI_LOCK(vp); 2787 if (vp->v_usecount == 0) { 2788 recycled = 1; 2789 vgonel(vp); 2790 } 2791 VI_UNLOCK(vp); 2792 return (recycled); 2793} 2794 2795/* 2796 * Eliminate all activity associated with a vnode 2797 * in preparation for reuse. 2798 */ 2799void 2800vgone(struct vnode *vp) 2801{ 2802 VI_LOCK(vp); 2803 vgonel(vp); 2804 VI_UNLOCK(vp); 2805} 2806 2807static void 2808notify_lowervp_vfs_dummy(struct mount *mp __unused, 2809 struct vnode *lowervp __unused) 2810{ 2811} 2812 2813/* 2814 * Notify upper mounts about reclaimed or unlinked vnode. 2815 */ 2816void 2817vfs_notify_upper(struct vnode *vp, int event) 2818{ 2819 static struct vfsops vgonel_vfsops = { 2820 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 2821 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 2822 }; 2823 struct mount *mp, *ump, *mmp; 2824 2825 mp = vp->v_mount; 2826 if (mp == NULL) 2827 return; 2828 2829 MNT_ILOCK(mp); 2830 if (TAILQ_EMPTY(&mp->mnt_uppers)) 2831 goto unlock; 2832 MNT_IUNLOCK(mp); 2833 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 2834 mmp->mnt_op = &vgonel_vfsops; 2835 mmp->mnt_kern_flag |= MNTK_MARKER; 2836 MNT_ILOCK(mp); 2837 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 2838 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 2839 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 2840 ump = TAILQ_NEXT(ump, mnt_upper_link); 2841 continue; 2842 } 2843 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 2844 MNT_IUNLOCK(mp); 2845 switch (event) { 2846 case VFS_NOTIFY_UPPER_RECLAIM: 2847 VFS_RECLAIM_LOWERVP(ump, vp); 2848 break; 2849 case VFS_NOTIFY_UPPER_UNLINK: 2850 VFS_UNLINK_LOWERVP(ump, vp); 2851 break; 2852 default: 2853 KASSERT(0, ("invalid event %d", event)); 2854 break; 2855 } 2856 MNT_ILOCK(mp); 2857 ump = TAILQ_NEXT(mmp, mnt_upper_link); 2858 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 2859 } 2860 free(mmp, M_TEMP); 2861 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 2862 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 2863 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 2864 wakeup(&mp->mnt_uppers); 2865 } 2866unlock: 2867 MNT_IUNLOCK(mp); 2868} 2869 2870/* 2871 * vgone, with the vp interlock held. 2872 */ 2873static void 2874vgonel(struct vnode *vp) 2875{ 2876 struct thread *td; 2877 int oweinact; 2878 int active; 2879 struct mount *mp; 2880 2881 ASSERT_VOP_ELOCKED(vp, "vgonel"); 2882 ASSERT_VI_LOCKED(vp, "vgonel"); 2883 VNASSERT(vp->v_holdcnt, vp, 2884 ("vgonel: vp %p has no reference.", vp)); 2885 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2886 td = curthread; 2887 2888 /* 2889 * Don't vgonel if we're already doomed. 
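 * Setting VI_DOOMED here is also what makes the final vdropl() destroy
 * the vnode instead of caching it on the free list.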
2890 */ 2891 if (vp->v_iflag & VI_DOOMED) 2892 return; 2893 vp->v_iflag |= VI_DOOMED; 2894 2895 /* 2896 * Check to see if the vnode is in use. If so, we have to call 2897 * VOP_CLOSE() and VOP_INACTIVE(). 2898 */ 2899 active = vp->v_usecount; 2900 oweinact = (vp->v_iflag & VI_OWEINACT); 2901 VI_UNLOCK(vp); 2902 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 2903 2904 /* 2905 * If purging an active vnode, it must be closed and 2906 * deactivated before being reclaimed. 2907 */ 2908 if (active) 2909 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2910 if (oweinact || active) { 2911 VI_LOCK(vp); 2912 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2913 vinactive(vp, td); 2914 VI_UNLOCK(vp); 2915 } 2916 if (vp->v_type == VSOCK) 2917 vfs_unp_reclaim(vp); 2918 2919 /* 2920 * Clean out any buffers associated with the vnode. 2921 * If the flush fails, just toss the buffers. 2922 */ 2923 mp = NULL; 2924 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2925 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2926 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 2927 while (vinvalbuf(vp, 0, 0, 0) != 0) 2928 ; 2929 } 2930 2931 BO_LOCK(&vp->v_bufobj); 2932 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 2933 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 2934 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 2935 vp->v_bufobj.bo_clean.bv_cnt == 0, 2936 ("vp %p bufobj not invalidated", vp));
2937 vp->v_bufobj.bo_flag |= BO_DEAD;
2938 BO_UNLOCK(&vp->v_bufobj); 2939 2940 /* 2941 * Reclaim the vnode. 2942 */ 2943 if (VOP_RECLAIM(vp, td)) 2944 panic("vgone: cannot reclaim"); 2945 if (mp != NULL) 2946 vn_finished_secondary_write(mp); 2947 VNASSERT(vp->v_object == NULL, vp, 2948 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 2949 /* 2950 * Clear the advisory locks and wake up waiting threads. 2951 */ 2952 (void)VOP_ADVLOCKPURGE(vp); 2953 vp->v_lockf = NULL; 2954 /* 2955 * Delete from old mount point vnode list. 2956 */ 2957 delmntque(vp); 2958 cache_purge(vp); 2959 /* 2960 * Done with purge, reset to the standard lock and invalidate 2961 * the vnode. 2962 */ 2963 VI_LOCK(vp); 2964 vp->v_vnlock = &vp->v_lock; 2965 vp->v_op = &dead_vnodeops; 2966 vp->v_tag = "none"; 2967 vp->v_type = VBAD; 2968} 2969 2970/* 2971 * Calculate the total number of references to a special device. 2972 */ 2973int 2974vcount(struct vnode *vp) 2975{ 2976 int count; 2977 2978 dev_lock(); 2979 count = vp->v_rdev->si_usecount; 2980 dev_unlock(); 2981 return (count); 2982} 2983 2984/* 2985 * Same as above, but using the struct cdev *as argument 2986 */ 2987int 2988count_dev(struct cdev *dev) 2989{ 2990 int count; 2991 2992 dev_lock(); 2993 count = dev->si_usecount; 2994 dev_unlock(); 2995 return(count); 2996} 2997 2998/* 2999 * Print out a description of a vnode. 3000 */ 3001static char *typename[] = 3002{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3003 "VMARKER"}; 3004 3005void 3006vn_printf(struct vnode *vp, const char *fmt, ...) 3007{ 3008 va_list ap; 3009 char buf[256], buf2[16]; 3010 u_long flags; 3011 3012 va_start(ap, fmt); 3013 vprintf(fmt, ap); 3014 va_end(ap); 3015 printf("%p: ", (void *)vp); 3016 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3017 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", 3018 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); 3019 buf[0] = '\0'; 3020 buf[1] = '\0'; 3021 if (vp->v_vflag & VV_ROOT) 3022 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3023 if (vp->v_vflag & VV_ISTTY) 3024 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3025 if (vp->v_vflag & VV_NOSYNC) 3026 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3027 if (vp->v_vflag & VV_ETERNALDEV) 3028 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3029 if (vp->v_vflag & VV_CACHEDLABEL) 3030 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3031 if (vp->v_vflag & VV_TEXT) 3032 strlcat(buf, "|VV_TEXT", sizeof(buf)); 3033 if (vp->v_vflag & VV_COPYONWRITE) 3034 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3035 if (vp->v_vflag & VV_SYSTEM) 3036 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3037 if (vp->v_vflag & VV_PROCDEP) 3038 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3039 if (vp->v_vflag & VV_NOKNOTE) 3040 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3041 if (vp->v_vflag & VV_DELETED) 3042 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3043 if (vp->v_vflag & VV_MD) 3044 strlcat(buf, "|VV_MD", sizeof(buf)); 3045 if (vp->v_vflag & VV_FORCEINSMQ) 3046 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3047 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3048 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3049 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3050 if (flags != 0) { 3051 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3052 strlcat(buf, buf2, sizeof(buf)); 3053 } 3054 if (vp->v_iflag & VI_MOUNT) 3055 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3056 if (vp->v_iflag & VI_DOOMED) 3057 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 3058 if (vp->v_iflag & VI_FREE) 
3059 strlcat(buf, "|VI_FREE", sizeof(buf)); 3060 if (vp->v_iflag & VI_ACTIVE) 3061 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3062 if (vp->v_iflag & VI_DOINGINACT) 3063 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3064 if (vp->v_iflag & VI_OWEINACT) 3065 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3066 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | 3067 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 3068 if (flags != 0) { 3069 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3070 strlcat(buf, buf2, sizeof(buf)); 3071 } 3072 printf(" flags (%s)\n", buf + 1); 3073 if (mtx_owned(VI_MTX(vp))) 3074 printf(" VI_LOCKed"); 3075 if (vp->v_object != NULL) 3076 printf(" v_object %p ref %d pages %d " 3077 "cleanbuf %d dirtybuf %d\n", 3078 vp->v_object, vp->v_object->ref_count, 3079 vp->v_object->resident_page_count, 3080 vp->v_bufobj.bo_clean.bv_cnt, 3081 vp->v_bufobj.bo_dirty.bv_cnt); 3082 printf(" "); 3083 lockmgr_printinfo(vp->v_vnlock); 3084 if (vp->v_data != NULL) 3085 VOP_PRINT(vp); 3086} 3087 3088#ifdef DDB 3089/* 3090 * List all of the locked vnodes in the system. 3091 * Called when debugging the kernel. 3092 */ 3093DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3094{ 3095 struct mount *mp; 3096 struct vnode *vp; 3097 3098 /* 3099 * Note: because this is DDB, we can't obey the locking semantics 3100 * for these structures, which means we could catch an inconsistent 3101 * state and dereference a nasty pointer. Not much to be done 3102 * about that. 3103 */ 3104 db_printf("Locked vnodes\n"); 3105 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3106 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3107 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3108 vprint("", vp); 3109 } 3110 } 3111} 3112 3113/* 3114 * Show details about the given vnode. 3115 */ 3116DB_SHOW_COMMAND(vnode, db_show_vnode) 3117{ 3118 struct vnode *vp; 3119 3120 if (!have_addr) 3121 return; 3122 vp = (struct vnode *)addr; 3123 vn_printf(vp, "vnode "); 3124} 3125 3126/* 3127 * Show details about the given mount point. 3128 */ 3129DB_SHOW_COMMAND(mount, db_show_mount) 3130{ 3131 struct mount *mp; 3132 struct vfsopt *opt; 3133 struct statfs *sp; 3134 struct vnode *vp; 3135 char buf[512]; 3136 uint64_t mflags; 3137 u_int flags; 3138 3139 if (!have_addr) { 3140 /* No address given, print short info about all mount points. 
*/ 3141 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3142 db_printf("%p %s on %s (%s)\n", mp, 3143 mp->mnt_stat.f_mntfromname, 3144 mp->mnt_stat.f_mntonname, 3145 mp->mnt_stat.f_fstypename); 3146 if (db_pager_quit) 3147 break; 3148 } 3149 db_printf("\nMore info: show mount <addr>\n"); 3150 return; 3151 } 3152 3153 mp = (struct mount *)addr; 3154 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3155 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3156 3157 buf[0] = '\0'; 3158 mflags = mp->mnt_flag; 3159#define MNT_FLAG(flag) do { \ 3160 if (mflags & (flag)) { \ 3161 if (buf[0] != '\0') \ 3162 strlcat(buf, ", ", sizeof(buf)); \ 3163 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3164 mflags &= ~(flag); \ 3165 } \ 3166} while (0) 3167 MNT_FLAG(MNT_RDONLY); 3168 MNT_FLAG(MNT_SYNCHRONOUS); 3169 MNT_FLAG(MNT_NOEXEC); 3170 MNT_FLAG(MNT_NOSUID); 3171 MNT_FLAG(MNT_NFS4ACLS); 3172 MNT_FLAG(MNT_UNION); 3173 MNT_FLAG(MNT_ASYNC); 3174 MNT_FLAG(MNT_SUIDDIR); 3175 MNT_FLAG(MNT_SOFTDEP); 3176 MNT_FLAG(MNT_NOSYMFOLLOW); 3177 MNT_FLAG(MNT_GJOURNAL); 3178 MNT_FLAG(MNT_MULTILABEL); 3179 MNT_FLAG(MNT_ACLS); 3180 MNT_FLAG(MNT_NOATIME); 3181 MNT_FLAG(MNT_NOCLUSTERR); 3182 MNT_FLAG(MNT_NOCLUSTERW); 3183 MNT_FLAG(MNT_SUJ); 3184 MNT_FLAG(MNT_EXRDONLY); 3185 MNT_FLAG(MNT_EXPORTED); 3186 MNT_FLAG(MNT_DEFEXPORTED); 3187 MNT_FLAG(MNT_EXPORTANON); 3188 MNT_FLAG(MNT_EXKERB); 3189 MNT_FLAG(MNT_EXPUBLIC); 3190 MNT_FLAG(MNT_LOCAL); 3191 MNT_FLAG(MNT_QUOTA); 3192 MNT_FLAG(MNT_ROOTFS); 3193 MNT_FLAG(MNT_USER); 3194 MNT_FLAG(MNT_IGNORE); 3195 MNT_FLAG(MNT_UPDATE); 3196 MNT_FLAG(MNT_DELEXPORT); 3197 MNT_FLAG(MNT_RELOAD); 3198 MNT_FLAG(MNT_FORCE); 3199 MNT_FLAG(MNT_SNAPSHOT); 3200 MNT_FLAG(MNT_BYFSID); 3201#undef MNT_FLAG 3202 if (mflags != 0) { 3203 if (buf[0] != '\0') 3204 strlcat(buf, ", ", sizeof(buf)); 3205 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3206 "0x%016jx", mflags); 3207 } 3208 db_printf(" mnt_flag = %s\n", buf); 3209 3210 buf[0] = '\0'; 3211 flags = mp->mnt_kern_flag; 3212#define MNT_KERN_FLAG(flag) do { \ 3213 if (flags & (flag)) { \ 3214 if (buf[0] != '\0') \ 3215 strlcat(buf, ", ", sizeof(buf)); \ 3216 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3217 flags &= ~(flag); \ 3218 } \ 3219} while (0) 3220 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3221 MNT_KERN_FLAG(MNTK_ASYNC); 3222 MNT_KERN_FLAG(MNTK_SOFTDEP); 3223 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 3224 MNT_KERN_FLAG(MNTK_DRAINING); 3225 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3226 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3227 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3228 MNT_KERN_FLAG(MNTK_NO_IOPF); 3229 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3230 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3231 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3232 MNT_KERN_FLAG(MNTK_MARKER); 3233 MNT_KERN_FLAG(MNTK_USES_BCACHE); 3234 MNT_KERN_FLAG(MNTK_NOASYNC); 3235 MNT_KERN_FLAG(MNTK_UNMOUNT); 3236 MNT_KERN_FLAG(MNTK_MWAIT); 3237 MNT_KERN_FLAG(MNTK_SUSPEND); 3238 MNT_KERN_FLAG(MNTK_SUSPEND2); 3239 MNT_KERN_FLAG(MNTK_SUSPENDED); 3240 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3241 MNT_KERN_FLAG(MNTK_NOKNOTE); 3242#undef MNT_KERN_FLAG 3243 if (flags != 0) { 3244 if (buf[0] != '\0') 3245 strlcat(buf, ", ", sizeof(buf)); 3246 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3247 "0x%08x", flags); 3248 } 3249 db_printf(" mnt_kern_flag = %s\n", buf); 3250 3251 db_printf(" mnt_opt = "); 3252 opt = TAILQ_FIRST(mp->mnt_opt); 3253 if (opt != NULL) { 3254 db_printf("%s", opt->name); 3255 opt = TAILQ_NEXT(opt, link); 3256 while (opt != NULL) { 3257 db_printf(", %s", opt->name); 3258 opt = TAILQ_NEXT(opt, link); 3259 } 3260 } 
3261 db_printf("\n"); 3262 3263 sp = &mp->mnt_stat; 3264 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3265 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3266 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3267 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3268 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3269 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3270 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3271 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3272 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3273 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3274 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3275 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3276 3277 db_printf(" mnt_cred = { uid=%u ruid=%u", 3278 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3279 if (jailed(mp->mnt_cred)) 3280 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3281 db_printf(" }\n"); 3282 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3283 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3284 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3285 db_printf(" mnt_activevnodelistsize = %d\n", 3286 mp->mnt_activevnodelistsize); 3287 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3288 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3289 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3290 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3291 db_printf(" mnt_lockref = %d\n", mp->mnt_lockref); 3292 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3293 db_printf(" mnt_secondary_accwrites = %d\n", 3294 mp->mnt_secondary_accwrites); 3295 db_printf(" mnt_gjprovider = %s\n", 3296 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 3297 3298 db_printf("\n\nList of active vnodes\n"); 3299 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 3300 if (vp->v_type != VMARKER) { 3301 vn_printf(vp, "vnode "); 3302 if (db_pager_quit) 3303 break; 3304 } 3305 } 3306 db_printf("\n\nList of inactive vnodes\n"); 3307 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3308 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 3309 vn_printf(vp, "vnode "); 3310 if (db_pager_quit) 3311 break; 3312 } 3313 } 3314} 3315#endif /* DDB */ 3316 3317/* 3318 * Fill in a struct xvfsconf based on a struct vfsconf. 3319 */ 3320static int 3321vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 3322{ 3323 struct xvfsconf xvfsp; 3324 3325 bzero(&xvfsp, sizeof(xvfsp)); 3326 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3327 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3328 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3329 xvfsp.vfc_flags = vfsp->vfc_flags; 3330 /* 3331 * These are unused in userland, we keep them 3332 * to not break binary compatibility. 
3333 */ 3334 xvfsp.vfc_vfsops = NULL; 3335 xvfsp.vfc_next = NULL; 3336 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3337} 3338 3339#ifdef COMPAT_FREEBSD32 3340struct xvfsconf32 { 3341 uint32_t vfc_vfsops; 3342 char vfc_name[MFSNAMELEN]; 3343 int32_t vfc_typenum; 3344 int32_t vfc_refcount; 3345 int32_t vfc_flags; 3346 uint32_t vfc_next; 3347}; 3348 3349static int 3350vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3351{ 3352 struct xvfsconf32 xvfsp; 3353 3354 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3355 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3356 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3357 xvfsp.vfc_flags = vfsp->vfc_flags; 3358 xvfsp.vfc_vfsops = 0; 3359 xvfsp.vfc_next = 0; 3360 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3361} 3362#endif 3363 3364/* 3365 * Top level filesystem related information gathering. 3366 */ 3367static int 3368sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3369{ 3370 struct vfsconf *vfsp; 3371 int error; 3372 3373 error = 0; 3374 vfsconf_slock(); 3375 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3376#ifdef COMPAT_FREEBSD32 3377 if (req->flags & SCTL_MASK32) 3378 error = vfsconf2x32(req, vfsp); 3379 else 3380#endif 3381 error = vfsconf2x(req, vfsp); 3382 if (error) 3383 break; 3384 } 3385 vfsconf_sunlock(); 3386 return (error); 3387} 3388 3389SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3390 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3391 "S,xvfsconf", "List of all configured filesystems"); 3392 3393#ifndef BURN_BRIDGES 3394static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3395 3396static int 3397vfs_sysctl(SYSCTL_HANDLER_ARGS) 3398{ 3399 int *name = (int *)arg1 - 1; /* XXX */ 3400 u_int namelen = arg2 + 1; /* XXX */ 3401 struct vfsconf *vfsp; 3402 3403 log(LOG_WARNING, "userland calling deprecated sysctl, " 3404 "please rebuild world\n"); 3405 3406#if 1 || defined(COMPAT_PRELITE2) 3407 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 3408 if (namelen == 1) 3409 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3410#endif 3411 3412 switch (name[1]) { 3413 case VFS_MAXTYPENUM: 3414 if (namelen != 2) 3415 return (ENOTDIR); 3416 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3417 case VFS_CONF: 3418 if (namelen != 3) 3419 return (ENOTDIR); /* overloaded */ 3420 vfsconf_slock(); 3421 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3422 if (vfsp->vfc_typenum == name[2]) 3423 break; 3424 } 3425 vfsconf_sunlock(); 3426 if (vfsp == NULL) 3427 return (EOPNOTSUPP); 3428#ifdef COMPAT_FREEBSD32 3429 if (req->flags & SCTL_MASK32) 3430 return (vfsconf2x32(req, vfsp)); 3431 else 3432#endif 3433 return (vfsconf2x(req, vfsp)); 3434 } 3435 return (EOPNOTSUPP); 3436} 3437 3438static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3439 CTLFLAG_MPSAFE, vfs_sysctl, 3440 "Generic filesystem"); 3441 3442#if 1 || defined(COMPAT_PRELITE2) 3443 3444static int 3445sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3446{ 3447 int error; 3448 struct vfsconf *vfsp; 3449 struct ovfsconf ovfs; 3450 3451 vfsconf_slock(); 3452 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3453 bzero(&ovfs, sizeof(ovfs)); 3454 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3455 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3456 ovfs.vfc_index = vfsp->vfc_typenum; 3457 ovfs.vfc_refcount = vfsp->vfc_refcount; 3458 ovfs.vfc_flags = vfsp->vfc_flags; 3459 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3460 if (error != 0) { 3461 vfsconf_sunlock(); 3462 return (error); 3463 } 3464 } 3465 vfsconf_sunlock(); 3466 return (0); 3467} 3468 3469#endif /* 1 || COMPAT_PRELITE2 */ 3470#endif /* !BURN_BRIDGES */ 3471 3472#define KINFO_VNODESLOP 10 3473#ifdef notyet 3474/* 3475 * Dump vnode list (via sysctl). 3476 */ 3477/* ARGSUSED */ 3478static int 3479sysctl_vnode(SYSCTL_HANDLER_ARGS) 3480{ 3481 struct xvnode *xvn; 3482 struct mount *mp; 3483 struct vnode *vp; 3484 int error, len, n; 3485 3486 /* 3487 * Stale numvnodes access is not fatal here. 3488 */ 3489 req->lock = 0; 3490 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3491 if (!req->oldptr) 3492 /* Make an estimate */ 3493 return (SYSCTL_OUT(req, 0, len)); 3494 3495 error = sysctl_wire_old_buffer(req, 0); 3496 if (error != 0) 3497 return (error); 3498 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3499 n = 0; 3500 mtx_lock(&mountlist_mtx); 3501 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3502 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3503 continue; 3504 MNT_ILOCK(mp); 3505 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3506 if (n == len) 3507 break; 3508 vref(vp); 3509 xvn[n].xv_size = sizeof *xvn; 3510 xvn[n].xv_vnode = vp; 3511 xvn[n].xv_id = 0; /* XXX compat */ 3512#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3513 XV_COPY(usecount); 3514 XV_COPY(writecount); 3515 XV_COPY(holdcnt); 3516 XV_COPY(mount); 3517 XV_COPY(numoutput); 3518 XV_COPY(type); 3519#undef XV_COPY 3520 xvn[n].xv_flag = vp->v_vflag; 3521 3522 switch (vp->v_type) { 3523 case VREG: 3524 case VDIR: 3525 case VLNK: 3526 break; 3527 case VBLK: 3528 case VCHR: 3529 if (vp->v_rdev == NULL) { 3530 vrele(vp); 3531 continue; 3532 } 3533 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3534 break; 3535 case VSOCK: 3536 xvn[n].xv_socket = vp->v_socket; 3537 break; 3538 case VFIFO: 3539 xvn[n].xv_fifo = vp->v_fifoinfo; 3540 break; 3541 case VNON: 3542 case VBAD: 3543 default: 3544 /* shouldn't happen? 
*/ 3545 vrele(vp); 3546 continue; 3547 } 3548 vrele(vp); 3549 ++n; 3550 } 3551 MNT_IUNLOCK(mp); 3552 mtx_lock(&mountlist_mtx); 3553 vfs_unbusy(mp); 3554 if (n == len) 3555 break; 3556 } 3557 mtx_unlock(&mountlist_mtx); 3558 3559 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3560 free(xvn, M_TEMP); 3561 return (error); 3562} 3563 3564SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 3565 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 3566 ""); 3567#endif 3568 3569static void 3570unmount_or_warn(struct mount *mp) 3571{ 3572 int error; 3573 3574 error = dounmount(mp, MNT_FORCE, curthread); 3575 if (error != 0 && strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 3576 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 3577 if (error == EBUSY) 3578 printf("BUSY)\n"); 3579 else 3580 printf("%d)\n", error); 3581 } 3582} 3583 3584/* 3585 * Unmount all filesystems. The list is traversed in reverse order 3586 * of mounting to avoid dependencies. 3587 */ 3588void 3589vfs_unmountall(void) 3590{ 3591 struct mount *mp, *tmp; 3592 3593 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 3594 3595 /* 3596 * Since this only runs when rebooting, it is not interlocked. 3597 */ 3598 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 3599 vfs_ref(mp); 3600 3601 /* 3602 * Forcibly unmounting "/dev" before "/" would prevent clean 3603 * unmount of the latter. 3604 */ 3605 if (mp == rootdevmp) 3606 continue; 3607 3608 unmount_or_warn(mp); 3609 } 3610 3611 if (rootdevmp != NULL) 3612 unmount_or_warn(rootdevmp); 3613} 3614 3615/* 3616 * perform msync on all vnodes under a mount point 3617 * the mount point must be locked. 3618 */ 3619void 3620vfs_msync(struct mount *mp, int flags) 3621{ 3622 struct vnode *vp, *mvp; 3623 struct vm_object *obj; 3624 3625 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 3626 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 3627 obj = vp->v_object; 3628 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 3629 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 3630 if (!vget(vp, 3631 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 3632 curthread)) { 3633 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 3634 vput(vp); 3635 continue; 3636 } 3637 3638 obj = vp->v_object; 3639 if (obj != NULL) { 3640 VM_OBJECT_WLOCK(obj); 3641 vm_object_page_clean(obj, 0, 0, 3642 flags == MNT_WAIT ? 3643 OBJPC_SYNC : OBJPC_NOSYNC); 3644 VM_OBJECT_WUNLOCK(obj); 3645 } 3646 vput(vp); 3647 } 3648 } else 3649 VI_UNLOCK(vp); 3650 } 3651} 3652 3653static void 3654destroy_vpollinfo_free(struct vpollinfo *vi) 3655{ 3656 3657 knlist_destroy(&vi->vpi_selinfo.si_note); 3658 mtx_destroy(&vi->vpi_lock); 3659 uma_zfree(vnodepoll_zone, vi); 3660} 3661 3662static void 3663destroy_vpollinfo(struct vpollinfo *vi) 3664{ 3665 3666 knlist_clear(&vi->vpi_selinfo.si_note, 1); 3667 seldrain(&vi->vpi_selinfo); 3668 destroy_vpollinfo_free(vi); 3669} 3670 3671/* 3672 * Initalize per-vnode helper structure to hold poll-related state. 
3673 */ 3674void 3675v_addpollinfo(struct vnode *vp) 3676{ 3677 struct vpollinfo *vi; 3678 3679 if (vp->v_pollinfo != NULL) 3680 return; 3681 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 3682 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 3683 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 3684 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 3685 VI_LOCK(vp); 3686 if (vp->v_pollinfo != NULL) { 3687 VI_UNLOCK(vp); 3688 destroy_vpollinfo_free(vi); 3689 return; 3690 } 3691 vp->v_pollinfo = vi; 3692 VI_UNLOCK(vp); 3693} 3694 3695/* 3696 * Record a process's interest in events which might happen to 3697 * a vnode. Because poll uses the historic select-style interface 3698 * internally, this routine serves as both the ``check for any 3699 * pending events'' and the ``record my interest in future events'' 3700 * functions. (These are done together, while the lock is held, 3701 * to avoid race conditions.) 3702 */ 3703int 3704vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3705{ 3706 3707 v_addpollinfo(vp); 3708 mtx_lock(&vp->v_pollinfo->vpi_lock); 3709 if (vp->v_pollinfo->vpi_revents & events) { 3710 /* 3711 * This leaves events we are not interested 3712 * in available for the other process which 3713 * which presumably had requested them 3714 * (otherwise they would never have been 3715 * recorded). 3716 */ 3717 events &= vp->v_pollinfo->vpi_revents; 3718 vp->v_pollinfo->vpi_revents &= ~events; 3719 3720 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3721 return (events); 3722 } 3723 vp->v_pollinfo->vpi_events |= events; 3724 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 3725 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3726 return (0); 3727} 3728 3729/* 3730 * Routine to create and manage a filesystem syncer vnode. 3731 */ 3732#define sync_close ((int (*)(struct vop_close_args *))nullop) 3733static int sync_fsync(struct vop_fsync_args *); 3734static int sync_inactive(struct vop_inactive_args *); 3735static int sync_reclaim(struct vop_reclaim_args *); 3736 3737static struct vop_vector sync_vnodeops = { 3738 .vop_bypass = VOP_EOPNOTSUPP, 3739 .vop_close = sync_close, /* close */ 3740 .vop_fsync = sync_fsync, /* fsync */ 3741 .vop_inactive = sync_inactive, /* inactive */ 3742 .vop_reclaim = sync_reclaim, /* reclaim */ 3743 .vop_lock1 = vop_stdlock, /* lock */ 3744 .vop_unlock = vop_stdunlock, /* unlock */ 3745 .vop_islocked = vop_stdislocked, /* islocked */ 3746}; 3747 3748/* 3749 * Create a new filesystem syncer vnode for the specified mount point. 3750 */ 3751void 3752vfs_allocate_syncvnode(struct mount *mp) 3753{ 3754 struct vnode *vp; 3755 struct bufobj *bo; 3756 static long start, incr, next; 3757 int error; 3758 3759 /* Allocate a new vnode */ 3760 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 3761 if (error != 0) 3762 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 3763 vp->v_type = VNON; 3764 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3765 vp->v_vflag |= VV_FORCEINSMQ; 3766 error = insmntque(vp, mp); 3767 if (error != 0) 3768 panic("vfs_allocate_syncvnode: insmntque() failed"); 3769 vp->v_vflag &= ~VV_FORCEINSMQ; 3770 VOP_UNLOCK(vp, 0); 3771 /* 3772 * Place the vnode onto the syncer worklist. We attempt to 3773 * scatter them about on the list so that they will go off 3774 * at evenly distributed times even if all the filesystems 3775 * are mounted at once. 
3776 */ 3777 next += incr; 3778 if (next == 0 || next > syncer_maxdelay) { 3779 start /= 2; 3780 incr /= 2; 3781 if (start == 0) { 3782 start = syncer_maxdelay / 2; 3783 incr = syncer_maxdelay; 3784 } 3785 next = start; 3786 } 3787 bo = &vp->v_bufobj; 3788 BO_LOCK(bo); 3789 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 3790 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 3791 mtx_lock(&sync_mtx); 3792 sync_vnode_count++; 3793 if (mp->mnt_syncer == NULL) { 3794 mp->mnt_syncer = vp; 3795 vp = NULL; 3796 } 3797 mtx_unlock(&sync_mtx); 3798 BO_UNLOCK(bo); 3799 if (vp != NULL) { 3800 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3801 vgone(vp); 3802 vput(vp); 3803 } 3804} 3805 3806void 3807vfs_deallocate_syncvnode(struct mount *mp) 3808{ 3809 struct vnode *vp; 3810 3811 mtx_lock(&sync_mtx); 3812 vp = mp->mnt_syncer; 3813 if (vp != NULL) 3814 mp->mnt_syncer = NULL; 3815 mtx_unlock(&sync_mtx); 3816 if (vp != NULL) 3817 vrele(vp); 3818} 3819 3820/* 3821 * Do a lazy sync of the filesystem. 3822 */ 3823static int 3824sync_fsync(struct vop_fsync_args *ap) 3825{ 3826 struct vnode *syncvp = ap->a_vp; 3827 struct mount *mp = syncvp->v_mount; 3828 int error, save; 3829 struct bufobj *bo; 3830 3831 /* 3832 * We only need to do something if this is a lazy evaluation. 3833 */ 3834 if (ap->a_waitfor != MNT_LAZY) 3835 return (0); 3836 3837 /* 3838 * Move ourselves to the back of the sync list. 3839 */ 3840 bo = &syncvp->v_bufobj; 3841 BO_LOCK(bo); 3842 vn_syncer_add_to_worklist(bo, syncdelay); 3843 BO_UNLOCK(bo); 3844 3845 /* 3846 * Walk the list of vnodes pushing all that are dirty and 3847 * not already on the sync list. 3848 */ 3849 if (vfs_busy(mp, MBF_NOWAIT) != 0) 3850 return (0); 3851 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3852 vfs_unbusy(mp); 3853 return (0); 3854 } 3855 save = curthread_pflags_set(TDP_SYNCIO); 3856 vfs_msync(mp, MNT_NOWAIT); 3857 error = VFS_SYNC(mp, MNT_LAZY); 3858 curthread_pflags_restore(save); 3859 vn_finished_write(mp); 3860 vfs_unbusy(mp); 3861 return (error); 3862} 3863 3864/* 3865 * The syncer vnode is no referenced. 3866 */ 3867static int 3868sync_inactive(struct vop_inactive_args *ap) 3869{ 3870 3871 vgone(ap->a_vp); 3872 return (0); 3873} 3874 3875/* 3876 * The syncer vnode is no longer needed and is being decommissioned. 3877 * 3878 * Modifications to the worklist must be protected by sync_mtx. 3879 */ 3880static int 3881sync_reclaim(struct vop_reclaim_args *ap) 3882{ 3883 struct vnode *vp = ap->a_vp; 3884 struct bufobj *bo; 3885 3886 bo = &vp->v_bufobj; 3887 BO_LOCK(bo); 3888 mtx_lock(&sync_mtx); 3889 if (vp->v_mount->mnt_syncer == vp) 3890 vp->v_mount->mnt_syncer = NULL; 3891 if (bo->bo_flag & BO_ONWORKLST) { 3892 LIST_REMOVE(bo, bo_synclist); 3893 syncer_worklist_len--; 3894 sync_vnode_count--; 3895 bo->bo_flag &= ~BO_ONWORKLST; 3896 } 3897 mtx_unlock(&sync_mtx); 3898 BO_UNLOCK(bo); 3899 3900 return (0); 3901} 3902 3903/* 3904 * Check if vnode represents a disk device 3905 */ 3906int 3907vn_isdisk(struct vnode *vp, int *errp) 3908{ 3909 int error; 3910 3911 if (vp->v_type != VCHR) { 3912 error = ENOTBLK; 3913 goto out; 3914 } 3915 error = 0; 3916 dev_lock(); 3917 if (vp->v_rdev == NULL) 3918 error = ENXIO; 3919 else if (vp->v_rdev->si_devsw == NULL) 3920 error = ENXIO; 3921 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3922 error = ENOTBLK; 3923 dev_unlock(); 3924out: 3925 if (errp != NULL) 3926 *errp = error; 3927 return (error == 0); 3928} 3929 3930/* 3931 * Common filesystem object access control check routine. 
Accepts a 3932 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3933 * and optional call-by-reference privused argument allowing vaccess() 3934 * to indicate to the caller whether privilege was used to satisfy the 3935 * request (obsoleted). Returns 0 on success, or an errno on failure. 3936 */ 3937int 3938vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3939 accmode_t accmode, struct ucred *cred, int *privused) 3940{ 3941 accmode_t dac_granted; 3942 accmode_t priv_granted; 3943 3944 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 3945 ("invalid bit in accmode")); 3946 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 3947 ("VAPPEND without VWRITE")); 3948 3949 /* 3950 * Look for a normal, non-privileged way to access the file/directory 3951 * as requested. If it exists, go with that. 3952 */ 3953 3954 if (privused != NULL) 3955 *privused = 0; 3956 3957 dac_granted = 0; 3958 3959 /* Check the owner. */ 3960 if (cred->cr_uid == file_uid) { 3961 dac_granted |= VADMIN; 3962 if (file_mode & S_IXUSR) 3963 dac_granted |= VEXEC; 3964 if (file_mode & S_IRUSR) 3965 dac_granted |= VREAD; 3966 if (file_mode & S_IWUSR) 3967 dac_granted |= (VWRITE | VAPPEND); 3968 3969 if ((accmode & dac_granted) == accmode) 3970 return (0); 3971 3972 goto privcheck; 3973 } 3974 3975 /* Otherwise, check the groups (first match) */ 3976 if (groupmember(file_gid, cred)) { 3977 if (file_mode & S_IXGRP) 3978 dac_granted |= VEXEC; 3979 if (file_mode & S_IRGRP) 3980 dac_granted |= VREAD; 3981 if (file_mode & S_IWGRP) 3982 dac_granted |= (VWRITE | VAPPEND); 3983 3984 if ((accmode & dac_granted) == accmode) 3985 return (0); 3986 3987 goto privcheck; 3988 } 3989 3990 /* Otherwise, check everyone else. */ 3991 if (file_mode & S_IXOTH) 3992 dac_granted |= VEXEC; 3993 if (file_mode & S_IROTH) 3994 dac_granted |= VREAD; 3995 if (file_mode & S_IWOTH) 3996 dac_granted |= (VWRITE | VAPPEND); 3997 if ((accmode & dac_granted) == accmode) 3998 return (0); 3999 4000privcheck: 4001 /* 4002 * Build a privilege mask to determine if the set of privileges 4003 * satisfies the requirements when combined with the granted mask 4004 * from above. For each privilege, if the privilege is required, 4005 * bitwise or the request type onto the priv_granted mask. 4006 */ 4007 priv_granted = 0; 4008 4009 if (type == VDIR) { 4010 /* 4011 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4012 * requests, instead of PRIV_VFS_EXEC. 4013 */ 4014 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4015 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 4016 priv_granted |= VEXEC; 4017 } else { 4018 /* 4019 * Ensure that at least one execute bit is on. Otherwise, 4020 * a privileged user will always succeed, and we don't want 4021 * this to happen unless the file really is executable. 
4022 */ 4023 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4024 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4025 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 4026 priv_granted |= VEXEC; 4027 } 4028 4029 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4030 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 4031 priv_granted |= VREAD; 4032 4033 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4034 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 4035 priv_granted |= (VWRITE | VAPPEND); 4036 4037 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4038 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 4039 priv_granted |= VADMIN; 4040 4041 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4042 /* XXX audit: privilege used */ 4043 if (privused != NULL) 4044 *privused = 1; 4045 return (0); 4046 } 4047 4048 return ((accmode & VADMIN) ? EPERM : EACCES); 4049} 4050 4051/* 4052 * Credential check based on process requesting service, and per-attribute 4053 * permissions. 4054 */ 4055int 4056extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4057 struct thread *td, accmode_t accmode) 4058{ 4059 4060 /* 4061 * Kernel-invoked always succeeds. 4062 */ 4063 if (cred == NOCRED) 4064 return (0); 4065 4066 /* 4067 * Do not allow privileged processes in jail to directly manipulate 4068 * system attributes. 4069 */ 4070 switch (attrnamespace) { 4071 case EXTATTR_NAMESPACE_SYSTEM: 4072 /* Potentially should be: return (EPERM); */ 4073 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 4074 case EXTATTR_NAMESPACE_USER: 4075 return (VOP_ACCESS(vp, accmode, cred, td)); 4076 default: 4077 return (EPERM); 4078 } 4079} 4080 4081#ifdef DEBUG_VFS_LOCKS 4082/* 4083 * This only exists to supress warnings from unlocked specfs accesses. It is 4084 * no longer ok to have an unlocked VFS. 4085 */ 4086#define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4087 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4088 4089int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4090SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4091 "Drop into debugger on lock violation"); 4092 4093int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4094SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4095 0, "Check for interlock across VOPs"); 4096 4097int vfs_badlock_print = 1; /* Print lock violations. */ 4098SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4099 0, "Print lock violations"); 4100 4101#ifdef KDB 4102int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 4103SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4104 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4105#endif 4106 4107static void 4108vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4109{ 4110 4111#ifdef KDB 4112 if (vfs_badlock_backtrace) 4113 kdb_backtrace(); 4114#endif 4115 if (vfs_badlock_print) 4116 printf("%s: %p %s\n", str, (void *)vp, msg); 4117 if (vfs_badlock_ddb) 4118 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4119} 4120 4121void 4122assert_vi_locked(struct vnode *vp, const char *str) 4123{ 4124 4125 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4126 vfs_badlock("interlock is not locked but should be", str, vp); 4127} 4128 4129void 4130assert_vi_unlocked(struct vnode *vp, const char *str) 4131{ 4132 4133 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4134 vfs_badlock("interlock is locked but should not be", str, vp); 4135} 4136 4137void 4138assert_vop_locked(struct vnode *vp, const char *str) 4139{ 4140 int locked; 4141 4142 if (!IGNORE_LOCK(vp)) { 4143 locked = VOP_ISLOCKED(vp); 4144 if (locked == 0 || locked == LK_EXCLOTHER) 4145 vfs_badlock("is not locked but should be", str, vp); 4146 } 4147} 4148 4149void 4150assert_vop_unlocked(struct vnode *vp, const char *str) 4151{ 4152 4153 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4154 vfs_badlock("is locked but should not be", str, vp); 4155} 4156 4157void 4158assert_vop_elocked(struct vnode *vp, const char *str) 4159{ 4160 4161 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4162 vfs_badlock("is not exclusive locked but should be", str, vp); 4163} 4164 4165#if 0 4166void 4167assert_vop_elocked_other(struct vnode *vp, const char *str) 4168{ 4169 4170 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER) 4171 vfs_badlock("is not exclusive locked by another thread", 4172 str, vp); 4173} 4174 4175void 4176assert_vop_slocked(struct vnode *vp, const char *str) 4177{ 4178 4179 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED) 4180 vfs_badlock("is not locked shared but should be", str, vp); 4181} 4182#endif /* 0 */ 4183#endif /* DEBUG_VFS_LOCKS */ 4184 4185void 4186vop_rename_fail(struct vop_rename_args *ap) 4187{ 4188 4189 if (ap->a_tvp != NULL) 4190 vput(ap->a_tvp); 4191 if (ap->a_tdvp == ap->a_tvp) 4192 vrele(ap->a_tdvp); 4193 else 4194 vput(ap->a_tdvp); 4195 vrele(ap->a_fdvp); 4196 vrele(ap->a_fvp); 4197} 4198 4199void 4200vop_rename_pre(void *ap) 4201{ 4202 struct vop_rename_args *a = ap; 4203 4204#ifdef DEBUG_VFS_LOCKS 4205 if (a->a_tvp) 4206 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4207 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4208 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4209 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4210 4211 /* Check the source (from). */ 4212 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4213 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4214 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4215 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4216 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4217 4218 /* Check the target. 
*/ 4219 if (a->a_tvp) 4220 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4221 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4222#endif 4223 if (a->a_tdvp != a->a_fdvp) 4224 vhold(a->a_fdvp); 4225 if (a->a_tvp != a->a_fvp) 4226 vhold(a->a_fvp); 4227 vhold(a->a_tdvp); 4228 if (a->a_tvp) 4229 vhold(a->a_tvp); 4230} 4231 4232void 4233vop_strategy_pre(void *ap) 4234{ 4235#ifdef DEBUG_VFS_LOCKS 4236 struct vop_strategy_args *a; 4237 struct buf *bp; 4238 4239 a = ap; 4240 bp = a->a_bp; 4241 4242 /* 4243 * Cluster ops lock their component buffers but not the IO container. 4244 */ 4245 if ((bp->b_flags & B_CLUSTER) != 0) 4246 return; 4247 4248 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4249 if (vfs_badlock_print) 4250 printf( 4251 "VOP_STRATEGY: bp is not locked but should be\n"); 4252 if (vfs_badlock_ddb) 4253 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4254 } 4255#endif 4256} 4257 4258void 4259vop_lock_pre(void *ap) 4260{ 4261#ifdef DEBUG_VFS_LOCKS 4262 struct vop_lock1_args *a = ap; 4263 4264 if ((a->a_flags & LK_INTERLOCK) == 0) 4265 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4266 else 4267 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4268#endif 4269} 4270 4271void 4272vop_lock_post(void *ap, int rc) 4273{ 4274#ifdef DEBUG_VFS_LOCKS 4275 struct vop_lock1_args *a = ap; 4276 4277 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4278 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4279 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4280#endif 4281} 4282 4283void 4284vop_unlock_pre(void *ap) 4285{ 4286#ifdef DEBUG_VFS_LOCKS 4287 struct vop_unlock_args *a = ap; 4288 4289 if (a->a_flags & LK_INTERLOCK) 4290 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4291 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4292#endif 4293} 4294 4295void 4296vop_unlock_post(void *ap, int rc) 4297{ 4298#ifdef DEBUG_VFS_LOCKS 4299 struct vop_unlock_args *a = ap; 4300 4301 if (a->a_flags & LK_INTERLOCK) 4302 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4303#endif 4304} 4305 4306void 4307vop_create_post(void *ap, int rc) 4308{ 4309 struct vop_create_args *a = ap; 4310 4311 if (!rc) 4312 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4313} 4314 4315void 4316vop_deleteextattr_post(void *ap, int rc) 4317{ 4318 struct vop_deleteextattr_args *a = ap; 4319 4320 if (!rc) 4321 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4322} 4323 4324void 4325vop_link_post(void *ap, int rc) 4326{ 4327 struct vop_link_args *a = ap; 4328 4329 if (!rc) { 4330 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4331 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4332 } 4333} 4334 4335void 4336vop_mkdir_post(void *ap, int rc) 4337{ 4338 struct vop_mkdir_args *a = ap; 4339 4340 if (!rc) 4341 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4342} 4343 4344void 4345vop_mknod_post(void *ap, int rc) 4346{ 4347 struct vop_mknod_args *a = ap; 4348 4349 if (!rc) 4350 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4351} 4352 4353void 4354vop_remove_post(void *ap, int rc) 4355{ 4356 struct vop_remove_args *a = ap; 4357 4358 if (!rc) { 4359 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4360 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4361 } 4362} 4363 4364void 4365vop_rename_post(void *ap, int rc) 4366{ 4367 struct vop_rename_args *a = ap; 4368 4369 if (!rc) { 4370 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 4371 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 4372 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4373 if (a->a_tvp) 4374 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4375 } 4376 if (a->a_tdvp != a->a_fdvp) 4377 vdrop(a->a_fdvp); 4378 if (a->a_tvp != a->a_fvp) 4379 vdrop(a->a_fvp); 4380 vdrop(a->a_tdvp); 4381 
if (a->a_tvp) 4382 vdrop(a->a_tvp); 4383} 4384 4385void 4386vop_rmdir_post(void *ap, int rc) 4387{ 4388 struct vop_rmdir_args *a = ap; 4389 4390 if (!rc) { 4391 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4392 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4393 } 4394} 4395 4396void 4397vop_setattr_post(void *ap, int rc) 4398{ 4399 struct vop_setattr_args *a = ap; 4400 4401 if (!rc) 4402 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4403} 4404 4405void 4406vop_setextattr_post(void *ap, int rc) 4407{ 4408 struct vop_setextattr_args *a = ap; 4409 4410 if (!rc) 4411 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4412} 4413 4414void 4415vop_symlink_post(void *ap, int rc) 4416{ 4417 struct vop_symlink_args *a = ap; 4418 4419 if (!rc) 4420 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4421} 4422 4423static struct knlist fs_knlist; 4424 4425static void 4426vfs_event_init(void *arg) 4427{ 4428 knlist_init_mtx(&fs_knlist, NULL); 4429} 4430/* XXX - correct order? */ 4431SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4432 4433void 4434vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4435{ 4436 4437 KNOTE_UNLOCKED(&fs_knlist, event); 4438} 4439 4440static int filt_fsattach(struct knote *kn); 4441static void filt_fsdetach(struct knote *kn); 4442static int filt_fsevent(struct knote *kn, long hint); 4443 4444struct filterops fs_filtops = { 4445 .f_isfd = 0, 4446 .f_attach = filt_fsattach, 4447 .f_detach = filt_fsdetach, 4448 .f_event = filt_fsevent 4449}; 4450 4451static int 4452filt_fsattach(struct knote *kn) 4453{ 4454 4455 kn->kn_flags |= EV_CLEAR; 4456 knlist_add(&fs_knlist, kn, 0); 4457 return (0); 4458} 4459 4460static void 4461filt_fsdetach(struct knote *kn) 4462{ 4463 4464 knlist_remove(&fs_knlist, kn, 0); 4465} 4466 4467static int 4468filt_fsevent(struct knote *kn, long hint) 4469{ 4470 4471 kn->kn_fflags |= hint; 4472 return (kn->kn_fflags != 0); 4473} 4474 4475static int 4476sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4477{ 4478 struct vfsidctl vc; 4479 int error; 4480 struct mount *mp; 4481 4482 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4483 if (error) 4484 return (error); 4485 if (vc.vc_vers != VFS_CTL_VERS1) 4486 return (EINVAL); 4487 mp = vfs_getvfs(&vc.vc_fsid); 4488 if (mp == NULL) 4489 return (ENOENT); 4490 /* ensure that a specific sysctl goes to the right filesystem. */ 4491 if (strcmp(vc.vc_fstypename, "*") != 0 && 4492 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4493 vfs_rel(mp); 4494 return (EINVAL); 4495 } 4496 VCTLTOREQ(&vc, req); 4497 error = VFS_SYSCTL(mp, vc.vc_op, req); 4498 vfs_rel(mp); 4499 return (error); 4500} 4501 4502SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 4503 NULL, 0, sysctl_vfs_ctl, "", 4504 "Sysctl by fsid"); 4505 4506/* 4507 * Function to initialize a va_filerev field sensibly. 4508 * XXX: Wouldn't a random number make a lot more sense ?? 
4509 */ 4510u_quad_t 4511init_va_filerev(void) 4512{ 4513 struct bintime bt; 4514 4515 getbinuptime(&bt); 4516 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4517} 4518 4519static int filt_vfsread(struct knote *kn, long hint); 4520static int filt_vfswrite(struct knote *kn, long hint); 4521static int filt_vfsvnode(struct knote *kn, long hint); 4522static void filt_vfsdetach(struct knote *kn); 4523static struct filterops vfsread_filtops = { 4524 .f_isfd = 1, 4525 .f_detach = filt_vfsdetach, 4526 .f_event = filt_vfsread 4527}; 4528static struct filterops vfswrite_filtops = { 4529 .f_isfd = 1, 4530 .f_detach = filt_vfsdetach, 4531 .f_event = filt_vfswrite 4532}; 4533static struct filterops vfsvnode_filtops = { 4534 .f_isfd = 1, 4535 .f_detach = filt_vfsdetach, 4536 .f_event = filt_vfsvnode 4537}; 4538 4539static void 4540vfs_knllock(void *arg) 4541{ 4542 struct vnode *vp = arg; 4543 4544 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4545} 4546 4547static void 4548vfs_knlunlock(void *arg) 4549{ 4550 struct vnode *vp = arg; 4551 4552 VOP_UNLOCK(vp, 0); 4553} 4554 4555static void 4556vfs_knl_assert_locked(void *arg) 4557{ 4558#ifdef DEBUG_VFS_LOCKS 4559 struct vnode *vp = arg; 4560 4561 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 4562#endif 4563} 4564 4565static void 4566vfs_knl_assert_unlocked(void *arg) 4567{ 4568#ifdef DEBUG_VFS_LOCKS 4569 struct vnode *vp = arg; 4570 4571 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 4572#endif 4573} 4574 4575int 4576vfs_kqfilter(struct vop_kqfilter_args *ap) 4577{ 4578 struct vnode *vp = ap->a_vp; 4579 struct knote *kn = ap->a_kn; 4580 struct knlist *knl; 4581 4582 switch (kn->kn_filter) { 4583 case EVFILT_READ: 4584 kn->kn_fop = &vfsread_filtops; 4585 break; 4586 case EVFILT_WRITE: 4587 kn->kn_fop = &vfswrite_filtops; 4588 break; 4589 case EVFILT_VNODE: 4590 kn->kn_fop = &vfsvnode_filtops; 4591 break; 4592 default: 4593 return (EINVAL); 4594 } 4595 4596 kn->kn_hook = (caddr_t)vp; 4597 4598 v_addpollinfo(vp); 4599 if (vp->v_pollinfo == NULL) 4600 return (ENOMEM); 4601 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 4602 vhold(vp); 4603 knlist_add(knl, kn, 0); 4604 4605 return (0); 4606} 4607 4608/* 4609 * Detach knote from vnode 4610 */ 4611static void 4612filt_vfsdetach(struct knote *kn) 4613{ 4614 struct vnode *vp = (struct vnode *)kn->kn_hook; 4615 4616 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 4617 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 4618 vdrop(vp); 4619} 4620 4621/*ARGSUSED*/ 4622static int 4623filt_vfsread(struct knote *kn, long hint) 4624{ 4625 struct vnode *vp = (struct vnode *)kn->kn_hook; 4626 struct vattr va; 4627 int res; 4628 4629 /* 4630 * filesystem is gone, so set the EOF flag and schedule 4631 * the knote for deletion. 4632 */ 4633 if (hint == NOTE_REVOKE) { 4634 VI_LOCK(vp); 4635 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4636 VI_UNLOCK(vp); 4637 return (1); 4638 } 4639 4640 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 4641 return (0); 4642 4643 VI_LOCK(vp); 4644 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 4645 res = (kn->kn_data != 0); 4646 VI_UNLOCK(vp); 4647 return (res); 4648} 4649 4650/*ARGSUSED*/ 4651static int 4652filt_vfswrite(struct knote *kn, long hint) 4653{ 4654 struct vnode *vp = (struct vnode *)kn->kn_hook; 4655 4656 VI_LOCK(vp); 4657 4658 /* 4659 * filesystem is gone, so set the EOF flag and schedule 4660 * the knote for deletion. 
4661 */ 4662 if (hint == NOTE_REVOKE) 4663 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4664 4665 kn->kn_data = 0; 4666 VI_UNLOCK(vp); 4667 return (1); 4668} 4669 4670static int 4671filt_vfsvnode(struct knote *kn, long hint) 4672{ 4673 struct vnode *vp = (struct vnode *)kn->kn_hook; 4674 int res; 4675 4676 VI_LOCK(vp); 4677 if (kn->kn_sfflags & hint) 4678 kn->kn_fflags |= hint; 4679 if (hint == NOTE_REVOKE) { 4680 kn->kn_flags |= EV_EOF; 4681 VI_UNLOCK(vp); 4682 return (1); 4683 } 4684 res = (kn->kn_fflags != 0); 4685 VI_UNLOCK(vp); 4686 return (res); 4687} 4688 4689int 4690vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 4691{ 4692 int error; 4693 4694 if (dp->d_reclen > ap->a_uio->uio_resid) 4695 return (ENAMETOOLONG); 4696 error = uiomove(dp, dp->d_reclen, ap->a_uio); 4697 if (error) { 4698 if (ap->a_ncookies != NULL) { 4699 if (ap->a_cookies != NULL) 4700 free(ap->a_cookies, M_TEMP); 4701 ap->a_cookies = NULL; 4702 *ap->a_ncookies = 0; 4703 } 4704 return (error); 4705 } 4706 if (ap->a_ncookies == NULL) 4707 return (0); 4708 4709 KASSERT(ap->a_cookies, 4710 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 4711 4712 *ap->a_cookies = realloc(*ap->a_cookies, 4713 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 4714 (*ap->a_cookies)[*ap->a_ncookies] = off; 4715 return (0); 4716} 4717 4718/* 4719 * Mark for update the access time of the file if the filesystem 4720 * supports VOP_MARKATIME. This functionality is used by execve and 4721 * mmap, so we want to avoid the I/O implied by directly setting 4722 * va_atime for the sake of efficiency. 4723 */ 4724void 4725vfs_mark_atime(struct vnode *vp, struct ucred *cred) 4726{ 4727 struct mount *mp; 4728 4729 mp = vp->v_mount; 4730 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 4731 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 4732 (void)VOP_MARKATIME(vp); 4733} 4734 4735/* 4736 * The purpose of this routine is to remove granularity from accmode_t, 4737 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 4738 * VADMIN and VAPPEND. 4739 * 4740 * If it returns 0, the caller is supposed to continue with the usual 4741 * access checks using 'accmode' as modified by this routine. If it 4742 * returns nonzero value, the caller is supposed to return that value 4743 * as errno. 4744 * 4745 * Note that after this routine runs, accmode may be zero. 4746 */ 4747int 4748vfs_unixify_accmode(accmode_t *accmode) 4749{ 4750 /* 4751 * There is no way to specify explicit "deny" rule using 4752 * file mode or POSIX.1e ACLs. 4753 */ 4754 if (*accmode & VEXPLICIT_DENY) { 4755 *accmode = 0; 4756 return (0); 4757 } 4758 4759 /* 4760 * None of these can be translated into usual access bits. 4761 * Also, the common case for NFSv4 ACLs is to not contain 4762 * either of these bits. Caller should check for VWRITE 4763 * on the containing directory instead. 4764 */ 4765 if (*accmode & (VDELETE_CHILD | VDELETE)) 4766 return (EPERM); 4767 4768 if (*accmode & VADMIN_PERMS) { 4769 *accmode &= ~VADMIN_PERMS; 4770 *accmode |= VADMIN; 4771 } 4772 4773 /* 4774 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 4775 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 4776 */ 4777 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 4778 4779 return (0); 4780} 4781 4782/* 4783 * These are helper functions for filesystems to traverse all 4784 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 4785 * 4786 * This interface replaces MNT_VNODE_FOREACH. 
4787 */ 4788 4789MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 4790 4791struct vnode * 4792__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 4793{ 4794 struct vnode *vp; 4795 4796 if (should_yield()) 4797 kern_yield(PRI_USER); 4798 MNT_ILOCK(mp); 4799 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4800 vp = TAILQ_NEXT(*mvp, v_nmntvnodes); 4801 while (vp != NULL && (vp->v_type == VMARKER || 4802 (vp->v_iflag & VI_DOOMED) != 0)) 4803 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4804 4805 /* Check if we are done */ 4806 if (vp == NULL) { 4807 __mnt_vnode_markerfree_all(mvp, mp); 4808 /* MNT_IUNLOCK(mp); -- done in above function */ 4809 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 4810 return (NULL); 4811 } 4812 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4813 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4814 VI_LOCK(vp); 4815 MNT_IUNLOCK(mp); 4816 return (vp); 4817} 4818 4819struct vnode * 4820__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 4821{ 4822 struct vnode *vp; 4823 4824 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4825 MNT_ILOCK(mp); 4826 MNT_REF(mp); 4827 (*mvp)->v_type = VMARKER; 4828 4829 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 4830 while (vp != NULL && (vp->v_type == VMARKER || 4831 (vp->v_iflag & VI_DOOMED) != 0)) 4832 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4833 4834 /* Check if we are done */ 4835 if (vp == NULL) { 4836 MNT_REL(mp); 4837 MNT_IUNLOCK(mp); 4838 free(*mvp, M_VNODE_MARKER); 4839 *mvp = NULL; 4840 return (NULL); 4841 } 4842 (*mvp)->v_mount = mp; 4843 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4844 VI_LOCK(vp); 4845 MNT_IUNLOCK(mp); 4846 return (vp); 4847} 4848 4849 4850void 4851__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 4852{ 4853 4854 if (*mvp == NULL) { 4855 MNT_IUNLOCK(mp); 4856 return; 4857 } 4858 4859 mtx_assert(MNT_MTX(mp), MA_OWNED); 4860 4861 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4862 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4863 MNT_REL(mp); 4864 MNT_IUNLOCK(mp); 4865 free(*mvp, M_VNODE_MARKER); 4866 *mvp = NULL; 4867} 4868 4869/* 4870 * These are helper functions for filesystems to traverse their 4871 * active vnodes. 
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 4872 */ 4873static void 4874mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4875{ 4876 4877 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4878 4879 MNT_ILOCK(mp); 4880 MNT_REL(mp); 4881 MNT_IUNLOCK(mp); 4882 free(*mvp, M_VNODE_MARKER); 4883 *mvp = NULL; 4884} 4885 4886static struct vnode * 4887mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4888{ 4889 struct vnode *vp, *nvp; 4890 4891 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 4892 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4893restart: 4894 vp = TAILQ_NEXT(*mvp, v_actfreelist); 4895 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4896 while (vp != NULL) { 4897 if (vp->v_type == VMARKER) { 4898 vp = TAILQ_NEXT(vp, v_actfreelist); 4899 continue; 4900 } 4901 if (!VI_TRYLOCK(vp)) { 4902 if (mp_ncpus == 1 || should_yield()) { 4903 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4904 mtx_unlock(&vnode_free_list_mtx); 4905 pause("vnacti", 1); 4906 mtx_lock(&vnode_free_list_mtx); 4907 goto restart; 4908 } 4909 continue; 4910 } 4911 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 4912 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 4913 ("alien vnode on the active list %p %p", vp, mp)); 4914 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 4915 break; 4916 nvp = TAILQ_NEXT(vp, v_actfreelist); 4917 VI_UNLOCK(vp); 4918 vp = nvp; 4919 } 4920 4921 /* Check if we are done */ 4922 if (vp == NULL) { 4923 mtx_unlock(&vnode_free_list_mtx); 4924 mnt_vnode_markerfree_active(mvp, mp); 4925 return (NULL); 4926 } 4927 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 4928 mtx_unlock(&vnode_free_list_mtx); 4929 ASSERT_VI_LOCKED(vp, "active iter"); 4930 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 4931 return (vp); 4932} 4933 4934struct vnode * 4935__mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4936{ 4937 4938 if (should_yield()) 4939 kern_yield(PRI_USER); 4940 mtx_lock(&vnode_free_list_mtx); 4941 return (mnt_vnode_next_active(mvp, mp)); 4942} 4943 4944struct vnode * 4945__mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 4946{ 4947 struct vnode *vp; 4948 4949 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4950 MNT_ILOCK(mp); 4951 MNT_REF(mp); 4952 MNT_IUNLOCK(mp); 4953 (*mvp)->v_type = VMARKER; 4954 (*mvp)->v_mount = mp; 4955 4956 mtx_lock(&vnode_free_list_mtx); 4957 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 4958 if (vp == NULL) { 4959 mtx_unlock(&vnode_free_list_mtx); 4960 mnt_vnode_markerfree_active(mvp, mp); 4961 return (NULL); 4962 } 4963 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4964 return (mnt_vnode_next_active(mvp, mp)); 4965} 4966 4967void 4968__mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4969{ 4970 4971 if (*mvp == NULL) 4972 return; 4973 4974 mtx_lock(&vnode_free_list_mtx); 4975 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4976 mtx_unlock(&vnode_free_list_mtx); 4977 mnt_vnode_markerfree_active(mvp, mp); 4978}
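The flag-decoding pattern used by vn_printf() and by the MNT_FLAG/MNT_KERN_FLAG macros in db_show_mount() above (test each bit, append its name with strlcat(), then dump whatever is left over in hex) is handy outside the kernel as well. The following is a minimal userland sketch of the same idea; the F_* names are made up for illustration and the only non-standard dependency is strlcat() from BSD libc.

#include <stdio.h>
#include <string.h>

/* Hypothetical flag bits, standing in for values like VV_ROOT or MNT_RDONLY. */
#define F_ROOT          0x0001
#define F_TEXT          0x0002
#define F_SYSTEM        0x0004

static void
decode_flags(unsigned long flags, char *buf, size_t len)
{
        buf[0] = '\0';
#define ADD_FLAG(f) do {                                                \
        if (flags & (f)) {                                              \
                if (buf[0] != '\0')                                     \
                        strlcat(buf, "|", len);                         \
                strlcat(buf, #f, len);                                  \
                flags &= ~(unsigned long)(f);                           \
        }                                                               \
} while (0)
        ADD_FLAG(F_ROOT);
        ADD_FLAG(F_TEXT);
        ADD_FLAG(F_SYSTEM);
#undef ADD_FLAG
        if (flags != 0) {
                char rest[32];

                snprintf(rest, sizeof(rest), "%s0x%lx",
                    buf[0] != '\0' ? "|" : "", flags);
                strlcat(buf, rest, len);
        }
}

int
main(void)
{
        char buf[128];

        decode_flags(F_ROOT | F_SYSTEM | 0x8000, buf, sizeof(buf));
        printf("flags (%s)\n", buf);    /* flags (F_ROOT|F_SYSTEM|0x8000) */
        return (0);
}

Unlike vn_printf(), which always prepends the separator and then prints buf + 1, this sketch adds the separator only when the buffer is non-empty; either way avoids a leading "|".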
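db_show_mount() above formats the struct statfs cached in mp->mnt_stat. The same fields are visible from userland via statfs(2); the small sketch below prints the subset shown on the mnt_stat line and is illustrative only.

#include <sys/param.h>
#include <sys/mount.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
        struct statfs sf;

        if (argc != 2)
                errx(1, "usage: showmount <path>");
        if (statfs(argv[1], &sf) != 0)
                err(1, "statfs");
        printf("%s on %s (%s)\n", sf.f_mntfromname, sf.f_mntonname,
            sf.f_fstypename);
        printf("  bsize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju\n",
            (uintmax_t)sf.f_bsize, (uintmax_t)sf.f_blocks,
            (uintmax_t)sf.f_bfree, (intmax_t)sf.f_bavail,
            (uintmax_t)sf.f_files);
        return (0);
}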
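sysctl_vfs_conflist() above is the backend of the vfs.conflist OID: it emits one struct xvfsconf per configured filesystem, which is roughly how lsvfs(1) obtains its listing. A minimal consumer, assuming the xvfsconf layout from <sys/mount.h>, might look like this.

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        struct xvfsconf *xvfsp;
        size_t buflen = 0;
        u_int i, cnt;

        /* First call sizes the buffer, second call fills it. */
        if (sysctlbyname("vfs.conflist", NULL, &buflen, NULL, 0) != 0)
                err(1, "sysctlbyname(vfs.conflist)");
        if ((xvfsp = malloc(buflen)) == NULL)
                err(1, "malloc");
        if (sysctlbyname("vfs.conflist", xvfsp, &buflen, NULL, 0) != 0)
                err(1, "sysctlbyname(vfs.conflist)");

        cnt = buflen / sizeof(*xvfsp);
        for (i = 0; i < cnt; i++)
                printf("%-16s typenum %d, %d active mounts, flags 0x%x\n",
                    xvfsp[i].vfc_name, xvfsp[i].vfc_typenum,
                    xvfsp[i].vfc_refcount, xvfsp[i].vfc_flags);
        free(xvfsp);
        return (0);
}

A robust consumer would retry when the list grows between the two calls (sysctl(3) returns ENOMEM in that case); that is omitted here for brevity.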
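The static start/incr/next sequence in vfs_allocate_syncvnode() above exists only to scatter syncer vnodes across the delay queues, so filesystems mounted back to back do not all sync in the same second. The self-contained sketch below reproduces that arithmetic with an assumed syncer_maxdelay of 32 and syncdelay of 30, so the resulting slots can be printed and inspected.

#include <stdio.h>

static long start, incr, next;          /* same statics as the kernel code */
static int syncer_maxdelay = 32;        /* assumed SYNCER_MAXDELAY */
static int syncdelay = 30;              /* assumed default sync delay */

/* Slot-picking logic copied from vfs_allocate_syncvnode(). */
static int
pick_slot(void)
{
        next += incr;
        if (next == 0 || next > syncer_maxdelay) {
                start /= 2;
                incr /= 2;
                if (start == 0) {
                        start = syncer_maxdelay / 2;
                        incr = syncer_maxdelay;
                }
                next = start;
        }
        return (syncdelay > 0 ? next % syncdelay : 0);
}

int
main(void)
{
        int i;

        /* Ten back-to-back "mounts" land on well-scattered slots. */
        for (i = 0; i < 10; i++)
                printf("mount %2d -> slot %d\n", i, pick_slot());
        return (0);
}

The first few mounts land at 16, 8, 24, 4, and so on: each time the sequence runs past syncer_maxdelay the stride is halved, so later mounts fill in the gaps between earlier ones.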
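The DAC portion of vaccess() above picks exactly one permission class, owner, group, or other, and requires that this class grant every requested bit; only then does the privilege fallback run. The userland model below follows the same cascade against a struct stat. It is a simplified sketch: VADMIN/VAPPEND and the priv_check_cred() fallback are omitted, and in_group() is a local helper, not a libc routine.

#include <sys/stat.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Is gid the caller's effective gid or one of its supplementary groups? */
static bool
in_group(gid_t gid)
{
        gid_t groups[NGROUPS_MAX];
        int i, n;

        if (getegid() == gid)
                return (true);
        n = getgroups(NGROUPS_MAX, groups);
        for (i = 0; i < n; i++)
                if (groups[i] == gid)
                        return (true);
        return (false);
}

/*
 * Choose one permission class, owner, group, or other, and require that
 * it grants every bit in 'want' ('want' uses the "other" bit positions,
 * e.g. 05 for read+execute).
 */
static bool
dac_ok(const struct stat *sb, mode_t want)
{
        mode_t granted;

        if (geteuid() == sb->st_uid)
                granted = (sb->st_mode >> 6) & 07;
        else if (in_group(sb->st_gid))
                granted = (sb->st_mode >> 3) & 07;
        else
                granted = sb->st_mode & 07;
        return ((want & granted) == want);
}

int
main(int argc, char **argv)
{
        struct stat sb;

        if (argc != 2 || stat(argv[1], &sb) != 0)
                return (1);
        printf("%s: read %s, write %s\n", argv[1],
            dac_ok(&sb, 04) ? "yes" : "no",
            dac_ok(&sb, 02) ? "yes" : "no");
        return (0);
}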
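extattr_check_cred() above is why the two extended attribute namespaces behave so differently: EXTATTR_NAMESPACE_USER is gated by an ordinary VOP_ACCESS() check, while EXTATTR_NAMESPACE_SYSTEM requires PRIV_VFS_EXTATTR_SYSTEM and is refused to jailed credentials. The userland sketch below exercises both namespaces through extattr_set_file(2) and extattr_get_file(2); the attribute names are illustrative, and the system-namespace call is expected to fail for an unprivileged caller.

#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <stdio.h>
#include <string.h>

int
main(int argc, char **argv)
{
        char buf[256];
        ssize_t n;

        if (argc != 2)
                errx(1, "usage: xattr <file>");

        /* The user namespace is checked with VOP_ACCESS(), see above. */
        if (extattr_set_file(argv[1], EXTATTR_NAMESPACE_USER, "comment",
            "hello", strlen("hello")) < 0)
                err(1, "extattr_set_file(user)");
        n = extattr_get_file(argv[1], EXTATTR_NAMESPACE_USER, "comment",
            buf, sizeof(buf));
        if (n < 0)
                err(1, "extattr_get_file(user)");
        printf("user.comment = %.*s\n", (int)n, buf);

        /*
         * The system namespace goes through priv_check_cred()
         * (PRIV_VFS_EXTATTR_SYSTEM) and fails for unprivileged or
         * jailed callers.
         */
        if (extattr_get_file(argv[1], EXTATTR_NAMESPACE_SYSTEM, "md5",
            buf, sizeof(buf)) < 0)
                warn("extattr_get_file(system)");
        return (0);
}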
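init_va_filerev() above packs the boot-relative bintime into a 64-bit revision number: the seconds land in the high 32 bits and the top of the binary fraction in the low 32 bits, so successive calls yield increasing values. A userland analogue, using CLOCK_MONOTONIC as an approximation of the kernel's uptime clock and converting nanoseconds to a 32-bit fraction, is sketched below.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/*
 * Uptime seconds in the high word, fractional second (converted from
 * nanoseconds to a 32-bit binary fraction) in the low word.
 */
static uint64_t
filerev_seed(void)
{
        struct timespec ts;
        uint64_t frac32;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        frac32 = ((uint64_t)ts.tv_nsec << 32) / 1000000000ULL;
        return (((uint64_t)ts.tv_sec << 32) | frac32);
}

int
main(void)
{
        printf("va_filerev seed: 0x%016jx\n", (uintmax_t)filerev_seed());
        return (0);
}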
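vfs_kqfilter() and filt_vfsvnode() above are the kernel half of EVFILT_VNODE; the vop_*_post() hooks feed them NOTE_WRITE, NOTE_ATTRIB, NOTE_LINK, NOTE_RENAME and NOTE_DELETE. The program below is the matching userland half: it watches a single path and prints each note as it arrives, exiting when the file is deleted.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
        struct kevent ev;
        int fd, kq;

        if (argc != 2)
                errx(1, "usage: watch <path>");
        if ((fd = open(argv[1], O_RDONLY)) < 0)
                err(1, "open");
        if ((kq = kqueue()) < 0)
                err(1, "kqueue");

        /* Subscribe to the notes that the post-VOP hooks generate. */
        EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
            NOTE_WRITE | NOTE_ATTRIB | NOTE_RENAME | NOTE_DELETE, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) < 0)
                err(1, "kevent register");

        for (;;) {
                if (kevent(kq, NULL, 0, &ev, 1, NULL) <= 0)
                        err(1, "kevent wait");
                printf("fflags 0x%x%s%s%s%s\n", (unsigned)ev.fflags,
                    (ev.fflags & NOTE_WRITE) ? " WRITE" : "",
                    (ev.fflags & NOTE_ATTRIB) ? " ATTRIB" : "",
                    (ev.fflags & NOTE_RENAME) ? " RENAME" : "",
                    (ev.fflags & NOTE_DELETE) ? " DELETE" : "");
                if (ev.fflags & NOTE_DELETE)
                        break;
        }
        return (0);
}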
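__mnt_vnode_first_all() and __mnt_vnode_next_all() above keep their place in mnt_nvnodelist with a VMARKER vnode, which lets them drop the mount interlock between iterations without losing their position while other threads add or remove vnodes. The userland sketch below demonstrates the same marker technique on a plain TAILQ protected by a pthread mutex; the is_marker flag stands in for v_type == VMARKER, and the doomed-vnode skipping is omitted.

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>

struct node {
        TAILQ_ENTRY(node) link;
        int     is_marker;      /* stands in for v_type == VMARKER */
        int     value;
};
TAILQ_HEAD(nodelist, node);

static struct nodelist list = TAILQ_HEAD_INITIALIZER(list);
static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the element after the marker and move the marker past it, so
 * the list lock can be dropped between calls without losing our place.
 */
static struct node *
iter_next(struct node *marker)
{
        struct node *np;

        pthread_mutex_lock(&list_mtx);
        np = TAILQ_NEXT(marker, link);
        while (np != NULL && np->is_marker)     /* skip other markers */
                np = TAILQ_NEXT(np, link);
        TAILQ_REMOVE(&list, marker, link);
        if (np != NULL)
                TAILQ_INSERT_AFTER(&list, np, marker, link);
        pthread_mutex_unlock(&list_mtx);
        return (np);
}

int
main(void)
{
        struct node elems[4], marker = { .is_marker = 1 };
        struct node *np;
        int i;

        for (i = 0; i < 4; i++) {
                elems[i].is_marker = 0;
                elems[i].value = i;
                TAILQ_INSERT_TAIL(&list, &elems[i], link);
        }
        TAILQ_INSERT_HEAD(&list, &marker, link);
        while ((np = iter_next(&marker)) != NULL)
                printf("visited %d\n", np->value);
        return (0);
}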
3339 */ 3340 xvfsp.vfc_vfsops = NULL; 3341 xvfsp.vfc_next = NULL; 3342 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3343} 3344 3345#ifdef COMPAT_FREEBSD32 3346struct xvfsconf32 { 3347 uint32_t vfc_vfsops; 3348 char vfc_name[MFSNAMELEN]; 3349 int32_t vfc_typenum; 3350 int32_t vfc_refcount; 3351 int32_t vfc_flags; 3352 uint32_t vfc_next; 3353}; 3354 3355static int 3356vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3357{ 3358 struct xvfsconf32 xvfsp; 3359 3360 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3361 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3362 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3363 xvfsp.vfc_flags = vfsp->vfc_flags; 3364 xvfsp.vfc_vfsops = 0; 3365 xvfsp.vfc_next = 0; 3366 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3367} 3368#endif 3369 3370/* 3371 * Top level filesystem related information gathering. 3372 */ 3373static int 3374sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3375{ 3376 struct vfsconf *vfsp; 3377 int error; 3378 3379 error = 0; 3380 vfsconf_slock(); 3381 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3382#ifdef COMPAT_FREEBSD32 3383 if (req->flags & SCTL_MASK32) 3384 error = vfsconf2x32(req, vfsp); 3385 else 3386#endif 3387 error = vfsconf2x(req, vfsp); 3388 if (error) 3389 break; 3390 } 3391 vfsconf_sunlock(); 3392 return (error); 3393} 3394 3395SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3396 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3397 "S,xvfsconf", "List of all configured filesystems"); 3398 3399#ifndef BURN_BRIDGES 3400static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3401 3402static int 3403vfs_sysctl(SYSCTL_HANDLER_ARGS) 3404{ 3405 int *name = (int *)arg1 - 1; /* XXX */ 3406 u_int namelen = arg2 + 1; /* XXX */ 3407 struct vfsconf *vfsp; 3408 3409 log(LOG_WARNING, "userland calling deprecated sysctl, " 3410 "please rebuild world\n"); 3411 3412#if 1 || defined(COMPAT_PRELITE2) 3413 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 3414 if (namelen == 1) 3415 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3416#endif 3417 3418 switch (name[1]) { 3419 case VFS_MAXTYPENUM: 3420 if (namelen != 2) 3421 return (ENOTDIR); 3422 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3423 case VFS_CONF: 3424 if (namelen != 3) 3425 return (ENOTDIR); /* overloaded */ 3426 vfsconf_slock(); 3427 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3428 if (vfsp->vfc_typenum == name[2]) 3429 break; 3430 } 3431 vfsconf_sunlock(); 3432 if (vfsp == NULL) 3433 return (EOPNOTSUPP); 3434#ifdef COMPAT_FREEBSD32 3435 if (req->flags & SCTL_MASK32) 3436 return (vfsconf2x32(req, vfsp)); 3437 else 3438#endif 3439 return (vfsconf2x(req, vfsp)); 3440 } 3441 return (EOPNOTSUPP); 3442} 3443 3444static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3445 CTLFLAG_MPSAFE, vfs_sysctl, 3446 "Generic filesystem"); 3447 3448#if 1 || defined(COMPAT_PRELITE2) 3449 3450static int 3451sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3452{ 3453 int error; 3454 struct vfsconf *vfsp; 3455 struct ovfsconf ovfs; 3456 3457 vfsconf_slock(); 3458 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3459 bzero(&ovfs, sizeof(ovfs)); 3460 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3461 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3462 ovfs.vfc_index = vfsp->vfc_typenum; 3463 ovfs.vfc_refcount = vfsp->vfc_refcount; 3464 ovfs.vfc_flags = vfsp->vfc_flags; 3465 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3466 if (error != 0) { 3467 vfsconf_sunlock(); 3468 return (error); 3469 } 3470 } 3471 vfsconf_sunlock(); 3472 return (0); 3473} 3474 3475#endif /* 1 || COMPAT_PRELITE2 */ 3476#endif /* !BURN_BRIDGES */ 3477 3478#define KINFO_VNODESLOP 10 3479#ifdef notyet 3480/* 3481 * Dump vnode list (via sysctl). 3482 */ 3483/* ARGSUSED */ 3484static int 3485sysctl_vnode(SYSCTL_HANDLER_ARGS) 3486{ 3487 struct xvnode *xvn; 3488 struct mount *mp; 3489 struct vnode *vp; 3490 int error, len, n; 3491 3492 /* 3493 * Stale numvnodes access is not fatal here. 3494 */ 3495 req->lock = 0; 3496 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3497 if (!req->oldptr) 3498 /* Make an estimate */ 3499 return (SYSCTL_OUT(req, 0, len)); 3500 3501 error = sysctl_wire_old_buffer(req, 0); 3502 if (error != 0) 3503 return (error); 3504 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3505 n = 0; 3506 mtx_lock(&mountlist_mtx); 3507 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3508 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3509 continue; 3510 MNT_ILOCK(mp); 3511 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3512 if (n == len) 3513 break; 3514 vref(vp); 3515 xvn[n].xv_size = sizeof *xvn; 3516 xvn[n].xv_vnode = vp; 3517 xvn[n].xv_id = 0; /* XXX compat */ 3518#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3519 XV_COPY(usecount); 3520 XV_COPY(writecount); 3521 XV_COPY(holdcnt); 3522 XV_COPY(mount); 3523 XV_COPY(numoutput); 3524 XV_COPY(type); 3525#undef XV_COPY 3526 xvn[n].xv_flag = vp->v_vflag; 3527 3528 switch (vp->v_type) { 3529 case VREG: 3530 case VDIR: 3531 case VLNK: 3532 break; 3533 case VBLK: 3534 case VCHR: 3535 if (vp->v_rdev == NULL) { 3536 vrele(vp); 3537 continue; 3538 } 3539 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3540 break; 3541 case VSOCK: 3542 xvn[n].xv_socket = vp->v_socket; 3543 break; 3544 case VFIFO: 3545 xvn[n].xv_fifo = vp->v_fifoinfo; 3546 break; 3547 case VNON: 3548 case VBAD: 3549 default: 3550 /* shouldn't happen? 
*/ 3551 vrele(vp); 3552 continue; 3553 } 3554 vrele(vp); 3555 ++n; 3556 } 3557 MNT_IUNLOCK(mp); 3558 mtx_lock(&mountlist_mtx); 3559 vfs_unbusy(mp); 3560 if (n == len) 3561 break; 3562 } 3563 mtx_unlock(&mountlist_mtx); 3564 3565 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3566 free(xvn, M_TEMP); 3567 return (error); 3568} 3569 3570SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 3571 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 3572 ""); 3573#endif 3574 3575static void 3576unmount_or_warn(struct mount *mp) 3577{ 3578 int error; 3579 3580 error = dounmount(mp, MNT_FORCE, curthread); 3581 if (error != 0 && strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 3582 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 3583 if (error == EBUSY) 3584 printf("BUSY)\n"); 3585 else 3586 printf("%d)\n", error); 3587 } 3588} 3589 3590/* 3591 * Unmount all filesystems. The list is traversed in reverse order 3592 * of mounting to avoid dependencies. 3593 */ 3594void 3595vfs_unmountall(void) 3596{ 3597 struct mount *mp, *tmp; 3598 3599 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 3600 3601 /* 3602 * Since this only runs when rebooting, it is not interlocked. 3603 */ 3604 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 3605 vfs_ref(mp); 3606 3607 /* 3608 * Forcibly unmounting "/dev" before "/" would prevent clean 3609 * unmount of the latter. 3610 */ 3611 if (mp == rootdevmp) 3612 continue; 3613 3614 unmount_or_warn(mp); 3615 } 3616 3617 if (rootdevmp != NULL) 3618 unmount_or_warn(rootdevmp); 3619} 3620 3621/* 3622 * perform msync on all vnodes under a mount point 3623 * the mount point must be locked. 3624 */ 3625void 3626vfs_msync(struct mount *mp, int flags) 3627{ 3628 struct vnode *vp, *mvp; 3629 struct vm_object *obj; 3630 3631 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 3632 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 3633 obj = vp->v_object; 3634 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 3635 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 3636 if (!vget(vp, 3637 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 3638 curthread)) { 3639 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 3640 vput(vp); 3641 continue; 3642 } 3643 3644 obj = vp->v_object; 3645 if (obj != NULL) { 3646 VM_OBJECT_WLOCK(obj); 3647 vm_object_page_clean(obj, 0, 0, 3648 flags == MNT_WAIT ? 3649 OBJPC_SYNC : OBJPC_NOSYNC); 3650 VM_OBJECT_WUNLOCK(obj); 3651 } 3652 vput(vp); 3653 } 3654 } else 3655 VI_UNLOCK(vp); 3656 } 3657} 3658 3659static void 3660destroy_vpollinfo_free(struct vpollinfo *vi) 3661{ 3662 3663 knlist_destroy(&vi->vpi_selinfo.si_note); 3664 mtx_destroy(&vi->vpi_lock); 3665 uma_zfree(vnodepoll_zone, vi); 3666} 3667 3668static void 3669destroy_vpollinfo(struct vpollinfo *vi) 3670{ 3671 3672 knlist_clear(&vi->vpi_selinfo.si_note, 1); 3673 seldrain(&vi->vpi_selinfo); 3674 destroy_vpollinfo_free(vi); 3675} 3676 3677/* 3678 * Initalize per-vnode helper structure to hold poll-related state. 
3679 */ 3680void 3681v_addpollinfo(struct vnode *vp) 3682{ 3683 struct vpollinfo *vi; 3684 3685 if (vp->v_pollinfo != NULL) 3686 return; 3687 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 3688 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 3689 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 3690 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 3691 VI_LOCK(vp); 3692 if (vp->v_pollinfo != NULL) { 3693 VI_UNLOCK(vp); 3694 destroy_vpollinfo_free(vi); 3695 return; 3696 } 3697 vp->v_pollinfo = vi; 3698 VI_UNLOCK(vp); 3699} 3700 3701/* 3702 * Record a process's interest in events which might happen to 3703 * a vnode. Because poll uses the historic select-style interface 3704 * internally, this routine serves as both the ``check for any 3705 * pending events'' and the ``record my interest in future events'' 3706 * functions. (These are done together, while the lock is held, 3707 * to avoid race conditions.) 3708 */ 3709int 3710vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3711{ 3712 3713 v_addpollinfo(vp); 3714 mtx_lock(&vp->v_pollinfo->vpi_lock); 3715 if (vp->v_pollinfo->vpi_revents & events) { 3716 /* 3717 * This leaves events we are not interested 3718 * in available for the other process which 3719 * which presumably had requested them 3720 * (otherwise they would never have been 3721 * recorded). 3722 */ 3723 events &= vp->v_pollinfo->vpi_revents; 3724 vp->v_pollinfo->vpi_revents &= ~events; 3725 3726 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3727 return (events); 3728 } 3729 vp->v_pollinfo->vpi_events |= events; 3730 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 3731 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3732 return (0); 3733} 3734 3735/* 3736 * Routine to create and manage a filesystem syncer vnode. 3737 */ 3738#define sync_close ((int (*)(struct vop_close_args *))nullop) 3739static int sync_fsync(struct vop_fsync_args *); 3740static int sync_inactive(struct vop_inactive_args *); 3741static int sync_reclaim(struct vop_reclaim_args *); 3742 3743static struct vop_vector sync_vnodeops = { 3744 .vop_bypass = VOP_EOPNOTSUPP, 3745 .vop_close = sync_close, /* close */ 3746 .vop_fsync = sync_fsync, /* fsync */ 3747 .vop_inactive = sync_inactive, /* inactive */ 3748 .vop_reclaim = sync_reclaim, /* reclaim */ 3749 .vop_lock1 = vop_stdlock, /* lock */ 3750 .vop_unlock = vop_stdunlock, /* unlock */ 3751 .vop_islocked = vop_stdislocked, /* islocked */ 3752}; 3753 3754/* 3755 * Create a new filesystem syncer vnode for the specified mount point. 3756 */ 3757void 3758vfs_allocate_syncvnode(struct mount *mp) 3759{ 3760 struct vnode *vp; 3761 struct bufobj *bo; 3762 static long start, incr, next; 3763 int error; 3764 3765 /* Allocate a new vnode */ 3766 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 3767 if (error != 0) 3768 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 3769 vp->v_type = VNON; 3770 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3771 vp->v_vflag |= VV_FORCEINSMQ; 3772 error = insmntque(vp, mp); 3773 if (error != 0) 3774 panic("vfs_allocate_syncvnode: insmntque() failed"); 3775 vp->v_vflag &= ~VV_FORCEINSMQ; 3776 VOP_UNLOCK(vp, 0); 3777 /* 3778 * Place the vnode onto the syncer worklist. We attempt to 3779 * scatter them about on the list so that they will go off 3780 * at evenly distributed times even if all the filesystems 3781 * are mounted at once. 
3782 */ 3783 next += incr; 3784 if (next == 0 || next > syncer_maxdelay) { 3785 start /= 2; 3786 incr /= 2; 3787 if (start == 0) { 3788 start = syncer_maxdelay / 2; 3789 incr = syncer_maxdelay; 3790 } 3791 next = start; 3792 } 3793 bo = &vp->v_bufobj; 3794 BO_LOCK(bo); 3795 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 3796 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 3797 mtx_lock(&sync_mtx); 3798 sync_vnode_count++; 3799 if (mp->mnt_syncer == NULL) { 3800 mp->mnt_syncer = vp; 3801 vp = NULL; 3802 } 3803 mtx_unlock(&sync_mtx); 3804 BO_UNLOCK(bo); 3805 if (vp != NULL) { 3806 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3807 vgone(vp); 3808 vput(vp); 3809 } 3810} 3811 3812void 3813vfs_deallocate_syncvnode(struct mount *mp) 3814{ 3815 struct vnode *vp; 3816 3817 mtx_lock(&sync_mtx); 3818 vp = mp->mnt_syncer; 3819 if (vp != NULL) 3820 mp->mnt_syncer = NULL; 3821 mtx_unlock(&sync_mtx); 3822 if (vp != NULL) 3823 vrele(vp); 3824} 3825 3826/* 3827 * Do a lazy sync of the filesystem. 3828 */ 3829static int 3830sync_fsync(struct vop_fsync_args *ap) 3831{ 3832 struct vnode *syncvp = ap->a_vp; 3833 struct mount *mp = syncvp->v_mount; 3834 int error, save; 3835 struct bufobj *bo; 3836 3837 /* 3838 * We only need to do something if this is a lazy evaluation. 3839 */ 3840 if (ap->a_waitfor != MNT_LAZY) 3841 return (0); 3842 3843 /* 3844 * Move ourselves to the back of the sync list. 3845 */ 3846 bo = &syncvp->v_bufobj; 3847 BO_LOCK(bo); 3848 vn_syncer_add_to_worklist(bo, syncdelay); 3849 BO_UNLOCK(bo); 3850 3851 /* 3852 * Walk the list of vnodes pushing all that are dirty and 3853 * not already on the sync list. 3854 */ 3855 if (vfs_busy(mp, MBF_NOWAIT) != 0) 3856 return (0); 3857 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3858 vfs_unbusy(mp); 3859 return (0); 3860 } 3861 save = curthread_pflags_set(TDP_SYNCIO); 3862 vfs_msync(mp, MNT_NOWAIT); 3863 error = VFS_SYNC(mp, MNT_LAZY); 3864 curthread_pflags_restore(save); 3865 vn_finished_write(mp); 3866 vfs_unbusy(mp); 3867 return (error); 3868} 3869 3870/* 3871 * The syncer vnode is no referenced. 3872 */ 3873static int 3874sync_inactive(struct vop_inactive_args *ap) 3875{ 3876 3877 vgone(ap->a_vp); 3878 return (0); 3879} 3880 3881/* 3882 * The syncer vnode is no longer needed and is being decommissioned. 3883 * 3884 * Modifications to the worklist must be protected by sync_mtx. 3885 */ 3886static int 3887sync_reclaim(struct vop_reclaim_args *ap) 3888{ 3889 struct vnode *vp = ap->a_vp; 3890 struct bufobj *bo; 3891 3892 bo = &vp->v_bufobj; 3893 BO_LOCK(bo); 3894 mtx_lock(&sync_mtx); 3895 if (vp->v_mount->mnt_syncer == vp) 3896 vp->v_mount->mnt_syncer = NULL; 3897 if (bo->bo_flag & BO_ONWORKLST) { 3898 LIST_REMOVE(bo, bo_synclist); 3899 syncer_worklist_len--; 3900 sync_vnode_count--; 3901 bo->bo_flag &= ~BO_ONWORKLST; 3902 } 3903 mtx_unlock(&sync_mtx); 3904 BO_UNLOCK(bo); 3905 3906 return (0); 3907} 3908 3909/* 3910 * Check if vnode represents a disk device 3911 */ 3912int 3913vn_isdisk(struct vnode *vp, int *errp) 3914{ 3915 int error; 3916 3917 if (vp->v_type != VCHR) { 3918 error = ENOTBLK; 3919 goto out; 3920 } 3921 error = 0; 3922 dev_lock(); 3923 if (vp->v_rdev == NULL) 3924 error = ENXIO; 3925 else if (vp->v_rdev->si_devsw == NULL) 3926 error = ENXIO; 3927 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3928 error = ENOTBLK; 3929 dev_unlock(); 3930out: 3931 if (errp != NULL) 3932 *errp = error; 3933 return (error == 0); 3934} 3935 3936/* 3937 * Common filesystem object access control check routine. 
Accepts a 3938 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3939 * and optional call-by-reference privused argument allowing vaccess() 3940 * to indicate to the caller whether privilege was used to satisfy the 3941 * request (obsoleted). Returns 0 on success, or an errno on failure. 3942 */ 3943int 3944vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3945 accmode_t accmode, struct ucred *cred, int *privused) 3946{ 3947 accmode_t dac_granted; 3948 accmode_t priv_granted; 3949 3950 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 3951 ("invalid bit in accmode")); 3952 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 3953 ("VAPPEND without VWRITE")); 3954 3955 /* 3956 * Look for a normal, non-privileged way to access the file/directory 3957 * as requested. If it exists, go with that. 3958 */ 3959 3960 if (privused != NULL) 3961 *privused = 0; 3962 3963 dac_granted = 0; 3964 3965 /* Check the owner. */ 3966 if (cred->cr_uid == file_uid) { 3967 dac_granted |= VADMIN; 3968 if (file_mode & S_IXUSR) 3969 dac_granted |= VEXEC; 3970 if (file_mode & S_IRUSR) 3971 dac_granted |= VREAD; 3972 if (file_mode & S_IWUSR) 3973 dac_granted |= (VWRITE | VAPPEND); 3974 3975 if ((accmode & dac_granted) == accmode) 3976 return (0); 3977 3978 goto privcheck; 3979 } 3980 3981 /* Otherwise, check the groups (first match) */ 3982 if (groupmember(file_gid, cred)) { 3983 if (file_mode & S_IXGRP) 3984 dac_granted |= VEXEC; 3985 if (file_mode & S_IRGRP) 3986 dac_granted |= VREAD; 3987 if (file_mode & S_IWGRP) 3988 dac_granted |= (VWRITE | VAPPEND); 3989 3990 if ((accmode & dac_granted) == accmode) 3991 return (0); 3992 3993 goto privcheck; 3994 } 3995 3996 /* Otherwise, check everyone else. */ 3997 if (file_mode & S_IXOTH) 3998 dac_granted |= VEXEC; 3999 if (file_mode & S_IROTH) 4000 dac_granted |= VREAD; 4001 if (file_mode & S_IWOTH) 4002 dac_granted |= (VWRITE | VAPPEND); 4003 if ((accmode & dac_granted) == accmode) 4004 return (0); 4005 4006privcheck: 4007 /* 4008 * Build a privilege mask to determine if the set of privileges 4009 * satisfies the requirements when combined with the granted mask 4010 * from above. For each privilege, if the privilege is required, 4011 * bitwise or the request type onto the priv_granted mask. 4012 */ 4013 priv_granted = 0; 4014 4015 if (type == VDIR) { 4016 /* 4017 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4018 * requests, instead of PRIV_VFS_EXEC. 4019 */ 4020 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4021 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 4022 priv_granted |= VEXEC; 4023 } else { 4024 /* 4025 * Ensure that at least one execute bit is on. Otherwise, 4026 * a privileged user will always succeed, and we don't want 4027 * this to happen unless the file really is executable. 
4028 */ 4029 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4030 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4031 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 4032 priv_granted |= VEXEC; 4033 } 4034 4035 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4036 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 4037 priv_granted |= VREAD; 4038 4039 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4040 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 4041 priv_granted |= (VWRITE | VAPPEND); 4042 4043 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4044 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 4045 priv_granted |= VADMIN; 4046 4047 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4048 /* XXX audit: privilege used */ 4049 if (privused != NULL) 4050 *privused = 1; 4051 return (0); 4052 } 4053 4054 return ((accmode & VADMIN) ? EPERM : EACCES); 4055} 4056 4057/* 4058 * Credential check based on process requesting service, and per-attribute 4059 * permissions. 4060 */ 4061int 4062extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4063 struct thread *td, accmode_t accmode) 4064{ 4065 4066 /* 4067 * Kernel-invoked always succeeds. 4068 */ 4069 if (cred == NOCRED) 4070 return (0); 4071 4072 /* 4073 * Do not allow privileged processes in jail to directly manipulate 4074 * system attributes. 4075 */ 4076 switch (attrnamespace) { 4077 case EXTATTR_NAMESPACE_SYSTEM: 4078 /* Potentially should be: return (EPERM); */ 4079 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 4080 case EXTATTR_NAMESPACE_USER: 4081 return (VOP_ACCESS(vp, accmode, cred, td)); 4082 default: 4083 return (EPERM); 4084 } 4085} 4086 4087#ifdef DEBUG_VFS_LOCKS 4088/* 4089 * This only exists to supress warnings from unlocked specfs accesses. It is 4090 * no longer ok to have an unlocked VFS. 4091 */ 4092#define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4093 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4094 4095int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4096SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4097 "Drop into debugger on lock violation"); 4098 4099int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4100SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4101 0, "Check for interlock across VOPs"); 4102 4103int vfs_badlock_print = 1; /* Print lock violations. */ 4104SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4105 0, "Print lock violations"); 4106 4107#ifdef KDB 4108int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 4109SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4110 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4111#endif 4112 4113static void 4114vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4115{ 4116 4117#ifdef KDB 4118 if (vfs_badlock_backtrace) 4119 kdb_backtrace(); 4120#endif 4121 if (vfs_badlock_print) 4122 printf("%s: %p %s\n", str, (void *)vp, msg); 4123 if (vfs_badlock_ddb) 4124 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4125} 4126 4127void 4128assert_vi_locked(struct vnode *vp, const char *str) 4129{ 4130 4131 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4132 vfs_badlock("interlock is not locked but should be", str, vp); 4133} 4134 4135void 4136assert_vi_unlocked(struct vnode *vp, const char *str) 4137{ 4138 4139 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4140 vfs_badlock("interlock is locked but should not be", str, vp); 4141} 4142 4143void 4144assert_vop_locked(struct vnode *vp, const char *str) 4145{ 4146 int locked; 4147 4148 if (!IGNORE_LOCK(vp)) { 4149 locked = VOP_ISLOCKED(vp); 4150 if (locked == 0 || locked == LK_EXCLOTHER) 4151 vfs_badlock("is not locked but should be", str, vp); 4152 } 4153} 4154 4155void 4156assert_vop_unlocked(struct vnode *vp, const char *str) 4157{ 4158 4159 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4160 vfs_badlock("is locked but should not be", str, vp); 4161} 4162 4163void 4164assert_vop_elocked(struct vnode *vp, const char *str) 4165{ 4166 4167 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4168 vfs_badlock("is not exclusive locked but should be", str, vp); 4169} 4170 4171#if 0 4172void 4173assert_vop_elocked_other(struct vnode *vp, const char *str) 4174{ 4175 4176 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER) 4177 vfs_badlock("is not exclusive locked by another thread", 4178 str, vp); 4179} 4180 4181void 4182assert_vop_slocked(struct vnode *vp, const char *str) 4183{ 4184 4185 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED) 4186 vfs_badlock("is not locked shared but should be", str, vp); 4187} 4188#endif /* 0 */ 4189#endif /* DEBUG_VFS_LOCKS */ 4190 4191void 4192vop_rename_fail(struct vop_rename_args *ap) 4193{ 4194 4195 if (ap->a_tvp != NULL) 4196 vput(ap->a_tvp); 4197 if (ap->a_tdvp == ap->a_tvp) 4198 vrele(ap->a_tdvp); 4199 else 4200 vput(ap->a_tdvp); 4201 vrele(ap->a_fdvp); 4202 vrele(ap->a_fvp); 4203} 4204 4205void 4206vop_rename_pre(void *ap) 4207{ 4208 struct vop_rename_args *a = ap; 4209 4210#ifdef DEBUG_VFS_LOCKS 4211 if (a->a_tvp) 4212 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4213 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4214 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4215 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4216 4217 /* Check the source (from). */ 4218 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4219 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4220 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4221 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4222 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4223 4224 /* Check the target. 
*/ 4225 if (a->a_tvp) 4226 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4227 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4228#endif 4229 if (a->a_tdvp != a->a_fdvp) 4230 vhold(a->a_fdvp); 4231 if (a->a_tvp != a->a_fvp) 4232 vhold(a->a_fvp); 4233 vhold(a->a_tdvp); 4234 if (a->a_tvp) 4235 vhold(a->a_tvp); 4236} 4237 4238void 4239vop_strategy_pre(void *ap) 4240{ 4241#ifdef DEBUG_VFS_LOCKS 4242 struct vop_strategy_args *a; 4243 struct buf *bp; 4244 4245 a = ap; 4246 bp = a->a_bp; 4247 4248 /* 4249 * Cluster ops lock their component buffers but not the IO container. 4250 */ 4251 if ((bp->b_flags & B_CLUSTER) != 0) 4252 return; 4253 4254 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4255 if (vfs_badlock_print) 4256 printf( 4257 "VOP_STRATEGY: bp is not locked but should be\n"); 4258 if (vfs_badlock_ddb) 4259 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4260 } 4261#endif 4262} 4263 4264void 4265vop_lock_pre(void *ap) 4266{ 4267#ifdef DEBUG_VFS_LOCKS 4268 struct vop_lock1_args *a = ap; 4269 4270 if ((a->a_flags & LK_INTERLOCK) == 0) 4271 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4272 else 4273 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4274#endif 4275} 4276 4277void 4278vop_lock_post(void *ap, int rc) 4279{ 4280#ifdef DEBUG_VFS_LOCKS 4281 struct vop_lock1_args *a = ap; 4282 4283 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4284 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4285 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4286#endif 4287} 4288 4289void 4290vop_unlock_pre(void *ap) 4291{ 4292#ifdef DEBUG_VFS_LOCKS 4293 struct vop_unlock_args *a = ap; 4294 4295 if (a->a_flags & LK_INTERLOCK) 4296 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4297 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4298#endif 4299} 4300 4301void 4302vop_unlock_post(void *ap, int rc) 4303{ 4304#ifdef DEBUG_VFS_LOCKS 4305 struct vop_unlock_args *a = ap; 4306 4307 if (a->a_flags & LK_INTERLOCK) 4308 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4309#endif 4310} 4311 4312void 4313vop_create_post(void *ap, int rc) 4314{ 4315 struct vop_create_args *a = ap; 4316 4317 if (!rc) 4318 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4319} 4320 4321void 4322vop_deleteextattr_post(void *ap, int rc) 4323{ 4324 struct vop_deleteextattr_args *a = ap; 4325 4326 if (!rc) 4327 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4328} 4329 4330void 4331vop_link_post(void *ap, int rc) 4332{ 4333 struct vop_link_args *a = ap; 4334 4335 if (!rc) { 4336 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4337 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4338 } 4339} 4340 4341void 4342vop_mkdir_post(void *ap, int rc) 4343{ 4344 struct vop_mkdir_args *a = ap; 4345 4346 if (!rc) 4347 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4348} 4349 4350void 4351vop_mknod_post(void *ap, int rc) 4352{ 4353 struct vop_mknod_args *a = ap; 4354 4355 if (!rc) 4356 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4357} 4358 4359void 4360vop_remove_post(void *ap, int rc) 4361{ 4362 struct vop_remove_args *a = ap; 4363 4364 if (!rc) { 4365 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4366 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4367 } 4368} 4369 4370void 4371vop_rename_post(void *ap, int rc) 4372{ 4373 struct vop_rename_args *a = ap; 4374 4375 if (!rc) { 4376 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 4377 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 4378 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4379 if (a->a_tvp) 4380 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4381 } 4382 if (a->a_tdvp != a->a_fdvp) 4383 vdrop(a->a_fdvp); 4384 if (a->a_tvp != a->a_fvp) 4385 vdrop(a->a_fvp); 4386 vdrop(a->a_tdvp); 4387 
if (a->a_tvp) 4388 vdrop(a->a_tvp); 4389} 4390 4391void 4392vop_rmdir_post(void *ap, int rc) 4393{ 4394 struct vop_rmdir_args *a = ap; 4395 4396 if (!rc) { 4397 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4398 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4399 } 4400} 4401 4402void 4403vop_setattr_post(void *ap, int rc) 4404{ 4405 struct vop_setattr_args *a = ap; 4406 4407 if (!rc) 4408 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4409} 4410 4411void 4412vop_setextattr_post(void *ap, int rc) 4413{ 4414 struct vop_setextattr_args *a = ap; 4415 4416 if (!rc) 4417 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4418} 4419 4420void 4421vop_symlink_post(void *ap, int rc) 4422{ 4423 struct vop_symlink_args *a = ap; 4424 4425 if (!rc) 4426 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4427} 4428 4429static struct knlist fs_knlist; 4430 4431static void 4432vfs_event_init(void *arg) 4433{ 4434 knlist_init_mtx(&fs_knlist, NULL); 4435} 4436/* XXX - correct order? */ 4437SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4438 4439void 4440vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4441{ 4442 4443 KNOTE_UNLOCKED(&fs_knlist, event); 4444} 4445 4446static int filt_fsattach(struct knote *kn); 4447static void filt_fsdetach(struct knote *kn); 4448static int filt_fsevent(struct knote *kn, long hint); 4449 4450struct filterops fs_filtops = { 4451 .f_isfd = 0, 4452 .f_attach = filt_fsattach, 4453 .f_detach = filt_fsdetach, 4454 .f_event = filt_fsevent 4455}; 4456 4457static int 4458filt_fsattach(struct knote *kn) 4459{ 4460 4461 kn->kn_flags |= EV_CLEAR; 4462 knlist_add(&fs_knlist, kn, 0); 4463 return (0); 4464} 4465 4466static void 4467filt_fsdetach(struct knote *kn) 4468{ 4469 4470 knlist_remove(&fs_knlist, kn, 0); 4471} 4472 4473static int 4474filt_fsevent(struct knote *kn, long hint) 4475{ 4476 4477 kn->kn_fflags |= hint; 4478 return (kn->kn_fflags != 0); 4479} 4480 4481static int 4482sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4483{ 4484 struct vfsidctl vc; 4485 int error; 4486 struct mount *mp; 4487 4488 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4489 if (error) 4490 return (error); 4491 if (vc.vc_vers != VFS_CTL_VERS1) 4492 return (EINVAL); 4493 mp = vfs_getvfs(&vc.vc_fsid); 4494 if (mp == NULL) 4495 return (ENOENT); 4496 /* ensure that a specific sysctl goes to the right filesystem. */ 4497 if (strcmp(vc.vc_fstypename, "*") != 0 && 4498 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4499 vfs_rel(mp); 4500 return (EINVAL); 4501 } 4502 VCTLTOREQ(&vc, req); 4503 error = VFS_SYSCTL(mp, vc.vc_op, req); 4504 vfs_rel(mp); 4505 return (error); 4506} 4507 4508SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 4509 NULL, 0, sysctl_vfs_ctl, "", 4510 "Sysctl by fsid"); 4511 4512/* 4513 * Function to initialize a va_filerev field sensibly. 4514 * XXX: Wouldn't a random number make a lot more sense ?? 
4515 */ 4516u_quad_t 4517init_va_filerev(void) 4518{ 4519 struct bintime bt; 4520 4521 getbinuptime(&bt); 4522 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4523} 4524 4525static int filt_vfsread(struct knote *kn, long hint); 4526static int filt_vfswrite(struct knote *kn, long hint); 4527static int filt_vfsvnode(struct knote *kn, long hint); 4528static void filt_vfsdetach(struct knote *kn); 4529static struct filterops vfsread_filtops = { 4530 .f_isfd = 1, 4531 .f_detach = filt_vfsdetach, 4532 .f_event = filt_vfsread 4533}; 4534static struct filterops vfswrite_filtops = { 4535 .f_isfd = 1, 4536 .f_detach = filt_vfsdetach, 4537 .f_event = filt_vfswrite 4538}; 4539static struct filterops vfsvnode_filtops = { 4540 .f_isfd = 1, 4541 .f_detach = filt_vfsdetach, 4542 .f_event = filt_vfsvnode 4543}; 4544 4545static void 4546vfs_knllock(void *arg) 4547{ 4548 struct vnode *vp = arg; 4549 4550 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4551} 4552 4553static void 4554vfs_knlunlock(void *arg) 4555{ 4556 struct vnode *vp = arg; 4557 4558 VOP_UNLOCK(vp, 0); 4559} 4560 4561static void 4562vfs_knl_assert_locked(void *arg) 4563{ 4564#ifdef DEBUG_VFS_LOCKS 4565 struct vnode *vp = arg; 4566 4567 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 4568#endif 4569} 4570 4571static void 4572vfs_knl_assert_unlocked(void *arg) 4573{ 4574#ifdef DEBUG_VFS_LOCKS 4575 struct vnode *vp = arg; 4576 4577 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 4578#endif 4579} 4580 4581int 4582vfs_kqfilter(struct vop_kqfilter_args *ap) 4583{ 4584 struct vnode *vp = ap->a_vp; 4585 struct knote *kn = ap->a_kn; 4586 struct knlist *knl; 4587 4588 switch (kn->kn_filter) { 4589 case EVFILT_READ: 4590 kn->kn_fop = &vfsread_filtops; 4591 break; 4592 case EVFILT_WRITE: 4593 kn->kn_fop = &vfswrite_filtops; 4594 break; 4595 case EVFILT_VNODE: 4596 kn->kn_fop = &vfsvnode_filtops; 4597 break; 4598 default: 4599 return (EINVAL); 4600 } 4601 4602 kn->kn_hook = (caddr_t)vp; 4603 4604 v_addpollinfo(vp); 4605 if (vp->v_pollinfo == NULL) 4606 return (ENOMEM); 4607 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 4608 vhold(vp); 4609 knlist_add(knl, kn, 0); 4610 4611 return (0); 4612} 4613 4614/* 4615 * Detach knote from vnode 4616 */ 4617static void 4618filt_vfsdetach(struct knote *kn) 4619{ 4620 struct vnode *vp = (struct vnode *)kn->kn_hook; 4621 4622 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 4623 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 4624 vdrop(vp); 4625} 4626 4627/*ARGSUSED*/ 4628static int 4629filt_vfsread(struct knote *kn, long hint) 4630{ 4631 struct vnode *vp = (struct vnode *)kn->kn_hook; 4632 struct vattr va; 4633 int res; 4634 4635 /* 4636 * filesystem is gone, so set the EOF flag and schedule 4637 * the knote for deletion. 4638 */ 4639 if (hint == NOTE_REVOKE) { 4640 VI_LOCK(vp); 4641 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4642 VI_UNLOCK(vp); 4643 return (1); 4644 } 4645 4646 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 4647 return (0); 4648 4649 VI_LOCK(vp); 4650 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 4651 res = (kn->kn_data != 0); 4652 VI_UNLOCK(vp); 4653 return (res); 4654} 4655 4656/*ARGSUSED*/ 4657static int 4658filt_vfswrite(struct knote *kn, long hint) 4659{ 4660 struct vnode *vp = (struct vnode *)kn->kn_hook; 4661 4662 VI_LOCK(vp); 4663 4664 /* 4665 * filesystem is gone, so set the EOF flag and schedule 4666 * the knote for deletion. 
4667 */ 4668 if (hint == NOTE_REVOKE) 4669 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4670 4671 kn->kn_data = 0; 4672 VI_UNLOCK(vp); 4673 return (1); 4674} 4675 4676static int 4677filt_vfsvnode(struct knote *kn, long hint) 4678{ 4679 struct vnode *vp = (struct vnode *)kn->kn_hook; 4680 int res; 4681 4682 VI_LOCK(vp); 4683 if (kn->kn_sfflags & hint) 4684 kn->kn_fflags |= hint; 4685 if (hint == NOTE_REVOKE) { 4686 kn->kn_flags |= EV_EOF; 4687 VI_UNLOCK(vp); 4688 return (1); 4689 } 4690 res = (kn->kn_fflags != 0); 4691 VI_UNLOCK(vp); 4692 return (res); 4693} 4694 4695int 4696vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 4697{ 4698 int error; 4699 4700 if (dp->d_reclen > ap->a_uio->uio_resid) 4701 return (ENAMETOOLONG); 4702 error = uiomove(dp, dp->d_reclen, ap->a_uio); 4703 if (error) { 4704 if (ap->a_ncookies != NULL) { 4705 if (ap->a_cookies != NULL) 4706 free(ap->a_cookies, M_TEMP); 4707 ap->a_cookies = NULL; 4708 *ap->a_ncookies = 0; 4709 } 4710 return (error); 4711 } 4712 if (ap->a_ncookies == NULL) 4713 return (0); 4714 4715 KASSERT(ap->a_cookies, 4716 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 4717 4718 *ap->a_cookies = realloc(*ap->a_cookies, 4719 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 4720 (*ap->a_cookies)[*ap->a_ncookies] = off; 4721 return (0); 4722} 4723 4724/* 4725 * Mark for update the access time of the file if the filesystem 4726 * supports VOP_MARKATIME. This functionality is used by execve and 4727 * mmap, so we want to avoid the I/O implied by directly setting 4728 * va_atime for the sake of efficiency. 4729 */ 4730void 4731vfs_mark_atime(struct vnode *vp, struct ucred *cred) 4732{ 4733 struct mount *mp; 4734 4735 mp = vp->v_mount; 4736 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 4737 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 4738 (void)VOP_MARKATIME(vp); 4739} 4740 4741/* 4742 * The purpose of this routine is to remove granularity from accmode_t, 4743 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 4744 * VADMIN and VAPPEND. 4745 * 4746 * If it returns 0, the caller is supposed to continue with the usual 4747 * access checks using 'accmode' as modified by this routine. If it 4748 * returns nonzero value, the caller is supposed to return that value 4749 * as errno. 4750 * 4751 * Note that after this routine runs, accmode may be zero. 4752 */ 4753int 4754vfs_unixify_accmode(accmode_t *accmode) 4755{ 4756 /* 4757 * There is no way to specify explicit "deny" rule using 4758 * file mode or POSIX.1e ACLs. 4759 */ 4760 if (*accmode & VEXPLICIT_DENY) { 4761 *accmode = 0; 4762 return (0); 4763 } 4764 4765 /* 4766 * None of these can be translated into usual access bits. 4767 * Also, the common case for NFSv4 ACLs is to not contain 4768 * either of these bits. Caller should check for VWRITE 4769 * on the containing directory instead. 4770 */ 4771 if (*accmode & (VDELETE_CHILD | VDELETE)) 4772 return (EPERM); 4773 4774 if (*accmode & VADMIN_PERMS) { 4775 *accmode &= ~VADMIN_PERMS; 4776 *accmode |= VADMIN; 4777 } 4778 4779 /* 4780 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 4781 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 4782 */ 4783 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 4784 4785 return (0); 4786} 4787 4788/* 4789 * These are helper functions for filesystems to traverse all 4790 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 4791 * 4792 * This interface replaces MNT_VNODE_FOREACH. 
4793 */ 4794 4795MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 4796 4797struct vnode * 4798__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 4799{ 4800 struct vnode *vp; 4801 4802 if (should_yield()) 4803 kern_yield(PRI_USER); 4804 MNT_ILOCK(mp); 4805 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4806 vp = TAILQ_NEXT(*mvp, v_nmntvnodes); 4807 while (vp != NULL && (vp->v_type == VMARKER || 4808 (vp->v_iflag & VI_DOOMED) != 0)) 4809 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4810 4811 /* Check if we are done */ 4812 if (vp == NULL) { 4813 __mnt_vnode_markerfree_all(mvp, mp); 4814 /* MNT_IUNLOCK(mp); -- done in above function */ 4815 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 4816 return (NULL); 4817 } 4818 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4819 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4820 VI_LOCK(vp); 4821 MNT_IUNLOCK(mp); 4822 return (vp); 4823} 4824 4825struct vnode * 4826__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 4827{ 4828 struct vnode *vp; 4829 4830 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4831 MNT_ILOCK(mp); 4832 MNT_REF(mp); 4833 (*mvp)->v_type = VMARKER; 4834 4835 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 4836 while (vp != NULL && (vp->v_type == VMARKER || 4837 (vp->v_iflag & VI_DOOMED) != 0)) 4838 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4839 4840 /* Check if we are done */ 4841 if (vp == NULL) { 4842 MNT_REL(mp); 4843 MNT_IUNLOCK(mp); 4844 free(*mvp, M_VNODE_MARKER); 4845 *mvp = NULL; 4846 return (NULL); 4847 } 4848 (*mvp)->v_mount = mp; 4849 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4850 VI_LOCK(vp); 4851 MNT_IUNLOCK(mp); 4852 return (vp); 4853} 4854 4855 4856void 4857__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 4858{ 4859 4860 if (*mvp == NULL) { 4861 MNT_IUNLOCK(mp); 4862 return; 4863 } 4864 4865 mtx_assert(MNT_MTX(mp), MA_OWNED); 4866 4867 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4868 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4869 MNT_REL(mp); 4870 MNT_IUNLOCK(mp); 4871 free(*mvp, M_VNODE_MARKER); 4872 *mvp = NULL; 4873} 4874 4875/* 4876 * These are helper functions for filesystems to traverse their 4877 * active vnodes. 
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 4878 */ 4879static void 4880mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4881{ 4882 4883 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4884 4885 MNT_ILOCK(mp); 4886 MNT_REL(mp); 4887 MNT_IUNLOCK(mp); 4888 free(*mvp, M_VNODE_MARKER); 4889 *mvp = NULL; 4890} 4891 4892static struct vnode * 4893mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4894{ 4895 struct vnode *vp, *nvp; 4896 4897 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 4898 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4899restart: 4900 vp = TAILQ_NEXT(*mvp, v_actfreelist); 4901 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4902 while (vp != NULL) { 4903 if (vp->v_type == VMARKER) { 4904 vp = TAILQ_NEXT(vp, v_actfreelist); 4905 continue; 4906 } 4907 if (!VI_TRYLOCK(vp)) { 4908 if (mp_ncpus == 1 || should_yield()) { 4909 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4910 mtx_unlock(&vnode_free_list_mtx); 4911 pause("vnacti", 1); 4912 mtx_lock(&vnode_free_list_mtx); 4913 goto restart; 4914 } 4915 continue; 4916 } 4917 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 4918 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 4919 ("alien vnode on the active list %p %p", vp, mp)); 4920 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 4921 break; 4922 nvp = TAILQ_NEXT(vp, v_actfreelist); 4923 VI_UNLOCK(vp); 4924 vp = nvp; 4925 } 4926 4927 /* Check if we are done */ 4928 if (vp == NULL) { 4929 mtx_unlock(&vnode_free_list_mtx); 4930 mnt_vnode_markerfree_active(mvp, mp); 4931 return (NULL); 4932 } 4933 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 4934 mtx_unlock(&vnode_free_list_mtx); 4935 ASSERT_VI_LOCKED(vp, "active iter"); 4936 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 4937 return (vp); 4938} 4939 4940struct vnode * 4941__mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4942{ 4943 4944 if (should_yield()) 4945 kern_yield(PRI_USER); 4946 mtx_lock(&vnode_free_list_mtx); 4947 return (mnt_vnode_next_active(mvp, mp)); 4948} 4949 4950struct vnode * 4951__mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 4952{ 4953 struct vnode *vp; 4954 4955 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4956 MNT_ILOCK(mp); 4957 MNT_REF(mp); 4958 MNT_IUNLOCK(mp); 4959 (*mvp)->v_type = VMARKER; 4960 (*mvp)->v_mount = mp; 4961 4962 mtx_lock(&vnode_free_list_mtx); 4963 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 4964 if (vp == NULL) { 4965 mtx_unlock(&vnode_free_list_mtx); 4966 mnt_vnode_markerfree_active(mvp, mp); 4967 return (NULL); 4968 } 4969 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4970 return (mnt_vnode_next_active(mvp, mp)); 4971} 4972 4973void 4974__mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4975{ 4976 4977 if (*mvp == NULL) 4978 return; 4979 4980 mtx_lock(&vnode_free_list_mtx); 4981 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4982 mtx_unlock(&vnode_free_list_mtx); 4983 mnt_vnode_markerfree_active(mvp, mp); 4984}
|