vfs_subr.c revision 162945
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_subr.c 162945 2006-10-02 07:25:58Z kib $");

#include "opt_ddb.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/sleepqueue.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/stdarg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static MALLOC_DEFINE(M_NETADDR, "subr_export_host", "Export host address structure");

static void	delmntque(struct vnode *vp);
static void	insmntque(struct vnode *vp, struct mount *mp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	vbusy(struct vnode *vp);
static void	vdropl(struct vnode *vp);
static void	vinactive(struct vnode *, struct thread *);
static void	v_incr_usecount(struct vnode *);
static void	v_decr_usecount(struct vnode *);
static void	v_decr_useonly(struct vnode *);
static void	v_upgrade_usecount(struct vnode *);
static void	vfree(struct vnode *);
static void	vnlru_free(int);
static void	vdestroy(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static int	vfs_knllocked(void *arg);


/*
 * Enable Giant pushdown based on whether or not the vm is mpsafe in this
 * build.  Without mpsafevm the buffer cache cannot run Giant free.
 */
#if !defined(__powerpc__)
int mpsafe_vfs = 1;
#else
int mpsafe_vfs;
#endif
TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs);
SYSCTL_INT(_debug, OID_AUTO, mpsafevfs, CTLFLAG_RD, &mpsafe_vfs, 0,
    "MPSAFE VFS");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased on vdestroy() called on VI_DOOMed
 * vnode.
 */
static unsigned long	numvnodes;

SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};
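/*
 * Illustrative sketch, assuming the standard IFTOVT()/VTTOIF() macro
 * definitions in sys/vnode.h that index these tables by the file-type
 * bits of an inode mode and by enum vtype, respectively.  For a
 * directory:
 *
 *	IFTOVT(S_IFDIR) == iftovt_tab[(S_IFDIR & S_IFMT) >> 12]
 *	                == iftovt_tab[4] == VDIR
 *	VTTOIF(VDIR)    == vttoif_tab[2] == S_IFDIR
 */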
/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Free vnode target.  Free vnodes may simply be files which have been stat'd
 * but not read.  This is somewhat common, and a small cache of such files
 * should be kept to avoid recreation costs.
 */
static u_long wantfreevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates are
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third of the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
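/*
 * Sketch of the slot arithmetic, assuming the declarations that follow:
 * hashinit() allocates a power-of-two number of list heads and sets
 * syncer_mask to that size minus one, so "& syncer_mask" wraps the
 * ring.  With a 32-slot ring, syncer_delayno == 30 and a 15 second
 * delay:
 *
 *	slot = (30 + 15) & 31 = 45 & 31 = 13
 *
 * i.e. the request lands 15 slots ahead, modulo the ring size.
 */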
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

/*
 * Macros to control when a vnode is freed and recycled.  All require
 * the vnode interlock.
 */
#define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
#define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
#define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)

/*
 * Initialize the vnode management data structures.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	100000
#endif
static void
vntblinit(void *dummy __unused)
{

	/*
	 * Desiredvnodes is a function of the physical memory size and
	 * the kernel's heap size.  Specifically, desiredvnodes scales
	 * in proportion to the physical memory size until two fifths
	 * of the kernel's heap size is consumed by vnodes and vm
	 * objects.
	 */
	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(struct mount *mp, int flags, struct mtx *interlkp,
    struct thread *td)
{
	int lkflags;

	MNT_ILOCK(mp);
	MNT_REF(mp);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			return (ENOENT);
		}
		if (interlkp)
			mtx_unlock(interlkp);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0);
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		if (interlkp)
			mtx_lock(interlkp);
		return (ENOENT);
	}
	if (interlkp)
		mtx_unlock(interlkp);
	lkflags = LK_SHARED | LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp, struct thread *td)
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
	vfs_rel(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if ((mp->mnt_flag & MNT_USER) == 0 ||
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = suser(td)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
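/*
 * Worked example of the val[0] packing above (hypothetical values):
 * with vfc_typenum == 5 and mntid_base == 0x1234, the minor number
 * handed to makedev(255, ...) is
 *
 *	(0x05 << 24) | ((0x1234 & 0xff00) << 8) | (0x1234 & 0xff)
 *	    == 0x05000000 | 0x00120000 | 0x00000034 == 0x05120034
 *
 * The two halves of mntid_base are split so that only the low byte
 * lands in the low 16 bits of the result, which is why val[0] stays
 * unique mod 2^16 for only the first 2^8 calls.
 */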
/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
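/*
 * Tuning sketch: the knob is exported under the vfs tree, so, for
 * example,
 *
 *	sysctl vfs.timestamp_precision=2
 *
 * selects microsecond-truncated stamps.  TSP_HZ keeps timestamps cheap
 * by reusing the getnanotime() value, which is only updated once per
 * clock tick (hence the "within 1/HZ" accuracy above).
 */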
/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct thread *td;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;
	done = 0;
	td = curthread;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count != 0) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 */
		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT, td)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK, td);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		vgonel(vp);
		VOP_UNLOCK(vp, 0, td);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if ((count % 256) != 0)
			goto relock_mnt;
		goto yield;
next_iter:
		if ((count % 256) != 0)
			continue;
		MNT_IUNLOCK(mp);
yield:
		uio_yield();
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}
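/*
 * Worked example of the trigger, for a hypothetical machine: with
 * desiredvnodes == 100000 and cnt.v_page_count == 262144 (1 GB of
 * 4 KB pages),
 *
 *	trigger = 262144 * 2 / 100000 = 5
 *
 * so vnodes caching more than 5 resident pages are skipped; the scan
 * stays aimed at cheap, mostly-metadata vnodes rather than evicting
 * page cache.
 */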
/*
 * Attempt to keep the free list at wantfreevnodes length.
 */
static void
vnlru_free(int count)
{
	struct vnode *vp;
	int vfslocked;

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (!vp)
			break;
		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/*
		 * Don't recycle if we can't get the interlock.
		 */
		if (!VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			continue;
		}
		VNASSERT(VCANRECYCLE(vp), vp,
		    ("vp inconsistent on freelist"));
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		vholdl(vp);
		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vtryrecycle(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		/*
		 * If the recycle succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int done;
	struct proc *p = vnlruproc;
	struct thread *td = FIRST_THREAD_IN_PROC(p);

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	for (;;) {
		kthread_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (freevnodes > wantfreevnodes)
			vnlru_free(freevnodes - wantfreevnodes);
		if (numvnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			int vfsunlocked;
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			if (!VFS_NEEDSGIANT(mp)) {
				mtx_unlock(&Giant);
				vfsunlocked = 1;
			} else
				vfsunlocked = 0;
			done += vlrureclaim(mp);
			if (vfsunlocked)
				mtx_lock(&Giant);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			uio_yield();
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
/*
 * Routines having to do with the management of the vnode table.
 */

static void
vdestroy(struct vnode *vp)
{
	struct bufobj *bo;

	CTR1(KTR_VFS, "vdestroy vp %p", vp);
	mtx_lock(&vnode_free_list_mtx);
	numvnodes--;
	mtx_unlock(&vnode_free_list_mtx);
	bo = &vp->v_bufobj;
	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
	    ("cleaned vnode still on the free list."));
	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
	VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
	VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
	VI_UNLOCK(vp);
#ifdef MAC
	mac_destroy_vnode(vp);
#endif
	if (vp->v_pollinfo != NULL) {
		knlist_destroy(&vp->v_pollinfo->vpi_selinfo.si_note);
		mtx_destroy(&vp->v_pollinfo->vpi_lock);
		uma_zfree(vnodepoll_zone, vp->v_pollinfo);
	}
#ifdef INVARIANTS
	/* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
	vp->v_op = NULL;
#endif
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	uma_zfree(vnode_zone, vp);
}

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct thread *td = curthread;
	struct mount *vnmp;

	CTR1(KTR_VFS, "vtryrecycle: trying vp %p", vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list; if so
	 * we can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
		return (EWOULDBLOCK);
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0, td);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK, td);
		vn_finished_write(vnmp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0)
		vgonel(vp);
	VOP_UNLOCK(vp, LK_INTERLOCK, td);
	vn_finished_write(vnmp);
	CTR1(KTR_VFS, "vtryrecycle: recycled vp %p", vp);
	return (0);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp = NULL;
	struct bufobj *bo;

	mtx_lock(&vnode_free_list_mtx);
	/*
	 * Lend our context to reclaim vnodes if they've exceeded the max.
	 */
	if (freevnodes > wantfreevnodes)
		vnlru_free(1);
	/*
	 * Wait for available vnodes.
	 */
	if (numvnodes > desiredvnodes) {
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
			/*
			 * The file system is being suspended; we cannot
			 * risk a deadlock here, so allocate a new vnode
			 * anyway.
			 */
			if (freevnodes > wantfreevnodes)
				vnlru_free(freevnodes - wantfreevnodes);
			goto alloc;
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (numvnodes > desiredvnodes) {
			mtx_unlock(&vnode_free_list_mtx);
			return (ENFILE);
		}
#endif
	}
alloc:
	numvnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems
	 * opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
	/*
	 * Initialize bufobj.
	 */
	bo = &vp->v_bufobj;
	bo->__bo_vnode = vp;
	bo->bo_mtx = &vp->v_interlock;
	bo->bo_ops = &buf_ops_bio;
	bo->bo_private = vp;
	TAILQ_INIT(&bo->bo_clean.bv_hd);
	TAILQ_INIT(&bo->bo_dirty.bv_hd);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Finalize various vnode identity bits.
	 */
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	v_incr_usecount(vp);
	vp->v_data = 0;
#ifdef MAC
	mac_init_vnode(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_associate_vnode_singlelabel(mp, vp);
	else if (mp == NULL)
		printf("NULL mp in getnewvnode()\n");
#endif
	if (mp != NULL) {
		insmntque(vp, mp);
		bo->bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	CTR2(KTR_VFS, "getnewvnode: mp %p vp %p", mp, vp);
	*vpp = vp;
	return (0);
}
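/*
 * Usage sketch for a hypothetical filesystem (the "myfs" names are
 * illustrative, not from this file): a VFS_VGET implementation
 * typically allocates the vnode, then attaches private data and locks
 * it before returning:
 *
 *	struct vnode *vp;
 *	struct myfs_node *np;
 *	int error;
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error)
 *		return (error);
 *	np = malloc(sizeof(*np), M_MYFSNODE, M_WAITOK | M_ZERO);
 *	vp->v_data = np;
 *	vp->v_type = VDIR;	// from the on-disk inode
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	*vpp = vp;
 *	return (0);
 *
 * Note that getnewvnode() returns with the usecount already bumped
 * (see the v_incr_usecount() call above), so the caller owns one
 * reference.
 */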
/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	vp->v_mount = NULL;
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
static void
insmntque(struct vnode *vp, struct mount *mp)
{

	vp->v_mount = mp;
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	MNT_ILOCK(mp);
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	MNT_IUNLOCK(mp);
}

/*
 * Flush out and invalidate all buffers associated with a bufobj.
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, struct thread *td, int slpflag,
    int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT, td)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0)
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		BO_UNLOCK(bo);
		if (bo->bo_object != NULL) {
			VM_OBJECT_LOCK(bo->bo_object);
			vm_object_pip_wait(bo->bo_object, "bovlbx");
			VM_OBJECT_UNLOCK(bo->bo_object);
		}
		BO_LOCK(bo);
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL) {
		VM_OBJECT_LOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
		VM_OBJECT_UNLOCK(bo->bo_object);
	}

#ifdef INVARIANTS
	BO_LOCK(bo);
	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("vinvalbuf: flush failed");
	BO_UNLOCK(bo);
#endif
	return (0);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct thread *td, int slpflag,
    int slptimeo)
{

	CTR2(KTR_VFS, "vinvalbuf vp %p flags %d", vp, flags);
	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
	return (bufobj_invalbuf(&vp->v_bufobj, flags, td, slpflag, slptimeo));
}

/*
 * Flush out buffers on the specified list.
 */
static int
flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
    int slptimeo)
{
	struct buf *bp, *nbp;
	int retval, error;
	daddr_t lblkno;
	b_xflags_t xflags;

	ASSERT_BO_LOCKED(bo);

	retval = 0;
	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		lblkno = 0;
		xflags = 0;
		if (nbp != NULL) {
			lblkno = nbp->b_lblkno;
			xflags = nbp->b_xflags &
			    (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
		}
		retval = EAGAIN;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			BO_LOCK(bo);
			return (error != ENOLCK ? error : EAGAIN);
		}
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
			BUF_UNLOCK(bp);
			BO_LOCK(bo);
			return (EAGAIN);
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {
			bremfree(bp);
			bp->b_flags |= B_ASYNC;
			bwrite(bp);
			BO_LOCK(bo);
			return (EAGAIN);	/* XXX: why not loop ? */
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
		BO_LOCK(bo);
		if (nbp != NULL &&
		    (nbp->b_bufobj != bo ||
		    nbp->b_lblkno != lblkno ||
		    (nbp->b_xflags &
		    (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
			break;			/* nbp invalid */
	}
	return (retval);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
    off_t length, int blksize)
{
	struct buf *bp, *nbp;
	int anyfreed;
	int trunclbn;
	struct bufobj *bo;

	CTR2(KTR_VFS, "vtruncbuf vp %p length %jd", vp, length);
	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;
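	/*
	 * Worked example: with length == 10000 and blksize == 4096,
	 * trunclbn = (10000 + 4095) / 4096 = 3, so lbn 2 (which still
	 * holds the last valid byte) survives the loops below and
	 * flushing starts at lbn 3, the first block entirely past the
	 * new EOF.
	 */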
	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
restart:
	VI_LOCK(vp);
	bo = &vp->v_bufobj;
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    VI_MTX(vp)) == ENOLCK)
				goto restart;

			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;

			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI))) {
				goto restart;
			}
			VI_LOCK(vp);
		}

		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    VI_MTX(vp)) == ENOLCK)
				goto restart;
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;
			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI) == 0)) {
				goto restart;
			}
			VI_LOCK(vp);
		}
	}

	if (length > 0) {
restartsync:
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno > 0)
				continue;
			/*
			 * Since we hold the vnode lock this should only
			 * fail if we're racing with the buf daemon.
			 */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    VI_MTX(vp)) == ENOLCK) {
				goto restart;
			}
			VNASSERT((bp->b_flags & B_DELWRI), vp,
			    ("buf(%p) on dirty queue without DELWRI", bp));

			bremfree(bp);
			bawrite(bp);
			VI_LOCK(vp);
			goto restartsync;
		}
	}

	bufobj_wwait(bo, 0, 0);
	VI_UNLOCK(vp);
	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 * a vnode.
 *
 * NOTE: We have to deal with the special case of a background bitmap
 * buffer, a situation where two buffers will have the same logical
 * block offset.  We want (1) only the foreground buffer to be accessed
 * in a lookup and (2) must differentiate between the foreground and
 * background buffer in the splay tree algorithm because the splay
 * tree cannot normally handle multiple entities with the same 'index'.
 * We accomplish this by adding differentiating flags to the splay tree's
 * numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}
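/*
 * Sketch of the augmented key: conceptually each buffer is ordered by
 * the pair (b_lblkno, b_xflags & BX_BKGRDMARKER), compared
 * lexicographically as in the code above.  A foreground buffer and its
 * background-write shadow share an lblkno, say 7, but compare as
 *
 *	(7, 0)  <  (7, BX_BKGRDMARKER)
 *
 * so a lookup performed with xflags == 0, as gbincore() below does,
 * can only land on the foreground buffer.
 */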
static void
buf_vlist_remove(struct buf *bp)
{
	struct buf *root;
	struct bufv *bv;

	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	ASSERT_BO_LOCKED(bp->b_bufobj);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
	    (BX_VNDIRTY|BX_VNCLEAN),
	    ("buf_vlist_remove: Buf %p is on two lists", bp));
	if (bp->b_xflags & BX_VNDIRTY)
		bv = &bp->b_bufobj->bo_dirty;
	else
		bv = &bp->b_bufobj->bo_clean;
	if (bp != bv->bv_root) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
		KASSERT(root == bp, ("splay lookup failed in remove"));
	}
	if (bp->b_left == NULL) {
		root = bp->b_right;
	} else {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
		root->b_right = bp->b_right;
	}
	bv->bv_root = root;
	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
	bv->bv_cnt--;
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static void
buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
{
	struct buf *root;
	struct bufv *bv;

	ASSERT_BO_LOCKED(bo);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY)
		bv = &bo->bo_dirty;
	else
		bv = &bo->bo_clean;

	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
	if (root == NULL) {
		bp->b_left = NULL;
		bp->b_right = NULL;
		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
	} else if (bp->b_lblkno < root->b_lblkno ||
	    (bp->b_lblkno == root->b_lblkno &&
	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
		bp->b_left = root->b_left;
		bp->b_right = root;
		root->b_left = NULL;
		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
	} else {
		bp->b_right = root->b_right;
		bp->b_left = root;
		root->b_right = NULL;
		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
	}
	bv->bv_cnt++;
	bv->bv_root = bp;
}

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 *
 * During a "make buildworld" the desired buffer is found at one of
 * the roots more than 60% of the time.  Thus, checking both roots
 * before performing either splay eliminates unnecessary splays on the
 * first tree splayed.
 */
struct buf *
gbincore(struct bufobj *bo, daddr_t lblkno)
{
	struct buf *bp;

	ASSERT_BO_LOCKED(bo);
	if ((bp = bo->bo_clean.bv_root) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = bo->bo_dirty.bv_root) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = bo->bo_clean.bv_root) != NULL) {
		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	if ((bp = bo->bo_dirty.bv_root) != NULL) {
		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	return (NULL);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{

	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));

	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
	    ("bgetvp: bp already attached! %p", bp));

	ASSERT_VI_LOCKED(vp, "bgetvp");
	vholdl(vp);
	if (VFS_NEEDSGIANT(vp->v_mount) ||
	    vp->v_bufobj.bo_flag & BO_NEEDSGIANT)
		bp->b_flags |= B_NEEDSGIANT;
	bp->b_vp = vp;
	bp->b_bufobj = &vp->v_bufobj;
	/*
	 * Insert onto list for new vnode.
	 */
	buf_vlist_add(bp, &vp->v_bufobj, BX_VNCLEAN);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct bufobj *bo;
	struct vnode *vp;

	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;		/* XXX */
	bo = bp->b_bufobj;
	BO_LOCK(bo);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	else
		panic("brelvp: Buffer %p not on queue.", bp);
	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
		bo->bo_flag &= ~BO_ONWORKLST;
		mtx_lock(&sync_mtx);
		LIST_REMOVE(bo, bo_synclist);
		syncer_worklist_len--;
		mtx_unlock(&sync_mtx);
	}
	bp->b_flags &= ~B_NEEDSGIANT;
	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	vdropl(vp);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
{
	int slot;

	ASSERT_BO_LOCKED(bo);

	mtx_lock(&sync_mtx);
	if (bo->bo_flag & BO_ONWORKLST)
		LIST_REMOVE(bo, bo_synclist);
	else {
		bo->bo_flag |= BO_ONWORKLST;
		syncer_worklist_len++;
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
	mtx_unlock(&sync_mtx);
}

static int
sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
{
	int error, len;

	mtx_lock(&sync_mtx);
	len = syncer_worklist_len - sync_vnode_count;
	mtx_unlock(&sync_mtx);
	error = SYSCTL_OUT(req, &len, sizeof(len));
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");

static struct proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

static int
sync_vnode(struct bufobj *bo, struct thread *td)
{
	struct vnode *vp;
	struct mount *mp;

	vp = bo->__bo_vnode;	/* XXX */
	if (VOP_ISLOCKED(vp, NULL) != 0)
		return (1);
	if (VI_TRYLOCK(vp) == 0)
		return (1);
	/*
	 * We use vhold in case the vnode does not
	 * successfully sync.  vhold prevents the vnode from
	 * going away when we unlock the sync_mtx so that
	 * we can acquire the vnode interlock.
	 */
	vholdl(vp);
	mtx_unlock(&sync_mtx);
	VI_UNLOCK(vp);
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		vdrop(vp);
		mtx_lock(&sync_mtx);
		return (1);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void) VOP_FSYNC(vp, MNT_LAZY, td);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	VI_LOCK(vp);
	if ((bo->bo_flag & BO_ONWORKLST) != 0) {
		/*
		 * Put us back on the worklist.  The worklist
		 * routine will remove us from our current
		 * position and then add us back in at a later
		 * position.
		 */
		vn_syncer_add_to_worklist(bo, syncdelay);
	}
	vdropl(vp);
	mtx_lock(&sync_mtx);
	return (0);
}

/*
 * System filesystem synchronizer daemon.
 */
static void
sched_sync(void)
{
	struct synclist *next;
	struct synclist *slp;
	struct bufobj *bo;
	long starttime;
	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
	static int dummychan;
	int last_work_seen;
	int net_worklist_len;
	int syncer_final_iter;
	int first_printf;
	int error;

	mtx_lock(&Giant);
	last_work_seen = 0;
	syncer_final_iter = 0;
	first_printf = 1;
	syncer_state = SYNCER_RUNNING;
	starttime = time_uptime;
	td->td_pflags |= TDP_NORUNNINGBUF;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		mtx_lock(&sync_mtx);
		if (syncer_state == SYNCER_FINAL_DELAY &&
		    syncer_final_iter == 0) {
			mtx_unlock(&sync_mtx);
			kthread_suspend_check(td->td_proc);
			mtx_lock(&sync_mtx);
		}
		net_worklist_len = syncer_worklist_len - sync_vnode_count;
		if (syncer_state != SYNCER_RUNNING &&
		    starttime != time_uptime) {
			if (first_printf) {
				printf("\nSyncing disks, vnodes remaining...");
				first_printf = 0;
			}
			printf("%d ", net_worklist_len);
		}
		starttime = time_uptime;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Skip over empty worklist slots when shutting down.
		 */
		do {
			slp = &syncer_workitem_pending[syncer_delayno];
			syncer_delayno += 1;
			if (syncer_delayno == syncer_maxdelay)
				syncer_delayno = 0;
			next = &syncer_workitem_pending[syncer_delayno];
			/*
			 * If the worklist has wrapped since it was
			 * emptied of all but syncer vnodes, switch to
			 * the FINAL_DELAY state and run for one more
			 * second.
			 */
			if (syncer_state == SYNCER_SHUTTING_DOWN &&
			    net_worklist_len == 0 &&
			    last_work_seen == syncer_delayno) {
				syncer_state = SYNCER_FINAL_DELAY;
				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
			}
		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
		    syncer_worklist_len > 0);

		/*
		 * Keep track of the last time there was anything
		 * on the worklist other than syncer vnodes.
		 * Return to the SHUTTING_DOWN state if any
		 * new work appears.
		 */
		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
			last_work_seen = syncer_delayno;
		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
			syncer_state = SYNCER_SHUTTING_DOWN;
		while ((bo = LIST_FIRST(slp)) != NULL) {
			error = sync_vnode(bo, td);
			if (error == 1) {
				LIST_REMOVE(bo, bo_synclist);
				LIST_INSERT_HEAD(next, bo, bo_synclist);
				continue;
			}
		}
		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
			syncer_final_iter--;
		mtx_unlock(&sync_mtx);
		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		mtx_lock(&sync_mtx);
		if (rushjob > 0) {
			rushjob -= 1;
			mtx_unlock(&sync_mtx);
			continue;
		}
		mtx_unlock(&sync_mtx);
		/*
		 * Just sleep for a short period of time between
		 * iterations when shutting down to allow some I/O
		 * to happen.
		 *
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (syncer_state != SYNCER_RUNNING)
			tsleep(&dummychan, PPAUSE, "syncfnl",
			    hz / SYNCER_SHUTDOWN_SPEEDUP);
		else if (time_uptime == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}
/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer()
{
	struct thread *td;
	int ret = 0;

	td = FIRST_THREAD_IN_PROC(updateproc);
	sleepq_remove(td, &lbolt);
	mtx_lock(&sync_mtx);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		ret = 1;
	}
	mtx_unlock(&sync_mtx);
	return (ret);
}
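/*
 * Illustrative caller, simplified from the soft updates pattern (the
 * surrounding predicate is hypothetical): a producer of dirty buffers
 * can nudge the syncer instead of blocking:
 *
 *	if (dirty_buffers_above_high_water())
 *		(void) speedup_syncer();
 *
 * Each successful call adds one slot to rushjob, and sched_sync()
 * consumes one slot per pass, skipping its one-second sleep, so the
 * speedup is bounded by syncdelay / 2 extra passes.
 */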
/*
 * Tell the syncer to speed up its work and run through its work
 * list several times, then tell it to shut down.
 */
static void
syncer_shutdown(void *arg, int howto)
{
	struct thread *td;

	if (howto & RB_NOSYNC)
		return;
	td = FIRST_THREAD_IN_PROC(updateproc);
	sleepq_remove(td, &lbolt);
	mtx_lock(&sync_mtx);
	syncer_state = SYNCER_SHUTTING_DOWN;
	rushjob = 0;
	mtx_unlock(&sync_mtx);
	kproc_shutdown(arg, howto);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp;
	struct bufobj *bo;
	int delay;
#ifdef INVARIANTS
	struct bufv *bv;
#endif

	vp = bp->b_vp;
	bo = bp->b_bufobj;
	++reassignbufcalls;

	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	/*
	 * Delete from old vnode list, if on one.
	 */
	VI_LOCK(vp);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	else
		panic("reassignbuf: Buffer %p not on queue.", bp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
				delay = metadelay;
				break;
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(bo, delay);
		}
		buf_vlist_add(bp, bo, BX_VNDIRTY);
	} else {
		buf_vlist_add(bp, bo, BX_VNCLEAN);

		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
			mtx_lock(&sync_mtx);
			LIST_REMOVE(bo, bo_synclist);
			syncer_worklist_len--;
			mtx_unlock(&sync_mtx);
			bo->bo_flag &= ~BO_ONWORKLST;
		}
	}
#ifdef INVARIANTS
	bv = &bo->bo_clean;
	bp = TAILQ_FIRST(&bv->bv_hd);
	KASSERT(bp == NULL || bp->b_bufobj == bo,
	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
	bp = TAILQ_LAST(&bv->bv_hd, buflists);
	KASSERT(bp == NULL || bp->b_bufobj == bo,
	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
	bv = &bo->bo_dirty;
	bp = TAILQ_FIRST(&bv->bv_hd);
	KASSERT(bp == NULL || bp->b_bufobj == bo,
	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
	bp = TAILQ_LAST(&bv->bv_hd, buflists);
	KASSERT(bp == NULL || bp->b_bufobj == bo,
	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
#endif
	VI_UNLOCK(vp);
}

/*
 * Increment the use and hold counts on the vnode, taking care to reference
 * the driver's usecount if this is a chardev.  The vholdl() will remove
 * the vnode from the free list if it is presently free.  Requires the
 * vnode interlock and returns with it held.
 */
static void
v_incr_usecount(struct vnode *vp)
{

	CTR3(KTR_VFS, "v_incr_usecount: vp %p holdcnt %d usecount %d\n",
	    vp, vp->v_holdcnt, vp->v_usecount);
	vp->v_usecount++;
	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
		dev_lock();
		vp->v_rdev->si_usecount++;
		dev_unlock();
	}
	vholdl(vp);
}

/*
 * Turn a holdcnt into a use+holdcnt such that only one call to
 * v_decr_usecount is needed.
 */
static void
v_upgrade_usecount(struct vnode *vp)
{

	CTR3(KTR_VFS, "v_upgrade_usecount: vp %p holdcnt %d usecount %d\n",
	    vp, vp->v_holdcnt, vp->v_usecount);
	vp->v_usecount++;
	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
		dev_lock();
		vp->v_rdev->si_usecount++;
		dev_unlock();
	}
}

/*
 * Decrement the vnode use and hold count along with the driver's usecount
 * if this is a chardev.  The vdropl() below releases the vnode interlock
 * as it may free the vnode.
 */
static void
v_decr_usecount(struct vnode *vp)
{

	CTR3(KTR_VFS, "v_decr_usecount: vp %p holdcnt %d usecount %d\n",
	    vp, vp->v_holdcnt, vp->v_usecount);
	ASSERT_VI_LOCKED(vp, __FUNCTION__);
	VNASSERT(vp->v_usecount > 0, vp,
	    ("v_decr_usecount: negative usecount"));
	vp->v_usecount--;
	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
		dev_lock();
		vp->v_rdev->si_usecount--;
		dev_unlock();
	}
	vdropl(vp);
}

/*
 * Decrement only the use count and driver use count.  This is intended to
 * be paired with a follow on vdropl() to release the remaining hold count.
 * In this way we may vgone() a vnode with a 0 usecount without risk of
 * having it end up on a free list because the hold count is kept above 0.
 */
static void
v_decr_useonly(struct vnode *vp)
{

	CTR3(KTR_VFS, "v_decr_useonly: vp %p holdcnt %d usecount %d\n",
	    vp, vp->v_holdcnt, vp->v_usecount);
	ASSERT_VI_LOCKED(vp, __FUNCTION__);
	VNASSERT(vp->v_usecount > 0, vp,
	    ("v_decr_useonly: negative usecount"));
	vp->v_usecount--;
	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
		dev_lock();
		vp->v_rdev->si_usecount--;
		dev_unlock();
	}
}
1931 */ 1932static void 1933v_decr_useonly(struct vnode *vp) 1934{ 1935 1936 CTR3(KTR_VFS, "v_decr_useonly: vp %p holdcnt %d usecount %d\n", 1937 vp, vp->v_holdcnt, vp->v_usecount); 1938 ASSERT_VI_LOCKED(vp, __FUNCTION__); 1939 VNASSERT(vp->v_usecount > 0, vp, 1940 ("v_decr_useonly: negative usecount")); 1941 vp->v_usecount--; 1942 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 1943 dev_lock(); 1944 vp->v_rdev->si_usecount--; 1945 dev_unlock(); 1946 } 1947} 1948 1949/* 1950 * Grab a particular vnode from the free list, increment its 1951 * reference count and lock it. The vnode lock bit is set if the 1952 * vnode is being eliminated in vgone. The process is awakened 1953 * when the transition is completed, and an error returned to 1954 * indicate that the vnode is no longer usable (possibly having 1955 * been changed to a new filesystem type). 1956 */ 1957int 1958vget(struct vnode *vp, int flags, struct thread *td) 1959{ 1960 int oweinact; 1961 int oldflags; 1962 int error; 1963 1964 error = 0; 1965 oldflags = flags; 1966 oweinact = 0; 1967 VFS_ASSERT_GIANT(vp->v_mount); 1968 if ((flags & LK_INTERLOCK) == 0) 1969 VI_LOCK(vp); 1970 /* 1971 * If the inactive call was deferred because vput() was called 1972 * with a shared lock, we have to do it here before another thread 1973 * gets a reference to data that should be dead. 1974 */ 1975 if (vp->v_iflag & VI_OWEINACT) { 1976 if (flags & LK_NOWAIT) { 1977 VI_UNLOCK(vp); 1978 return (EBUSY); 1979 } 1980 flags &= ~LK_TYPE_MASK; 1981 flags |= LK_EXCLUSIVE; 1982 oweinact = 1; 1983 } 1984 vholdl(vp); 1985 if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) { 1986 vdrop(vp); 1987 return (error); 1988 } 1989 VI_LOCK(vp); 1990 /* Upgrade our holdcnt to a usecount. */ 1991 v_upgrade_usecount(vp); 1992 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 1993 panic("vget: vn_lock failed to return ENOENT\n"); 1994 if (oweinact) { 1995 if (vp->v_iflag & VI_OWEINACT) 1996 vinactive(vp, td); 1997 VI_UNLOCK(vp); 1998 if ((oldflags & LK_TYPE_MASK) == 0) 1999 VOP_UNLOCK(vp, 0, td); 2000 } else 2001 VI_UNLOCK(vp); 2002 return (0); 2003} 2004 2005/* 2006 * Increase the reference count of a vnode. 2007 */ 2008void 2009vref(struct vnode *vp) 2010{ 2011 2012 VI_LOCK(vp); 2013 v_incr_usecount(vp); 2014 VI_UNLOCK(vp); 2015} 2016 2017/* 2018 * Return reference count of a vnode. 2019 * 2020 * The results of this call are only guaranteed when some mechanism other 2021 * than the VI lock is used to stop other processes from gaining references 2022 * to the vnode. This may be the case if the caller holds the only reference. 2023 * This is also useful when stale data is acceptable as race conditions may 2024 * be accounted for by some other means. 2025 */ 2026int 2027vrefcnt(struct vnode *vp) 2028{ 2029 int usecnt; 2030 2031 VI_LOCK(vp); 2032 usecnt = vp->v_usecount; 2033 VI_UNLOCK(vp); 2034 2035 return (usecnt); 2036} 2037 2038 2039/* 2040 * Vnode put/release. 2041 * If count drops to zero, call inactive routine and return to freelist. 2042 */ 2043void 2044vrele(struct vnode *vp) 2045{ 2046 struct thread *td = curthread; /* XXX */ 2047 2048 KASSERT(vp != NULL, ("vrele: null vp")); 2049 VFS_ASSERT_GIANT(vp->v_mount); 2050 2051 VI_LOCK(vp); 2052 2053 /* Skip this v_writecount check if we're going to panic below. 
*/ 2054 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2055 ("vrele: missed vn_close")); 2056 2057 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2058 vp->v_usecount == 1)) { 2059 v_decr_usecount(vp); 2060 return; 2061 } 2062 if (vp->v_usecount != 1) { 2063#ifdef DIAGNOSTIC 2064 vprint("vrele: negative ref count", vp); 2065#endif 2066 VI_UNLOCK(vp); 2067 panic("vrele: negative ref cnt"); 2068 } 2069 /* 2070 * We want to hold the vnode until the inactive finishes to 2071 * prevent vgone() races. We drop the use count here and the 2072 * hold count below when we're done. 2073 */ 2074 v_decr_useonly(vp); 2075 /* 2076 * We must call VOP_INACTIVE with the node locked. Mark 2077 * as VI_DOINGINACT to avoid recursion. 2078 */ 2079 vp->v_iflag |= VI_OWEINACT; 2080 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) { 2081 VI_LOCK(vp); 2082 if (vp->v_usecount > 0) 2083 vp->v_iflag &= ~VI_OWEINACT; 2084 if (vp->v_iflag & VI_OWEINACT) 2085 vinactive(vp, td); 2086 VOP_UNLOCK(vp, 0, td); 2087 } else { 2088 VI_LOCK(vp); 2089 if (vp->v_usecount > 0) 2090 vp->v_iflag &= ~VI_OWEINACT; 2091 } 2092 vdropl(vp); 2093} 2094 2095/* 2096 * Release an already locked vnode. This gives the same effects as 2097 * unlock+vrele(), but takes less time and avoids releasing and 2098 * re-acquiring the lock (as vrele() acquires the lock internally.) 2099 */ 2100void 2101vput(struct vnode *vp) 2102{ 2103 struct thread *td = curthread; /* XXX */ 2104 int error; 2105 2106 KASSERT(vp != NULL, ("vput: null vp")); 2107 ASSERT_VOP_LOCKED(vp, "vput"); 2108 VFS_ASSERT_GIANT(vp->v_mount); 2109 VI_LOCK(vp); 2110 /* Skip this v_writecount check if we're going to panic below. */ 2111 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2112 ("vput: missed vn_close")); 2113 error = 0; 2114 2115 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2116 vp->v_usecount == 1)) { 2117 VOP_UNLOCK(vp, 0, td); 2118 v_decr_usecount(vp); 2119 return; 2120 } 2121 2122 if (vp->v_usecount != 1) { 2123#ifdef DIAGNOSTIC 2124 vprint("vput: negative ref count", vp); 2125#endif 2126 panic("vput: negative ref cnt"); 2127 } 2128 /* 2129 * We want to hold the vnode until the inactive finishes to 2130 * prevent vgone() races. We drop the use count here and the 2131 * hold count below when we're done. 2132 */ 2133 v_decr_useonly(vp); 2134 vp->v_iflag |= VI_OWEINACT; 2135 if (VOP_ISLOCKED(vp, NULL) != LK_EXCLUSIVE) { 2136 error = VOP_LOCK(vp, LK_EXCLUPGRADE|LK_INTERLOCK|LK_NOWAIT, td); 2137 VI_LOCK(vp); 2138 if (error) { 2139 if (vp->v_usecount > 0) 2140 vp->v_iflag &= ~VI_OWEINACT; 2141 goto done; 2142 } 2143 } 2144 if (vp->v_usecount > 0) 2145 vp->v_iflag &= ~VI_OWEINACT; 2146 if (vp->v_iflag & VI_OWEINACT) 2147 vinactive(vp, td); 2148 VOP_UNLOCK(vp, 0, td); 2149done: 2150 vdropl(vp); 2151} 2152 2153/* 2154 * Somebody doesn't want the vnode recycled. 2155 */ 2156void 2157vhold(struct vnode *vp) 2158{ 2159 2160 VI_LOCK(vp); 2161 vholdl(vp); 2162 VI_UNLOCK(vp); 2163} 2164 2165void 2166vholdl(struct vnode *vp) 2167{ 2168 2169 vp->v_holdcnt++; 2170 if (VSHOULDBUSY(vp)) 2171 vbusy(vp); 2172} 2173 2174/* 2175 * Note that there is one less who cares about this vnode. vdrop() is the 2176 * opposite of vhold(). 2177 */ 2178void 2179vdrop(struct vnode *vp) 2180{ 2181 2182 VI_LOCK(vp); 2183 vdropl(vp); 2184} 2185 2186/* 2187 * Drop the hold count of the vnode. If this is the last reference to 2188 * the vnode we will free it if it has been vgone'd, otherwise it is 2189 * placed on the free list.
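 *
 * A minimal usage sketch for the vhold()/vdrop() pair (hypothetical
 * caller; vflush() below uses the same pattern): a hold keeps the
 * vnode from being recycled or destroyed while a thread blocks without
 * holding a usecount or the vnode lock:
 *
 *	VI_LOCK(vp);
 *	vholdl(vp);
 *	VI_UNLOCK(vp);
 *	... sleep, e.g. in vn_lock() ...
 *	vdrop(vp);	(may free the vnode if it was doomed)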
2190 */ 2191static void 2192vdropl(struct vnode *vp) 2193{ 2194 2195 if (vp->v_holdcnt <= 0) 2196 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2197 vp->v_holdcnt--; 2198 if (vp->v_holdcnt == 0) { 2199 if (vp->v_iflag & VI_DOOMED) { 2200 vdestroy(vp); 2201 return; 2202 } else 2203 vfree(vp); 2204 } 2205 VI_UNLOCK(vp); 2206} 2207 2208/* 2209 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2210 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2211 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2212 * failed lock upgrade. 2213 */ 2214static void 2215vinactive(struct vnode *vp, struct thread *td) 2216{ 2217 2218 ASSERT_VOP_LOCKED(vp, "vinactive"); 2219 ASSERT_VI_LOCKED(vp, "vinactive"); 2220 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2221 ("vinactive: recursed on VI_DOINGINACT")); 2222 vp->v_iflag |= VI_DOINGINACT; 2223 vp->v_iflag &= ~VI_OWEINACT; 2224 VI_UNLOCK(vp); 2225 VOP_INACTIVE(vp, td); 2226 VI_LOCK(vp); 2227 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2228 ("vinactive: lost VI_DOINGINACT")); 2229 vp->v_iflag &= ~VI_DOINGINACT; 2230} 2231 2232/* 2233 * Remove any vnodes in the vnode table belonging to mount point mp. 2234 * 2235 * If FORCECLOSE is not specified, there should not be any active ones, 2236 * return error if any are found (nb: this is a user error, not a 2237 * system error). If FORCECLOSE is specified, detach any active vnodes 2238 * that are found. 2239 * 2240 * If WRITECLOSE is set, only flush out regular file vnodes open for 2241 * writing. 2242 * 2243 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 2244 * 2245 * `rootrefs' specifies the base reference count for the root vnode 2246 * of this filesystem. The root vnode is considered busy if its 2247 * v_usecount exceeds this value. On a successful return, vflush() 2248 * will call vrele() on the root vnode exactly rootrefs times. 2249 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2250 * be zero. 2251 */ 2252#ifdef DIAGNOSTIC 2253static int busyprt = 0; /* print out busy vnodes */ 2254SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 2255#endif 2256 2257int 2258vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 2259{ 2260 struct vnode *vp, *mvp, *rootvp = NULL; 2261 struct vattr vattr; 2262 int busy = 0, error; 2263 2264 CTR1(KTR_VFS, "vflush: mp %p", mp); 2265 if (rootrefs > 0) { 2266 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2267 ("vflush: bad args")); 2268 /* 2269 * Get the filesystem root vnode. We can vput() it 2270 * immediately, since with rootrefs > 0, it won't go away. 2271 */ 2272 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp, td)) != 0) 2273 return (error); 2274 vput(rootvp); 2275 2276 } 2277 MNT_ILOCK(mp); 2278loop: 2279 MNT_VNODE_FOREACH(vp, mp, mvp) { 2280 2281 VI_LOCK(vp); 2282 vholdl(vp); 2283 MNT_IUNLOCK(mp); 2284 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td); 2285 if (error) { 2286 vdrop(vp); 2287 MNT_ILOCK(mp); 2288 MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); 2289 goto loop; 2290 } 2291 /* 2292 * Skip over any vnodes marked VV_SYSTEM. 2293 */ 2294 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2295 VOP_UNLOCK(vp, 0, td); 2296 vdrop(vp); 2297 MNT_ILOCK(mp); 2298 continue; 2299 } 2300 /* 2301 * If WRITECLOSE is set, flush out unlinked but still open 2302 * files (even if open only for reading) and regular file 2303 * vnodes open for writing.
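 *
 * (For example, a read-write to read-only downgrade is expected to use
 * vflush() this way: a filesystem would typically pass WRITECLOSE,
 * adding FORCECLOSE only when MNT_FORCE was given, so just the writers
 * are flushed unless the downgrade is forced.)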
2304 */ 2305 if (flags & WRITECLOSE) { 2306 error = VOP_GETATTR(vp, &vattr, td->td_ucred, td); 2307 VI_LOCK(vp); 2308 2309 if ((vp->v_type == VNON || 2310 (error == 0 && vattr.va_nlink > 0)) && 2311 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2312 VOP_UNLOCK(vp, 0, td); 2313 vdropl(vp); 2314 MNT_ILOCK(mp); 2315 continue; 2316 } 2317 } else 2318 VI_LOCK(vp); 2319 /* 2320 * With v_usecount == 0, all we need to do is clear out the 2321 * vnode data structures and we are done. 2322 * 2323 * If FORCECLOSE is set, forcibly close the vnode. 2324 */ 2325 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2326 VNASSERT(vp->v_usecount == 0 || 2327 (vp->v_type != VCHR && vp->v_type != VBLK), vp, 2328 ("device VNODE %p is FORCECLOSED", vp)); 2329 vgonel(vp); 2330 } else { 2331 busy++; 2332#ifdef DIAGNOSTIC 2333 if (busyprt) 2334 vprint("vflush: busy vnode", vp); 2335#endif 2336 } 2337 VOP_UNLOCK(vp, 0, td); 2338 vdropl(vp); 2339 MNT_ILOCK(mp); 2340 } 2341 MNT_IUNLOCK(mp); 2342 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2343 /* 2344 * If just the root vnode is busy, and if its refcount 2345 * is equal to `rootrefs', then go ahead and kill it. 2346 */ 2347 VI_LOCK(rootvp); 2348 KASSERT(busy > 0, ("vflush: not busy")); 2349 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2350 ("vflush: usecount %d < rootrefs %d", 2351 rootvp->v_usecount, rootrefs)); 2352 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2353 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK, td); 2354 vgone(rootvp); 2355 VOP_UNLOCK(rootvp, 0, td); 2356 busy = 0; 2357 } else 2358 VI_UNLOCK(rootvp); 2359 } 2360 if (busy) 2361 return (EBUSY); 2362 for (; rootrefs > 0; rootrefs--) 2363 vrele(rootvp); 2364 return (0); 2365} 2366 2367/* 2368 * Recycle an unused vnode to the front of the free list. 2369 */ 2370int 2371vrecycle(struct vnode *vp, struct thread *td) 2372{ 2373 int recycled; 2374 2375 ASSERT_VOP_LOCKED(vp, "vrecycle"); 2376 recycled = 0; 2377 VI_LOCK(vp); 2378 if (vp->v_usecount == 0) { 2379 recycled = 1; 2380 vgonel(vp); 2381 } 2382 VI_UNLOCK(vp); 2383 return (recycled); 2384} 2385 2386/* 2387 * Eliminate all activity associated with a vnode 2388 * in preparation for reuse. 2389 */ 2390void 2391vgone(struct vnode *vp) 2392{ 2393 VI_LOCK(vp); 2394 vgonel(vp); 2395 VI_UNLOCK(vp); 2396} 2397 2398/* 2399 * vgone, with the vp interlock held. 2400 */ 2401void 2402vgonel(struct vnode *vp) 2403{ 2404 struct thread *td; 2405 int oweinact; 2406 int active; 2407 struct mount *mp; 2408 2409 CTR1(KTR_VFS, "vgonel: vp %p", vp); 2410 ASSERT_VOP_LOCKED(vp, "vgonel"); 2411 ASSERT_VI_LOCKED(vp, "vgonel"); 2412 VNASSERT(vp->v_holdcnt, vp, 2413 ("vgonel: vp %p has no reference.", vp)); 2414 td = curthread; 2415 2416 /* 2417 * Don't vgonel if we're already doomed. 2418 */ 2419 if (vp->v_iflag & VI_DOOMED) 2420 return; 2421 vp->v_iflag |= VI_DOOMED; 2422 /* 2423 * Check to see if the vnode is in use. If so, we have to call 2424 * VOP_CLOSE() and VOP_INACTIVE(). 2425 */ 2426 active = vp->v_usecount; 2427 oweinact = (vp->v_iflag & VI_OWEINACT); 2428 VI_UNLOCK(vp); 2429 /* 2430 * Clean out any buffers associated with the vnode. 2431 * If the flush fails, just toss the buffers. 2432 */ 2433 mp = NULL; 2434 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2435 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2436 if (vinvalbuf(vp, V_SAVE, td, 0, 0) != 0) 2437 vinvalbuf(vp, 0, td, 0, 0); 2438 2439 /* 2440 * If purging an active vnode, it must be closed and 2441 * deactivated before being reclaimed. 
2442 */ 2443 if (active) 2444 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2445 if (oweinact || active) { 2446 VI_LOCK(vp); 2447 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2448 vinactive(vp, td); 2449 VI_UNLOCK(vp); 2450 } 2451 /* 2452 * Reclaim the vnode. 2453 */ 2454 if (VOP_RECLAIM(vp, td)) 2455 panic("vgone: cannot reclaim"); 2456 if (mp != NULL) 2457 vn_finished_secondary_write(mp); 2458 VNASSERT(vp->v_object == NULL, vp, 2459 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 2460 /* 2461 * Delete from old mount point vnode list. 2462 */ 2463 delmntque(vp); 2464 cache_purge(vp); 2465 /* 2466 * Done with purge, reset to the standard lock and invalidate 2467 * the vnode. 2468 */ 2469 VI_LOCK(vp); 2470 vp->v_vnlock = &vp->v_lock; 2471 vp->v_op = &dead_vnodeops; 2472 vp->v_tag = "none"; 2473 vp->v_type = VBAD; 2474} 2475 2476/* 2477 * Calculate the total number of references to a special device. 2478 */ 2479int 2480vcount(struct vnode *vp) 2481{ 2482 int count; 2483 2484 dev_lock(); 2485 count = vp->v_rdev->si_usecount; 2486 dev_unlock(); 2487 return (count); 2488} 2489 2490/* 2491 * Same as above, but using the struct cdev *as argument 2492 */ 2493int 2494count_dev(struct cdev *dev) 2495{ 2496 int count; 2497 2498 dev_lock(); 2499 count = dev->si_usecount; 2500 dev_unlock(); 2501 return(count); 2502} 2503 2504/* 2505 * Print out a description of a vnode. 2506 */ 2507static char *typename[] = 2508{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 2509 "VMARKER"}; 2510 2511void 2512vn_printf(struct vnode *vp, const char *fmt, ...) 2513{ 2514 va_list ap; 2515 char buf[96]; 2516 2517 va_start(ap, fmt); 2518 vprintf(fmt, ap); 2519 va_end(ap); 2520 printf("%p: ", (void *)vp); 2521 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 2522 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", 2523 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); 2524 buf[0] = '\0'; 2525 buf[1] = '\0'; 2526 if (vp->v_vflag & VV_ROOT) 2527 strcat(buf, "|VV_ROOT"); 2528 if (vp->v_vflag & VV_TEXT) 2529 strcat(buf, "|VV_TEXT"); 2530 if (vp->v_vflag & VV_SYSTEM) 2531 strcat(buf, "|VV_SYSTEM"); 2532 if (vp->v_iflag & VI_DOOMED) 2533 strcat(buf, "|VI_DOOMED"); 2534 if (vp->v_iflag & VI_FREE) 2535 strcat(buf, "|VI_FREE"); 2536 printf(" flags (%s)\n", buf + 1); 2537 if (mtx_owned(VI_MTX(vp))) 2538 printf(" VI_LOCKed"); 2539 if (vp->v_object != NULL) 2540 printf(" v_object %p ref %d pages %d\n", 2541 vp->v_object, vp->v_object->ref_count, 2542 vp->v_object->resident_page_count); 2543 printf(" "); 2544 lockmgr_printinfo(vp->v_vnlock); 2545 printf("\n"); 2546 if (vp->v_data != NULL) 2547 VOP_PRINT(vp); 2548} 2549 2550#ifdef DDB 2551/* 2552 * List all of the locked vnodes in the system. 2553 * Called when debugging the kernel. 2554 */ 2555DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2556{ 2557 struct mount *mp, *nmp; 2558 struct vnode *vp; 2559 2560 /* 2561 * Note: because this is DDB, we can't obey the locking semantics 2562 * for these structures, which means we could catch an inconsistent 2563 * state and dereference a nasty pointer. Not much to be done 2564 * about that. 2565 */ 2566 printf("Locked vnodes\n"); 2567 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2568 nmp = TAILQ_NEXT(mp, mnt_list); 2569 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2570 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp, NULL)) 2571 vprint("", vp); 2572 } 2573 nmp = TAILQ_NEXT(mp, mnt_list); 2574 } 2575} 2576 2577/* 2578 * Show details about the given vnode. 
2579 */ 2580DB_SHOW_COMMAND(vnode, db_show_vnode) 2581{ 2582 struct vnode *vp; 2583 2584 if (!have_addr) 2585 return; 2586 vp = (struct vnode *)addr; 2587 vn_printf(vp, "vnode "); 2588} 2589#endif /* DDB */ 2590 2591/* 2592 * Fill in a struct xvfsconf based on a struct vfsconf. 2593 */ 2594static void 2595vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp) 2596{ 2597 2598 strcpy(xvfsp->vfc_name, vfsp->vfc_name); 2599 xvfsp->vfc_typenum = vfsp->vfc_typenum; 2600 xvfsp->vfc_refcount = vfsp->vfc_refcount; 2601 xvfsp->vfc_flags = vfsp->vfc_flags; 2602 /* 2603 * These are unused in userland, we keep them 2604 * to not break binary compatibility. 2605 */ 2606 xvfsp->vfc_vfsops = NULL; 2607 xvfsp->vfc_next = NULL; 2608} 2609 2610/* 2611 * Top level filesystem related information gathering. 2612 */ 2613static int 2614sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 2615{ 2616 struct vfsconf *vfsp; 2617 struct xvfsconf xvfsp; 2618 int error; 2619 2620 error = 0; 2621 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 2622 bzero(&xvfsp, sizeof(xvfsp)); 2623 vfsconf2x(vfsp, &xvfsp); 2624 error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp); 2625 if (error) 2626 break; 2627 } 2628 return (error); 2629} 2630 2631SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist, 2632 "S,xvfsconf", "List of all configured filesystems"); 2633 2634#ifndef BURN_BRIDGES 2635static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 2636 2637static int 2638vfs_sysctl(SYSCTL_HANDLER_ARGS) 2639{ 2640 int *name = (int *)arg1 - 1; /* XXX */ 2641 u_int namelen = arg2 + 1; /* XXX */ 2642 struct vfsconf *vfsp; 2643 struct xvfsconf xvfsp; 2644 2645 printf("WARNING: userland calling deprecated sysctl, " 2646 "please rebuild world\n"); 2647 2648#if 1 || defined(COMPAT_PRELITE2) 2649 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 2650 if (namelen == 1) 2651 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2652#endif 2653 2654 switch (name[1]) { 2655 case VFS_MAXTYPENUM: 2656 if (namelen != 2) 2657 return (ENOTDIR); 2658 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2659 case VFS_CONF: 2660 if (namelen != 3) 2661 return (ENOTDIR); /* overloaded */ 2662 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) 2663 if (vfsp->vfc_typenum == name[2]) 2664 break; 2665 if (vfsp == NULL) 2666 return (EOPNOTSUPP); 2667 bzero(&xvfsp, sizeof(xvfsp)); 2668 vfsconf2x(vfsp, &xvfsp); 2669 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 2670 } 2671 return (EOPNOTSUPP); 2672} 2673 2674static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, 2675 vfs_sysctl, "Generic filesystem"); 2676 2677#if 1 || defined(COMPAT_PRELITE2) 2678 2679static int 2680sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 2681{ 2682 int error; 2683 struct vfsconf *vfsp; 2684 struct ovfsconf ovfs; 2685 2686 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 2687 bzero(&ovfs, sizeof(ovfs)); 2688 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2689 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2690 ovfs.vfc_index = vfsp->vfc_typenum; 2691 ovfs.vfc_refcount = vfsp->vfc_refcount; 2692 ovfs.vfc_flags = vfsp->vfc_flags; 2693 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2694 if (error) 2695 return error; 2696 } 2697 return 0; 2698} 2699 2700#endif /* 1 || COMPAT_PRELITE2 */ 2701#endif /* !BURN_BRIDGES */ 2702 2703#define KINFO_VNODESLOP 10 2704#ifdef notyet 2705/* 2706 * Dump vnode list (via sysctl). 
2707 */ 2708/* ARGSUSED */ 2709static int 2710sysctl_vnode(SYSCTL_HANDLER_ARGS) 2711{ 2712 struct xvnode *xvn; 2713 struct thread *td = req->td; 2714 struct mount *mp; 2715 struct vnode *vp; 2716 int error, len, n; 2717 2718 /* 2719 * Stale numvnodes access is not fatal here. 2720 */ 2721 req->lock = 0; 2722 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 2723 if (!req->oldptr) 2724 /* Make an estimate */ 2725 return (SYSCTL_OUT(req, 0, len)); 2726 2727 error = sysctl_wire_old_buffer(req, 0); 2728 if (error != 0) 2729 return (error); 2730 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 2731 n = 0; 2732 mtx_lock(&mountlist_mtx); 2733 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 2734 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) 2735 continue; 2736 MNT_ILOCK(mp); 2737 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2738 if (n == len) 2739 break; 2740 vref(vp); 2741 xvn[n].xv_size = sizeof *xvn; 2742 xvn[n].xv_vnode = vp; 2743 xvn[n].xv_id = 0; /* XXX compat */ 2744#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 2745 XV_COPY(usecount); 2746 XV_COPY(writecount); 2747 XV_COPY(holdcnt); 2748 XV_COPY(mount); 2749 XV_COPY(numoutput); 2750 XV_COPY(type); 2751#undef XV_COPY 2752 xvn[n].xv_flag = vp->v_vflag; 2753 2754 switch (vp->v_type) { 2755 case VREG: 2756 case VDIR: 2757 case VLNK: 2758 break; 2759 case VBLK: 2760 case VCHR: 2761 if (vp->v_rdev == NULL) { 2762 vrele(vp); 2763 continue; 2764 } 2765 xvn[n].xv_dev = dev2udev(vp->v_rdev); 2766 break; 2767 case VSOCK: 2768 xvn[n].xv_socket = vp->v_socket; 2769 break; 2770 case VFIFO: 2771 xvn[n].xv_fifo = vp->v_fifoinfo; 2772 break; 2773 case VNON: 2774 case VBAD: 2775 default: 2776 /* shouldn't happen? */ 2777 vrele(vp); 2778 continue; 2779 } 2780 vrele(vp); 2781 ++n; 2782 } 2783 MNT_IUNLOCK(mp); 2784 mtx_lock(&mountlist_mtx); 2785 vfs_unbusy(mp, td); 2786 if (n == len) 2787 break; 2788 } 2789 mtx_unlock(&mountlist_mtx); 2790 2791 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 2792 free(xvn, M_TEMP); 2793 return (error); 2794} 2795 2796SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 2797 0, 0, sysctl_vnode, "S,xvnode", ""); 2798#endif 2799 2800/* 2801 * Unmount all filesystems. The list is traversed in reverse order 2802 * of mounting to avoid dependencies. 2803 */ 2804void 2805vfs_unmountall(void) 2806{ 2807 struct mount *mp; 2808 struct thread *td; 2809 int error; 2810 2811 KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread")); 2812 td = curthread; 2813 /* 2814 * Since this only runs when rebooting, it is not interlocked. 2815 */ 2816 while(!TAILQ_EMPTY(&mountlist)) { 2817 mp = TAILQ_LAST(&mountlist, mntlist); 2818 error = dounmount(mp, MNT_FORCE, td); 2819 if (error) { 2820 TAILQ_REMOVE(&mountlist, mp, mnt_list); 2821 /* 2822 * XXX: Due to the way in which we mount the root 2823 * file system off of devfs, devfs will generate a 2824 * "busy" warning when we try to unmount it before 2825 * the root. Don't print a warning as a result in 2826 * order to avoid false positive errors that may 2827 * cause needless upset. 2828 */ 2829 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 2830 printf("unmount of %s failed (", 2831 mp->mnt_stat.f_mntonname); 2832 if (error == EBUSY) 2833 printf("BUSY)\n"); 2834 else 2835 printf("%d)\n", error); 2836 } 2837 } else { 2838 /* The unmount has removed mp from the mountlist */ 2839 } 2840 } 2841} 2842 2843/* 2844 * perform msync on all vnodes under a mount point 2845 * the mount point must be locked. 
2846 */ 2847void 2848vfs_msync(struct mount *mp, int flags) 2849{ 2850 struct vnode *vp, *mvp; 2851 struct vm_object *obj; 2852 2853 MNT_ILOCK(mp); 2854 MNT_VNODE_FOREACH(vp, mp, mvp) { 2855 VI_LOCK(vp); 2856 if ((vp->v_iflag & VI_OBJDIRTY) && 2857 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2858 MNT_IUNLOCK(mp); 2859 if (!vget(vp, 2860 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 2861 curthread)) { 2862 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 2863 vput(vp); 2864 MNT_ILOCK(mp); 2865 continue; 2866 } 2867 2868 obj = vp->v_object; 2869 if (obj != NULL) { 2870 VM_OBJECT_LOCK(obj); 2871 vm_object_page_clean(obj, 0, 0, 2872 flags == MNT_WAIT ? 2873 OBJPC_SYNC : OBJPC_NOSYNC); 2874 VM_OBJECT_UNLOCK(obj); 2875 } 2876 vput(vp); 2877 } 2878 MNT_ILOCK(mp); 2879 } else 2880 VI_UNLOCK(vp); 2881 } 2882 MNT_IUNLOCK(mp); 2883} 2884 2885/* 2886 * Mark a vnode as free, putting it up for recycling. 2887 */ 2888static void 2889vfree(struct vnode *vp) 2890{ 2891 2892 CTR1(KTR_VFS, "vfree vp %p", vp); 2893 ASSERT_VI_LOCKED(vp, "vfree"); 2894 mtx_lock(&vnode_free_list_mtx); 2895 VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed.")); 2896 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free")); 2897 VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't")); 2898 VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp, 2899 ("vfree: Freeing doomed vnode")); 2900 if (vp->v_iflag & VI_AGE) { 2901 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2902 } else { 2903 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2904 } 2905 freevnodes++; 2906 vp->v_iflag &= ~VI_AGE; 2907 vp->v_iflag |= VI_FREE; 2908 mtx_unlock(&vnode_free_list_mtx); 2909} 2910 2911/* 2912 * Opposite of vfree() - mark a vnode as in use. 2913 */ 2914static void 2915vbusy(struct vnode *vp) 2916{ 2917 CTR1(KTR_VFS, "vbusy vp %p", vp); 2918 ASSERT_VI_LOCKED(vp, "vbusy"); 2919 VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free")); 2920 VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed.")); 2921 2922 mtx_lock(&vnode_free_list_mtx); 2923 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2924 freevnodes--; 2925 vp->v_iflag &= ~(VI_FREE|VI_AGE); 2926 mtx_unlock(&vnode_free_list_mtx); 2927} 2928 2929/* 2930 * Initalize per-vnode helper structure to hold poll-related state. 2931 */ 2932void 2933v_addpollinfo(struct vnode *vp) 2934{ 2935 struct vpollinfo *vi; 2936 2937 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 2938 if (vp->v_pollinfo != NULL) { 2939 uma_zfree(vnodepoll_zone, vi); 2940 return; 2941 } 2942 vp->v_pollinfo = vi; 2943 mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 2944 knlist_init(&vp->v_pollinfo->vpi_selinfo.si_note, vp, vfs_knllock, 2945 vfs_knlunlock, vfs_knllocked); 2946} 2947 2948/* 2949 * Record a process's interest in events which might happen to 2950 * a vnode. Because poll uses the historic select-style interface 2951 * internally, this routine serves as both the ``check for any 2952 * pending events'' and the ``record my interest in future events'' 2953 * functions. (These are done together, while the lock is held, 2954 * to avoid race conditions.) 
2955 */ 2956int 2957vn_pollrecord(struct vnode *vp, struct thread *td, int events) 2958{ 2959 2960 if (vp->v_pollinfo == NULL) 2961 v_addpollinfo(vp); 2962 mtx_lock(&vp->v_pollinfo->vpi_lock); 2963 if (vp->v_pollinfo->vpi_revents & events) { 2964 /* 2965 * This leaves events we are not interested 2966 * in available for the other process which 2967 * which presumably had requested them 2968 * (otherwise they would never have been 2969 * recorded). 2970 */ 2971 events &= vp->v_pollinfo->vpi_revents; 2972 vp->v_pollinfo->vpi_revents &= ~events; 2973 2974 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2975 return events; 2976 } 2977 vp->v_pollinfo->vpi_events |= events; 2978 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 2979 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2980 return 0; 2981} 2982 2983/* 2984 * Routine to create and manage a filesystem syncer vnode. 2985 */ 2986#define sync_close ((int (*)(struct vop_close_args *))nullop) 2987static int sync_fsync(struct vop_fsync_args *); 2988static int sync_inactive(struct vop_inactive_args *); 2989static int sync_reclaim(struct vop_reclaim_args *); 2990 2991static struct vop_vector sync_vnodeops = { 2992 .vop_bypass = VOP_EOPNOTSUPP, 2993 .vop_close = sync_close, /* close */ 2994 .vop_fsync = sync_fsync, /* fsync */ 2995 .vop_inactive = sync_inactive, /* inactive */ 2996 .vop_reclaim = sync_reclaim, /* reclaim */ 2997 .vop_lock = vop_stdlock, /* lock */ 2998 .vop_unlock = vop_stdunlock, /* unlock */ 2999 .vop_islocked = vop_stdislocked, /* islocked */ 3000}; 3001 3002/* 3003 * Create a new filesystem syncer vnode for the specified mount point. 3004 */ 3005int 3006vfs_allocate_syncvnode(struct mount *mp) 3007{ 3008 struct vnode *vp; 3009 static long start, incr, next; 3010 int error; 3011 3012 /* Allocate a new vnode */ 3013 if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) { 3014 mp->mnt_syncer = NULL; 3015 return (error); 3016 } 3017 vp->v_type = VNON; 3018 /* 3019 * Place the vnode onto the syncer worklist. We attempt to 3020 * scatter them about on the list so that they will go off 3021 * at evenly distributed times even if all the filesystems 3022 * are mounted at once. 3023 */ 3024 next += incr; 3025 if (next == 0 || next > syncer_maxdelay) { 3026 start /= 2; 3027 incr /= 2; 3028 if (start == 0) { 3029 start = syncer_maxdelay / 2; 3030 incr = syncer_maxdelay; 3031 } 3032 next = start; 3033 } 3034 VI_LOCK(vp); 3035 vn_syncer_add_to_worklist(&vp->v_bufobj, 3036 syncdelay > 0 ? next % syncdelay : 0); 3037 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 3038 mtx_lock(&sync_mtx); 3039 sync_vnode_count++; 3040 mtx_unlock(&sync_mtx); 3041 VI_UNLOCK(vp); 3042 mp->mnt_syncer = vp; 3043 return (0); 3044} 3045 3046/* 3047 * Do a lazy sync of the filesystem. 3048 */ 3049static int 3050sync_fsync(struct vop_fsync_args *ap) 3051{ 3052 struct vnode *syncvp = ap->a_vp; 3053 struct mount *mp = syncvp->v_mount; 3054 struct thread *td = ap->a_td; 3055 int error; 3056 struct bufobj *bo; 3057 3058 /* 3059 * We only need to do something if this is a lazy evaluation. 3060 */ 3061 if (ap->a_waitfor != MNT_LAZY) 3062 return (0); 3063 3064 /* 3065 * Move ourselves to the back of the sync list. 3066 */ 3067 bo = &syncvp->v_bufobj; 3068 BO_LOCK(bo); 3069 vn_syncer_add_to_worklist(bo, syncdelay); 3070 BO_UNLOCK(bo); 3071 3072 /* 3073 * Walk the list of vnodes pushing all that are dirty and 3074 * not already on the sync list. 
3075 */ 3076 mtx_lock(&mountlist_mtx); 3077 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) { 3078 mtx_unlock(&mountlist_mtx); 3079 return (0); 3080 } 3081 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3082 vfs_unbusy(mp, td); 3083 return (0); 3084 } 3085 MNT_ILOCK(mp); 3086 mp->mnt_noasync++; 3087 mp->mnt_kern_flag &= ~MNTK_ASYNC; 3088 MNT_IUNLOCK(mp); 3089 vfs_msync(mp, MNT_NOWAIT); 3090 error = VFS_SYNC(mp, MNT_LAZY, td); 3091 MNT_ILOCK(mp); 3092 mp->mnt_noasync--; 3093 if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0) 3094 mp->mnt_kern_flag |= MNTK_ASYNC; 3095 MNT_IUNLOCK(mp); 3096 vn_finished_write(mp); 3097 vfs_unbusy(mp, td); 3098 return (error); 3099} 3100 3101/* 3102 * The syncer vnode is no referenced. 3103 */ 3104static int 3105sync_inactive(struct vop_inactive_args *ap) 3106{ 3107 3108 vgone(ap->a_vp); 3109 return (0); 3110} 3111 3112/* 3113 * The syncer vnode is no longer needed and is being decommissioned. 3114 * 3115 * Modifications to the worklist must be protected by sync_mtx. 3116 */ 3117static int 3118sync_reclaim(struct vop_reclaim_args *ap) 3119{ 3120 struct vnode *vp = ap->a_vp; 3121 struct bufobj *bo; 3122 3123 VI_LOCK(vp); 3124 bo = &vp->v_bufobj; 3125 vp->v_mount->mnt_syncer = NULL; 3126 if (bo->bo_flag & BO_ONWORKLST) { 3127 mtx_lock(&sync_mtx); 3128 LIST_REMOVE(bo, bo_synclist); 3129 syncer_worklist_len--; 3130 sync_vnode_count--; 3131 mtx_unlock(&sync_mtx); 3132 bo->bo_flag &= ~BO_ONWORKLST; 3133 } 3134 VI_UNLOCK(vp); 3135 3136 return (0); 3137} 3138 3139/* 3140 * Check if vnode represents a disk device 3141 */ 3142int 3143vn_isdisk(struct vnode *vp, int *errp) 3144{ 3145 int error; 3146 3147 error = 0; 3148 dev_lock(); 3149 if (vp->v_type != VCHR) 3150 error = ENOTBLK; 3151 else if (vp->v_rdev == NULL) 3152 error = ENXIO; 3153 else if (vp->v_rdev->si_devsw == NULL) 3154 error = ENXIO; 3155 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3156 error = ENOTBLK; 3157 dev_unlock(); 3158 if (errp != NULL) 3159 *errp = error; 3160 return (error == 0); 3161} 3162 3163/* 3164 * Common filesystem object access control check routine. Accepts a 3165 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3166 * and optional call-by-reference privused argument allowing vaccess() 3167 * to indicate to the caller whether privilege was used to satisfy the 3168 * request (obsoleted). Returns 0 on success, or an errno on failure. 3169 * 3170 * The ifdef'd CAPABILITIES version is here for reference, but is not 3171 * actually used. 3172 */ 3173int 3174vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3175 mode_t acc_mode, struct ucred *cred, int *privused) 3176{ 3177 mode_t dac_granted; 3178#ifdef CAPABILITIES 3179 mode_t cap_granted; 3180#endif 3181 3182 /* 3183 * Look for a normal, non-privileged way to access the file/directory 3184 * as requested. If it exists, go with that. 3185 */ 3186 3187 if (privused != NULL) 3188 *privused = 0; 3189 3190 dac_granted = 0; 3191 3192 /* Check the owner. 
*/ 3193 if (cred->cr_uid == file_uid) { 3194 dac_granted |= VADMIN; 3195 if (file_mode & S_IXUSR) 3196 dac_granted |= VEXEC; 3197 if (file_mode & S_IRUSR) 3198 dac_granted |= VREAD; 3199 if (file_mode & S_IWUSR) 3200 dac_granted |= (VWRITE | VAPPEND); 3201 3202 if ((acc_mode & dac_granted) == acc_mode) 3203 return (0); 3204 3205 goto privcheck; 3206 } 3207 3208 /* Otherwise, check the groups (first match) */ 3209 if (groupmember(file_gid, cred)) { 3210 if (file_mode & S_IXGRP) 3211 dac_granted |= VEXEC; 3212 if (file_mode & S_IRGRP) 3213 dac_granted |= VREAD; 3214 if (file_mode & S_IWGRP) 3215 dac_granted |= (VWRITE | VAPPEND); 3216 3217 if ((acc_mode & dac_granted) == acc_mode) 3218 return (0); 3219 3220 goto privcheck; 3221 } 3222 3223 /* Otherwise, check everyone else. */ 3224 if (file_mode & S_IXOTH) 3225 dac_granted |= VEXEC; 3226 if (file_mode & S_IROTH) 3227 dac_granted |= VREAD; 3228 if (file_mode & S_IWOTH) 3229 dac_granted |= (VWRITE | VAPPEND); 3230 if ((acc_mode & dac_granted) == acc_mode) 3231 return (0); 3232 3233privcheck: 3234 if (!suser_cred(cred, SUSER_ALLOWJAIL)) { 3235 /* XXX audit: privilege used */ 3236 if (privused != NULL) 3237 *privused = 1; 3238 return (0); 3239 } 3240 3241#ifdef CAPABILITIES 3242 /* 3243 * Build a capability mask to determine if the set of capabilities 3244 * satisfies the requirements when combined with the granted mask 3245 * from above. For each capability, if the capability is required, 3246 * bitwise or the request type onto the cap_granted mask. 3247 * 3248 * Note: This is never actually used, but is here for reference 3249 * purposes. 3250 */ 3251 cap_granted = 0; 3252 3253 if (type == VDIR) { 3254 /* 3255 * For directories, use CAP_DAC_READ_SEARCH to satisfy 3256 * VEXEC requests, instead of CAP_DAC_EXECUTE. 3257 */ 3258 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3259 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, 3260 SUSER_ALLOWJAIL)) 3261 cap_granted |= VEXEC; 3262 } else { 3263 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3264 !cap_check(cred, NULL, CAP_DAC_EXECUTE, SUSER_ALLOWJAIL)) 3265 cap_granted |= VEXEC; 3266 } 3267 3268 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) && 3269 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL)) 3270 cap_granted |= VREAD; 3271 3272 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3273 !cap_check(cred, NULL, CAP_DAC_WRITE, SUSER_ALLOWJAIL)) 3274 cap_granted |= (VWRITE | VAPPEND); 3275 3276 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3277 !cap_check(cred, NULL, CAP_FOWNER, SUSER_ALLOWJAIL)) 3278 cap_granted |= VADMIN; 3279 3280 if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) { 3281 /* XXX audit: privilege used */ 3282 if (privused != NULL) 3283 *privused = 1; 3284 return (0); 3285 } 3286#endif 3287 3288 return ((acc_mode & VADMIN) ? EPERM : EACCES); 3289} 3290 3291/* 3292 * Credential check based on process requesting service, and per-attribute 3293 * permissions. 3294 */ 3295int 3296extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 3297 struct thread *td, int access) 3298{ 3299 3300 /* 3301 * Kernel-invoked always succeeds. 3302 */ 3303 if (cred == NOCRED) 3304 return (0); 3305 3306 /* 3307 * Do not allow privileged processes in jail to directly 3308 * manipulate system attributes. 3309 * 3310 * XXX What capability should apply here? 3311 * Probably CAP_SYS_SETFFLAG. 
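 *
 * An illustrative call, as a filesystem's extended-attribute read path
 * might make it (hypothetical caller; VREAD here is the access mode
 * handed to VOP_ACCESS() below):
 *
 *	error = extattr_check_cred(vp, attrnamespace, cred, td, VREAD);
 *	if (error != 0)
 *		return (error);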
3312 */ 3313 switch (attrnamespace) { 3314 case EXTATTR_NAMESPACE_SYSTEM: 3315 /* Potentially should be: return (EPERM); */ 3316 return (suser_cred(cred, 0)); 3317 case EXTATTR_NAMESPACE_USER: 3318 return (VOP_ACCESS(vp, access, cred, td)); 3319 default: 3320 return (EPERM); 3321 } 3322} 3323 3324#ifdef DEBUG_VFS_LOCKS 3325/* 3326 * This only exists to supress warnings from unlocked specfs accesses. It is 3327 * no longer ok to have an unlocked VFS. 3328 */ 3329#define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD) 3330 3331int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 3332SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, ""); 3333 3334int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 3335SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, ""); 3336 3337int vfs_badlock_print = 1; /* Print lock violations. */ 3338SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, ""); 3339 3340#ifdef KDB 3341int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 3342SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, ""); 3343#endif 3344 3345static void 3346vfs_badlock(const char *msg, const char *str, struct vnode *vp) 3347{ 3348 3349#ifdef KDB 3350 if (vfs_badlock_backtrace) 3351 kdb_backtrace(); 3352#endif 3353 if (vfs_badlock_print) 3354 printf("%s: %p %s\n", str, (void *)vp, msg); 3355 if (vfs_badlock_ddb) 3356 kdb_enter("lock violation"); 3357} 3358 3359void 3360assert_vi_locked(struct vnode *vp, const char *str) 3361{ 3362 3363 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 3364 vfs_badlock("interlock is not locked but should be", str, vp); 3365} 3366 3367void 3368assert_vi_unlocked(struct vnode *vp, const char *str) 3369{ 3370 3371 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 3372 vfs_badlock("interlock is locked but should not be", str, vp); 3373} 3374 3375void 3376assert_vop_locked(struct vnode *vp, const char *str) 3377{ 3378 3379 if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0) 3380 vfs_badlock("is not locked but should be", str, vp); 3381} 3382 3383void 3384assert_vop_unlocked(struct vnode *vp, const char *str) 3385{ 3386 3387 if (vp && !IGNORE_LOCK(vp) && 3388 VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE) 3389 vfs_badlock("is locked but should not be", str, vp); 3390} 3391 3392void 3393assert_vop_elocked(struct vnode *vp, const char *str) 3394{ 3395 3396 if (vp && !IGNORE_LOCK(vp) && 3397 VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE) 3398 vfs_badlock("is not exclusive locked but should be", str, vp); 3399} 3400 3401#if 0 3402void 3403assert_vop_elocked_other(struct vnode *vp, const char *str) 3404{ 3405 3406 if (vp && !IGNORE_LOCK(vp) && 3407 VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER) 3408 vfs_badlock("is not exclusive locked by another thread", 3409 str, vp); 3410} 3411 3412void 3413assert_vop_slocked(struct vnode *vp, const char *str) 3414{ 3415 3416 if (vp && !IGNORE_LOCK(vp) && 3417 VOP_ISLOCKED(vp, curthread) != LK_SHARED) 3418 vfs_badlock("is not locked shared but should be", str, vp); 3419} 3420#endif /* 0 */ 3421#endif /* DEBUG_VFS_LOCKS */ 3422 3423void 3424vop_rename_pre(void *ap) 3425{ 3426 struct vop_rename_args *a = ap; 3427 3428#ifdef DEBUG_VFS_LOCKS 3429 if (a->a_tvp) 3430 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 3431 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 3432 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 3433 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 3434 
/* Check the source (from). */ 3436 if (a->a_tdvp != a->a_fdvp && a->a_tvp != a->a_fdvp) 3437 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 3438 if (a->a_tvp != a->a_fvp) 3439 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 3440 3441 /* Check the target. */ 3442 if (a->a_tvp) 3443 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 3444 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 3445#endif 3446 if (a->a_tdvp != a->a_fdvp) 3447 vhold(a->a_fdvp); 3448 if (a->a_tvp != a->a_fvp) 3449 vhold(a->a_fvp); 3450 vhold(a->a_tdvp); 3451 if (a->a_tvp) 3452 vhold(a->a_tvp); 3453} 3454 3455void 3456vop_strategy_pre(void *ap) 3457{ 3458#ifdef DEBUG_VFS_LOCKS 3459 struct vop_strategy_args *a; 3460 struct buf *bp; 3461 3462 a = ap; 3463 bp = a->a_bp; 3464 3465 /* 3466 * Cluster ops lock their component buffers but not the IO container. 3467 */ 3468 if ((bp->b_flags & B_CLUSTER) != 0) 3469 return; 3470 3471 if (BUF_REFCNT(bp) < 1) { 3472 if (vfs_badlock_print) 3473 printf( 3474 "VOP_STRATEGY: bp is not locked but should be\n"); 3475 if (vfs_badlock_ddb) 3476 kdb_enter("lock violation"); 3477 } 3478#endif 3479} 3480 3481void 3482vop_lookup_pre(void *ap) 3483{ 3484#ifdef DEBUG_VFS_LOCKS 3485 struct vop_lookup_args *a; 3486 struct vnode *dvp; 3487 3488 a = ap; 3489 dvp = a->a_dvp; 3490 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP"); 3491 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP"); 3492#endif 3493} 3494 3495void 3496vop_lookup_post(void *ap, int rc) 3497{ 3498#ifdef DEBUG_VFS_LOCKS 3499 struct vop_lookup_args *a; 3500 struct vnode *dvp; 3501 struct vnode *vp; 3502 3503 a = ap; 3504 dvp = a->a_dvp; 3505 vp = *(a->a_vpp); 3506 3507 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP"); 3508 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP"); 3509 3510 if (!rc) 3511 ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)"); 3512#endif 3513} 3514 3515void 3516vop_lock_pre(void *ap) 3517{ 3518#ifdef DEBUG_VFS_LOCKS 3519 struct vop_lock_args *a = ap; 3520 3521 if ((a->a_flags & LK_INTERLOCK) == 0) 3522 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 3523 else 3524 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 3525#endif 3526} 3527 3528void 3529vop_lock_post(void *ap, int rc) 3530{ 3531#ifdef DEBUG_VFS_LOCKS 3532 struct vop_lock_args *a = ap; 3533 3534 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 3535 if (rc == 0) 3536 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 3537#endif 3538} 3539 3540void 3541vop_unlock_pre(void *ap) 3542{ 3543#ifdef DEBUG_VFS_LOCKS 3544 struct vop_unlock_args *a = ap; 3545 3546 if (a->a_flags & LK_INTERLOCK) 3547 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 3548 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 3549#endif 3550} 3551 3552void 3553vop_unlock_post(void *ap, int rc) 3554{ 3555#ifdef DEBUG_VFS_LOCKS 3556 struct vop_unlock_args *a = ap; 3557 3558 if (a->a_flags & LK_INTERLOCK) 3559 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 3560#endif 3561} 3562 3563void 3564vop_create_post(void *ap, int rc) 3565{ 3566 struct vop_create_args *a = ap; 3567 3568 if (!rc) 3569 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 3570} 3571 3572void 3573vop_link_post(void *ap, int rc) 3574{ 3575 struct vop_link_args *a = ap; 3576 3577 if (!rc) { 3578 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 3579 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 3580 } 3581} 3582 3583void 3584vop_mkdir_post(void *ap, int rc) 3585{ 3586 struct vop_mkdir_args *a = ap; 3587 3588 if (!rc) 3589 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 3590} 3591 3592void 3593vop_mknod_post(void *ap, int rc) 3594{ 3595 struct vop_mknod_args *a = ap; 3596 3597 if (!rc) 3598 VFS_KNOTE_LOCKED(a->a_dvp,
NOTE_WRITE); 3599} 3600 3601void 3602vop_remove_post(void *ap, int rc) 3603{ 3604 struct vop_remove_args *a = ap; 3605 3606 if (!rc) { 3607 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 3608 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 3609 } 3610} 3611 3612void 3613vop_rename_post(void *ap, int rc) 3614{ 3615 struct vop_rename_args *a = ap; 3616 3617 if (!rc) { 3618 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 3619 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 3620 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 3621 if (a->a_tvp) 3622 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 3623 } 3624 if (a->a_tdvp != a->a_fdvp) 3625 vdrop(a->a_fdvp); 3626 if (a->a_tvp != a->a_fvp) 3627 vdrop(a->a_fvp); 3628 vdrop(a->a_tdvp); 3629 if (a->a_tvp) 3630 vdrop(a->a_tvp); 3631} 3632 3633void 3634vop_rmdir_post(void *ap, int rc) 3635{ 3636 struct vop_rmdir_args *a = ap; 3637 3638 if (!rc) { 3639 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 3640 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 3641 } 3642} 3643 3644void 3645vop_setattr_post(void *ap, int rc) 3646{ 3647 struct vop_setattr_args *a = ap; 3648 3649 if (!rc) 3650 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 3651} 3652 3653void 3654vop_symlink_post(void *ap, int rc) 3655{ 3656 struct vop_symlink_args *a = ap; 3657 3658 if (!rc) 3659 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 3660} 3661 3662static struct knlist fs_knlist; 3663 3664static void 3665vfs_event_init(void *arg) 3666{ 3667 knlist_init(&fs_knlist, NULL, NULL, NULL, NULL); 3668} 3669/* XXX - correct order? */ 3670SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 3671 3672void 3673vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused) 3674{ 3675 3676 KNOTE_UNLOCKED(&fs_knlist, event); 3677} 3678 3679static int filt_fsattach(struct knote *kn); 3680static void filt_fsdetach(struct knote *kn); 3681static int filt_fsevent(struct knote *kn, long hint); 3682 3683struct filterops fs_filtops = 3684 { 0, filt_fsattach, filt_fsdetach, filt_fsevent }; 3685 3686static int 3687filt_fsattach(struct knote *kn) 3688{ 3689 3690 kn->kn_flags |= EV_CLEAR; 3691 knlist_add(&fs_knlist, kn, 0); 3692 return (0); 3693} 3694 3695static void 3696filt_fsdetach(struct knote *kn) 3697{ 3698 3699 knlist_remove(&fs_knlist, kn, 0); 3700} 3701 3702static int 3703filt_fsevent(struct knote *kn, long hint) 3704{ 3705 3706 kn->kn_fflags |= hint; 3707 return (kn->kn_fflags != 0); 3708} 3709 3710static int 3711sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 3712{ 3713 struct vfsidctl vc; 3714 int error; 3715 struct mount *mp; 3716 3717 error = SYSCTL_IN(req, &vc, sizeof(vc)); 3718 if (error) 3719 return (error); 3720 if (vc.vc_vers != VFS_CTL_VERS1) 3721 return (EINVAL); 3722 mp = vfs_getvfs(&vc.vc_fsid); 3723 if (mp == NULL) 3724 return (ENOENT); 3725 /* ensure that a specific sysctl goes to the right filesystem. */ 3726 if (strcmp(vc.vc_fstypename, "*") != 0 && 3727 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 3728 vfs_rel(mp); 3729 return (EINVAL); 3730 } 3731 VCTLTOREQ(&vc, req); 3732 error = VFS_SYSCTL(mp, vc.vc_op, req); 3733 vfs_rel(mp); 3734 return (error); 3735} 3736 3737SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR, 3738 NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid"); 3739 3740/* 3741 * Function to initialize a va_filerev field sensibly. 3742 * XXX: Wouldn't a random number make a lot more sense ?? 
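 *
 * Packing sketch: the uptime seconds land in the high 32 bits and the
 * top half of the binary fraction in the low 32 bits, so an uptime of
 * 2.5 seconds yields roughly (2 << 32) | 0x80000000; the point is only
 * that successive calls produce monotonically increasing values.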
3743 */ 3744u_quad_t 3745init_va_filerev(void) 3746{ 3747 struct bintime bt; 3748 3749 getbinuptime(&bt); 3750 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 3751} 3752 3753static int filt_vfsread(struct knote *kn, long hint); 3754static int filt_vfswrite(struct knote *kn, long hint); 3755static int filt_vfsvnode(struct knote *kn, long hint); 3756static void filt_vfsdetach(struct knote *kn); 3757static struct filterops vfsread_filtops = 3758 { 1, NULL, filt_vfsdetach, filt_vfsread }; 3759static struct filterops vfswrite_filtops = 3760 { 1, NULL, filt_vfsdetach, filt_vfswrite }; 3761static struct filterops vfsvnode_filtops = 3762 { 1, NULL, filt_vfsdetach, filt_vfsvnode }; 3763 3764static void 3765vfs_knllock(void *arg) 3766{ 3767 struct vnode *vp = arg; 3768 3769 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread); 3770} 3771 3772static void 3773vfs_knlunlock(void *arg) 3774{ 3775 struct vnode *vp = arg; 3776 3777 VOP_UNLOCK(vp, 0, curthread); 3778} 3779 3780static int 3781vfs_knllocked(void *arg) 3782{ 3783 struct vnode *vp = arg; 3784 3785 return (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE); 3786} 3787 3788int 3789vfs_kqfilter(struct vop_kqfilter_args *ap) 3790{ 3791 struct vnode *vp = ap->a_vp; 3792 struct knote *kn = ap->a_kn; 3793 struct knlist *knl; 3794 3795 switch (kn->kn_filter) { 3796 case EVFILT_READ: 3797 kn->kn_fop = &vfsread_filtops; 3798 break; 3799 case EVFILT_WRITE: 3800 kn->kn_fop = &vfswrite_filtops; 3801 break; 3802 case EVFILT_VNODE: 3803 kn->kn_fop = &vfsvnode_filtops; 3804 break; 3805 default: 3806 return (EINVAL); 3807 } 3808 3809 kn->kn_hook = (caddr_t)vp; 3810 3811 if (vp->v_pollinfo == NULL) 3812 v_addpollinfo(vp); 3813 if (vp->v_pollinfo == NULL) 3814 return (ENOMEM); 3815 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 3816 knlist_add(knl, kn, 0); 3817 3818 return (0); 3819} 3820 3821/* 3822 * Detach knote from vnode 3823 */ 3824static void 3825filt_vfsdetach(struct knote *kn) 3826{ 3827 struct vnode *vp = (struct vnode *)kn->kn_hook; 3828 3829 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 3830 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 3831} 3832 3833/*ARGSUSED*/ 3834static int 3835filt_vfsread(struct knote *kn, long hint) 3836{ 3837 struct vnode *vp = (struct vnode *)kn->kn_hook; 3838 struct vattr va; 3839 3840 /* 3841 * filesystem is gone, so set the EOF flag and schedule 3842 * the knote for deletion. 3843 */ 3844 if (hint == NOTE_REVOKE) { 3845 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 3846 return (1); 3847 } 3848 3849 if (VOP_GETATTR(vp, &va, curthread->td_ucred, curthread)) 3850 return (0); 3851 3852 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 3853 return (kn->kn_data != 0); 3854} 3855 3856/*ARGSUSED*/ 3857static int 3858filt_vfswrite(struct knote *kn, long hint) 3859{ 3860 /* 3861 * filesystem is gone, so set the EOF flag and schedule 3862 * the knote for deletion. 
3863 */ 3864 if (hint == NOTE_REVOKE) 3865 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 3866 3867 kn->kn_data = 0; 3868 return (1); 3869} 3870 3871static int 3872filt_vfsvnode(struct knote *kn, long hint) 3873{ 3874 if (kn->kn_sfflags & hint) 3875 kn->kn_fflags |= hint; 3876 if (hint == NOTE_REVOKE) { 3877 kn->kn_flags |= EV_EOF; 3878 return (1); 3879 } 3880 return (kn->kn_fflags != 0); 3881} 3882 3883int 3884vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 3885{ 3886 int error; 3887 3888 if (dp->d_reclen > ap->a_uio->uio_resid) 3889 return (ENAMETOOLONG); 3890 error = uiomove(dp, dp->d_reclen, ap->a_uio); 3891 if (error) { 3892 if (ap->a_ncookies != NULL) { 3893 if (ap->a_cookies != NULL) 3894 free(ap->a_cookies, M_TEMP); 3895 ap->a_cookies = NULL; 3896 *ap->a_ncookies = 0; 3897 } 3898 return (error); 3899 } 3900 if (ap->a_ncookies == NULL) 3901 return (0); 3902 3903 KASSERT(ap->a_cookies, 3904 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 3905 3906 *ap->a_cookies = realloc(*ap->a_cookies, 3907 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 3908 (*ap->a_cookies)[*ap->a_ncookies] = off; 3909 return (0); 3910} 3911 3912/* 3913 * Mark for update the access time of the file if the filesystem 3914 * supports VA_MARK_ATIME. This functionality is used by execve 3915 * and mmap, so we want to avoid the synchronous I/O implied by 3916 * directly setting va_atime for the sake of efficiency. 3917 */ 3918void 3919vfs_mark_atime(struct vnode *vp, struct thread *td) 3920{ 3921 struct vattr atimeattr; 3922 3923 if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) { 3924 VATTR_NULL(&atimeattr); 3925 atimeattr.va_vaflags |= VA_MARK_ATIME; 3926 (void)VOP_SETATTR(vp, &atimeattr, td->td_ucred, td); 3927 } 3928} 3929