vfs_subr.c revision 96145
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: head/sys/kern/vfs_subr.c 96145 2002-05-07 02:44:06Z jeff $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_ffs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias(struct vnode *vp, dev_t nvp_rdev);
static void	insmntque(struct vnode *vp, struct mount *mp);
static void	vclean(struct vnode *vp, int flags, struct thread *td);
static void	vlruvp(struct vnode *vp);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long numvnodes;

SYSCTL_LONG(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If there are fewer than this many free
 * vnodes, getnewvnode() will return a newly allocated vnode.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
/* Set to 0 for old insertion-sort based reassignbuf, 1 for modern method. */
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");
static int nameileafonly;
SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");

#ifdef ENABLE_VFS_IOOPT
/* See NOTES for a description of this setting. */
int vfs_ioopt;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

/* List of mounted filesystems. */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);

/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;

/* For any iteration/modification of mnt_vnodelist */
struct mtx mntvnode_mtx;

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/* For any iteration/modification of vnode_free_list */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata on filesystems
 * mounted on block devices is delayed only about half the time that
 * file data is delayed.  Similarly, directory updates are more critical,
 * so they are only delayed about a third the time that file data is
 * delayed.  Thus, there are SYNCER_MAXDELAY queues that are processed
 * round-robin at a rate of one each second (driven off the filesystem
 * syncer process).  The syncer_delayno variable indicates the next queue
 * that is to be processed.  Items that need to be processed soon are
 * placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
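
/*
 * Illustrative sketch, not part of the original file: how a delay expressed
 * in seconds maps onto one of the round-robin work queues described above.
 * syncer_mask is the table size minus one (hashinit() picks a power of two),
 * so the masked addition simply wraps around the ring of queues, which the
 * syncer drains at one queue per second.  vn_syncer_add_to_worklist() later
 * in this file performs this same computation; the helper name here is
 * hypothetical.
 */
#if 0
static __inline int
syncer_slot_for_delay(int delay)
{
	int slot;

	/* Clamp as the real code does, then wrap around the ring of queues. */
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;
	return (slot);
}
#endif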

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");

void
v_addpollinfo(struct vnode *vp)
{
	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
}

/*
 * Initialize the vnode management data structures.
248 */ 249static void 250vntblinit(void *dummy __unused) 251{ 252 253 desiredvnodes = maxproc + cnt.v_page_count / 4; 254 minvnodes = desiredvnodes / 4; 255 mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF); 256 mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF); 257 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 258 mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF); 259 TAILQ_INIT(&vnode_free_list); 260 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 261 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 262 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 263 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 264 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 265 /* 266 * Initialize the filesystem syncer. 267 */ 268 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 269 &syncer_mask); 270 syncer_maxdelay = syncer_mask + 1; 271} 272SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL) 273 274 275/* 276 * Mark a mount point as busy. Used to synchronize access and to delay 277 * unmounting. Interlock is not released on failure. 278 */ 279int 280vfs_busy(mp, flags, interlkp, td) 281 struct mount *mp; 282 int flags; 283 struct mtx *interlkp; 284 struct thread *td; 285{ 286 int lkflags; 287 288 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 289 if (flags & LK_NOWAIT) 290 return (ENOENT); 291 mp->mnt_kern_flag |= MNTK_MWAIT; 292 /* 293 * Since all busy locks are shared except the exclusive 294 * lock granted when unmounting, the only place that a 295 * wakeup needs to be done is at the release of the 296 * exclusive lock at the end of dounmount. 297 */ 298 msleep((caddr_t)mp, interlkp, PVFS, "vfs_busy", 0); 299 return (ENOENT); 300 } 301 lkflags = LK_SHARED | LK_NOPAUSE; 302 if (interlkp) 303 lkflags |= LK_INTERLOCK; 304 if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td)) 305 panic("vfs_busy: unexpected lock failure"); 306 return (0); 307} 308 309/* 310 * Free a busy filesystem. 311 */ 312void 313vfs_unbusy(mp, td) 314 struct mount *mp; 315 struct thread *td; 316{ 317 318 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td); 319} 320 321/* 322 * Lookup a filesystem type, and if found allocate and initialize 323 * a mount structure for it. 324 * 325 * Devname is usually updated by mount(8) after booting. 
326 */ 327int 328vfs_rootmountalloc(fstypename, devname, mpp) 329 char *fstypename; 330 char *devname; 331 struct mount **mpp; 332{ 333 struct thread *td = curthread; /* XXX */ 334 struct vfsconf *vfsp; 335 struct mount *mp; 336 337 if (fstypename == NULL) 338 return (ENODEV); 339 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 340 if (!strcmp(vfsp->vfc_name, fstypename)) 341 break; 342 if (vfsp == NULL) 343 return (ENODEV); 344 mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO); 345 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE); 346 (void)vfs_busy(mp, LK_NOWAIT, 0, td); 347 TAILQ_INIT(&mp->mnt_nvnodelist); 348 TAILQ_INIT(&mp->mnt_reservedvnlist); 349 mp->mnt_vfc = vfsp; 350 mp->mnt_op = vfsp->vfc_vfsops; 351 mp->mnt_flag = MNT_RDONLY; 352 mp->mnt_vnodecovered = NULLVP; 353 vfsp->vfc_refcount++; 354 mp->mnt_iosize_max = DFLTPHYS; 355 mp->mnt_stat.f_type = vfsp->vfc_typenum; 356 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 357 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 358 mp->mnt_stat.f_mntonname[0] = '/'; 359 mp->mnt_stat.f_mntonname[1] = 0; 360 (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0); 361 *mpp = mp; 362 return (0); 363} 364 365/* 366 * Find an appropriate filesystem to use for the root. If a filesystem 367 * has not been preselected, walk through the list of known filesystems 368 * trying those that have mountroot routines, and try them until one 369 * works or we have tried them all. 370 */ 371#ifdef notdef /* XXX JH */ 372int 373lite2_vfs_mountroot() 374{ 375 struct vfsconf *vfsp; 376 extern int (*lite2_mountroot)(void); 377 int error; 378 379 if (lite2_mountroot != NULL) 380 return ((*lite2_mountroot)()); 381 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 382 if (vfsp->vfc_mountroot == NULL) 383 continue; 384 if ((error = (*vfsp->vfc_mountroot)()) == 0) 385 return (0); 386 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error); 387 } 388 return (ENODEV); 389} 390#endif 391 392/* 393 * Lookup a mount point by filesystem identifier. 394 */ 395struct mount * 396vfs_getvfs(fsid) 397 fsid_t *fsid; 398{ 399 register struct mount *mp; 400 401 mtx_lock(&mountlist_mtx); 402 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 403 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 404 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 405 mtx_unlock(&mountlist_mtx); 406 return (mp); 407 } 408 } 409 mtx_unlock(&mountlist_mtx); 410 return ((struct mount *) 0); 411} 412 413/* 414 * Get a new unique fsid. Try to make its val[0] unique, since this value 415 * will be used to create fake device numbers for stat(). Also try (but 416 * not so hard) make its val[0] unique mod 2^16, since some emulators only 417 * support 16-bit device numbers. We end up with unique val[0]'s for the 418 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 419 * 420 * Keep in mind that several mounts may be running in parallel. Starting 421 * the search one past where the previous search terminated is both a 422 * micro-optimization and a defense against returning the same fsid to 423 * different mounts. 
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Get a mount option by its name.
 *
 * Return 0 if the option was found.
 * Return ENOENT if the option wasn't found.
 * If len is a non-NULL pointer and *len is a nonzero integer, then the
 * size of the option will be compared with *len and if they don't match,
 * EINVAL is returned.
 * If len is non-NULL and *len == 0, it will be filled with the length of
 * the option.
 * Finally, if buf is non-NULL, it will be filled with the address of the
 * option.
 */
int
vfs_getopt(opts, name, buf, len)
	struct vfsoptlist *opts;
	const char *name;
	void **buf;
	int *len;
{
	struct vfsopt *opt;
	int i;

	i = 0;
	opt = opts->opt;
	while (i++ < opts->optcnt) {
		if (strcmp(name, opt->name) == 0) {
			if (len != NULL) {
				if ((*len != 0) && (*len != opt->len))
					return (EINVAL);
				*len = opt->len;
			}
			if (buf != NULL)
				*buf = opt->value;
			return (0);
		}
		opt++;
	}
	return (ENOENT);
}
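
/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * filesystem's mount code might consume vfs_getopt() as documented above.
 * The option name "from" and the function itself are hypothetical.
 */
#if 0
static int
example_getopt_usage(struct vfsoptlist *opts)
{
	void *from;
	int error, len;

	len = 0;	/* 0 means: accept any length and have it returned. */
	error = vfs_getopt(opts, "from", &from, &len);
	if (error != 0)
		return (error);
	/* "from" now points at the option's value; len holds its size. */
	return (0);
}
#endif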

/*
 * Find and copy a mount option.
 * The size of the buffer has to be specified in len; if it is not big
 * enough, EINVAL is returned.  Returns ENOENT if the option is not
 * found.  Otherwise, 0 is returned and the number of bytes actually
 * copied is stored in done if it is non-NULL.
 */
int
vfs_copyopt(opts, name, dest, len, done)
	struct vfsoptlist *opts;
	const char *name;
	void *dest;
	int len, *done;
{
	struct vfsopt *opt;
	int i;

	i = 0;
	opt = opts->opt;
	while (i++ < opts->optcnt) {
		if (strcmp(name, opt->name) == 0) {
			if (len < opt->len)
				return (EINVAL);
			bcopy(opt->value, dest, opt->len);
			if (done != NULL)
				*done = opt->len;
			return (0);
		}
		opt++;
	}
	return (ENOENT);
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp, int count)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;

	done = 0;
	mtx_lock(&mntvnode_mtx);
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		if (vp->v_type != VNON &&
		    vp->v_type != VBAD &&
		    VMIGHTFREE(vp) &&		/* critical path opt */
		    (vp->v_object == NULL ||
		    vp->v_object->resident_page_count < trigger) &&
		    mtx_trylock(&vp->v_interlock)
		) {
			mtx_unlock(&mntvnode_mtx);
			if (VMIGHTFREE(vp)) {
				vgonel(vp, curthread);
				done++;
			} else {
				mtx_unlock(&vp->v_interlock);
			}
			mtx_lock(&mntvnode_mtx);
		}
		--count;
	}
	mtx_unlock(&mntvnode_mtx);
	return done;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
664 */ 665static struct proc *vnlruproc; 666static int vnlruproc_sig; 667 668static void 669vnlru_proc(void) 670{ 671 struct mount *mp, *nmp; 672 int s; 673 int done; 674 struct proc *p = vnlruproc; 675 struct thread *td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ 676 677 mtx_lock(&Giant); 678 679 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p, 680 SHUTDOWN_PRI_FIRST); 681 682 s = splbio(); 683 for (;;) { 684 kthread_suspend_check(p); 685 if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) { 686 vnlruproc_sig = 0; 687 tsleep(vnlruproc, PVFS, "vlruwt", 0); 688 continue; 689 } 690 done = 0; 691 mtx_lock(&mountlist_mtx); 692 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 693 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) { 694 nmp = TAILQ_NEXT(mp, mnt_list); 695 continue; 696 } 697 done += vlrureclaim(mp, 10); 698 mtx_lock(&mountlist_mtx); 699 nmp = TAILQ_NEXT(mp, mnt_list); 700 vfs_unbusy(mp, td); 701 } 702 mtx_unlock(&mountlist_mtx); 703 if (done == 0) { 704#if 0 705 /* These messages are temporary debugging aids */ 706 if (vnlru_nowhere < 5) 707 printf("vnlru process getting nowhere..\n"); 708 else if (vnlru_nowhere == 5) 709 printf("vnlru process messages stopped.\n"); 710#endif 711 vnlru_nowhere++; 712 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 713 } 714 } 715 splx(s); 716} 717 718static struct kproc_desc vnlru_kp = { 719 "vnlru", 720 vnlru_proc, 721 &vnlruproc 722}; 723SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp) 724 725 726/* 727 * Routines having to do with the management of the vnode table. 728 */ 729 730/* 731 * Return the next vnode from the free list. 732 */ 733int 734getnewvnode(tag, mp, vops, vpp) 735 enum vtagtype tag; 736 struct mount *mp; 737 vop_t **vops; 738 struct vnode **vpp; 739{ 740 int s; 741 struct thread *td = curthread; /* XXX */ 742 struct vnode *vp = NULL; 743 struct mount *vnmp; 744 vm_object_t object; 745 746 s = splbio(); 747 /* 748 * Try to reuse vnodes if we hit the max. This situation only 749 * occurs in certain large-memory (2G+) situations. We cannot 750 * attempt to directly reclaim vnodes due to nasty recursion 751 * problems. 752 */ 753 if (vnlruproc_sig == 0 && numvnodes - freevnodes > desiredvnodes) { 754 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 755 wakeup(vnlruproc); 756 } 757 758 /* 759 * Attempt to reuse a vnode already on the free list, allocating 760 * a new vnode if we can't find one or if we have not reached a 761 * good minimum for good LRU performance. 762 */ 763 764 mtx_lock(&vnode_free_list_mtx); 765 766 if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) { 767 int count; 768 769 for (count = 0; count < freevnodes; count++) { 770 vp = TAILQ_FIRST(&vnode_free_list); 771 if (vp == NULL || vp->v_usecount) 772 panic("getnewvnode: free vnode isn't"); 773 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 774 775 /* Don't recycle if we can't get the interlock */ 776 if (!mtx_trylock(&vp->v_interlock)) { 777 vp = NULL; 778 continue; 779 } 780 781 /* We should be able to immediately acquire this */ 782 if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) 783 continue; 784 /* 785 * Don't recycle if we still have cached pages. 
786 */ 787 if (VOP_GETVOBJECT(vp, &object) == 0 && 788 (object->resident_page_count || 789 object->ref_count)) { 790 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 791 v_freelist); 792 vp = NULL; 793 VOP_UNLOCK(vp, 0, td); 794 continue; 795 } 796 if (LIST_FIRST(&vp->v_cache_src)) { 797 /* 798 * note: nameileafonly sysctl is temporary, 799 * for debugging only, and will eventually be 800 * removed. 801 */ 802 if (nameileafonly > 0) { 803 /* 804 * Do not reuse namei-cached directory 805 * vnodes that have cached 806 * subdirectories. 807 */ 808 if (cache_leaf_test(vp) < 0) { 809 VOP_UNLOCK(vp, 0, td); 810 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 811 vp = NULL; 812 continue; 813 } 814 } else if (nameileafonly < 0 || 815 vmiodirenable == 0) { 816 /* 817 * Do not reuse namei-cached directory 818 * vnodes if nameileafonly is -1 or 819 * if VMIO backing for directories is 820 * turned off (otherwise we reuse them 821 * too quickly). 822 */ 823 VOP_UNLOCK(vp, 0, td); 824 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 825 vp = NULL; 826 continue; 827 } 828 } 829 /* 830 * Skip over it if its filesystem is being suspended. 831 */ 832 if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0) 833 break; 834 VOP_UNLOCK(vp, 0, td); 835 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 836 vp = NULL; 837 } 838 } 839 if (vp) { 840 vp->v_flag |= VDOOMED; 841 vp->v_flag &= ~VFREE; 842 freevnodes--; 843 mtx_unlock(&vnode_free_list_mtx); 844 cache_purge(vp); 845 if (vp->v_type != VBAD) { 846 VOP_UNLOCK(vp, 0, td); 847 vgone(vp); 848 } else { 849 VOP_UNLOCK(vp, 0, td); 850 } 851 vn_finished_write(vnmp); 852 853#ifdef INVARIANTS 854 { 855 int s; 856 857 if (vp->v_data) 858 panic("cleaned vnode isn't"); 859 s = splbio(); 860 if (vp->v_numoutput) 861 panic("Clean vnode has pending I/O's"); 862 splx(s); 863 if (vp->v_writecount != 0) 864 panic("Non-zero write count"); 865 } 866#endif 867 if (vp->v_pollinfo) { 868 mtx_destroy(&vp->v_pollinfo->vpi_lock); 869 uma_zfree(vnodepoll_zone, vp->v_pollinfo); 870 } 871 vp->v_pollinfo = NULL; 872 vp->v_flag = 0; 873 vp->v_lastw = 0; 874 vp->v_lasta = 0; 875 vp->v_cstart = 0; 876 vp->v_clen = 0; 877 vp->v_socket = 0; 878 } else { 879 mtx_unlock(&vnode_free_list_mtx); 880 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK); 881 bzero((char *) vp, sizeof *vp); 882 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 883 vp->v_dd = vp; 884 cache_purge(vp); 885 LIST_INIT(&vp->v_cache_src); 886 TAILQ_INIT(&vp->v_cache_dst); 887 numvnodes++; 888 } 889 890 TAILQ_INIT(&vp->v_cleanblkhd); 891 TAILQ_INIT(&vp->v_dirtyblkhd); 892 vp->v_type = VNON; 893 vp->v_tag = tag; 894 vp->v_op = vops; 895 lockinit(&vp->v_lock, PVFS, "vnlock", VLKTIMEOUT, LK_NOPAUSE); 896 insmntque(vp, mp); 897 *vpp = vp; 898 vp->v_usecount = 1; 899 vp->v_data = 0; 900 901 splx(s); 902 903#if 0 904 vnodeallocs++; 905 if (vnodeallocs % vnoderecycleperiod == 0 && 906 freevnodes < vnoderecycleminfreevn && 907 vnoderecyclemintotalvn < numvnodes) { 908 /* Recycle vnodes. */ 909 cache_purgeleafdirs(vnoderecyclenumber); 910 } 911#endif 912 913 return (0); 914} 915 916/* 917 * Move a vnode from one mount queue to another. 918 */ 919static void 920insmntque(vp, mp) 921 register struct vnode *vp; 922 register struct mount *mp; 923{ 924 925 mtx_lock(&mntvnode_mtx); 926 /* 927 * Delete from old mount point vnode list, if on one. 928 */ 929 if (vp->v_mount != NULL) 930 TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes); 931 /* 932 * Insert into list of vnodes for the new mount point, if available. 
933 */ 934 if ((vp->v_mount = mp) == NULL) { 935 mtx_unlock(&mntvnode_mtx); 936 return; 937 } 938 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 939 mtx_unlock(&mntvnode_mtx); 940} 941 942/* 943 * Update outstanding I/O count and do wakeup if requested. 944 */ 945void 946vwakeup(bp) 947 register struct buf *bp; 948{ 949 register struct vnode *vp; 950 951 bp->b_flags &= ~B_WRITEINPROG; 952 if ((vp = bp->b_vp)) { 953 vp->v_numoutput--; 954 if (vp->v_numoutput < 0) 955 panic("vwakeup: neg numoutput"); 956 if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) { 957 vp->v_flag &= ~VBWAIT; 958 wakeup((caddr_t) &vp->v_numoutput); 959 } 960 } 961} 962 963/* 964 * Flush out and invalidate all buffers associated with a vnode. 965 * Called with the underlying object locked. 966 */ 967int 968vinvalbuf(vp, flags, cred, td, slpflag, slptimeo) 969 register struct vnode *vp; 970 int flags; 971 struct ucred *cred; 972 struct thread *td; 973 int slpflag, slptimeo; 974{ 975 register struct buf *bp; 976 struct buf *nbp, *blist; 977 int s, error; 978 vm_object_t object; 979 980 GIANT_REQUIRED; 981 982 if (flags & V_SAVE) { 983 s = splbio(); 984 while (vp->v_numoutput) { 985 vp->v_flag |= VBWAIT; 986 error = tsleep((caddr_t)&vp->v_numoutput, 987 slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo); 988 if (error) { 989 splx(s); 990 return (error); 991 } 992 } 993 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 994 splx(s); 995 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0) 996 return (error); 997 s = splbio(); 998 if (vp->v_numoutput > 0 || 999 !TAILQ_EMPTY(&vp->v_dirtyblkhd)) 1000 panic("vinvalbuf: dirty bufs"); 1001 } 1002 splx(s); 1003 } 1004 s = splbio(); 1005 for (;;) { 1006 blist = TAILQ_FIRST(&vp->v_cleanblkhd); 1007 if (!blist) 1008 blist = TAILQ_FIRST(&vp->v_dirtyblkhd); 1009 if (!blist) 1010 break; 1011 1012 for (bp = blist; bp; bp = nbp) { 1013 nbp = TAILQ_NEXT(bp, b_vnbufs); 1014 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1015 error = BUF_TIMELOCK(bp, 1016 LK_EXCLUSIVE | LK_SLEEPFAIL, 1017 "vinvalbuf", slpflag, slptimeo); 1018 if (error == ENOLCK) 1019 break; 1020 splx(s); 1021 return (error); 1022 } 1023 /* 1024 * XXX Since there are no node locks for NFS, I 1025 * believe there is a slight chance that a delayed 1026 * write will occur while sleeping just above, so 1027 * check for it. Note that vfs_bio_awrite expects 1028 * buffers to reside on a queue, while BUF_WRITE and 1029 * brelse do not. 1030 */ 1031 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1032 (flags & V_SAVE)) { 1033 1034 if (bp->b_vp == vp) { 1035 if (bp->b_flags & B_CLUSTEROK) { 1036 BUF_UNLOCK(bp); 1037 vfs_bio_awrite(bp); 1038 } else { 1039 bremfree(bp); 1040 bp->b_flags |= B_ASYNC; 1041 BUF_WRITE(bp); 1042 } 1043 } else { 1044 bremfree(bp); 1045 (void) BUF_WRITE(bp); 1046 } 1047 break; 1048 } 1049 bremfree(bp); 1050 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF); 1051 bp->b_flags &= ~B_ASYNC; 1052 brelse(bp); 1053 } 1054 } 1055 1056 /* 1057 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1058 * have write I/O in-progress but if there is a VM object then the 1059 * VM object can also have read-I/O in-progress. 1060 */ 1061 do { 1062 while (vp->v_numoutput > 0) { 1063 vp->v_flag |= VBWAIT; 1064 tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0); 1065 } 1066 if (VOP_GETVOBJECT(vp, &object) == 0) { 1067 while (object->paging_in_progress) 1068 vm_object_pip_sleep(object, "vnvlbx"); 1069 } 1070 } while (vp->v_numoutput > 0); 1071 1072 splx(s); 1073 1074 /* 1075 * Destroy the copy in the VM cache, too. 
1076 */ 1077 mtx_lock(&vp->v_interlock); 1078 if (VOP_GETVOBJECT(vp, &object) == 0) { 1079 vm_object_page_remove(object, 0, 0, 1080 (flags & V_SAVE) ? TRUE : FALSE); 1081 } 1082 mtx_unlock(&vp->v_interlock); 1083 1084 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd)) 1085 panic("vinvalbuf: flush failed"); 1086 return (0); 1087} 1088 1089/* 1090 * Truncate a file's buffer and pages to a specified length. This 1091 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1092 * sync activity. 1093 */ 1094int 1095vtruncbuf(vp, cred, td, length, blksize) 1096 register struct vnode *vp; 1097 struct ucred *cred; 1098 struct thread *td; 1099 off_t length; 1100 int blksize; 1101{ 1102 register struct buf *bp; 1103 struct buf *nbp; 1104 int s, anyfreed; 1105 int trunclbn; 1106 1107 /* 1108 * Round up to the *next* lbn. 1109 */ 1110 trunclbn = (length + blksize - 1) / blksize; 1111 1112 s = splbio(); 1113restart: 1114 anyfreed = 1; 1115 for (;anyfreed;) { 1116 anyfreed = 0; 1117 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) { 1118 nbp = TAILQ_NEXT(bp, b_vnbufs); 1119 if (bp->b_lblkno >= trunclbn) { 1120 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1121 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1122 goto restart; 1123 } else { 1124 bremfree(bp); 1125 bp->b_flags |= (B_INVAL | B_RELBUF); 1126 bp->b_flags &= ~B_ASYNC; 1127 brelse(bp); 1128 anyfreed = 1; 1129 } 1130 if (nbp && 1131 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1132 (nbp->b_vp != vp) || 1133 (nbp->b_flags & B_DELWRI))) { 1134 goto restart; 1135 } 1136 } 1137 } 1138 1139 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1140 nbp = TAILQ_NEXT(bp, b_vnbufs); 1141 if (bp->b_lblkno >= trunclbn) { 1142 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1143 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1144 goto restart; 1145 } else { 1146 bremfree(bp); 1147 bp->b_flags |= (B_INVAL | B_RELBUF); 1148 bp->b_flags &= ~B_ASYNC; 1149 brelse(bp); 1150 anyfreed = 1; 1151 } 1152 if (nbp && 1153 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1154 (nbp->b_vp != vp) || 1155 (nbp->b_flags & B_DELWRI) == 0)) { 1156 goto restart; 1157 } 1158 } 1159 } 1160 } 1161 1162 if (length > 0) { 1163restartsync: 1164 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1165 nbp = TAILQ_NEXT(bp, b_vnbufs); 1166 if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) { 1167 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1168 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1169 goto restart; 1170 } else { 1171 bremfree(bp); 1172 if (bp->b_vp == vp) { 1173 bp->b_flags |= B_ASYNC; 1174 } else { 1175 bp->b_flags &= ~B_ASYNC; 1176 } 1177 BUF_WRITE(bp); 1178 } 1179 goto restartsync; 1180 } 1181 1182 } 1183 } 1184 1185 while (vp->v_numoutput > 0) { 1186 vp->v_flag |= VBWAIT; 1187 tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0); 1188 } 1189 1190 splx(s); 1191 1192 vnode_pager_setsize(vp, length); 1193 1194 return (0); 1195} 1196 1197/* 1198 * Associate a buffer with a vnode. 1199 */ 1200void 1201bgetvp(vp, bp) 1202 register struct vnode *vp; 1203 register struct buf *bp; 1204{ 1205 int s; 1206 1207 KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); 1208 1209 vhold(vp); 1210 bp->b_vp = vp; 1211 bp->b_dev = vn_todev(vp); 1212 /* 1213 * Insert onto list for new vnode. 1214 */ 1215 s = splbio(); 1216 bp->b_xflags |= BX_VNCLEAN; 1217 bp->b_xflags &= ~BX_VNDIRTY; 1218 TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs); 1219 splx(s); 1220} 1221 1222/* 1223 * Disassociate a buffer from a vnode. 
1224 */ 1225void 1226brelvp(bp) 1227 register struct buf *bp; 1228{ 1229 struct vnode *vp; 1230 struct buflists *listheadp; 1231 int s; 1232 1233 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1234 1235 /* 1236 * Delete from old vnode list, if on one. 1237 */ 1238 vp = bp->b_vp; 1239 s = splbio(); 1240 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1241 if (bp->b_xflags & BX_VNDIRTY) 1242 listheadp = &vp->v_dirtyblkhd; 1243 else 1244 listheadp = &vp->v_cleanblkhd; 1245 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1246 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1247 } 1248 if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 1249 vp->v_flag &= ~VONWORKLST; 1250 LIST_REMOVE(vp, v_synclist); 1251 } 1252 splx(s); 1253 bp->b_vp = (struct vnode *) 0; 1254 vdrop(vp); 1255} 1256 1257/* 1258 * Add an item to the syncer work queue. 1259 */ 1260static void 1261vn_syncer_add_to_worklist(struct vnode *vp, int delay) 1262{ 1263 int s, slot; 1264 1265 s = splbio(); 1266 1267 if (vp->v_flag & VONWORKLST) { 1268 LIST_REMOVE(vp, v_synclist); 1269 } 1270 1271 if (delay > syncer_maxdelay - 2) 1272 delay = syncer_maxdelay - 2; 1273 slot = (syncer_delayno + delay) & syncer_mask; 1274 1275 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist); 1276 vp->v_flag |= VONWORKLST; 1277 splx(s); 1278} 1279 1280struct proc *updateproc; 1281static void sched_sync(void); 1282static struct kproc_desc up_kp = { 1283 "syncer", 1284 sched_sync, 1285 &updateproc 1286}; 1287SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 1288 1289/* 1290 * System filesystem synchronizer daemon. 1291 */ 1292void 1293sched_sync(void) 1294{ 1295 struct synclist *slp; 1296 struct vnode *vp; 1297 struct mount *mp; 1298 long starttime; 1299 int s; 1300 struct thread *td = FIRST_THREAD_IN_PROC(updateproc); /* XXXKSE */ 1301 1302 mtx_lock(&Giant); 1303 1304 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc, 1305 SHUTDOWN_PRI_LAST); 1306 1307 for (;;) { 1308 kthread_suspend_check(td->td_proc); 1309 1310 starttime = time_second; 1311 1312 /* 1313 * Push files whose dirty time has expired. Be careful 1314 * of interrupt race on slp queue. 1315 */ 1316 s = splbio(); 1317 slp = &syncer_workitem_pending[syncer_delayno]; 1318 syncer_delayno += 1; 1319 if (syncer_delayno == syncer_maxdelay) 1320 syncer_delayno = 0; 1321 splx(s); 1322 1323 while ((vp = LIST_FIRST(slp)) != NULL) { 1324 if (VOP_ISLOCKED(vp, NULL) == 0 && 1325 vn_start_write(vp, &mp, V_NOWAIT) == 0) { 1326 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 1327 (void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td); 1328 VOP_UNLOCK(vp, 0, td); 1329 vn_finished_write(mp); 1330 } 1331 s = splbio(); 1332 if (LIST_FIRST(slp) == vp) { 1333 /* 1334 * Note: v_tag VT_VFS vps can remain on the 1335 * worklist too with no dirty blocks, but 1336 * since sync_fsync() moves it to a different 1337 * slot we are safe. 1338 */ 1339 if (TAILQ_EMPTY(&vp->v_dirtyblkhd) && 1340 !vn_isdisk(vp, NULL)) 1341 panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag); 1342 /* 1343 * Put us back on the worklist. The worklist 1344 * routine will remove us from our current 1345 * position and then add us back in at a later 1346 * position. 1347 */ 1348 vn_syncer_add_to_worklist(vp, syncdelay); 1349 } 1350 splx(s); 1351 } 1352 1353 /* 1354 * Do soft update processing. 1355 */ 1356#ifdef SOFTUPDATES 1357 softdep_process_worklist(NULL); 1358#endif 1359 1360 /* 1361 * The variable rushjob allows the kernel to speed up the 1362 * processing of the filesystem syncer process. 
A rushjob 1363 * value of N tells the filesystem syncer to process the next 1364 * N seconds worth of work on its queue ASAP. Currently rushjob 1365 * is used by the soft update code to speed up the filesystem 1366 * syncer process when the incore state is getting so far 1367 * ahead of the disk that the kernel memory pool is being 1368 * threatened with exhaustion. 1369 */ 1370 if (rushjob > 0) { 1371 rushjob -= 1; 1372 continue; 1373 } 1374 /* 1375 * If it has taken us less than a second to process the 1376 * current work, then wait. Otherwise start right over 1377 * again. We can still lose time if any single round 1378 * takes more than two seconds, but it does not really 1379 * matter as we are just trying to generally pace the 1380 * filesystem activity. 1381 */ 1382 if (time_second == starttime) 1383 tsleep(&lbolt, PPAUSE, "syncer", 0); 1384 } 1385} 1386 1387/* 1388 * Request the syncer daemon to speed up its work. 1389 * We never push it to speed up more than half of its 1390 * normal turn time, otherwise it could take over the cpu. 1391 * XXXKSE only one update? 1392 */ 1393int 1394speedup_syncer() 1395{ 1396 1397 mtx_lock_spin(&sched_lock); 1398 if (FIRST_THREAD_IN_PROC(updateproc)->td_wchan == &lbolt) /* XXXKSE */ 1399 setrunnable(FIRST_THREAD_IN_PROC(updateproc)); 1400 mtx_unlock_spin(&sched_lock); 1401 if (rushjob < syncdelay / 2) { 1402 rushjob += 1; 1403 stat_rush_requests += 1; 1404 return (1); 1405 } 1406 return(0); 1407} 1408 1409/* 1410 * Associate a p-buffer with a vnode. 1411 * 1412 * Also sets B_PAGING flag to indicate that vnode is not fully associated 1413 * with the buffer. i.e. the bp has not been linked into the vnode or 1414 * ref-counted. 1415 */ 1416void 1417pbgetvp(vp, bp) 1418 register struct vnode *vp; 1419 register struct buf *bp; 1420{ 1421 1422 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free")); 1423 1424 bp->b_vp = vp; 1425 bp->b_flags |= B_PAGING; 1426 bp->b_dev = vn_todev(vp); 1427} 1428 1429/* 1430 * Disassociate a p-buffer from a vnode. 1431 */ 1432void 1433pbrelvp(bp) 1434 register struct buf *bp; 1435{ 1436 1437 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL")); 1438 1439 /* XXX REMOVE ME */ 1440 if (TAILQ_NEXT(bp, b_vnbufs) != NULL) { 1441 panic( 1442 "relpbuf(): b_vp was probably reassignbuf()d %p %x", 1443 bp, 1444 (int)bp->b_flags 1445 ); 1446 } 1447 bp->b_vp = (struct vnode *) 0; 1448 bp->b_flags &= ~B_PAGING; 1449} 1450 1451/* 1452 * Reassign a buffer from one vnode to another. 1453 * Used to assign file specific control information 1454 * (indirect blocks) to the vnode to which they belong. 1455 */ 1456void 1457reassignbuf(bp, newvp) 1458 register struct buf *bp; 1459 register struct vnode *newvp; 1460{ 1461 struct buflists *listheadp; 1462 int delay; 1463 int s; 1464 1465 if (newvp == NULL) { 1466 printf("reassignbuf: NULL"); 1467 return; 1468 } 1469 ++reassignbufcalls; 1470 1471 /* 1472 * B_PAGING flagged buffers cannot be reassigned because their vp 1473 * is not fully linked in. 1474 */ 1475 if (bp->b_flags & B_PAGING) 1476 panic("cannot reassign paging buffer"); 1477 1478 s = splbio(); 1479 /* 1480 * Delete from old vnode list, if on one. 
1481 */ 1482 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1483 if (bp->b_xflags & BX_VNDIRTY) 1484 listheadp = &bp->b_vp->v_dirtyblkhd; 1485 else 1486 listheadp = &bp->b_vp->v_cleanblkhd; 1487 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1488 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1489 if (bp->b_vp != newvp) { 1490 vdrop(bp->b_vp); 1491 bp->b_vp = NULL; /* for clarification */ 1492 } 1493 } 1494 /* 1495 * If dirty, put on list of dirty buffers; otherwise insert onto list 1496 * of clean buffers. 1497 */ 1498 if (bp->b_flags & B_DELWRI) { 1499 struct buf *tbp; 1500 1501 listheadp = &newvp->v_dirtyblkhd; 1502 if ((newvp->v_flag & VONWORKLST) == 0) { 1503 switch (newvp->v_type) { 1504 case VDIR: 1505 delay = dirdelay; 1506 break; 1507 case VCHR: 1508 if (newvp->v_rdev->si_mountpoint != NULL) { 1509 delay = metadelay; 1510 break; 1511 } 1512 /* fall through */ 1513 default: 1514 delay = filedelay; 1515 } 1516 vn_syncer_add_to_worklist(newvp, delay); 1517 } 1518 bp->b_xflags |= BX_VNDIRTY; 1519 tbp = TAILQ_FIRST(listheadp); 1520 if (tbp == NULL || 1521 bp->b_lblkno == 0 || 1522 (bp->b_lblkno > 0 && tbp->b_lblkno < 0) || 1523 (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) { 1524 TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs); 1525 ++reassignbufsortgood; 1526 } else if (bp->b_lblkno < 0) { 1527 TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs); 1528 ++reassignbufsortgood; 1529 } else if (reassignbufmethod == 1) { 1530 /* 1531 * New sorting algorithm, only handle sequential case, 1532 * otherwise append to end (but before metadata) 1533 */ 1534 if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL && 1535 (tbp->b_xflags & BX_VNDIRTY)) { 1536 /* 1537 * Found the best place to insert the buffer 1538 */ 1539 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1540 ++reassignbufsortgood; 1541 } else { 1542 /* 1543 * Missed, append to end, but before meta-data. 1544 * We know that the head buffer in the list is 1545 * not meta-data due to prior conditionals. 1546 * 1547 * Indirect effects: NFS second stage write 1548 * tends to wind up here, giving maximum 1549 * distance between the unstable write and the 1550 * commit rpc. 1551 */ 1552 tbp = TAILQ_LAST(listheadp, buflists); 1553 while (tbp && tbp->b_lblkno < 0) 1554 tbp = TAILQ_PREV(tbp, buflists, b_vnbufs); 1555 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1556 ++reassignbufsortbad; 1557 } 1558 } else { 1559 /* 1560 * Old sorting algorithm, scan queue and insert 1561 */ 1562 struct buf *ttbp; 1563 while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) && 1564 (ttbp->b_lblkno < bp->b_lblkno)) { 1565 ++reassignbufloops; 1566 tbp = ttbp; 1567 } 1568 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1569 } 1570 } else { 1571 bp->b_xflags |= BX_VNCLEAN; 1572 TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs); 1573 if ((newvp->v_flag & VONWORKLST) && 1574 TAILQ_EMPTY(&newvp->v_dirtyblkhd)) { 1575 newvp->v_flag &= ~VONWORKLST; 1576 LIST_REMOVE(newvp, v_synclist); 1577 } 1578 } 1579 if (bp->b_vp != newvp) { 1580 bp->b_vp = newvp; 1581 vhold(bp->b_vp); 1582 } 1583 splx(s); 1584} 1585 1586/* 1587 * Create a vnode for a device. 1588 * Used for mounting the root file system. 
1589 */ 1590int 1591bdevvp(dev, vpp) 1592 dev_t dev; 1593 struct vnode **vpp; 1594{ 1595 register struct vnode *vp; 1596 struct vnode *nvp; 1597 int error; 1598 1599 if (dev == NODEV) { 1600 *vpp = NULLVP; 1601 return (ENXIO); 1602 } 1603 if (vfinddev(dev, VCHR, vpp)) 1604 return (0); 1605 error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp); 1606 if (error) { 1607 *vpp = NULLVP; 1608 return (error); 1609 } 1610 vp = nvp; 1611 vp->v_type = VCHR; 1612 addalias(vp, dev); 1613 *vpp = vp; 1614 return (0); 1615} 1616 1617/* 1618 * Add vnode to the alias list hung off the dev_t. 1619 * 1620 * The reason for this gunk is that multiple vnodes can reference 1621 * the same physical device, so checking vp->v_usecount to see 1622 * how many users there are is inadequate; the v_usecount for 1623 * the vnodes need to be accumulated. vcount() does that. 1624 */ 1625struct vnode * 1626addaliasu(nvp, nvp_rdev) 1627 struct vnode *nvp; 1628 udev_t nvp_rdev; 1629{ 1630 struct vnode *ovp; 1631 vop_t **ops; 1632 dev_t dev; 1633 1634 if (nvp->v_type == VBLK) 1635 return (nvp); 1636 if (nvp->v_type != VCHR) 1637 panic("addaliasu on non-special vnode"); 1638 dev = udev2dev(nvp_rdev, 0); 1639 /* 1640 * Check to see if we have a bdevvp vnode with no associated 1641 * filesystem. If so, we want to associate the filesystem of 1642 * the new newly instigated vnode with the bdevvp vnode and 1643 * discard the newly created vnode rather than leaving the 1644 * bdevvp vnode lying around with no associated filesystem. 1645 */ 1646 if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) { 1647 addalias(nvp, dev); 1648 return (nvp); 1649 } 1650 /* 1651 * Discard unneeded vnode, but save its node specific data. 1652 * Note that if there is a lock, it is carried over in the 1653 * node specific data to the replacement vnode. 1654 */ 1655 vref(ovp); 1656 ovp->v_data = nvp->v_data; 1657 ovp->v_tag = nvp->v_tag; 1658 nvp->v_data = NULL; 1659 lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg, 1660 nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK); 1661 if (nvp->v_vnlock) 1662 ovp->v_vnlock = &ovp->v_lock; 1663 ops = ovp->v_op; 1664 ovp->v_op = nvp->v_op; 1665 if (VOP_ISLOCKED(nvp, curthread)) { 1666 VOP_UNLOCK(nvp, 0, curthread); 1667 vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread); 1668 } 1669 nvp->v_op = ops; 1670 insmntque(ovp, nvp->v_mount); 1671 vrele(nvp); 1672 vgone(nvp); 1673 return (ovp); 1674} 1675 1676/* This is a local helper function that do the same as addaliasu, but for a 1677 * dev_t instead of an udev_t. */ 1678static void 1679addalias(nvp, dev) 1680 struct vnode *nvp; 1681 dev_t dev; 1682{ 1683 1684 KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode")); 1685 nvp->v_rdev = dev; 1686 mtx_lock(&spechash_mtx); 1687 SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext); 1688 mtx_unlock(&spechash_mtx); 1689} 1690 1691/* 1692 * Grab a particular vnode from the free list, increment its 1693 * reference count and lock it. The vnode lock bit is set if the 1694 * vnode is being eliminated in vgone. The process is awakened 1695 * when the transition is completed, and an error returned to 1696 * indicate that the vnode is no longer usable (possibly having 1697 * been changed to a new file system type). 
1698 */ 1699int 1700vget(vp, flags, td) 1701 register struct vnode *vp; 1702 int flags; 1703 struct thread *td; 1704{ 1705 int error; 1706 1707 /* 1708 * If the vnode is in the process of being cleaned out for 1709 * another use, we wait for the cleaning to finish and then 1710 * return failure. Cleaning is determined by checking that 1711 * the VXLOCK flag is set. 1712 */ 1713 if ((flags & LK_INTERLOCK) == 0) 1714 mtx_lock(&vp->v_interlock); 1715 if (vp->v_flag & VXLOCK) { 1716 if (vp->v_vxproc == curthread) { 1717#if 0 1718 /* this can now occur in normal operation */ 1719 log(LOG_INFO, "VXLOCK interlock avoided\n"); 1720#endif 1721 } else { 1722 vp->v_flag |= VXWANT; 1723 msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP, 1724 "vget", 0); 1725 return (ENOENT); 1726 } 1727 } 1728 1729 vp->v_usecount++; 1730 1731 if (VSHOULDBUSY(vp)) 1732 vbusy(vp); 1733 if (flags & LK_TYPE_MASK) { 1734 if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) { 1735 /* 1736 * must expand vrele here because we do not want 1737 * to call VOP_INACTIVE if the reference count 1738 * drops back to zero since it was never really 1739 * active. We must remove it from the free list 1740 * before sleeping so that multiple processes do 1741 * not try to recycle it. 1742 */ 1743 mtx_lock(&vp->v_interlock); 1744 vp->v_usecount--; 1745 if (VSHOULDFREE(vp)) 1746 vfree(vp); 1747 else 1748 vlruvp(vp); 1749 mtx_unlock(&vp->v_interlock); 1750 } 1751 return (error); 1752 } 1753 mtx_unlock(&vp->v_interlock); 1754 return (0); 1755} 1756 1757/* 1758 * Increase the reference count of a vnode. 1759 */ 1760void 1761vref(struct vnode *vp) 1762{ 1763 mtx_lock(&vp->v_interlock); 1764 vp->v_usecount++; 1765 mtx_unlock(&vp->v_interlock); 1766} 1767 1768/* 1769 * Vnode put/release. 1770 * If count drops to zero, call inactive routine and return to freelist. 1771 */ 1772void 1773vrele(vp) 1774 struct vnode *vp; 1775{ 1776 struct thread *td = curthread; /* XXX */ 1777 1778 KASSERT(vp != NULL, ("vrele: null vp")); 1779 1780 mtx_lock(&vp->v_interlock); 1781 1782 /* Skip this v_writecount check if we're going to panic below. */ 1783 KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, 1784 ("vrele: missed vn_close")); 1785 1786 if (vp->v_usecount > 1) { 1787 1788 vp->v_usecount--; 1789 mtx_unlock(&vp->v_interlock); 1790 1791 return; 1792 } 1793 1794 if (vp->v_usecount == 1) { 1795 vp->v_usecount--; 1796 /* 1797 * We must call VOP_INACTIVE with the node locked. 1798 * If we are doing a vput, the node is already locked, 1799 * but, in the case of vrele, we must explicitly lock 1800 * the vnode before calling VOP_INACTIVE. 1801 */ 1802 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) 1803 VOP_INACTIVE(vp, td); 1804 if (VSHOULDFREE(vp)) 1805 vfree(vp); 1806 else 1807 vlruvp(vp); 1808 1809 } else { 1810#ifdef DIAGNOSTIC 1811 vprint("vrele: negative ref count", vp); 1812 mtx_unlock(&vp->v_interlock); 1813#endif 1814 panic("vrele: negative ref cnt"); 1815 } 1816} 1817 1818/* 1819 * Release an already locked vnode. This give the same effects as 1820 * unlock+vrele(), but takes less time and avoids releasing and 1821 * re-aquiring the lock (as vrele() aquires the lock internally.) 1822 */ 1823void 1824vput(vp) 1825 struct vnode *vp; 1826{ 1827 struct thread *td = curthread; /* XXX */ 1828 1829 GIANT_REQUIRED; 1830 1831 KASSERT(vp != NULL, ("vput: null vp")); 1832 mtx_lock(&vp->v_interlock); 1833 /* Skip this v_writecount check if we're going to panic below. 
*/ 1834 KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, 1835 ("vput: missed vn_close")); 1836 1837 if (vp->v_usecount > 1) { 1838 vp->v_usecount--; 1839 VOP_UNLOCK(vp, LK_INTERLOCK, td); 1840 return; 1841 } 1842 1843 if (vp->v_usecount == 1) { 1844 vp->v_usecount--; 1845 /* 1846 * We must call VOP_INACTIVE with the node locked. 1847 * If we are doing a vput, the node is already locked, 1848 * so we just need to release the vnode mutex. 1849 */ 1850 mtx_unlock(&vp->v_interlock); 1851 VOP_INACTIVE(vp, td); 1852 if (VSHOULDFREE(vp)) 1853 vfree(vp); 1854 else 1855 vlruvp(vp); 1856 1857 } else { 1858#ifdef DIAGNOSTIC 1859 vprint("vput: negative ref count", vp); 1860#endif 1861 panic("vput: negative ref cnt"); 1862 } 1863} 1864 1865/* 1866 * Somebody doesn't want the vnode recycled. 1867 */ 1868void 1869vhold(vp) 1870 register struct vnode *vp; 1871{ 1872 int s; 1873 1874 s = splbio(); 1875 vp->v_holdcnt++; 1876 if (VSHOULDBUSY(vp)) 1877 vbusy(vp); 1878 splx(s); 1879} 1880 1881/* 1882 * Note that there is one less who cares about this vnode. vdrop() is the 1883 * opposite of vhold(). 1884 */ 1885void 1886vdrop(vp) 1887 register struct vnode *vp; 1888{ 1889 int s; 1890 1891 s = splbio(); 1892 if (vp->v_holdcnt <= 0) 1893 panic("vdrop: holdcnt"); 1894 vp->v_holdcnt--; 1895 if (VSHOULDFREE(vp)) 1896 vfree(vp); 1897 else 1898 vlruvp(vp); 1899 splx(s); 1900} 1901 1902/* 1903 * Remove any vnodes in the vnode table belonging to mount point mp. 1904 * 1905 * If FORCECLOSE is not specified, there should not be any active ones, 1906 * return error if any are found (nb: this is a user error, not a 1907 * system error). If FORCECLOSE is specified, detach any active vnodes 1908 * that are found. 1909 * 1910 * If WRITECLOSE is set, only flush out regular file vnodes open for 1911 * writing. 1912 * 1913 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped. 1914 * 1915 * `rootrefs' specifies the base reference count for the root vnode 1916 * of this filesystem. The root vnode is considered busy if its 1917 * v_usecount exceeds this value. On a successful return, vflush() 1918 * will call vrele() on the root vnode exactly rootrefs times. 1919 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 1920 * be zero. 1921 */ 1922#ifdef DIAGNOSTIC 1923static int busyprt = 0; /* print out busy vnodes */ 1924SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 1925#endif 1926 1927int 1928vflush(mp, rootrefs, flags) 1929 struct mount *mp; 1930 int rootrefs; 1931 int flags; 1932{ 1933 struct thread *td = curthread; /* XXX */ 1934 struct vnode *vp, *nvp, *rootvp = NULL; 1935 struct vattr vattr; 1936 int busy = 0, error; 1937 1938 if (rootrefs > 0) { 1939 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 1940 ("vflush: bad args")); 1941 /* 1942 * Get the filesystem root vnode. We can vput() it 1943 * immediately, since with rootrefs > 0, it won't go away. 1944 */ 1945 if ((error = VFS_ROOT(mp, &rootvp)) != 0) 1946 return (error); 1947 vput(rootvp); 1948 } 1949 mtx_lock(&mntvnode_mtx); 1950loop: 1951 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) { 1952 /* 1953 * Make sure this vnode wasn't reclaimed in getnewvnode(). 1954 * Start over if it has (it won't be on the list anymore). 1955 */ 1956 if (vp->v_mount != mp) 1957 goto loop; 1958 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 1959 1960 mtx_unlock(&mntvnode_mtx); 1961 mtx_lock(&vp->v_interlock); 1962 /* 1963 * Skip over a vnodes marked VSYSTEM. 
1964 */ 1965 if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 1966 mtx_unlock(&vp->v_interlock); 1967 mtx_lock(&mntvnode_mtx); 1968 continue; 1969 } 1970 /* 1971 * If WRITECLOSE is set, flush out unlinked but still open 1972 * files (even if open only for reading) and regular file 1973 * vnodes open for writing. 1974 */ 1975 if ((flags & WRITECLOSE) && 1976 (vp->v_type == VNON || 1977 (VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 && 1978 vattr.va_nlink > 0)) && 1979 (vp->v_writecount == 0 || vp->v_type != VREG)) { 1980 mtx_unlock(&vp->v_interlock); 1981 mtx_lock(&mntvnode_mtx); 1982 continue; 1983 } 1984 1985 /* 1986 * With v_usecount == 0, all we need to do is clear out the 1987 * vnode data structures and we are done. 1988 */ 1989 if (vp->v_usecount == 0) { 1990 vgonel(vp, td); 1991 mtx_lock(&mntvnode_mtx); 1992 continue; 1993 } 1994 1995 /* 1996 * If FORCECLOSE is set, forcibly close the vnode. For block 1997 * or character devices, revert to an anonymous device. For 1998 * all other files, just kill them. 1999 */ 2000 if (flags & FORCECLOSE) { 2001 if (vp->v_type != VCHR) { 2002 vgonel(vp, td); 2003 } else { 2004 vclean(vp, 0, td); 2005 vp->v_op = spec_vnodeop_p; 2006 insmntque(vp, (struct mount *) 0); 2007 } 2008 mtx_lock(&mntvnode_mtx); 2009 continue; 2010 } 2011#ifdef DIAGNOSTIC 2012 if (busyprt) 2013 vprint("vflush: busy vnode", vp); 2014#endif 2015 mtx_unlock(&vp->v_interlock); 2016 mtx_lock(&mntvnode_mtx); 2017 busy++; 2018 } 2019 mtx_unlock(&mntvnode_mtx); 2020 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2021 /* 2022 * If just the root vnode is busy, and if its refcount 2023 * is equal to `rootrefs', then go ahead and kill it. 2024 */ 2025 mtx_lock(&rootvp->v_interlock); 2026 KASSERT(busy > 0, ("vflush: not busy")); 2027 KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs")); 2028 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2029 vgonel(rootvp, td); 2030 busy = 0; 2031 } else 2032 mtx_unlock(&rootvp->v_interlock); 2033 } 2034 if (busy) 2035 return (EBUSY); 2036 for (; rootrefs > 0; rootrefs--) 2037 vrele(rootvp); 2038 return (0); 2039} 2040 2041/* 2042 * This moves a now (likely recyclable) vnode to the end of the 2043 * mountlist. XXX However, it is temporarily disabled until we 2044 * can clean up ffs_sync() and friends, which have loop restart 2045 * conditions which this code causes to operate O(N^2). 2046 */ 2047static void 2048vlruvp(struct vnode *vp) 2049{ 2050#if 0 2051 struct mount *mp; 2052 2053 if ((mp = vp->v_mount) != NULL) { 2054 mtx_lock(&mntvnode_mtx); 2055 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2056 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2057 mtx_unlock(&mntvnode_mtx); 2058 } 2059#endif 2060} 2061 2062/* 2063 * Disassociate the underlying file system from a vnode. 2064 */ 2065static void 2066vclean(vp, flags, td) 2067 struct vnode *vp; 2068 int flags; 2069 struct thread *td; 2070{ 2071 int active; 2072 2073 /* 2074 * Check to see if the vnode is in use. If so we have to reference it 2075 * before we clean it out so that its count cannot fall to zero and 2076 * generate a race against ourselves to recycle it. 2077 */ 2078 if ((active = vp->v_usecount)) 2079 vp->v_usecount++; 2080 2081 /* 2082 * Prevent the vnode from being recycled or brought into use while we 2083 * clean it out. 
2084 */ 2085 if (vp->v_flag & VXLOCK) 2086 panic("vclean: deadlock"); 2087 vp->v_flag |= VXLOCK; 2088 vp->v_vxproc = curthread; 2089 /* 2090 * Even if the count is zero, the VOP_INACTIVE routine may still 2091 * have the object locked while it cleans it out. The VOP_LOCK 2092 * ensures that the VOP_INACTIVE routine is done with its work. 2093 * For active vnodes, it ensures that no other activity can 2094 * occur while the underlying object is being cleaned out. 2095 */ 2096 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td); 2097 2098 /* 2099 * Clean out any buffers associated with the vnode. 2100 * If the flush fails, just toss the buffers. 2101 */ 2102 if (flags & DOCLOSE) { 2103 if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL) 2104 (void) vn_write_suspend_wait(vp, NULL, V_WAIT); 2105 if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0) 2106 vinvalbuf(vp, 0, NOCRED, td, 0, 0); 2107 } 2108 2109 VOP_DESTROYVOBJECT(vp); 2110 2111 /* 2112 * If purging an active vnode, it must be closed and 2113 * deactivated before being reclaimed. Note that the 2114 * VOP_INACTIVE will unlock the vnode. 2115 */ 2116 if (active) { 2117 if (flags & DOCLOSE) 2118 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2119 VOP_INACTIVE(vp, td); 2120 } else { 2121 /* 2122 * Any other processes trying to obtain this lock must first 2123 * wait for VXLOCK to clear, then call the new lock operation. 2124 */ 2125 VOP_UNLOCK(vp, 0, td); 2126 } 2127 /* 2128 * Reclaim the vnode. 2129 */ 2130 if (VOP_RECLAIM(vp, td)) 2131 panic("vclean: cannot reclaim"); 2132 2133 if (active) { 2134 /* 2135 * Inline copy of vrele() since VOP_INACTIVE 2136 * has already been called. 2137 */ 2138 mtx_lock(&vp->v_interlock); 2139 if (--vp->v_usecount <= 0) { 2140#ifdef DIAGNOSTIC 2141 if (vp->v_usecount < 0 || vp->v_writecount != 0) { 2142 vprint("vclean: bad ref count", vp); 2143 panic("vclean: ref cnt"); 2144 } 2145#endif 2146 vfree(vp); 2147 } 2148 mtx_unlock(&vp->v_interlock); 2149 } 2150 2151 cache_purge(vp); 2152 vp->v_vnlock = NULL; 2153 lockdestroy(&vp->v_lock); 2154 2155 if (VSHOULDFREE(vp)) 2156 vfree(vp); 2157 2158 /* 2159 * Done with purge, notify sleepers of the grim news. 2160 */ 2161 vp->v_op = dead_vnodeop_p; 2162 if (vp->v_pollinfo != NULL) 2163 vn_pollgone(vp); 2164 vp->v_tag = VT_NON; 2165 vp->v_flag &= ~VXLOCK; 2166 vp->v_vxproc = NULL; 2167 if (vp->v_flag & VXWANT) { 2168 vp->v_flag &= ~VXWANT; 2169 wakeup((caddr_t) vp); 2170 } 2171} 2172 2173/* 2174 * Eliminate all activity associated with the requested vnode 2175 * and with all vnodes aliased to the requested vnode. 2176 */ 2177int 2178vop_revoke(ap) 2179 struct vop_revoke_args /* { 2180 struct vnode *a_vp; 2181 int a_flags; 2182 } */ *ap; 2183{ 2184 struct vnode *vp, *vq; 2185 dev_t dev; 2186 2187 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke")); 2188 2189 vp = ap->a_vp; 2190 /* 2191 * If a vgone (or vclean) is already in progress, 2192 * wait until it is done and return. 2193 */ 2194 if (vp->v_flag & VXLOCK) { 2195 vp->v_flag |= VXWANT; 2196 msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP, 2197 "vop_revokeall", 0); 2198 return (0); 2199 } 2200 dev = vp->v_rdev; 2201 for (;;) { 2202 mtx_lock(&spechash_mtx); 2203 vq = SLIST_FIRST(&dev->si_hlist); 2204 mtx_unlock(&spechash_mtx); 2205 if (!vq) 2206 break; 2207 vgone(vq); 2208 } 2209 return (0); 2210} 2211 2212/* 2213 * Recycle an unused vnode to the front of the free list. 2214 * Release the passed interlock if the vnode will be recycled. 
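 *
 * A minimal caller sketch (ip is the filesystem's private inode; this
 * is much as ufs_inactive() does for fully removed files):
 *
 *	if (ip->i_mode == 0)
 *		vrecycle(vp, NULL, td);
 *
 * A caller holding a list or hash lock may pass it as inter_lkp; that
 * lock is released only when vrecycle() actually recycles the vnode
 * and returns 1.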
2215 */ 2216int 2217vrecycle(vp, inter_lkp, td) 2218 struct vnode *vp; 2219 struct mtx *inter_lkp; 2220 struct thread *td; 2221{ 2222 2223 mtx_lock(&vp->v_interlock); 2224 if (vp->v_usecount == 0) { 2225 if (inter_lkp) { 2226 mtx_unlock(inter_lkp); 2227 } 2228 vgonel(vp, td); 2229 return (1); 2230 } 2231 mtx_unlock(&vp->v_interlock); 2232 return (0); 2233} 2234 2235/* 2236 * Eliminate all activity associated with a vnode 2237 * in preparation for reuse. 2238 */ 2239void 2240vgone(vp) 2241 register struct vnode *vp; 2242{ 2243 struct thread *td = curthread; /* XXX */ 2244 2245 mtx_lock(&vp->v_interlock); 2246 vgonel(vp, td); 2247} 2248 2249/* 2250 * vgone, with the vp interlock held. 2251 */ 2252void 2253vgonel(vp, td) 2254 struct vnode *vp; 2255 struct thread *td; 2256{ 2257 int s; 2258 2259 /* 2260 * If a vgone (or vclean) is already in progress, 2261 * wait until it is done and return. 2262 */ 2263 if (vp->v_flag & VXLOCK) { 2264 vp->v_flag |= VXWANT; 2265 msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP, 2266 "vgone", 0); 2267 return; 2268 } 2269 2270 /* 2271 * Clean out the filesystem specific data. 2272 */ 2273 vclean(vp, DOCLOSE, td); 2274 mtx_lock(&vp->v_interlock); 2275 2276 /* 2277 * Delete from old mount point vnode list, if on one. 2278 */ 2279 if (vp->v_mount != NULL) 2280 insmntque(vp, (struct mount *)0); 2281 /* 2282 * If special device, remove it from special device alias list 2283 * if it is on one. 2284 */ 2285 if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) { 2286 mtx_lock(&spechash_mtx); 2287 SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext); 2288 freedev(vp->v_rdev); 2289 mtx_unlock(&spechash_mtx); 2290 vp->v_rdev = NULL; 2291 } 2292 2293 /* 2294 * If it is on the freelist and not already at the head, 2295 * move it to the head of the list. The test of the 2296 * VDOOMED flag and the reference count of zero is because 2297 * it will be removed from the free list by getnewvnode, 2298 * but will not have its reference count incremented until 2299 * after calling vgone. If the reference count were 2300 * incremented first, vgone would (incorrectly) try to 2301 * close the previous instance of the underlying object. 2302 */ 2303 if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { 2304 s = splbio(); 2305 mtx_lock(&vnode_free_list_mtx); 2306 if (vp->v_flag & VFREE) 2307 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2308 else 2309 freevnodes++; 2310 vp->v_flag |= VFREE; 2311 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2312 mtx_unlock(&vnode_free_list_mtx); 2313 splx(s); 2314 } 2315 2316 vp->v_type = VBAD; 2317 mtx_unlock(&vp->v_interlock); 2318} 2319 2320/* 2321 * Lookup a vnode by device number. 2322 */ 2323int 2324vfinddev(dev, type, vpp) 2325 dev_t dev; 2326 enum vtype type; 2327 struct vnode **vpp; 2328{ 2329 struct vnode *vp; 2330 2331 mtx_lock(&spechash_mtx); 2332 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 2333 if (type == vp->v_type) { 2334 *vpp = vp; 2335 mtx_unlock(&spechash_mtx); 2336 return (1); 2337 } 2338 } 2339 mtx_unlock(&spechash_mtx); 2340 return (0); 2341} 2342 2343/* 2344 * Calculate the total number of references to a special device. 
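 *
 * All aliases of a device share one dev_t, so the total is the sum of
 * v_usecount over dev->si_hlist.  Illustrative use, in the spirit of
 * the last-close test in the device close path, where the driver
 * close is skipped while other opens remain:
 *
 *	if (vcount(vp) > 1)
 *		return (0);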
2345 */ 2346int 2347vcount(vp) 2348 struct vnode *vp; 2349{ 2350 struct vnode *vq; 2351 int count; 2352 2353 count = 0; 2354 mtx_lock(&spechash_mtx); 2355 SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext) 2356 count += vq->v_usecount; 2357 mtx_unlock(&spechash_mtx); 2358 return (count); 2359} 2360 2361/* 2362 * Same as above, but using the dev_t as argument 2363 */ 2364int 2365count_dev(dev) 2366 dev_t dev; 2367{ 2368 struct vnode *vp; 2369 2370 vp = SLIST_FIRST(&dev->si_hlist); 2371 if (vp == NULL) 2372 return (0); 2373 return(vcount(vp)); 2374} 2375 2376/* 2377 * Print out a description of a vnode. 2378 */ 2379static char *typename[] = 2380{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 2381 2382void 2383vprint(label, vp) 2384 char *label; 2385 struct vnode *vp; 2386{ 2387 char buf[96]; 2388 2389 if (label != NULL) 2390 printf("%s: %p: ", label, (void *)vp); 2391 else 2392 printf("%p: ", (void *)vp); 2393 printf("type %s, usecount %d, writecount %d, refcount %d,", 2394 typename[vp->v_type], vp->v_usecount, vp->v_writecount, 2395 vp->v_holdcnt); 2396 buf[0] = '\0'; 2397 if (vp->v_flag & VROOT) 2398 strcat(buf, "|VROOT"); 2399 if (vp->v_flag & VTEXT) 2400 strcat(buf, "|VTEXT"); 2401 if (vp->v_flag & VSYSTEM) 2402 strcat(buf, "|VSYSTEM"); 2403 if (vp->v_flag & VXLOCK) 2404 strcat(buf, "|VXLOCK"); 2405 if (vp->v_flag & VXWANT) 2406 strcat(buf, "|VXWANT"); 2407 if (vp->v_flag & VBWAIT) 2408 strcat(buf, "|VBWAIT"); 2409 if (vp->v_flag & VDOOMED) 2410 strcat(buf, "|VDOOMED"); 2411 if (vp->v_flag & VFREE) 2412 strcat(buf, "|VFREE"); 2413 if (vp->v_flag & VOBJBUF) 2414 strcat(buf, "|VOBJBUF"); 2415 if (buf[0] != '\0') 2416 printf(" flags (%s)", &buf[1]); 2417 if (vp->v_data == NULL) { 2418 printf("\n"); 2419 } else { 2420 printf("\n\t"); 2421 VOP_PRINT(vp); 2422 } 2423} 2424 2425#ifdef DDB 2426#include <ddb/ddb.h> 2427/* 2428 * List all of the locked vnodes in the system. 2429 * Called when debugging the kernel. 2430 */ 2431DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) 2432{ 2433 struct thread *td = curthread; /* XXX */ 2434 struct mount *mp, *nmp; 2435 struct vnode *vp; 2436 2437 printf("Locked vnodes\n"); 2438 mtx_lock(&mountlist_mtx); 2439 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2440 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) { 2441 nmp = TAILQ_NEXT(mp, mnt_list); 2442 continue; 2443 } 2444 mtx_lock(&mntvnode_mtx); 2445 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2446 if (VOP_ISLOCKED(vp, NULL)) 2447 vprint((char *)0, vp); 2448 } 2449 mtx_unlock(&mntvnode_mtx); 2450 mtx_lock(&mountlist_mtx); 2451 nmp = TAILQ_NEXT(mp, mnt_list); 2452 vfs_unbusy(mp, td); 2453 } 2454 mtx_unlock(&mountlist_mtx); 2455} 2456#endif 2457 2458/* 2459 * Top level filesystem related information gathering. 2460 */ 2461static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 2462 2463static int 2464vfs_sysctl(SYSCTL_HANDLER_ARGS) 2465{ 2466 int *name = (int *)arg1 - 1; /* XXX */ 2467 u_int namelen = arg2 + 1; /* XXX */ 2468 struct vfsconf *vfsp; 2469 2470#if 1 || defined(COMPAT_PRELITE2) 2471 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 2472 if (namelen == 1) 2473 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2474#endif 2475 2476 /* XXX the below code does not compile; vfs_sysctl does not exist. 
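 *
 * The cases that are compiled in (VFS_MAXTYPENUM and VFS_CONF,
 * handled below) are reached from userland through a MIB of the
 * following form, roughly as getvfsbyname(3)-style code builds it
 * (illustrative sketch):
 *
 *	int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, typenum };
 *	struct vfsconf vfc;
 *	size_t len = sizeof(vfc);
 *	sysctl(mib, 4, &vfc, &len, NULL, 0);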
*/ 2477#ifdef notyet 2478 /* all sysctl names at this level are at least name and field */ 2479 if (namelen < 2) 2480 return (ENOTDIR); /* overloaded */ 2481 if (name[0] != VFS_GENERIC) { 2482 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2483 if (vfsp->vfc_typenum == name[0]) 2484 break; 2485 if (vfsp == NULL) 2486 return (EOPNOTSUPP); 2487 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 2488 oldp, oldlenp, newp, newlen, td)); 2489 } 2490#endif 2491 switch (name[1]) { 2492 case VFS_MAXTYPENUM: 2493 if (namelen != 2) 2494 return (ENOTDIR); 2495 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2496 case VFS_CONF: 2497 if (namelen != 3) 2498 return (ENOTDIR); /* overloaded */ 2499 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2500 if (vfsp->vfc_typenum == name[2]) 2501 break; 2502 if (vfsp == NULL) 2503 return (EOPNOTSUPP); 2504 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 2505 } 2506 return (EOPNOTSUPP); 2507} 2508 2509SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 2510 "Generic filesystem"); 2511 2512#if 1 || defined(COMPAT_PRELITE2) 2513 2514static int 2515sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 2516{ 2517 int error; 2518 struct vfsconf *vfsp; 2519 struct ovfsconf ovfs; 2520 2521 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 2522 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2523 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2524 ovfs.vfc_index = vfsp->vfc_typenum; 2525 ovfs.vfc_refcount = vfsp->vfc_refcount; 2526 ovfs.vfc_flags = vfsp->vfc_flags; 2527 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2528 if (error) 2529 return error; 2530 } 2531 return 0; 2532} 2533 2534#endif /* 1 || COMPAT_PRELITE2 */ 2535 2536#if COMPILING_LINT 2537#define KINFO_VNODESLOP 10 2538/* 2539 * Dump vnode list (via sysctl). 2540 * Copyout address of vnode followed by vnode. 2541 */ 2542/* ARGSUSED */ 2543static int 2544sysctl_vnode(SYSCTL_HANDLER_ARGS) 2545{ 2546 struct thread *td = curthread; /* XXX */ 2547 struct mount *mp, *nmp; 2548 struct vnode *nvp, *vp; 2549 int error; 2550 2551#define VPTRSZ sizeof (struct vnode *) 2552#define VNODESZ sizeof (struct vnode) 2553 2554 req->lock = 0; 2555 if (!req->oldptr) /* Make an estimate */ 2556 return (SYSCTL_OUT(req, 0, 2557 (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); 2558 2559 mtx_lock(&mountlist_mtx); 2560 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2561 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) { 2562 nmp = TAILQ_NEXT(mp, mnt_list); 2563 continue; 2564 } 2565 mtx_lock(&mntvnode_mtx); 2566again: 2567 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 2568 vp != NULL; 2569 vp = nvp) { 2570 /* 2571 * Check that the vp is still associated with 2572 * this filesystem. RACE: could have been 2573 * recycled onto the same filesystem. 2574 */ 2575 if (vp->v_mount != mp) 2576 goto again; 2577 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2578 mtx_unlock(&mntvnode_mtx); 2579 if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || 2580 (error = SYSCTL_OUT(req, vp, VNODESZ))) 2581 return (error); 2582 mtx_lock(&mntvnode_mtx); 2583 } 2584 mtx_unlock(&mntvnode_mtx); 2585 mtx_lock(&mountlist_mtx); 2586 nmp = TAILQ_NEXT(mp, mnt_list); 2587 vfs_unbusy(mp, td); 2588 } 2589 mtx_unlock(&mountlist_mtx); 2590 2591 return (0); 2592} 2593 2594/* 2595 * XXX 2596 * Exporting the vnode list on large systems causes them to crash. 2597 * Exporting the vnode list on medium systems causes sysctl to coredump. 
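 *
 * For reference, each record copied out by sysctl_vnode() above is a
 * vnode pointer followed by the vnode itself, so a consumer
 * (historically pstat(8)-style code) walks the returned buffer in
 * VPTRSZ + VNODESZ strides, along these lines (illustrative):
 *
 *	for (p = buf; p + VPTRSZ + VNODESZ <= buf + len;
 *	    p += VPTRSZ + VNODESZ) {
 *		kernel_vp = *(struct vnode **)p;
 *		vn = (struct vnode *)(p + VPTRSZ);
 *	}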
2598 */ 2599SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 2600 0, 0, sysctl_vnode, "S,vnode", ""); 2601#endif 2602 2603/* 2604 * Check to see if a filesystem is mounted on a block device. 2605 */ 2606int 2607vfs_mountedon(vp) 2608 struct vnode *vp; 2609{ 2610 2611 if (vp->v_rdev->si_mountpoint != NULL) 2612 return (EBUSY); 2613 return (0); 2614} 2615 2616/* 2617 * Unmount all filesystems. The list is traversed in reverse order 2618 * of mounting to avoid dependencies. 2619 */ 2620void 2621vfs_unmountall() 2622{ 2623 struct mount *mp; 2624 struct thread *td; 2625 int error; 2626 2627 if (curthread != NULL) 2628 td = curthread; 2629 else 2630 td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */ 2631 /* 2632 * Since this only runs when rebooting, it is not interlocked. 2633 */ 2634 while(!TAILQ_EMPTY(&mountlist)) { 2635 mp = TAILQ_LAST(&mountlist, mntlist); 2636 error = dounmount(mp, MNT_FORCE, td); 2637 if (error) { 2638 TAILQ_REMOVE(&mountlist, mp, mnt_list); 2639 printf("unmount of %s failed (", 2640 mp->mnt_stat.f_mntonname); 2641 if (error == EBUSY) 2642 printf("BUSY)\n"); 2643 else 2644 printf("%d)\n", error); 2645 } else { 2646 /* The unmount has removed mp from the mountlist */ 2647 } 2648 } 2649} 2650 2651/* 2652 * perform msync on all vnodes under a mount point 2653 * the mount point must be locked. 2654 */ 2655void 2656vfs_msync(struct mount *mp, int flags) 2657{ 2658 struct vnode *vp, *nvp; 2659 struct vm_object *obj; 2660 int tries; 2661 2662 GIANT_REQUIRED; 2663 2664 tries = 5; 2665 mtx_lock(&mntvnode_mtx); 2666loop: 2667 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) { 2668 if (vp->v_mount != mp) { 2669 if (--tries > 0) 2670 goto loop; 2671 break; 2672 } 2673 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2674 2675 if (vp->v_flag & VXLOCK) /* XXX: what if MNT_WAIT? */ 2676 continue; 2677 2678 if (vp->v_flag & VNOSYNC) /* unlinked, skip it */ 2679 continue; 2680 2681 if ((vp->v_flag & VOBJDIRTY) && 2682 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2683 mtx_unlock(&mntvnode_mtx); 2684 if (!vget(vp, 2685 LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curthread)) { 2686 if (VOP_GETVOBJECT(vp, &obj) == 0) { 2687 vm_object_page_clean(obj, 0, 0, 2688 flags == MNT_WAIT ? 2689 OBJPC_SYNC : OBJPC_NOSYNC); 2690 } 2691 vput(vp); 2692 } 2693 mtx_lock(&mntvnode_mtx); 2694 if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) { 2695 if (--tries > 0) 2696 goto loop; 2697 break; 2698 } 2699 } 2700 } 2701 mtx_unlock(&mntvnode_mtx); 2702} 2703 2704/* 2705 * Create the VM object needed for VMIO and mmap support. This 2706 * is done for all VREG files in the system. Some filesystems might 2707 * afford the additional metadata buffering capability of the 2708 * VMIO code by making the device node be VMIO mode also. 2709 * 2710 * vp must be locked when vfs_object_create is called. 2711 */ 2712int 2713vfs_object_create(vp, td, cred) 2714 struct vnode *vp; 2715 struct thread *td; 2716 struct ucred *cred; 2717{ 2718 GIANT_REQUIRED; 2719 return (VOP_CREATEVOBJECT(vp, cred, td)); 2720} 2721 2722/* 2723 * Mark a vnode as free, putting it up for recycling. 
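 *
 * vfree() and vbusy() below are normally reached via the
 * VSHOULDFREE() and VSHOULDBUSY() tests used in vput(), vhold() and
 * vdrop() above.  Loosely paraphrased (the real macros in sys/vnode.h
 * also look at VDOOMED and the backing VM object):
 *
 *	no VFREE flag, v_usecount == 0, v_holdcnt == 0  ->  vfree()
 *	VFREE set and a use or hold reference appears   ->  vbusy()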
2724 */ 2725void 2726vfree(vp) 2727 struct vnode *vp; 2728{ 2729 int s; 2730 2731 s = splbio(); 2732 mtx_lock(&vnode_free_list_mtx); 2733 KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free")); 2734 if (vp->v_flag & VAGE) { 2735 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2736 } else { 2737 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2738 } 2739 freevnodes++; 2740 mtx_unlock(&vnode_free_list_mtx); 2741 vp->v_flag &= ~VAGE; 2742 vp->v_flag |= VFREE; 2743 splx(s); 2744} 2745 2746/* 2747 * Opposite of vfree() - mark a vnode as in use. 2748 */ 2749void 2750vbusy(vp) 2751 struct vnode *vp; 2752{ 2753 int s; 2754 2755 s = splbio(); 2756 mtx_lock(&vnode_free_list_mtx); 2757 KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free")); 2758 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2759 freevnodes--; 2760 mtx_unlock(&vnode_free_list_mtx); 2761 vp->v_flag &= ~(VFREE|VAGE); 2762 splx(s); 2763} 2764 2765/* 2766 * Record a process's interest in events which might happen to 2767 * a vnode. Because poll uses the historic select-style interface 2768 * internally, this routine serves as both the ``check for any 2769 * pending events'' and the ``record my interest in future events'' 2770 * functions. (These are done together, while the lock is held, 2771 * to avoid race conditions.) 2772 */ 2773int 2774vn_pollrecord(vp, td, events) 2775 struct vnode *vp; 2776 struct thread *td; 2777 short events; 2778{ 2779 2780 if (vp->v_pollinfo == NULL) 2781 v_addpollinfo(vp); 2782 mtx_lock(&vp->v_pollinfo->vpi_lock); 2783 if (vp->v_pollinfo->vpi_revents & events) { 2784 /* 2785 * This leaves events we are not interested 2786 * in available for the other process which 2787 * which presumably had requested them 2788 * (otherwise they would never have been 2789 * recorded). 2790 */ 2791 events &= vp->v_pollinfo->vpi_revents; 2792 vp->v_pollinfo->vpi_revents &= ~events; 2793 2794 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2795 return events; 2796 } 2797 vp->v_pollinfo->vpi_events |= events; 2798 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 2799 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2800 return 0; 2801} 2802 2803/* 2804 * Note the occurrence of an event. If the VN_POLLEVENT macro is used, 2805 * it is possible for us to miss an event due to race conditions, but 2806 * that condition is expected to be rare, so for the moment it is the 2807 * preferred interface. 2808 */ 2809void 2810vn_pollevent(vp, events) 2811 struct vnode *vp; 2812 short events; 2813{ 2814 2815 if (vp->v_pollinfo == NULL) 2816 v_addpollinfo(vp); 2817 mtx_lock(&vp->v_pollinfo->vpi_lock); 2818 if (vp->v_pollinfo->vpi_events & events) { 2819 /* 2820 * We clear vpi_events so that we don't 2821 * call selwakeup() twice if two events are 2822 * posted before the polling process(es) is 2823 * awakened. This also ensures that we take at 2824 * most one selwakeup() if the polling process 2825 * is no longer interested. However, it does 2826 * mean that only one event can be noticed at 2827 * a time. (Perhaps we should only clear those 2828 * event bits which we note?) XXX 2829 */ 2830 vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */ 2831 vp->v_pollinfo->vpi_revents |= events; 2832 selwakeup(&vp->v_pollinfo->vpi_selinfo); 2833 } 2834 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2835} 2836 2837/* 2838 * Wake up anyone polling on vp because it is being revoked. 2839 * This depends on dead_poll() returning POLLHUP for correct 2840 * behavior. 
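 *
 * The record/notify pair above is used roughly as follows
 * (illustrative; the producers live in the individual filesystems and
 * in the generic VOP_POLL code):
 *
 *	a VOP_POLL implementation records interest:
 *		return (vn_pollrecord(vp, ap->a_td, ap->a_events));
 *
 *	whoever changes the vnode posts the event:
 *		VN_POLLEVENT(vp, POLLIN | POLLRDNORM);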
2841 */ 2842void 2843vn_pollgone(vp) 2844 struct vnode *vp; 2845{ 2846 2847 mtx_lock(&vp->v_pollinfo->vpi_lock); 2848 VN_KNOTE(vp, NOTE_REVOKE); 2849 if (vp->v_pollinfo->vpi_events) { 2850 vp->v_pollinfo->vpi_events = 0; 2851 selwakeup(&vp->v_pollinfo->vpi_selinfo); 2852 } 2853 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2854} 2855 2856 2857 2858/* 2859 * Routine to create and manage a filesystem syncer vnode. 2860 */ 2861#define sync_close ((int (*)(struct vop_close_args *))nullop) 2862static int sync_fsync(struct vop_fsync_args *); 2863static int sync_inactive(struct vop_inactive_args *); 2864static int sync_reclaim(struct vop_reclaim_args *); 2865#define sync_lock ((int (*)(struct vop_lock_args *))vop_nolock) 2866#define sync_unlock ((int (*)(struct vop_unlock_args *))vop_nounlock) 2867static int sync_print(struct vop_print_args *); 2868#define sync_islocked ((int(*)(struct vop_islocked_args *))vop_noislocked) 2869 2870static vop_t **sync_vnodeop_p; 2871static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 2872 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 2873 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 2874 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 2875 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 2876 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 2877 { &vop_lock_desc, (vop_t *) sync_lock }, /* lock */ 2878 { &vop_unlock_desc, (vop_t *) sync_unlock }, /* unlock */ 2879 { &vop_print_desc, (vop_t *) sync_print }, /* print */ 2880 { &vop_islocked_desc, (vop_t *) sync_islocked }, /* islocked */ 2881 { NULL, NULL } 2882}; 2883static struct vnodeopv_desc sync_vnodeop_opv_desc = 2884 { &sync_vnodeop_p, sync_vnodeop_entries }; 2885 2886VNODEOP_SET(sync_vnodeop_opv_desc); 2887 2888/* 2889 * Create a new filesystem syncer vnode for the specified mount point. 2890 */ 2891int 2892vfs_allocate_syncvnode(mp) 2893 struct mount *mp; 2894{ 2895 struct vnode *vp; 2896 static long start, incr, next; 2897 int error; 2898 2899 /* Allocate a new vnode */ 2900 if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) { 2901 mp->mnt_syncer = NULL; 2902 return (error); 2903 } 2904 vp->v_type = VNON; 2905 /* 2906 * Place the vnode onto the syncer worklist. We attempt to 2907 * scatter them about on the list so that they will go off 2908 * at evenly distributed times even if all the filesystems 2909 * are mounted at once. 2910 */ 2911 next += incr; 2912 if (next == 0 || next > syncer_maxdelay) { 2913 start /= 2; 2914 incr /= 2; 2915 if (start == 0) { 2916 start = syncer_maxdelay / 2; 2917 incr = syncer_maxdelay; 2918 } 2919 next = start; 2920 } 2921 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 2922 mp->mnt_syncer = vp; 2923 return (0); 2924} 2925 2926/* 2927 * Do a lazy sync of the filesystem. 2928 */ 2929static int 2930sync_fsync(ap) 2931 struct vop_fsync_args /* { 2932 struct vnode *a_vp; 2933 struct ucred *a_cred; 2934 int a_waitfor; 2935 struct thread *a_td; 2936 } */ *ap; 2937{ 2938 struct vnode *syncvp = ap->a_vp; 2939 struct mount *mp = syncvp->v_mount; 2940 struct thread *td = ap->a_td; 2941 int asyncflag; 2942 2943 /* 2944 * We only need to do something if this is a lazy evaluation. 2945 */ 2946 if (ap->a_waitfor != MNT_LAZY) 2947 return (0); 2948 2949 /* 2950 * Move ourselves to the back of the sync list. 2951 */ 2952 vn_syncer_add_to_worklist(syncvp, syncdelay); 2953 2954 /* 2955 * Walk the list of vnodes pushing all that are dirty and 2956 * not already on the sync list. 
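 *
 * (This VOP is invoked from the syncer daemon, which in essence does
 * the following for each worklist entry whose delay has expired;
 * details and locking elided:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
 *	VOP_UNLOCK(vp, 0, td);
 *
 * so for the syncer vnode of a mount, this routine ends up doing the
 * per-filesystem walk via vfs_msync() and VFS_SYNC() below.)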
2957 */ 2958 mtx_lock(&mountlist_mtx); 2959 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) { 2960 mtx_unlock(&mountlist_mtx); 2961 return (0); 2962 } 2963 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 2964 vfs_unbusy(mp, td); 2965 return (0); 2966 } 2967 asyncflag = mp->mnt_flag & MNT_ASYNC; 2968 mp->mnt_flag &= ~MNT_ASYNC; 2969 vfs_msync(mp, MNT_NOWAIT); 2970 VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td); 2971 if (asyncflag) 2972 mp->mnt_flag |= MNT_ASYNC; 2973 vn_finished_write(mp); 2974 vfs_unbusy(mp, td); 2975 return (0); 2976} 2977 2978/* 2979 * The syncer vnode is no referenced. 2980 */ 2981static int 2982sync_inactive(ap) 2983 struct vop_inactive_args /* { 2984 struct vnode *a_vp; 2985 struct thread *a_td; 2986 } */ *ap; 2987{ 2988 2989 vgone(ap->a_vp); 2990 return (0); 2991} 2992 2993/* 2994 * The syncer vnode is no longer needed and is being decommissioned. 2995 * 2996 * Modifications to the worklist must be protected at splbio(). 2997 */ 2998static int 2999sync_reclaim(ap) 3000 struct vop_reclaim_args /* { 3001 struct vnode *a_vp; 3002 } */ *ap; 3003{ 3004 struct vnode *vp = ap->a_vp; 3005 int s; 3006 3007 s = splbio(); 3008 vp->v_mount->mnt_syncer = NULL; 3009 if (vp->v_flag & VONWORKLST) { 3010 LIST_REMOVE(vp, v_synclist); 3011 vp->v_flag &= ~VONWORKLST; 3012 } 3013 splx(s); 3014 3015 return (0); 3016} 3017 3018/* 3019 * Print out a syncer vnode. 3020 */ 3021static int 3022sync_print(ap) 3023 struct vop_print_args /* { 3024 struct vnode *a_vp; 3025 } */ *ap; 3026{ 3027 struct vnode *vp = ap->a_vp; 3028 3029 printf("syncer vnode"); 3030 if (vp->v_vnlock != NULL) 3031 lockmgr_printinfo(vp->v_vnlock); 3032 printf("\n"); 3033 return (0); 3034} 3035 3036/* 3037 * extract the dev_t from a VCHR 3038 */ 3039dev_t 3040vn_todev(vp) 3041 struct vnode *vp; 3042{ 3043 if (vp->v_type != VCHR) 3044 return (NODEV); 3045 return (vp->v_rdev); 3046} 3047 3048/* 3049 * Check if vnode represents a disk device 3050 */ 3051int 3052vn_isdisk(vp, errp) 3053 struct vnode *vp; 3054 int *errp; 3055{ 3056 struct cdevsw *cdevsw; 3057 3058 if (vp->v_type != VCHR) { 3059 if (errp != NULL) 3060 *errp = ENOTBLK; 3061 return (0); 3062 } 3063 if (vp->v_rdev == NULL) { 3064 if (errp != NULL) 3065 *errp = ENXIO; 3066 return (0); 3067 } 3068 cdevsw = devsw(vp->v_rdev); 3069 if (cdevsw == NULL) { 3070 if (errp != NULL) 3071 *errp = ENXIO; 3072 return (0); 3073 } 3074 if (!(cdevsw->d_flags & D_DISK)) { 3075 if (errp != NULL) 3076 *errp = ENOTBLK; 3077 return (0); 3078 } 3079 if (errp != NULL) 3080 *errp = 0; 3081 return (1); 3082} 3083 3084/* 3085 * Free data allocated by namei(); see namei(9) for details. 
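 *
 * Typical calling sequence (illustrative; see namei(9)):
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, td);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	vp = nd.ni_vp;
 *	...
 *	vput(vp);
 *
 * NDF_ONLY_PNBUF releases just the pathname buffer and leaves the
 * vnode references and locks for the caller to dispose of.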
3086 */ 3087void 3088NDFREE(ndp, flags) 3089 struct nameidata *ndp; 3090 const uint flags; 3091{ 3092 if (!(flags & NDF_NO_FREE_PNBUF) && 3093 (ndp->ni_cnd.cn_flags & HASBUF)) { 3094 uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); 3095 ndp->ni_cnd.cn_flags &= ~HASBUF; 3096 } 3097 if (!(flags & NDF_NO_DVP_UNLOCK) && 3098 (ndp->ni_cnd.cn_flags & LOCKPARENT) && 3099 ndp->ni_dvp != ndp->ni_vp) 3100 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread); 3101 if (!(flags & NDF_NO_DVP_RELE) && 3102 (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) { 3103 vrele(ndp->ni_dvp); 3104 ndp->ni_dvp = NULL; 3105 } 3106 if (!(flags & NDF_NO_VP_UNLOCK) && 3107 (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp) 3108 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread); 3109 if (!(flags & NDF_NO_VP_RELE) && 3110 ndp->ni_vp) { 3111 vrele(ndp->ni_vp); 3112 ndp->ni_vp = NULL; 3113 } 3114 if (!(flags & NDF_NO_STARTDIR_RELE) && 3115 (ndp->ni_cnd.cn_flags & SAVESTART)) { 3116 vrele(ndp->ni_startdir); 3117 ndp->ni_startdir = NULL; 3118 } 3119} 3120 3121/* 3122 * Common file system object access control check routine. Accepts a 3123 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3124 * and optional call-by-reference privused argument allowing vaccess() 3125 * to indicate to the caller whether privilege was used to satisfy the 3126 * request. Returns 0 on success, or an errno on failure. 3127 */ 3128int 3129vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused) 3130 enum vtype type; 3131 mode_t file_mode; 3132 uid_t file_uid; 3133 gid_t file_gid; 3134 mode_t acc_mode; 3135 struct ucred *cred; 3136 int *privused; 3137{ 3138 mode_t dac_granted; 3139#ifdef CAPABILITIES 3140 mode_t cap_granted; 3141#endif 3142 3143 /* 3144 * Look for a normal, non-privileged way to access the file/directory 3145 * as requested. If it exists, go with that. 3146 */ 3147 3148 if (privused != NULL) 3149 *privused = 0; 3150 3151 dac_granted = 0; 3152 3153 /* Check the owner. */ 3154 if (cred->cr_uid == file_uid) { 3155 dac_granted |= VADMIN; 3156 if (file_mode & S_IXUSR) 3157 dac_granted |= VEXEC; 3158 if (file_mode & S_IRUSR) 3159 dac_granted |= VREAD; 3160 if (file_mode & S_IWUSR) 3161 dac_granted |= VWRITE; 3162 3163 if ((acc_mode & dac_granted) == acc_mode) 3164 return (0); 3165 3166 goto privcheck; 3167 } 3168 3169 /* Otherwise, check the groups (first match) */ 3170 if (groupmember(file_gid, cred)) { 3171 if (file_mode & S_IXGRP) 3172 dac_granted |= VEXEC; 3173 if (file_mode & S_IRGRP) 3174 dac_granted |= VREAD; 3175 if (file_mode & S_IWGRP) 3176 dac_granted |= VWRITE; 3177 3178 if ((acc_mode & dac_granted) == acc_mode) 3179 return (0); 3180 3181 goto privcheck; 3182 } 3183 3184 /* Otherwise, check everyone else. */ 3185 if (file_mode & S_IXOTH) 3186 dac_granted |= VEXEC; 3187 if (file_mode & S_IROTH) 3188 dac_granted |= VREAD; 3189 if (file_mode & S_IWOTH) 3190 dac_granted |= VWRITE; 3191 if ((acc_mode & dac_granted) == acc_mode) 3192 return (0); 3193 3194privcheck: 3195 if (!suser_cred(cred, PRISON_ROOT)) { 3196 /* XXX audit: privilege used */ 3197 if (privused != NULL) 3198 *privused = 1; 3199 return (0); 3200 } 3201 3202#ifdef CAPABILITIES 3203 /* 3204 * Build a capability mask to determine if the set of capabilities 3205 * satisfies the requirements when combined with the granted mask 3206 * from above. 3207 * For each capability, if the capability is required, bitwise 3208 * or the request type onto the cap_granted mask. 
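 *
 * A worked example of the DAC step above: for file_mode 0640 and a
 * credential that matches only file_gid, dac_granted ends up VREAD.
 *
 *	acc_mode == VREAD         ->  granted, return (0)
 *	acc_mode == VREAD|VWRITE  ->  falls through to privilege checks
 *	acc_mode includes VADMIN  ->  never satisfied by DAC here,
 *	                              since VADMIN is granted only to
 *	                              the file owner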
3209 */ 3210 cap_granted = 0; 3211 3212 if (type == VDIR) { 3213 /* 3214 * For directories, use CAP_DAC_READ_SEARCH to satisfy 3215 * VEXEC requests, instead of CAP_DAC_EXECUTE. 3216 */ 3217 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3218 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT)) 3219 cap_granted |= VEXEC; 3220 } else { 3221 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3222 !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT)) 3223 cap_granted |= VEXEC; 3224 } 3225 3226 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) && 3227 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT)) 3228 cap_granted |= VREAD; 3229 3230 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3231 !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT)) 3232 cap_granted |= VWRITE; 3233 3234 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3235 !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT)) 3236 cap_granted |= VADMIN; 3237 3238 if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) { 3239 /* XXX audit: privilege used */ 3240 if (privused != NULL) 3241 *privused = 1; 3242 return (0); 3243 } 3244#endif 3245 3246 return ((acc_mode & VADMIN) ? EPERM : EACCES); 3247} 3248 3249