vfs_subr.c revision 101040
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: head/sys/kern/vfs_subr.c 101040 2002-07-31 12:24:35Z des $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias(struct vnode *vp, dev_t nvp_rdev);
static void	insmntque(struct vnode *vp, struct mount *mp);
static void	vclean(struct vnode *vp, int flags, struct thread *td);
static void	vlruvp(struct vnode *vp);
static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
		    int slpflag, int slptimeo, int *errorp);
/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long numvnodes;

SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If there are fewer than this many
 * free vnodes, getnewvnode() will return a newly allocated vnode.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int nameileafonly;
SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");

#ifdef ENABLE_VFS_IOOPT
/* See NOTES for a description of this setting. */
int vfs_ioopt;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/* For any iteration/modification of vnode_free_list */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata on mounted
 * block devices is delayed only about half as long as file data.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third as long as file data.  There are SYNCER_MAXDELAY
 * queues that are processed round-robin at a rate of one each second
 * (driven off the filesystem syncer process).  The syncer_delayno
 * variable indicates the next queue that is to be processed.  Items
 * that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
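/*
 * Illustrative sketch (editor's addition, not from the original file):
 * the two formulas in the workitem-queue comment depend on the pending
 * table having a power-of-two size, so that "& syncer_mask" wraps a
 * slot index around the wheel.  hashinit() guarantees this, and
 * vntblinit() below resets syncer_maxdelay to syncer_mask + 1.  A
 * minimal userland model of the arithmetic, with hypothetical demo_*
 * names:
 */
#if 0
#include <stdio.h>

#define DEMO_MAXDELAY	32			/* table size, power of two */
static const int demo_mask = DEMO_MAXDELAY - 1;	/* plays syncer_mask */
static int demo_delayno;			/* slot drained this second */

/* Map "process this item in `delay' seconds" to a wheel slot. */
static int
demo_slot(int delay)
{
	return ((demo_delayno + delay) & demo_mask);
}

int
main(void)
{
	demo_delayno = 30;
	printf("delay 15 -> slot %d\n", demo_slot(15));	/* (30+15)&31 == 13 */
	return (0);
}
#endif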
/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");

/* Hook for calling soft updates */
int (*softdep_process_worklist_hook)(struct mount *);

#ifdef DEBUG_VFS_LOCKS
/* Print lock violations */
int vfs_badlock_print = 1;
/* Panic on violation */
int vfs_badlock_panic = 1;

void
vop_rename_pre(void *ap)
{
	struct vop_rename_args *a = ap;

	/* Check the source (from) */
	if (a->a_tdvp != a->a_fdvp)
		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
	if (a->a_tvp != a->a_fvp)
		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");

	/* Check the target */
	if (a->a_tvp)
		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");

	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
}
void
vop_strategy_pre(void *ap)
{
	struct vop_strategy_args *a = ap;
	struct buf *bp;

	bp = a->a_bp;

	/*
	 * Cluster ops lock their component buffers but not the IO container.
	 */
	if ((bp->b_flags & B_CLUSTER) != 0)
		return;

	if (BUF_REFCNT(bp) < 1) {
		if (vfs_badlock_print)
			printf("VOP_STRATEGY: bp is not locked but should be.\n");
		if (vfs_badlock_panic)
			Debugger("Lock violation.\n");
	}
}

void
vop_lookup_pre(void *ap)
{
	struct vop_lookup_args *a = ap;
	struct vnode *dvp;

	dvp = a->a_dvp;

	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
}

void
vop_lookup_post(void *ap, int rc)
{
	struct vop_lookup_args *a = ap;
	struct componentname *cnp;
	struct vnode *dvp;
	struct vnode *vp;
	int flags;

	dvp = a->a_dvp;
	cnp = a->a_cnp;
	vp = *(a->a_vpp);
	flags = cnp->cn_flags;

	/*
	 * If this is the last path component for this lookup and LOCKPARENT
	 * is set, or if there is an error, the directory has to be locked.
	 */
	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
	else if (rc != 0)
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
	else if (dvp != vp)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");

	if (flags & PDIRUNLOCK)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");

	if (rc == 0)
		ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (vpp)");
}

#endif	/* DEBUG_VFS_LOCKS */

void
v_addpollinfo(struct vnode *vp)
{
	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
}

/*
 * Initialize the vnode management data structures.
 */
static void
vntblinit(void *dummy __unused)
{

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	minvnodes = desiredvnodes / 4;
	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
	mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, td)
	struct mount *mp;
	int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}
/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, td)
	struct mount *mp;
	struct thread *td;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
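/*
 * Illustrative sketch (editor's addition, not from the original file):
 * unpacking the val[0] construction above.  It assumes the historical
 * makeudev(x, y) == ((x) << 8) | (y) layout (an assumption; consult
 * sys/types.h for the real encoding).  The filesystem type lands in
 * bits 24-31, the counter's high byte in bits 16-23, major 255 in bits
 * 8-15 and the counter's low byte in bits 0-7 -- which is why val[0]
 * stays unique for 2^16 calls but unique mod 2^16 for only 2^8 calls.
 */
#if 0
#include <stdio.h>

static unsigned int
demo_fsid_val0(int vfc_typenum, unsigned short mntid_base)
{
	unsigned int mtype, minor;

	mtype = (vfc_typenum & 0xFF) << 24;
	minor = mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF);
	return ((255 << 8) | minor);		/* makeudev(255, minor) */
}

int
main(void)
{
	/* Counters 0x0100 and 0x0200 give distinct val[0]'s ... */
	printf("%#x %#x\n", demo_fsid_val0(1, 0x0100),
	    demo_fsid_val0(1, 0x0200));
	/* ... that collide mod 2^16: only the low counter byte lands low. */
	return (0);
}
#endif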
/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
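/*
 * Illustrative sketch (editor's addition, not from the original file):
 * a userland imitation of the four policies using clock_gettime(),
 * with 10 ms standing in for 1/HZ.  The kernel instead uses the
 * progressively more expensive time_second, getnanotime(), microtime()
 * and nanotime() interfaces.
 */
#if 0
#include <stdio.h>
#include <time.h>

enum { DEMO_SEC, DEMO_HZ, DEMO_USEC, DEMO_NSEC };

static void
demo_timestamp(struct timespec *tsp, int precision)
{
	clock_gettime(CLOCK_REALTIME, tsp);
	switch (precision) {
	case DEMO_SEC:		/* seconds only; nanoseconds zeroed */
		tsp->tv_nsec = 0;
		break;
	case DEMO_HZ:		/* round down to a 10 ms tick */
		tsp->tv_nsec -= tsp->tv_nsec % 10000000;
		break;
	case DEMO_USEC:		/* truncate to microseconds */
		tsp->tv_nsec -= tsp->tv_nsec % 1000;
		break;
	case DEMO_NSEC:		/* maximum precision */
	default:
		break;
	}
}

int
main(void)
{
	struct timespec ts;

	demo_timestamp(&ts, DEMO_USEC);
	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return (0);
}
#endif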
/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp, int count)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;

	done = 0;
	mtx_lock(&mntvnode_mtx);
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		if (vp->v_type != VNON &&
		    vp->v_type != VBAD &&
		    VMIGHTFREE(vp) &&		/* critical path opt */
		    (vp->v_object == NULL ||
		    vp->v_object->resident_page_count < trigger) &&
		    mtx_trylock(&vp->v_interlock)) {
			mtx_unlock(&mntvnode_mtx);
			if (VMIGHTFREE(vp)) {
				vgonel(vp, curthread);
				done++;
			} else {
				mtx_unlock(&vp->v_interlock);
			}
			mtx_lock(&mntvnode_mtx);
		}
		--count;
	}
	mtx_unlock(&mntvnode_mtx);
	return done;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int s;
	int done;
	struct proc *p = vnlruproc;
	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kthread_suspend_check(p);
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			tsleep(vnlruproc, PVFS, "vlruwt", 0);
			continue;
		}
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, 10);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = NULL;
	struct mount *vnmp;
	vm_object_t object;

	s = splbio();
	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	if (vnlruproc_sig == 0 && numvnodes - freevnodes > desiredvnodes) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruproc);
	}

	/*
	 * Attempt to reuse a vnode already on the free list, allocating
	 * a new vnode if we can't find one or if we have not reached a
	 * good minimum for good LRU performance.
	 */

	mtx_lock(&vnode_free_list_mtx);

	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
		int count;

		for (count = 0; count < freevnodes; count++) {
			vp = TAILQ_FIRST(&vnode_free_list);
			if (vp == NULL || vp->v_usecount)
				panic("getnewvnode: free vnode isn't");
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);

			/* Don't recycle if we can't get the interlock */
			if (!mtx_trylock(&vp->v_interlock)) {
				vp = NULL;
				continue;
			}

			/* We should be able to immediately acquire this */
			if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0)
				continue;
			/*
			 * Don't recycle if we still have cached pages.
			 */
			if (VOP_GETVOBJECT(vp, &object) == 0 &&
			    (object->resident_page_count ||
			    object->ref_count)) {
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
				VOP_UNLOCK(vp, 0, td);
				vp = NULL;
				continue;
			}
			if (LIST_FIRST(&vp->v_cache_src)) {
				/*
				 * note: nameileafonly sysctl is temporary,
				 * for debugging only, and will eventually be
				 * removed.
				 */
				if (nameileafonly > 0) {
					/*
					 * Do not reuse namei-cached directory
					 * vnodes that have cached
					 * subdirectories.
					 */
					if (cache_leaf_test(vp) < 0) {
						VOP_UNLOCK(vp, 0, td);
						TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
						vp = NULL;
						continue;
					}
				} else if (nameileafonly < 0 ||
				    vmiodirenable == 0) {
					/*
					 * Do not reuse namei-cached directory
					 * vnodes if nameileafonly is -1 or
					 * if VMIO backing for directories is
					 * turned off (otherwise we reuse them
					 * too quickly).
					 */
					VOP_UNLOCK(vp, 0, td);
					TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
					vp = NULL;
					continue;
				}
			}
			/*
			 * Skip over it if its filesystem is being suspended.
			 */
			if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
				break;
			VOP_UNLOCK(vp, 0, td);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			vp = NULL;
		}
	}
	if (vp) {
		vp->v_flag |= VDOOMED;
		vp->v_flag &= ~VFREE;
		freevnodes--;
		mtx_unlock(&vnode_free_list_mtx);
		cache_purge(vp);
		if (vp->v_type != VBAD) {
			VOP_UNLOCK(vp, 0, td);
			vgone(vp);
		} else {
			VOP_UNLOCK(vp, 0, td);
		}
		vn_finished_write(vnmp);

#ifdef INVARIANTS
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
			if (vp->v_writecount != 0)
				panic("Non-zero write count");
		}
#endif
		if (vp->v_pollinfo) {
			mtx_destroy(&vp->v_pollinfo->vpi_lock);
			uma_zfree(vnodepoll_zone, vp->v_pollinfo);
		}
		vp->v_pollinfo = NULL;
#ifdef MAC
		mac_destroy_vnode(vp);
#endif
		vp->v_flag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
	} else {
		mtx_unlock(&vnode_free_list_mtx);
		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
		vp->v_dd = vp;
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	lockinit(&vp->v_lock, PVFS, "vnlock", VLKTIMEOUT, LK_NOPAUSE);
#ifdef MAC
	mac_init_vnode(vp);
#endif
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	vp->v_cachedid = -1;

	splx(s);

#if 0
	vnodeallocs++;
	if (vnodeallocs % vnoderecycleperiod == 0 &&
	    freevnodes < vnoderecycleminfreevn &&
	    vnoderecyclemintotalvn < numvnodes) {
		/* Recycle vnodes. */
		cache_purgeleafdirs(vnoderecyclenumber);
	}
#endif

	return (0);
}
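/*
 * Illustrative sketch (editor's addition, not from the original file):
 * how a filesystem typically consumes getnewvnode().  The myfs_* names
 * and struct myfs_node are hypothetical; a real caller passes its own
 * vtagtype and vnode op vector and sets v_type from on-disk metadata.
 */
#if 0
struct myfs_node {
	struct vnode *n_vnode;		/* back pointer */
	/* ... filesystem-private fields ... */
};
extern vop_t **myfs_vnodeop_p;		/* hypothetical op vector */

static int
myfs_allocvnode(struct mount *mp, struct myfs_node *np, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	/* Draw a fresh or recycled vnode already tied to this mount. */
	error = getnewvnode(VT_NON, mp, myfs_vnodeop_p, &vp);
	if (error)
		return (error);
	vp->v_data = np;		/* hang private state off v_data */
	np->n_vnode = vp;
	vp->v_type = VREG;
	*vpp = vp;
	return (0);
}
#endif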
/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	mtx_lock(&mntvnode_mtx);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		mtx_unlock(&mntvnode_mtx);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int slpflag, slptimeo;
{
	struct buf *blist;
	int s, error;
	vm_object_t object;

	GIANT_REQUIRED;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep(&vp->v_numoutput,
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	for (error = 0;;) {
		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		break;
	}
	if (error) {
		splx(s);
		return (error);
	}

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		while (vp->v_numoutput > 0) {
			vp->v_flag |= VBWAIT;
			tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
		}
		if (VOP_GETVOBJECT(vp, &object) == 0) {
			while (object->paging_in_progress)
				vm_object_pip_sleep(object, "vnvlbx");
		}
	} while (vp->v_numoutput > 0);

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	mtx_lock(&vp->v_interlock);
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		vm_object_page_remove(object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	mtx_unlock(&vp->v_interlock);

	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
	    !TAILQ_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	return (0);
}
/*
 * Flush out buffers on the specified list.
 */
static int
flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
	struct buf *blist;
	int flags;
	struct vnode *vp;
	int slpflag, slptimeo;
	int *errorp;
{
	struct buf *bp, *nbp;
	int found, error;

	for (found = 0, bp = blist; bp; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))
			continue;
		found += 1;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			error = BUF_TIMELOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL,
			    "flushbuf", slpflag, slptimeo);
			if (error != ENOLCK)
				*errorp = error;
			return (found);
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.  Note that vfs_bio_awrite expects
		 * buffers to reside on a queue, while BUF_WRITE and
		 * brelse do not.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {
			if (bp->b_vp == vp) {
				if (bp->b_flags & B_CLUSTEROK) {
					BUF_UNLOCK(bp);
					vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					bp->b_flags |= B_ASYNC;
					BUF_WRITE(bp);
				}
			} else {
				bremfree(bp);
				(void) BUF_WRITE(bp);
			}
			return (found);
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
	}
	return (found);
}
/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, td, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct thread *td;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
restart:
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					if (bp->b_vp == vp) {
						bp->b_flags |= B_ASYNC;
					} else {
						bp->b_flags &= ~B_ASYNC;
					}
					BUF_WRITE(bp);
				}
				goto restartsync;
			}
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
	}

	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}
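/*
 * Editor's addition, not from the original file: the "round up to the
 * next lbn" above is plain ceiling division, so a length that ends
 * exactly on a block boundary does not claim an extra block.  A quick
 * standalone check:
 */
#if 0
#include <assert.h>

int
main(void)
{
	int blksize = 512;

	assert((1 + blksize - 1) / blksize == 1);	/* 1 byte -> 1 block */
	assert((512 + blksize - 1) / blksize == 1);	/* exact fit -> 1 */
	assert((513 + blksize - 1) / blksize == 2);	/* 1 byte over -> 2 */
	return (0);
}
#endif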
/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 * a vnode.
 *
 * NOTE: We have to deal with the special case of a background bitmap
 * buffer, a situation where two buffers will have the same logical
 * block offset.  We want (1) only the foreground buffer to be accessed
 * in a lookup and (2) must differentiate between the foreground and
 * background buffer in the splay tree algorithm because the splay
 * tree cannot normally handle multiple entities with the same 'index'.
 * We accomplish this by adding differentiating flags to the splay tree's
 * numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}
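/*
 * Illustrative sketch (editor's addition, not from the original file):
 * the composite ordering described in the NOTE above, extracted into a
 * standalone comparator with hypothetical names.  Two buffers at the
 * same lblkno are tie-broken by the background-marker bit, so the
 * foreground buffer always sorts first and lookups that pass xflags 0
 * can never land on the shadow buffer.
 */
#if 0
#define DEMO_BKGRDMARKER	0x01	/* stands in for BX_BKGRDMARKER */

/* Order first by logical block number, then by the marker bit. */
static int
demo_bufcmp(long a_lblkno, int a_xflags, long b_lblkno, int b_xflags)
{
	if (a_lblkno != b_lblkno)
		return (a_lblkno < b_lblkno ? -1 : 1);
	return ((a_xflags & DEMO_BKGRDMARKER) - (b_xflags & DEMO_BKGRDMARKER));
}
#endif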
static
void
buf_vlist_remove(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	struct buf *root;

	if (bp->b_xflags & BX_VNDIRTY) {
		if (bp != vp->v_dirtyblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_dirtyblkroot = root;
		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
	} else {
		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
		if (bp != vp->v_cleanblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
			KASSERT(root == bp, ("splay lookup failed during clean remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_cleanblkroot = root;
		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
	}
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static
void
buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
{
	struct buf *root;

	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_dirtyblkroot = bp;
	} else {
		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_cleanblkroot = bp;
	}
}

#ifndef USE_BUFHASH

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 */
struct buf *
gbincore(struct vnode *vp, daddr_t lblkno)
{
	struct buf *bp;

	GIANT_REQUIRED;

	bp = vp->v_cleanblkroot = buf_splay(lblkno, 0, vp->v_cleanblkroot);
	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	bp = vp->v_dirtyblkroot = buf_splay(lblkno, 0, vp->v_dirtyblkroot);
	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	return (NULL);
}

#endif

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("bgetvp: bp already attached! %p", bp));

	vhold(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	buf_vlist_add(bp, vp, BX_VNCLEAN);
	splx(s);
}
/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
	if (bp->b_object)
		bp->b_object = NULL;
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	splx(s);
}

struct proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	struct mount *mp;
	long starttime;
	int s;
	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kthread_suspend_check(td->td_proc);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (VOP_ISLOCKED(vp, NULL) == 0 &&
			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
				(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
				VOP_UNLOCK(vp, 0, td);
				vn_finished_write(mp);
			}
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: v_tag VT_VFS vps can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp, NULL))
					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			splx(s);
		}

		/*
		 * Do soft update processing.
		 */
		if (softdep_process_worklist_hook != NULL)
			(*softdep_process_worklist_hook)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}
/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 * XXXKSE  only one update?
 */
int
speedup_syncer()
{

	mtx_lock_spin(&sched_lock);
	if (FIRST_THREAD_IN_PROC(updateproc)->td_wchan == &lbolt) /* XXXKSE */
		setrunnable(FIRST_THREAD_IN_PROC(updateproc));
	mtx_unlock_spin(&sched_lock);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}
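/*
 * Illustrative sketch (editor's addition, not from the original file):
 * a toy model of the rushjob pacing implemented in sched_sync() above.
 * Each iteration drains one wheel slot; while demo_rushjob is positive
 * the one-second sleep is skipped, so N rush requests compress roughly
 * N seconds of queued work.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

static int demo_rushjob;

static void
demo_syncer_loop(int rounds)
{
	while (rounds-- > 0) {
		/* ... drain one pending-work slot here ... */
		if (demo_rushjob > 0) {
			demo_rushjob--;
			continue;	/* start over immediately */
		}
		sleep(1);		/* normal one-slot-per-second pace */
	}
}

int
main(void)
{
	demo_rushjob = 2;	/* as if speedup_syncer() succeeded twice */
	demo_syncer_loop(5);	/* finishes in about 3 seconds, not 5 */
	printf("done\n");
	return (0);
}
#endif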
/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	/* XXX REMOVE ME */
	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
		buf_vlist_remove(bp);
		if (bp->b_vp != newvp) {
			vdrop(bp->b_vp);
			bp->b_vp = NULL;	/* for clarification */
		}
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
				if (newvp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		buf_vlist_add(bp, newvp, BX_VNDIRTY);
	} else {
		buf_vlist_add(bp, newvp, BX_VNCLEAN);

		if ((newvp->v_flag & VONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	}
	if (bp->b_vp != newvp) {
		bp->b_vp = newvp;
		vhold(bp->b_vp);
	}
	splx(s);
}

/*
 * Create a vnode for a device.
 * Used for mounting the root filesystem.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	if (vfinddev(dev, VCHR, vpp))
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
	addalias(vp, dev);
	*vpp = vp;
	return (0);
}

/*
 * Add vnode to the alias list hung off the dev_t.
 *
 * The reason for this gunk is that multiple vnodes can reference
 * the same physical device, so checking vp->v_usecount to see
 * how many users there are is inadequate; the v_usecount for
 * the vnodes need to be accumulated.  vcount() does that.
 */
struct vnode *
addaliasu(nvp, nvp_rdev)
	struct vnode *nvp;
	udev_t nvp_rdev;
{
	struct vnode *ovp;
	vop_t **ops;
	dev_t dev;

	if (nvp->v_type == VBLK)
		return (nvp);
	if (nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	dev = udev2dev(nvp_rdev, 0);
	/*
	 * Check to see if we have a bdevvp vnode with no associated
	 * filesystem.  If so, we want to associate the filesystem of
	 * the newly created vnode with the bdevvp vnode and discard
	 * the new vnode rather than leaving the bdevvp vnode lying
	 * around with no associated filesystem.
	 */
	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
		addalias(nvp, dev);
		return (nvp);
	}
	/*
	 * Discard unneeded vnode, but save its node specific data.
	 * Note that if there is a lock, it is carried over in the
	 * node specific data to the replacement vnode.
	 */
	vref(ovp);
	ovp->v_data = nvp->v_data;
	ovp->v_tag = nvp->v_tag;
	nvp->v_data = NULL;
	lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg,
	    nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK);
	if (nvp->v_vnlock)
		ovp->v_vnlock = &ovp->v_lock;
	ops = ovp->v_op;
	ovp->v_op = nvp->v_op;
	if (VOP_ISLOCKED(nvp, curthread)) {
		VOP_UNLOCK(nvp, 0, curthread);
		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
	}
	nvp->v_op = ops;
	insmntque(ovp, nvp->v_mount);
	vrele(nvp);
	vgone(nvp);
	return (ovp);
}
/*
 * This is a local helper function that does the same as addaliasu, but
 * for a dev_t instead of a udev_t.
 */
static void
addalias(nvp, dev)
	struct vnode *nvp;
	dev_t dev;
{

	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
	nvp->v_rdev = dev;
	mtx_lock(&spechash_mtx);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
	mtx_unlock(&spechash_mtx);
}
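/*
 * Editor's addition, not from the original file: the comment before
 * addaliasu() notes that vcount() accumulates v_usecount over every
 * alias of a device.  Under the structures above that walk is
 * essentially the following sketch, taking spechash_mtx to keep the
 * si_hlist stable.
 */
#if 0
static int
demo_vcount(struct vnode *vp)
{
	struct vnode *vq;
	int count = 0;

	mtx_lock(&spechash_mtx);
	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
		count += vq->v_usecount;
	mtx_unlock(&spechash_mtx);
	return (count);
}
#endif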
/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  The vnode lock bit is set if the
 * vnode is being eliminated in vgone.  The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new filesystem type).
 */
int
vget(vp, flags, td)
	register struct vnode *vp;
	int flags;
	struct thread *td;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		mtx_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (vp->v_vxproc == curthread) {
#if 0
			/* this can now occur in normal operation */
			log(LOG_INFO, "VXLOCK interlock avoided\n");
#endif
		} else {
			vp->v_flag |= VXWANT;
			msleep(vp, &vp->v_interlock, PINOD | PDROP, "vget", 0);
			return (ENOENT);
		}
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active.  We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			mtx_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			else
				vlruvp(vp);
			mtx_unlock(&vp->v_interlock);
		}
		return (error);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Increase the reference count of a vnode.
 */
void
vref(struct vnode *vp)
{
	mtx_lock(&vp->v_interlock);
	vp->v_usecount++;
	mtx_unlock(&vp->v_interlock);
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	mtx_lock(&vp->v_interlock);

	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vrele: missed vn_close"));

	if (vp->v_usecount > 1) {
		vp->v_usecount--;
		mtx_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {
		vp->v_usecount--;
		/*
		 * We must call VOP_INACTIVE with the node locked.
		 * If we are doing a vput, the node is already locked,
		 * but, in the case of vrele, we must explicitly lock
		 * the vnode before calling VOP_INACTIVE.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0)
			VOP_INACTIVE(vp, td);
		if (VSHOULDFREE(vp))
			vfree(vp);
		else
			vlruvp(vp);

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		mtx_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

/*
 * Release an already locked vnode.  This gives the same effect as
 * unlock+vrele(), but takes less time and avoids releasing and
 * re-acquiring the lock (as vrele() acquires the lock internally).
 */
void
vput(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	GIANT_REQUIRED;

	KASSERT(vp != NULL, ("vput: null vp"));
	mtx_lock(&vp->v_interlock);
	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vput: missed vn_close"));

	if (vp->v_usecount > 1) {
		vp->v_usecount--;
		VOP_UNLOCK(vp, LK_INTERLOCK, td);
		return;
	}

	if (vp->v_usecount == 1) {
		vp->v_usecount--;
		/*
		 * We must call VOP_INACTIVE with the node locked.
		 * If we are doing a vput, the node is already locked,
		 * so we just need to release the vnode mutex.
		 */
		mtx_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, td);
		if (VSHOULDFREE(vp))
			vfree(vp);
		else
			vlruvp(vp);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}
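/*
 * Editor's addition, not from the original file: the conventional
 * calling patterns for the two release paths above, in hypothetical
 * caller code.  vput() pairs with a vnode still locked from vget();
 * vrele() is for a reference whose lock has already been dropped.
 */
#if 0
static void
demo_use_vnode(struct vnode *vp, struct thread *td)
{
	if (vget(vp, LK_EXCLUSIVE, td) != 0)
		return;			/* vnode was being cleaned out */
	/* ... operate on the locked vnode ... */
	vput(vp);			/* unlock + release in one call */

	if (vget(vp, LK_EXCLUSIVE, td) != 0)
		return;
	VOP_UNLOCK(vp, 0, td);
	/* ... lockless bookkeeping ... */
	vrele(vp);			/* release only; lock already gone */
}
#endif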
2023 */ 2024#ifdef DIAGNOSTIC 2025static int busyprt = 0; /* print out busy vnodes */ 2026SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 2027#endif 2028 2029int 2030vflush(mp, rootrefs, flags) 2031 struct mount *mp; 2032 int rootrefs; 2033 int flags; 2034{ 2035 struct thread *td = curthread; /* XXX */ 2036 struct vnode *vp, *nvp, *rootvp = NULL; 2037 struct vattr vattr; 2038 int busy = 0, error; 2039 2040 if (rootrefs > 0) { 2041 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2042 ("vflush: bad args")); 2043 /* 2044 * Get the filesystem root vnode. We can vput() it 2045 * immediately, since with rootrefs > 0, it won't go away. 2046 */ 2047 if ((error = VFS_ROOT(mp, &rootvp)) != 0) 2048 return (error); 2049 vput(rootvp); 2050 2051 } 2052 mtx_lock(&mntvnode_mtx); 2053loop: 2054 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) { 2055 /* 2056 * Make sure this vnode wasn't reclaimed in getnewvnode(). 2057 * Start over if it was (it won't be on the list anymore). 2058 */ 2059 if (vp->v_mount != mp) 2060 goto loop; 2061 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2062 2063 mtx_unlock(&mntvnode_mtx); 2064 mtx_lock(&vp->v_interlock); 2065 /* 2066 * Skip over vnodes marked VSYSTEM. 2067 */ 2068 if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 2069 mtx_unlock(&vp->v_interlock); 2070 mtx_lock(&mntvnode_mtx); 2071 continue; 2072 } 2073 /* 2074 * If WRITECLOSE is set, flush out unlinked but still open 2075 * files (even if open only for reading) and regular file 2076 * vnodes open for writing. 2077 */ 2078 if ((flags & WRITECLOSE) && 2079 (vp->v_type == VNON || 2080 (VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 && 2081 vattr.va_nlink > 0)) && 2082 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2083 mtx_unlock(&vp->v_interlock); 2084 mtx_lock(&mntvnode_mtx); 2085 continue; 2086 } 2087 2088 /* 2089 * With v_usecount == 0, all we need to do is clear out the 2090 * vnode data structures and we are done. 2091 */ 2092 if (vp->v_usecount == 0) { 2093 vgonel(vp, td); 2094 mtx_lock(&mntvnode_mtx); 2095 continue; 2096 } 2097 2098 /* 2099 * If FORCECLOSE is set, forcibly close the vnode. For block 2100 * or character devices, revert to an anonymous device. For 2101 * all other files, just kill them. 2102 */ 2103 if (flags & FORCECLOSE) { 2104 if (vp->v_type != VCHR) { 2105 vgonel(vp, td); 2106 } else { 2107 vclean(vp, 0, td); 2108 vp->v_op = spec_vnodeop_p; 2109 insmntque(vp, (struct mount *) 0); 2110 } 2111 mtx_lock(&mntvnode_mtx); 2112 continue; 2113 } 2114#ifdef DIAGNOSTIC 2115 if (busyprt) 2116 vprint("vflush: busy vnode", vp); 2117#endif 2118 mtx_unlock(&vp->v_interlock); 2119 mtx_lock(&mntvnode_mtx); 2120 busy++; 2121 } 2122 mtx_unlock(&mntvnode_mtx); 2123 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2124 /* 2125 * If just the root vnode is busy, and if its refcount 2126 * is equal to `rootrefs', then go ahead and kill it. 2127 */ 2128 mtx_lock(&rootvp->v_interlock); 2129 KASSERT(busy > 0, ("vflush: not busy")); 2130 KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs")); 2131 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2132 vgonel(rootvp, td); 2133 busy = 0; 2134 } else 2135 mtx_unlock(&rootvp->v_interlock); 2136 } 2137 if (busy) 2138 return (EBUSY); 2139 for (; rootrefs > 0; rootrefs--) 2140 vrele(rootvp); 2141 return (0); 2142} 2143 2144/* 2145 * This moves a now (likely recyclable) vnode to the end of the 2146 * mountlist.
XXX However, it is temporarily disabled until we 2147 * can clean up ffs_sync() and friends, which have loop restart 2148 * conditions which this code causes to operate O(N^2). 2149 */ 2150static void 2151vlruvp(struct vnode *vp) 2152{ 2153#if 0 2154 struct mount *mp; 2155 2156 if ((mp = vp->v_mount) != NULL) { 2157 mtx_lock(&mntvnode_mtx); 2158 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2159 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2160 mtx_unlock(&mntvnode_mtx); 2161 } 2162#endif 2163} 2164 2165/* 2166 * Disassociate the underlying filesystem from a vnode. 2167 */ 2168static void 2169vclean(vp, flags, td) 2170 struct vnode *vp; 2171 int flags; 2172 struct thread *td; 2173{ 2174 int active; 2175 2176 /* 2177 * Check to see if the vnode is in use. If so we have to reference it 2178 * before we clean it out so that its count cannot fall to zero and 2179 * generate a race against ourselves to recycle it. 2180 */ 2181 if ((active = vp->v_usecount)) 2182 vp->v_usecount++; 2183 2184 /* 2185 * Prevent the vnode from being recycled or brought into use while we 2186 * clean it out. 2187 */ 2188 if (vp->v_flag & VXLOCK) 2189 panic("vclean: deadlock"); 2190 vp->v_flag |= VXLOCK; 2191 vp->v_vxproc = curthread; 2192 /* 2193 * Even if the count is zero, the VOP_INACTIVE routine may still 2194 * have the object locked while it cleans it out. The VOP_LOCK 2195 * ensures that the VOP_INACTIVE routine is done with its work. 2196 * For active vnodes, it ensures that no other activity can 2197 * occur while the underlying object is being cleaned out. 2198 */ 2199 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td); 2200 2201 /* 2202 * Clean out any buffers associated with the vnode. 2203 * If the flush fails, just toss the buffers. 2204 */ 2205 if (flags & DOCLOSE) { 2206 if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL) 2207 (void) vn_write_suspend_wait(vp, NULL, V_WAIT); 2208 if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0) 2209 vinvalbuf(vp, 0, NOCRED, td, 0, 0); 2210 } 2211 2212 VOP_DESTROYVOBJECT(vp); 2213 2214 /* 2215 * Any other processes trying to obtain this lock must first 2216 * wait for VXLOCK to clear, then call the new lock operation. 2217 */ 2218 VOP_UNLOCK(vp, 0, td); 2219 2220 /* 2221 * If purging an active vnode, it must be closed and 2222 * deactivated before being reclaimed. Note that the 2223 * VOP_INACTIVE will unlock the vnode. 2224 */ 2225 if (active) { 2226 if (flags & DOCLOSE) 2227 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2228 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0) 2229 panic("vclean: cannot relock."); 2230 VOP_INACTIVE(vp, td); 2231 } 2232 2233 /* 2234 * Reclaim the vnode. 2235 */ 2236 if (VOP_RECLAIM(vp, td)) 2237 panic("vclean: cannot reclaim"); 2238 2239 if (active) { 2240 /* 2241 * Inline copy of vrele() since VOP_INACTIVE 2242 * has already been called. 2243 */ 2244 mtx_lock(&vp->v_interlock); 2245 if (--vp->v_usecount <= 0) { 2246#ifdef DIAGNOSTIC 2247 if (vp->v_usecount < 0 || vp->v_writecount != 0) { 2248 vprint("vclean: bad ref count", vp); 2249 panic("vclean: ref cnt"); 2250 } 2251#endif 2252 vfree(vp); 2253 } 2254 mtx_unlock(&vp->v_interlock); 2255 } 2256 2257 cache_purge(vp); 2258 vp->v_vnlock = NULL; 2259 lockdestroy(&vp->v_lock); 2260 2261 if (VSHOULDFREE(vp)) 2262 vfree(vp); 2263 2264 /* 2265 * Done with purge, notify sleepers of the grim news. 
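 * Sleepers are threads that found VXLOCK set, marked the vnode
 * VXWANT and called msleep() on it (see vget(), vop_revoke() and
 * vgonel()); clearing VXLOCK and issuing the wakeup() below lets
 * them continue and observe the now-dead vnode.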
2266 */ 2267 vp->v_op = dead_vnodeop_p; 2268 if (vp->v_pollinfo != NULL) 2269 vn_pollgone(vp); 2270 vp->v_tag = VT_NON; 2271 vp->v_flag &= ~VXLOCK; 2272 vp->v_vxproc = NULL; 2273 if (vp->v_flag & VXWANT) { 2274 vp->v_flag &= ~VXWANT; 2275 wakeup(vp); 2276 } 2277} 2278 2279/* 2280 * Eliminate all activity associated with the requested vnode 2281 * and with all vnodes aliased to the requested vnode. 2282 */ 2283int 2284vop_revoke(ap) 2285 struct vop_revoke_args /* { 2286 struct vnode *a_vp; 2287 int a_flags; 2288 } */ *ap; 2289{ 2290 struct vnode *vp, *vq; 2291 dev_t dev; 2292 2293 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke")); 2294 2295 vp = ap->a_vp; 2296 /* 2297 * If a vgone (or vclean) is already in progress, 2298 * wait until it is done and return. 2299 */ 2300 if (vp->v_flag & VXLOCK) { 2301 vp->v_flag |= VXWANT; 2302 msleep(vp, &vp->v_interlock, PINOD | PDROP, 2303 "vop_revokeall", 0); 2304 return (0); 2305 } 2306 dev = vp->v_rdev; 2307 for (;;) { 2308 mtx_lock(&spechash_mtx); 2309 vq = SLIST_FIRST(&dev->si_hlist); 2310 mtx_unlock(&spechash_mtx); 2311 if (!vq) 2312 break; 2313 vgone(vq); 2314 } 2315 return (0); 2316} 2317 2318/* 2319 * Recycle an unused vnode to the front of the free list. 2320 * Release the passed interlock if the vnode will be recycled. 2321 */ 2322int 2323vrecycle(vp, inter_lkp, td) 2324 struct vnode *vp; 2325 struct mtx *inter_lkp; 2326 struct thread *td; 2327{ 2328 2329 mtx_lock(&vp->v_interlock); 2330 if (vp->v_usecount == 0) { 2331 if (inter_lkp) { 2332 mtx_unlock(inter_lkp); 2333 } 2334 vgonel(vp, td); 2335 return (1); 2336 } 2337 mtx_unlock(&vp->v_interlock); 2338 return (0); 2339} 2340 2341/* 2342 * Eliminate all activity associated with a vnode 2343 * in preparation for reuse. 2344 */ 2345void 2346vgone(vp) 2347 register struct vnode *vp; 2348{ 2349 struct thread *td = curthread; /* XXX */ 2350 2351 mtx_lock(&vp->v_interlock); 2352 vgonel(vp, td); 2353} 2354 2355/* 2356 * vgone, with the vp interlock held. 2357 */ 2358void 2359vgonel(vp, td) 2360 struct vnode *vp; 2361 struct thread *td; 2362{ 2363 int s; 2364 2365 /* 2366 * If a vgone (or vclean) is already in progress, 2367 * wait until it is done and return. 2368 */ 2369 if (vp->v_flag & VXLOCK) { 2370 vp->v_flag |= VXWANT; 2371 msleep(vp, &vp->v_interlock, PINOD | PDROP, "vgone", 0); 2372 return; 2373 } 2374 2375 /* 2376 * Clean out the filesystem specific data. 2377 */ 2378 vclean(vp, DOCLOSE, td); 2379 mtx_lock(&vp->v_interlock); 2380 2381 /* 2382 * Delete from old mount point vnode list, if on one. 2383 */ 2384 if (vp->v_mount != NULL) 2385 insmntque(vp, (struct mount *)0); 2386 /* 2387 * If special device, remove it from special device alias list 2388 * if it is on one. 2389 */ 2390 if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) { 2391 mtx_lock(&spechash_mtx); 2392 SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext); 2393 freedev(vp->v_rdev); 2394 mtx_unlock(&spechash_mtx); 2395 vp->v_rdev = NULL; 2396 } 2397 2398 /* 2399 * If it is on the freelist and not already at the head, 2400 * move it to the head of the list. The test of the 2401 * VDOOMED flag and the reference count of zero is because 2402 * it will be removed from the free list by getnewvnode, 2403 * but will not have its reference count incremented until 2404 * after calling vgone. If the reference count were 2405 * incremented first, vgone would (incorrectly) try to 2406 * close the previous instance of the underlying object. 
2407 */ 2408 if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { 2409 s = splbio(); 2410 mtx_lock(&vnode_free_list_mtx); 2411 if (vp->v_flag & VFREE) 2412 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2413 else 2414 freevnodes++; 2415 vp->v_flag |= VFREE; 2416 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2417 mtx_unlock(&vnode_free_list_mtx); 2418 splx(s); 2419 } 2420 2421 vp->v_type = VBAD; 2422 mtx_unlock(&vp->v_interlock); 2423} 2424 2425/* 2426 * Lookup a vnode by device number. 2427 */ 2428int 2429vfinddev(dev, type, vpp) 2430 dev_t dev; 2431 enum vtype type; 2432 struct vnode **vpp; 2433{ 2434 struct vnode *vp; 2435 2436 mtx_lock(&spechash_mtx); 2437 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 2438 if (type == vp->v_type) { 2439 *vpp = vp; 2440 mtx_unlock(&spechash_mtx); 2441 return (1); 2442 } 2443 } 2444 mtx_unlock(&spechash_mtx); 2445 return (0); 2446} 2447 2448/* 2449 * Calculate the total number of references to a special device. 2450 */ 2451int 2452vcount(vp) 2453 struct vnode *vp; 2454{ 2455 struct vnode *vq; 2456 int count; 2457 2458 count = 0; 2459 mtx_lock(&spechash_mtx); 2460 SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext) 2461 count += vq->v_usecount; 2462 mtx_unlock(&spechash_mtx); 2463 return (count); 2464} 2465 2466/* 2467 * Same as above, but using the dev_t as argument 2468 */ 2469int 2470count_dev(dev) 2471 dev_t dev; 2472{ 2473 struct vnode *vp; 2474 2475 vp = SLIST_FIRST(&dev->si_hlist); 2476 if (vp == NULL) 2477 return (0); 2478 return(vcount(vp)); 2479} 2480 2481/* 2482 * Print out a description of a vnode. 2483 */ 2484static char *typename[] = 2485{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 2486 2487void 2488vprint(label, vp) 2489 char *label; 2490 struct vnode *vp; 2491{ 2492 char buf[96]; 2493 2494 if (label != NULL) 2495 printf("%s: %p: ", label, (void *)vp); 2496 else 2497 printf("%p: ", (void *)vp); 2498 printf("type %s, usecount %d, writecount %d, refcount %d,", 2499 typename[vp->v_type], vp->v_usecount, vp->v_writecount, 2500 vp->v_holdcnt); 2501 buf[0] = '\0'; 2502 if (vp->v_flag & VROOT) 2503 strcat(buf, "|VROOT"); 2504 if (vp->v_flag & VTEXT) 2505 strcat(buf, "|VTEXT"); 2506 if (vp->v_flag & VSYSTEM) 2507 strcat(buf, "|VSYSTEM"); 2508 if (vp->v_flag & VXLOCK) 2509 strcat(buf, "|VXLOCK"); 2510 if (vp->v_flag & VXWANT) 2511 strcat(buf, "|VXWANT"); 2512 if (vp->v_flag & VBWAIT) 2513 strcat(buf, "|VBWAIT"); 2514 if (vp->v_flag & VDOOMED) 2515 strcat(buf, "|VDOOMED"); 2516 if (vp->v_flag & VFREE) 2517 strcat(buf, "|VFREE"); 2518 if (vp->v_flag & VOBJBUF) 2519 strcat(buf, "|VOBJBUF"); 2520 if (buf[0] != '\0') 2521 printf(" flags (%s)", &buf[1]); 2522 if (vp->v_data == NULL) { 2523 printf("\n"); 2524 } else { 2525 printf("\n\t"); 2526 VOP_PRINT(vp); 2527 } 2528} 2529 2530#ifdef DDB 2531#include <ddb/ddb.h> 2532/* 2533 * List all of the locked vnodes in the system. 2534 * Called when debugging the kernel. 
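 *
 * Entered from the DDB prompt via the command defined just below,
 * for example:
 *
 *	db> show lockedvnods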
2535 */ 2536DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2537{ 2538 struct thread *td = curthread; /* XXX */ 2539 struct mount *mp, *nmp; 2540 struct vnode *vp; 2541 2542 printf("Locked vnodes\n"); 2543 mtx_lock(&mountlist_mtx); 2544 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2545 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) { 2546 nmp = TAILQ_NEXT(mp, mnt_list); 2547 continue; 2548 } 2549 mtx_lock(&mntvnode_mtx); 2550 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2551 if (VOP_ISLOCKED(vp, NULL)) 2552 vprint((char *)0, vp); 2553 } 2554 mtx_unlock(&mntvnode_mtx); 2555 mtx_lock(&mountlist_mtx); 2556 nmp = TAILQ_NEXT(mp, mnt_list); 2557 vfs_unbusy(mp, td); 2558 } 2559 mtx_unlock(&mountlist_mtx); 2560} 2561#endif 2562 2563/* 2564 * Top level filesystem related information gathering. 2565 */ 2566static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 2567 2568static int 2569vfs_sysctl(SYSCTL_HANDLER_ARGS) 2570{ 2571 int *name = (int *)arg1 - 1; /* XXX */ 2572 u_int namelen = arg2 + 1; /* XXX */ 2573 struct vfsconf *vfsp; 2574 2575#if 1 || defined(COMPAT_PRELITE2) 2576 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 2577 if (namelen == 1) 2578 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2579#endif 2580 2581 /* XXX the below code does not compile; vfs_sysctl does not exist. */ 2582#ifdef notyet 2583 /* all sysctl names at this level are at least name and field */ 2584 if (namelen < 2) 2585 return (ENOTDIR); /* overloaded */ 2586 if (name[0] != VFS_GENERIC) { 2587 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2588 if (vfsp->vfc_typenum == name[0]) 2589 break; 2590 if (vfsp == NULL) 2591 return (EOPNOTSUPP); 2592 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 2593 oldp, oldlenp, newp, newlen, td)); 2594 } 2595#endif 2596 switch (name[1]) { 2597 case VFS_MAXTYPENUM: 2598 if (namelen != 2) 2599 return (ENOTDIR); 2600 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2601 case VFS_CONF: 2602 if (namelen != 3) 2603 return (ENOTDIR); /* overloaded */ 2604 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2605 if (vfsp->vfc_typenum == name[2]) 2606 break; 2607 if (vfsp == NULL) 2608 return (EOPNOTSUPP); 2609 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 2610 } 2611 return (EOPNOTSUPP); 2612} 2613 2614SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 2615 "Generic filesystem"); 2616 2617#if 1 || defined(COMPAT_PRELITE2) 2618 2619static int 2620sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 2621{ 2622 int error; 2623 struct vfsconf *vfsp; 2624 struct ovfsconf ovfs; 2625 2626 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 2627 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2628 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2629 ovfs.vfc_index = vfsp->vfc_typenum; 2630 ovfs.vfc_refcount = vfsp->vfc_refcount; 2631 ovfs.vfc_flags = vfsp->vfc_flags; 2632 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2633 if (error) 2634 return error; 2635 } 2636 return 0; 2637} 2638 2639#endif /* 1 || COMPAT_PRELITE2 */ 2640 2641#define KINFO_VNODESLOP 10 2642/* 2643 * Dump vnode list (via sysctl). 
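 *
 * A userland consumer would retrieve this with sysctl(3); a minimal
 * sketch (hypothetical snippet; error handling and retrying after a
 * grown vnode list are omitted):
 *
 *	int mib[2] = { CTL_KERN, KERN_VNODE };
 *	size_t len;
 *	sysctl(mib, 2, NULL, &len, NULL, 0);	(size the buffer)
 *	struct xvnode *xvp = malloc(len);
 *	sysctl(mib, 2, xvp, &len, NULL, 0);	(fetch the xvnode array)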
2644 */ 2645/* ARGSUSED */ 2646static int 2647sysctl_vnode(SYSCTL_HANDLER_ARGS) 2648{ 2649 struct xvnode *xvn; 2650 struct thread *td = req->td; 2651 struct mount *mp; 2652 struct vnode *vp; 2653 int error, len, n; 2654 2655 req->lock = 0; 2656 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 2657 if (!req->oldptr) 2658 /* Make an estimate */ 2659 return (SYSCTL_OUT(req, 0, len)); 2660 2661 sysctl_wire_old_buffer(req, 0); 2662 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 2663 n = 0; 2664 mtx_lock(&mountlist_mtx); 2665 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 2666 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) 2667 continue; 2668 mtx_lock(&mntvnode_mtx); 2669 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2670 if (n == len) 2671 break; 2672 vref(vp); 2673 xvn[n].xv_size = sizeof *xvn; 2674 xvn[n].xv_vnode = vp; 2675#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 2676 XV_COPY(flag); 2677 XV_COPY(usecount); 2678 XV_COPY(writecount); 2679 XV_COPY(holdcnt); 2680 XV_COPY(id); 2681 XV_COPY(mount); 2682 XV_COPY(numoutput); 2683 XV_COPY(type); 2684#undef XV_COPY 2685 switch (vp->v_type) { 2686 case VREG: 2687 case VDIR: 2688 case VLNK: 2689 xvn[n].xv_dev = vp->v_cachedfs; 2690 xvn[n].xv_ino = vp->v_cachedid; 2691 break; 2692 case VBLK: 2693 case VCHR: 2694 if (vp->v_rdev == NULL) { 2695 vrele(vp); 2696 continue; 2697 } 2698 xvn[n].xv_dev = dev2udev(vp->v_rdev); 2699 break; 2700 case VSOCK: 2701 xvn[n].xv_socket = vp->v_socket; 2702 break; 2703 case VFIFO: 2704 xvn[n].xv_fifo = vp->v_fifoinfo; 2705 break; 2706 case VNON: 2707 case VBAD: 2708 default: 2709 /* shouldn't happen? */ 2710 vrele(vp); 2711 continue; 2712 } 2713 vrele(vp); 2714 ++n; 2715 } 2716 mtx_unlock(&mntvnode_mtx); 2717 mtx_lock(&mountlist_mtx); 2718 vfs_unbusy(mp, td); 2719 if (n == len) 2720 break; 2721 } 2722 mtx_unlock(&mountlist_mtx); 2723 2724 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 2725 free(xvn, M_TEMP); 2726 return (error); 2727} 2728 2729SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 2730 0, 0, sysctl_vnode, "S,vnode", ""); 2731 2732/* 2733 * Check to see if a filesystem is mounted on a block device. 2734 */ 2735int 2736vfs_mountedon(vp) 2737 struct vnode *vp; 2738{ 2739 2740 if (vp->v_rdev->si_mountpoint != NULL) 2741 return (EBUSY); 2742 return (0); 2743} 2744 2745/* 2746 * Unmount all filesystems. The list is traversed in reverse order 2747 * of mounting to avoid dependencies. 2748 */ 2749void 2750vfs_unmountall() 2751{ 2752 struct mount *mp; 2753 struct thread *td; 2754 int error; 2755 2756 if (curthread != NULL) 2757 td = curthread; 2758 else 2759 td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */ 2760 /* 2761 * Since this only runs when rebooting, it is not interlocked. 2762 */ 2763 while(!TAILQ_EMPTY(&mountlist)) { 2764 mp = TAILQ_LAST(&mountlist, mntlist); 2765 error = dounmount(mp, MNT_FORCE, td); 2766 if (error) { 2767 TAILQ_REMOVE(&mountlist, mp, mnt_list); 2768 printf("unmount of %s failed (", 2769 mp->mnt_stat.f_mntonname); 2770 if (error == EBUSY) 2771 printf("BUSY)\n"); 2772 else 2773 printf("%d)\n", error); 2774 } else { 2775 /* The unmount has removed mp from the mountlist */ 2776 } 2777 } 2778} 2779 2780/* 2781 * perform msync on all vnodes under a mount point 2782 * the mount point must be locked. 
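 * Within this file the syncer drives it: sync_fsync() below calls
 *
 *	vfs_msync(mp, MNT_NOWAIT);
 *
 * before VFS_SYNC() on each lazy sync pass.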
2783 */ 2784void 2785vfs_msync(struct mount *mp, int flags) 2786{ 2787 struct vnode *vp, *nvp; 2788 struct vm_object *obj; 2789 int tries; 2790 2791 GIANT_REQUIRED; 2792 2793 tries = 5; 2794 mtx_lock(&mntvnode_mtx); 2795loop: 2796 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) { 2797 if (vp->v_mount != mp) { 2798 if (--tries > 0) 2799 goto loop; 2800 break; 2801 } 2802 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2803 2804 if (vp->v_flag & VXLOCK) /* XXX: what if MNT_WAIT? */ 2805 continue; 2806 2807 if (vp->v_flag & VNOSYNC) /* unlinked, skip it */ 2808 continue; 2809 2810 if ((vp->v_flag & VOBJDIRTY) && 2811 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2812 mtx_unlock(&mntvnode_mtx); 2813 if (!vget(vp, 2814 LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curthread)) { 2815 if (VOP_GETVOBJECT(vp, &obj) == 0) { 2816 vm_object_page_clean(obj, 0, 0, 2817 flags == MNT_WAIT ? 2818 OBJPC_SYNC : OBJPC_NOSYNC); 2819 } 2820 vput(vp); 2821 } 2822 mtx_lock(&mntvnode_mtx); 2823 if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) { 2824 if (--tries > 0) 2825 goto loop; 2826 break; 2827 } 2828 } 2829 } 2830 mtx_unlock(&mntvnode_mtx); 2831} 2832 2833/* 2834 * Create the VM object needed for VMIO and mmap support. This 2835 * is done for all VREG files in the system. Some filesystems may 2836 * gain the additional metadata buffering capability of the 2837 * VMIO code by making the device node VMIO mode as well. 2838 * 2839 * vp must be locked when vfs_object_create is called. 2840 */ 2841int 2842vfs_object_create(vp, td, cred) 2843 struct vnode *vp; 2844 struct thread *td; 2845 struct ucred *cred; 2846{ 2847 GIANT_REQUIRED; 2848 return (VOP_CREATEVOBJECT(vp, cred, td)); 2849} 2850 2851/* 2852 * Mark a vnode as free, putting it up for recycling. 2853 */ 2854void 2855vfree(vp) 2856 struct vnode *vp; 2857{ 2858 int s; 2859 2860 s = splbio(); 2861 mtx_lock(&vnode_free_list_mtx); 2862 KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free")); 2863 if (vp->v_flag & VAGE) { 2864 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2865 } else { 2866 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2867 } 2868 freevnodes++; 2869 mtx_unlock(&vnode_free_list_mtx); 2870 vp->v_flag &= ~VAGE; 2871 vp->v_flag |= VFREE; 2872 splx(s); 2873} 2874 2875/* 2876 * Opposite of vfree() - mark a vnode as in use. 2877 */ 2878void 2879vbusy(vp) 2880 struct vnode *vp; 2881{ 2882 int s; 2883 2884 s = splbio(); 2885 mtx_lock(&vnode_free_list_mtx); 2886 KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free")); 2887 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2888 freevnodes--; 2889 mtx_unlock(&vnode_free_list_mtx); 2890 vp->v_flag &= ~(VFREE|VAGE); 2891 splx(s); 2892} 2893 2894/* 2895 * Record a process's interest in events which might happen to 2896 * a vnode. Because poll uses the historic select-style interface 2897 * internally, this routine serves as both the ``check for any 2898 * pending events'' and the ``record my interest in future events'' 2899 * functions. (These are done together, while the lock is held, 2900 * to avoid race conditions.) 2901 */ 2902int 2903vn_pollrecord(vp, td, events) 2904 struct vnode *vp; 2905 struct thread *td; 2906 short events; 2907{ 2908 2909 if (vp->v_pollinfo == NULL) 2910 v_addpollinfo(vp); 2911 mtx_lock(&vp->v_pollinfo->vpi_lock); 2912 if (vp->v_pollinfo->vpi_revents & events) { 2913 /* 2914 * This leaves events we are not interested 2915 * in available for the other process 2916 * which presumably had requested them 2917 * (otherwise they would never have been 2918 * recorded).
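 *
 * A worked example: with vpi_revents == (POLLIN | POLLOUT) and a
 * caller asking only for POLLIN, we hand back POLLIN and leave
 * POLLOUT posted in vpi_revents for whichever process asked for it.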
2919 */ 2920 events &= vp->v_pollinfo->vpi_revents; 2921 vp->v_pollinfo->vpi_revents &= ~events; 2922 2923 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2924 return events; 2925 } 2926 vp->v_pollinfo->vpi_events |= events; 2927 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 2928 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2929 return 0; 2930} 2931 2932/* 2933 * Note the occurrence of an event. If the VN_POLLEVENT macro is used, 2934 * it is possible for us to miss an event due to race conditions, but 2935 * that condition is expected to be rare, so for the moment it is the 2936 * preferred interface. 2937 */ 2938void 2939vn_pollevent(vp, events) 2940 struct vnode *vp; 2941 short events; 2942{ 2943 2944 if (vp->v_pollinfo == NULL) 2945 v_addpollinfo(vp); 2946 mtx_lock(&vp->v_pollinfo->vpi_lock); 2947 if (vp->v_pollinfo->vpi_events & events) { 2948 /* 2949 * We clear vpi_events so that we don't 2950 * call selwakeup() twice if two events are 2951 * posted before the polling process(es) is 2952 * awakened. This also ensures that we take at 2953 * most one selwakeup() if the polling process 2954 * is no longer interested. However, it does 2955 * mean that only one event can be noticed at 2956 * a time. (Perhaps we should only clear those 2957 * event bits which we note?) XXX 2958 */ 2959 vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */ 2960 vp->v_pollinfo->vpi_revents |= events; 2961 selwakeup(&vp->v_pollinfo->vpi_selinfo); 2962 } 2963 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2964} 2965 2966/* 2967 * Wake up anyone polling on vp because it is being revoked. 2968 * This depends on dead_poll() returning POLLHUP for correct 2969 * behavior. 2970 */ 2971void 2972vn_pollgone(vp) 2973 struct vnode *vp; 2974{ 2975 2976 mtx_lock(&vp->v_pollinfo->vpi_lock); 2977 VN_KNOTE(vp, NOTE_REVOKE); 2978 if (vp->v_pollinfo->vpi_events) { 2979 vp->v_pollinfo->vpi_events = 0; 2980 selwakeup(&vp->v_pollinfo->vpi_selinfo); 2981 } 2982 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2983} 2984 2985 2986 2987/* 2988 * Routine to create and manage a filesystem syncer vnode. 2989 */ 2990#define sync_close ((int (*)(struct vop_close_args *))nullop) 2991static int sync_fsync(struct vop_fsync_args *); 2992static int sync_inactive(struct vop_inactive_args *); 2993static int sync_reclaim(struct vop_reclaim_args *); 2994static int sync_print(struct vop_print_args *); 2995 2996static vop_t **sync_vnodeop_p; 2997static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 2998 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 2999 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 3000 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 3001 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 3002 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 3003 { &vop_lock_desc, (vop_t *) vop_stdlock }, /* lock */ 3004 { &vop_unlock_desc, (vop_t *) vop_stdunlock }, /* unlock */ 3005 { &vop_print_desc, (vop_t *) sync_print }, /* print */ 3006 { &vop_islocked_desc, (vop_t *) vop_stdislocked }, /* islocked */ 3007 { NULL, NULL } 3008}; 3009static struct vnodeopv_desc sync_vnodeop_opv_desc = 3010 { &sync_vnodeop_p, sync_vnodeop_entries }; 3011 3012VNODEOP_SET(sync_vnodeop_opv_desc); 3013 3014/* 3015 * Create a new filesystem syncer vnode for the specified mount point. 
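 *
 * The static (start, incr, next) logic below produces a
 * low-discrepancy sequence of slots. As a worked example, assuming
 * syncer_maxdelay == 32 and all three variables initially zero,
 * successive calls compute next = 16, 8, 24, 4, 12, 20, 28, 2, ...,
 * each then reduced modulo syncdelay, so simultaneous mounts are
 * spread across the syncer wheel.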
3016 */ 3017int 3018vfs_allocate_syncvnode(mp) 3019 struct mount *mp; 3020{ 3021 struct vnode *vp; 3022 static long start, incr, next; 3023 int error; 3024 3025 /* Allocate a new vnode */ 3026 if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) { 3027 mp->mnt_syncer = NULL; 3028 return (error); 3029 } 3030 vp->v_type = VNON; 3031 /* 3032 * Place the vnode onto the syncer worklist. We attempt to 3033 * scatter them about on the list so that they will go off 3034 * at evenly distributed times even if all the filesystems 3035 * are mounted at once. 3036 */ 3037 next += incr; 3038 if (next == 0 || next > syncer_maxdelay) { 3039 start /= 2; 3040 incr /= 2; 3041 if (start == 0) { 3042 start = syncer_maxdelay / 2; 3043 incr = syncer_maxdelay; 3044 } 3045 next = start; 3046 } 3047 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 3048 mp->mnt_syncer = vp; 3049 return (0); 3050} 3051 3052/* 3053 * Do a lazy sync of the filesystem. 3054 */ 3055static int 3056sync_fsync(ap) 3057 struct vop_fsync_args /* { 3058 struct vnode *a_vp; 3059 struct ucred *a_cred; 3060 int a_waitfor; 3061 struct thread *a_td; 3062 } */ *ap; 3063{ 3064 struct vnode *syncvp = ap->a_vp; 3065 struct mount *mp = syncvp->v_mount; 3066 struct thread *td = ap->a_td; 3067 int asyncflag; 3068 3069 /* 3070 * We only need to do something if this is a lazy evaluation. 3071 */ 3072 if (ap->a_waitfor != MNT_LAZY) 3073 return (0); 3074 3075 /* 3076 * Move ourselves to the back of the sync list. 3077 */ 3078 vn_syncer_add_to_worklist(syncvp, syncdelay); 3079 3080 /* 3081 * Walk the list of vnodes pushing all that are dirty and 3082 * not already on the sync list. 3083 */ 3084 mtx_lock(&mountlist_mtx); 3085 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) { 3086 mtx_unlock(&mountlist_mtx); 3087 return (0); 3088 } 3089 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3090 vfs_unbusy(mp, td); 3091 return (0); 3092 } 3093 asyncflag = mp->mnt_flag & MNT_ASYNC; 3094 mp->mnt_flag &= ~MNT_ASYNC; 3095 vfs_msync(mp, MNT_NOWAIT); 3096 VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td); 3097 if (asyncflag) 3098 mp->mnt_flag |= MNT_ASYNC; 3099 vn_finished_write(mp); 3100 vfs_unbusy(mp, td); 3101 return (0); 3102} 3103 3104/* 3105 * The syncer vnode is no longer referenced. 3106 */ 3107static int 3108sync_inactive(ap) 3109 struct vop_inactive_args /* { 3110 struct vnode *a_vp; 3111 struct thread *a_td; 3112 } */ *ap; 3113{ 3114 3115 VOP_UNLOCK(ap->a_vp, 0, ap->a_td); 3116 vgone(ap->a_vp); 3117 return (0); 3118} 3119 3120/* 3121 * The syncer vnode is no longer needed and is being decommissioned. 3122 * 3123 * Modifications to the worklist must be protected at splbio(). 3124 */ 3125static int 3126sync_reclaim(ap) 3127 struct vop_reclaim_args /* { 3128 struct vnode *a_vp; 3129 } */ *ap; 3130{ 3131 struct vnode *vp = ap->a_vp; 3132 int s; 3133 3134 s = splbio(); 3135 vp->v_mount->mnt_syncer = NULL; 3136 if (vp->v_flag & VONWORKLST) { 3137 LIST_REMOVE(vp, v_synclist); 3138 vp->v_flag &= ~VONWORKLST; 3139 } 3140 splx(s); 3141 3142 return (0); 3143} 3144 3145/* 3146 * Print out a syncer vnode.
3147 */ 3148static int 3149sync_print(ap) 3150 struct vop_print_args /* { 3151 struct vnode *a_vp; 3152 } */ *ap; 3153{ 3154 struct vnode *vp = ap->a_vp; 3155 3156 printf("syncer vnode"); 3157 if (vp->v_vnlock != NULL) 3158 lockmgr_printinfo(vp->v_vnlock); 3159 printf("\n"); 3160 return (0); 3161} 3162 3163/* 3164 * extract the dev_t from a VCHR 3165 */ 3166dev_t 3167vn_todev(vp) 3168 struct vnode *vp; 3169{ 3170 if (vp->v_type != VCHR) 3171 return (NODEV); 3172 return (vp->v_rdev); 3173} 3174 3175/* 3176 * Check if vnode represents a disk device 3177 */ 3178int 3179vn_isdisk(vp, errp) 3180 struct vnode *vp; 3181 int *errp; 3182{ 3183 struct cdevsw *cdevsw; 3184 3185 if (vp->v_type != VCHR) { 3186 if (errp != NULL) 3187 *errp = ENOTBLK; 3188 return (0); 3189 } 3190 if (vp->v_rdev == NULL) { 3191 if (errp != NULL) 3192 *errp = ENXIO; 3193 return (0); 3194 } 3195 cdevsw = devsw(vp->v_rdev); 3196 if (cdevsw == NULL) { 3197 if (errp != NULL) 3198 *errp = ENXIO; 3199 return (0); 3200 } 3201 if (!(cdevsw->d_flags & D_DISK)) { 3202 if (errp != NULL) 3203 *errp = ENOTBLK; 3204 return (0); 3205 } 3206 if (errp != NULL) 3207 *errp = 0; 3208 return (1); 3209} 3210 3211/* 3212 * Free data allocated by namei(); see namei(9) for details. 3213 */ 3214void 3215NDFREE(ndp, flags) 3216 struct nameidata *ndp; 3217 const uint flags; 3218{ 3219 if (!(flags & NDF_NO_FREE_PNBUF) && 3220 (ndp->ni_cnd.cn_flags & HASBUF)) { 3221 uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); 3222 ndp->ni_cnd.cn_flags &= ~HASBUF; 3223 } 3224 if (!(flags & NDF_NO_DVP_UNLOCK) && 3225 (ndp->ni_cnd.cn_flags & LOCKPARENT) && 3226 ndp->ni_dvp != ndp->ni_vp) 3227 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread); 3228 if (!(flags & NDF_NO_DVP_RELE) && 3229 (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) { 3230 vrele(ndp->ni_dvp); 3231 ndp->ni_dvp = NULL; 3232 } 3233 if (!(flags & NDF_NO_VP_UNLOCK) && 3234 (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp) 3235 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread); 3236 if (!(flags & NDF_NO_VP_RELE) && 3237 ndp->ni_vp) { 3238 vrele(ndp->ni_vp); 3239 ndp->ni_vp = NULL; 3240 } 3241 if (!(flags & NDF_NO_STARTDIR_RELE) && 3242 (ndp->ni_cnd.cn_flags & SAVESTART)) { 3243 vrele(ndp->ni_startdir); 3244 ndp->ni_startdir = NULL; 3245 } 3246} 3247 3248/* 3249 * Common filesystem object access control check routine. Accepts a 3250 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3251 * and optional call-by-reference privused argument allowing vaccess() 3252 * to indicate to the caller whether privilege was used to satisfy the 3253 * request (obsoleted). Returns 0 on success, or an errno on failure. 3254 */ 3255int 3256vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused) 3257 enum vtype type; 3258 mode_t file_mode; 3259 uid_t file_uid; 3260 gid_t file_gid; 3261 mode_t acc_mode; 3262 struct ucred *cred; 3263 int *privused; 3264{ 3265 mode_t dac_granted; 3266#ifdef CAPABILITIES 3267 mode_t cap_granted; 3268#endif 3269 3270 /* 3271 * Look for a normal, non-privileged way to access the file/directory 3272 * as requested. If it exists, go with that. 3273 */ 3274 3275 if (privused != NULL) 3276 *privused = 0; 3277 3278 dac_granted = 0; 3279 3280 /* Check the owner. 
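 * (The owner is also granted VADMIN implicitly, below.)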
*/ 3281 if (cred->cr_uid == file_uid) { 3282 dac_granted |= VADMIN; 3283 if (file_mode & S_IXUSR) 3284 dac_granted |= VEXEC; 3285 if (file_mode & S_IRUSR) 3286 dac_granted |= VREAD; 3287 if (file_mode & S_IWUSR) 3288 dac_granted |= (VWRITE | VAPPEND); 3289 3290 if ((acc_mode & dac_granted) == acc_mode) 3291 return (0); 3292 3293 goto privcheck; 3294 } 3295 3296 /* Otherwise, check the groups (first match) */ 3297 if (groupmember(file_gid, cred)) { 3298 if (file_mode & S_IXGRP) 3299 dac_granted |= VEXEC; 3300 if (file_mode & S_IRGRP) 3301 dac_granted |= VREAD; 3302 if (file_mode & S_IWGRP) 3303 dac_granted |= (VWRITE | VAPPEND); 3304 3305 if ((acc_mode & dac_granted) == acc_mode) 3306 return (0); 3307 3308 goto privcheck; 3309 } 3310 3311 /* Otherwise, check everyone else. */ 3312 if (file_mode & S_IXOTH) 3313 dac_granted |= VEXEC; 3314 if (file_mode & S_IROTH) 3315 dac_granted |= VREAD; 3316 if (file_mode & S_IWOTH) 3317 dac_granted |= (VWRITE | VAPPEND); 3318 if ((acc_mode & dac_granted) == acc_mode) 3319 return (0); 3320 3321privcheck: 3322 if (!suser_cred(cred, PRISON_ROOT)) { 3323 /* XXX audit: privilege used */ 3324 if (privused != NULL) 3325 *privused = 1; 3326 return (0); 3327 } 3328 3329#ifdef CAPABILITIES 3330 /* 3331 * Build a capability mask to determine if the set of capabilities 3332 * satisfies the requirements when combined with the granted mask 3333 * from above. 3334 * For each capability, if the capability is required, bitwise 3335 * or the request type onto the cap_granted mask. 3336 */ 3337 cap_granted = 0; 3338 3339 if (type == VDIR) { 3340 /* 3341 * For directories, use CAP_DAC_READ_SEARCH to satisfy 3342 * VEXEC requests, instead of CAP_DAC_EXECUTE. 3343 */ 3344 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3345 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT)) 3346 cap_granted |= VEXEC; 3347 } else { 3348 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3349 !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT)) 3350 cap_granted |= VEXEC; 3351 } 3352 3353 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) && 3354 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT)) 3355 cap_granted |= VREAD; 3356 3357 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3358 !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT)) 3359 cap_granted |= (VWRITE | VAPPEND); 3360 3361 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3362 !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT)) 3363 cap_granted |= VADMIN; 3364 3365 if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) { 3366 /* XXX audit: privilege used */ 3367 if (privused != NULL) 3368 *privused = 1; 3369 return (0); 3370 } 3371#endif 3372 3373 return ((acc_mode & VADMIN) ? EPERM : EACCES); 3374} 3375
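/*
 * A worked example of the DAC logic above (illustrative only): for a
 * file with file_mode 0640, the owner is granted dac_granted =
 * VADMIN | VREAD | VWRITE | VAPPEND, so a VREAD | VWRITE request
 * succeeds immediately; a member of file_gid is granted only VREAD,
 * so the same request falls through to the suser_cred() check and,
 * for an unprivileged credential (absent CAPABILITIES support),
 * fails with EACCES.
 */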