/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: head/sys/kern/vfs_subr.c 115536 2003-05-31 20:09:01Z phk $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias(struct vnode *vp, dev_t nvp_rdev);
static void	insmntque(struct vnode *vp, struct mount *mp);
static void	vclean(struct vnode *vp, int flags, struct thread *td);
static void	vlruvp(struct vnode *vp);
static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
		    int slpflag, int slptimeo, int *errorp);
static int	vcanrecycle(struct vnode *vp, struct mount **vnmpp);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long	numvnodes;

SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
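/*
 * Example: these tables back the IFTOVT() and VTTOIF() macros in
 * <sys/vnode.h>.  The inode type bits are shifted down to index
 * iftovt_tab, and the vnode type enum indexes vttoif_tab directly:
 *
 *	IFTOVT(S_IFDIR) == iftovt_tab[(S_IFDIR & S_IFMT) >> 12] == VDIR
 *	VTTOIF(VDIR)    == vttoif_tab[VDIR] == S_IFDIR
 */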
/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If there are fewer free vnodes than
 * this, getnewvnode() will return a newly allocated vnode.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int nameileafonly;
SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata on block
 * devices is delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so are only
 * delayed about a third the time that file data is delayed.  Thus, there
 * are SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	vp->v_synclist
 *	syncer_delayno
 *	syncer_workitem_pending
 *	rushjob
 */
static struct mtx sync_mtx;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
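/*
 * Worked example of the slot arithmetic described above: the pending
 * array is sized to a power of two by hashinit(), so syncer_mask is
 * that size minus one and the index simply wraps around the ring.
 * With syncer_mask == 31 and syncer_delayno == 30, a request delayed
 * by 15 seconds lands in slot (30 + 15) & 31 == 13, which the syncer
 * reaches 15 ticks later.
 */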
/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");
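/*
 * The knob above is what an administrator tunes, e.g. (shell usage,
 * shown for illustration):
 *
 *	sysctl kern.maxvnodes=100000
 *
 * The default is computed from physical memory and kernel heap size in
 * vntblinit() below.
 */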
/* Hook for calling soft updates */
int (*softdep_process_worklist_hook)(struct mount *);

/*
 * This only exists to suppress warnings from unlocked specfs accesses.  It is
 * no longer ok to have an unlocked VFS.
 */
#define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)

/* Print lock violations */
int vfs_badlock_print = 1;

/* Panic on violation */
int vfs_badlock_panic = 1;

/* Check for interlock across VOPs */
int vfs_badlock_mutex = 1;

static void
vfs_badlock(char *msg, char *str, struct vnode *vp)
{
	if (vfs_badlock_print)
		printf("%s: %p %s\n", str, vp, msg);
	if (vfs_badlock_panic)
		Debugger("Lock violation.\n");
}

void
assert_vi_unlocked(struct vnode *vp, char *str)
{
	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is locked but should not be", str, vp);
}

void
assert_vi_locked(struct vnode *vp, char *str)
{
	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is not locked but should be", str, vp);
}

void
assert_vop_locked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
		vfs_badlock("is not locked but should be", str, vp);
}

void
assert_vop_unlocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
		vfs_badlock("is locked but should not be", str, vp);
}

void
assert_vop_elocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
		vfs_badlock("is not exclusive locked but should be", str, vp);
}

void
assert_vop_elocked_other(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
		vfs_badlock("is not exclusive locked by another thread",
		    str, vp);
}

void
assert_vop_slocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
		vfs_badlock("is not locked shared but should be", str, vp);
}
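/*
 * Sketch (illustrative only, not part of this file): a filesystem's VOP
 * implementation states its locking contract with the wrappers above,
 * normally through the ASSERT_VOP_*()/ASSERT_VI_*() macros from
 * <sys/vnode.h> rather than by calling these functions directly.
 * "myfs_fsync" is a hypothetical example.
 */
#if 0
static int
myfs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;

	ASSERT_VOP_LOCKED(vp, "myfs_fsync");	/* caller holds vnode lock */
	ASSERT_VI_UNLOCKED(vp, "myfs_fsync");	/* but not the interlock */
	/* ... flush the vnode's dirty buffers ... */
	return (0);
}
#endif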
void
vop_rename_pre(void *ap)
{
	struct vop_rename_args *a = ap;

	if (a->a_tvp)
		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");

	/* Check the source (from) */
	if (a->a_tdvp != a->a_fdvp)
		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
	if (a->a_tvp != a->a_fvp)
		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");

	/* Check the target */
	if (a->a_tvp)
		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");

	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
}

void
vop_strategy_pre(void *ap)
{
	struct vop_strategy_args *a = ap;
	struct buf *bp;

	bp = a->a_bp;

	/*
	 * Cluster ops lock their component buffers but not the IO container.
	 */
	if ((bp->b_flags & B_CLUSTER) != 0)
		return;

	if (BUF_REFCNT(bp) < 1) {
		if (vfs_badlock_print)
			printf("VOP_STRATEGY: bp is not locked but should be.\n");
		if (vfs_badlock_panic)
			Debugger("Lock violation.\n");
	}
}

void
vop_lookup_pre(void *ap)
{
	struct vop_lookup_args *a = ap;
	struct vnode *dvp;

	dvp = a->a_dvp;

	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
}

void
vop_lookup_post(void *ap, int rc)
{
	struct vop_lookup_args *a = ap;
	struct componentname *cnp;
	struct vnode *dvp;
	struct vnode *vp;
	int flags;

	dvp = a->a_dvp;
	cnp = a->a_cnp;
	vp = *(a->a_vpp);
	flags = cnp->cn_flags;

	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	/*
	 * If this is the last path component for this lookup and LOCKPARENT
	 * is set, OR if there is an error, the directory has to be locked.
	 */
	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
	else if (rc != 0)
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
	else if (dvp != vp)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");

	if (flags & PDIRUNLOCK)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
}

void
vop_unlock_pre(void *ap)
{
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");

	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
}

void
vop_unlock_post(void *ap, int rc)
{
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
}

void
vop_lock_pre(void *ap)
{
	struct vop_lock_args *a = ap;

	if ((a->a_flags & LK_INTERLOCK) == 0)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	else
		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
}

void
vop_lock_post(void *ap, int rc)
{
	struct vop_lock_args *a;

	a = ap;

	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	if (rc == 0)
		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
}

void
v_addpollinfo(struct vnode *vp)
{
	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
}

/*
 * Initialize the vnode management data structures.
 */
static void
vntblinit(void *dummy __unused)
{

	/*
	 * Desiredvnodes is a function of the physical memory size and
	 * the kernel's heap size.  Specifically, desiredvnodes scales
	 * in proportion to the physical memory size until two fifths
	 * of the kernel's heap size is consumed by vnodes and vm
	 * objects.
	 */
	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
	minvnodes = desiredvnodes / 4;
	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
	mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, td)
	struct mount *mp;
	int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, td)
	struct mount *mp;
	struct thread *td;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}
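/*
 * Sketch (illustrative only) of the usual traversal idiom built on
 * vfs_busy()/vfs_unbusy(): walk the mount list, skip mounts that are
 * being unmounted, and re-take mountlist_mtx before stepping to the
 * next entry.  The same pattern appears in vnlru_proc() below.
 */
#if 0
	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		/* ... operate on the busied mount point mp ... */
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);
#endif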
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
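/*
 * Layout sketch of the fsid produced above (illustrative): val[1] is the
 * raw VFS type number, while val[0] is a fake device number with major
 * 255 whose minor packs the low type byte and the 16-bit search counter:
 *
 *	minor = (vfc_typenum & 0xFF) << 24	fs type byte
 *	      | (mntid_base & 0xFF00) << 8	counter, high byte
 *	      | (mntid_base & 0xFF)		counter, low byte
 *
 * Keeping the counter's low byte in the minor's low byte is what gives
 * the "unique mod 2^16" property described in the comment above.
 */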
/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
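/*
 * For example (administrative usage, shown for illustration):
 *
 *	sysctl vfs.timestamp_precision=3
 *
 * selects full nanotime() resolution for new file timestamps, at the
 * cost of a hardware timecounter read per timestamp.
 */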
/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;

	done = 0;
	mtx_lock(&mntvnode_mtx);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		if (vp->v_type != VNON &&
		    vp->v_type != VBAD &&
		    VI_TRYLOCK(vp)) {
			if (VMIGHTFREE(vp) &&		/* critical path opt */
			    (vp->v_object == NULL ||
			    vp->v_object->resident_page_count < trigger)) {
				mtx_unlock(&mntvnode_mtx);
				vgonel(vp, curthread);
				done++;
				mtx_lock(&mntvnode_mtx);
			} else
				VI_UNLOCK(vp);
		}
		--count;
	}
	mtx_unlock(&mntvnode_mtx);
	return done;
}
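/*
 * Worked example of the trigger computation above (numbers are
 * illustrative): with cnt.v_page_count == 262144 (1 GB of 4 KB pages)
 * and desiredvnodes == 65536, trigger == 262144 * 2 / 65536 == 8, so
 * only vnodes backed by fewer than 8 resident pages are recycled here.
 */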
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int s;
	int done;
	struct proc *p = vnlruproc;
	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kthread_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			mtx_unlock(&vnode_free_list_mtx);
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(vnlruproc, PVFS, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)


/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Check to see if a free vnode can be recycled.  If it can,
 * return it locked with the vn lock, but not the interlock.  Also
 * get the vn_start_write lock.  Otherwise indicate the error.
 */
static int
vcanrecycle(struct vnode *vp, struct mount **vnmpp)
{
	struct thread *td = curthread;
	vm_object_t object;
	int error;

	/* Don't recycle if we can't get the interlock */
	if (!VI_TRYLOCK(vp))
		return (EWOULDBLOCK);

	/* We should be able to immediately acquire this */
	/* XXX This looks like it should panic if it fails */
	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) {
		if (VOP_ISLOCKED(vp, td))
			panic("vcanrecycle: locked vnode");
		return (EWOULDBLOCK);
	}

	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, vnmpp, V_NOWAIT) != 0) {
		error = EBUSY;
		goto done;
	}

	/*
	 * Don't recycle if we still have cached pages.
	 */
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		VM_OBJECT_LOCK(object);
		if (object->resident_page_count ||
		    object->ref_count) {
			VM_OBJECT_UNLOCK(object);
			error = EBUSY;
			goto done;
		}
		VM_OBJECT_UNLOCK(object);
	}
	if (LIST_FIRST(&vp->v_cache_src)) {
		/*
		 * note: nameileafonly sysctl is temporary,
		 * for debugging only, and will eventually be
		 * removed.
		 */
		if (nameileafonly > 0) {
			/*
			 * Do not reuse namei-cached directory
			 * vnodes that have cached
			 * subdirectories.
			 */
			if (cache_leaf_test(vp) < 0) {
				error = EISDIR;
				goto done;
			}
		} else if (nameileafonly < 0 ||
			    vmiodirenable == 0) {
			/*
			 * Do not reuse namei-cached directory
			 * vnodes if nameileafonly is -1 or
			 * if VMIO backing for directories is
			 * turned off (otherwise we reuse them
			 * too quickly).
			 */
			error = EBUSY;
			goto done;
		}
	}
	return (0);
done:
	VOP_UNLOCK(vp, 0, td);
	return (error);
}
/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	const char *tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = NULL;
	struct vpollinfo *pollinfo = NULL;
	struct mount *vnmp;

	mtx_lock(&vnode_free_list_mtx);

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes) {
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		mtx_unlock(&vnode_free_list_mtx);
		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
		mtx_lock(&vnode_free_list_mtx);
	}

	/*
	 * Attempt to reuse a vnode already on the free list, allocating
	 * a new vnode if we can't find one or if we have not reached a
	 * good minimum for good LRU performance.
	 */

	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
		int error;
		int count;

		for (count = 0; count < freevnodes; count++) {
			vp = TAILQ_FIRST(&vnode_free_list);

			KASSERT(vp->v_usecount == 0 &&
			    (vp->v_iflag & VI_DOINGINACT) == 0,
			    ("getnewvnode: free vnode isn't"));

			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			/*
			 * We have to drop the free list mtx to avoid lock
			 * order reversals with interlock.
			 */
			mtx_unlock(&vnode_free_list_mtx);
			error = vcanrecycle(vp, &vnmp);
			mtx_lock(&vnode_free_list_mtx);
			if (error == 0)
				break;
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			vp = NULL;
		}
	}
	if (vp) {
		freevnodes--;
		mtx_unlock(&vnode_free_list_mtx);

		cache_purge(vp);
		VI_LOCK(vp);
		vp->v_iflag |= VI_DOOMED;
		vp->v_iflag &= ~VI_FREE;
		if (vp->v_type != VBAD) {
			VOP_UNLOCK(vp, 0, td);
			vgonel(vp, td);
			VI_LOCK(vp);
		} else {
			VOP_UNLOCK(vp, 0, td);
		}
		vn_finished_write(vnmp);

#ifdef INVARIANTS
		{
			if (vp->v_data)
				panic("cleaned vnode isn't");
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			if (vp->v_writecount != 0)
				panic("Non-zero write count");
		}
#endif
		if ((pollinfo = vp->v_pollinfo) != NULL) {
			/*
			 * To avoid lock order reversals, the call to
			 * uma_zfree() must be delayed until the vnode
			 * interlock is released.
			 */
			vp->v_pollinfo = NULL;
		}
#ifdef MAC
		mac_destroy_vnode(vp);
#endif
		vp->v_iflag = 0;
		vp->v_vflag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		lockdestroy(vp->v_vnlock);
		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
	} else {
		numvnodes++;
		mtx_unlock(&vnode_free_list_mtx);

		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
		VI_LOCK(vp);
		vp->v_dd = vp;
		vp->v_vnlock = &vp->v_lock;
		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	vp->v_cachedid = -1;
	VI_UNLOCK(vp);
	if (pollinfo != NULL) {
		mtx_destroy(&pollinfo->vpi_lock);
		uma_zfree(vnodepoll_zone, pollinfo);
	}
#ifdef MAC
	mac_init_vnode(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_associate_vnode_singlelabel(mp, vp);
#endif
	insmntque(vp, mp);

	return (0);
}
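/*
 * Sketch (illustrative only) of how a filesystem obtains a vnode;
 * "myfs" and myfs_vnodeop_p are hypothetical.  bdevvp() below is a
 * real in-tree caller following the same pattern.
 */
#if 0
	error = getnewvnode("myfs", mp, myfs_vnodeop_p, &vp);
	if (error)
		return (error);
	vp->v_data = ip;		/* attach fs-private inode data */
	vp->v_type = VREG;
#endif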
/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	mtx_lock(&mntvnode_mtx);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		mtx_unlock(&mntvnode_mtx);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize++;
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		VI_LOCK(vp);
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
			vp->v_iflag &= ~VI_BWAIT;
			wakeup(&vp->v_numoutput);
		}
		VI_UNLOCK(vp);
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int slpflag, slptimeo;
{
	struct buf *blist;
	int s, error;
	vm_object_t object;

	GIANT_REQUIRED;

	ASSERT_VOP_LOCKED(vp, "vinvalbuf");

	VI_LOCK(vp);
	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			error = msleep(&vp->v_numoutput, VI_MTX(vp),
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				VI_UNLOCK(vp);
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			VI_UNLOCK(vp);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			VI_LOCK(vp);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	for (error = 0;;) {
		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		break;
	}
	if (error) {
		splx(s);
		VI_UNLOCK(vp);
		return (error);
	}

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		while (vp->v_numoutput > 0) {
			vp->v_iflag |= VI_BWAIT;
			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
		}
		VI_UNLOCK(vp);
		if (VOP_GETVOBJECT(vp, &object) == 0) {
			VM_OBJECT_LOCK(object);
			vm_object_pip_wait(object, "vnvlbx");
			VM_OBJECT_UNLOCK(object);
		}
		VI_LOCK(vp);
	} while (vp->v_numoutput > 0);
	VI_UNLOCK(vp);

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		VM_OBJECT_LOCK(object);
		vm_object_page_remove(object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
		VM_OBJECT_UNLOCK(object);
	}

#ifdef INVARIANTS
	VI_LOCK(vp);
	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	VI_UNLOCK(vp);
#endif
	return (0);
}
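/*
 * Typical call (illustrative): flush everything while preserving dirty
 * data, with the vnode lock held:
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, td, 0, 0);
 *
 * Passing 0 for flags discards dirty buffers instead of writing them.
 */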
/*
 * Flush out buffers on the specified list.
 */
static int
flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
	struct buf *blist;
	int flags;
	struct vnode *vp;
	int slpflag, slptimeo;
	int *errorp;
{
	struct buf *bp, *nbp;
	int found, error;

	ASSERT_VI_LOCKED(vp, "flushbuflist");

	for (found = 0, bp = blist; bp; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		found += 1;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			if (error != ENOLCK)
				*errorp = error;
			goto done;
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.  Note that vfs_bio_awrite expects
		 * buffers to reside on a queue, while BUF_WRITE and
		 * brelse do not.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {

			if (bp->b_vp == vp) {
				if (bp->b_flags & B_CLUSTEROK) {
					vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					bp->b_flags |= B_ASYNC;
					BUF_WRITE(bp);
				}
			} else {
				bremfree(bp);
				(void) BUF_WRITE(bp);
			}
			goto done;
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
		VI_LOCK(vp);
	}
	return (found);
done:
	VI_LOCK(vp);
	return (found);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, td, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct thread *td;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
restart:
	VI_LOCK(vp);
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
				    VI_MTX(vp)) == ENOLCK)
					goto restart;

				bremfree(bp);
				bp->b_flags |= (B_INVAL | B_RELBUF);
				bp->b_flags &= ~B_ASYNC;
				brelse(bp);
				anyfreed = 1;

				if (nbp &&
				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
				VI_LOCK(vp);
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
				    VI_MTX(vp)) == ENOLCK)
					goto restart;
				bremfree(bp);
				bp->b_flags |= (B_INVAL | B_RELBUF);
				bp->b_flags &= ~B_ASYNC;
				brelse(bp);
				anyfreed = 1;
				if (nbp &&
				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
				VI_LOCK(vp);
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno > 0)
				continue;
			/*
			 * Since we hold the vnode lock this should only
			 * fail if we're racing with the buf daemon.
			 */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    VI_MTX(vp)) == ENOLCK) {
				goto restart;
			}
			KASSERT((bp->b_flags & B_DELWRI),
			    ("buf(%p) on dirty queue without DELWRI.", bp));

			bremfree(bp);
			bawrite(bp);
			VI_LOCK(vp);
			goto restartsync;
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_iflag |= VI_BWAIT;
		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
	}
	VI_UNLOCK(vp);
	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 *		 a vnode.
 *
 * NOTE: We have to deal with the special case of a background bitmap
 * buffer, a situation where two buffers will have the same logical
 * block offset.  We want (1) only the foreground buffer to be accessed
 * in a lookup and (2) must differentiate between the foreground and
 * background buffer in the splay tree algorithm because the splay
 * tree cannot normally handle multiple entities with the same 'index'.
 * We accomplish this by adding differentiating flags to the splay tree's
 * numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}

static
void
buf_vlist_remove(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	struct buf *root;

	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
	if (bp->b_xflags & BX_VNDIRTY) {
		if (bp != vp->v_dirtyblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_dirtyblkroot = root;
		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
		vp->v_dirtybufcnt--;
	} else {
		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
		if (bp != vp->v_cleanblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
			KASSERT(root == bp, ("splay lookup failed during clean remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_cleanblkroot = root;
		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
		vp->v_cleanbufcnt--;
	}
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static
void
buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
{
	struct buf *root;

	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_dirtybufcnt++;
		vp->v_dirtyblkroot = bp;
	} else {
		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_cleanbufcnt++;
		vp->v_cleanblkroot = bp;
	}
}

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 *
 * During a "make buildworld" the desired buffer is found at one of
 * the roots more than 60% of the time.  Thus, checking both roots
 * before performing either splay eliminates unnecessary splays on the
 * first tree splayed.
 */
struct buf *
gbincore(struct vnode *vp, daddr_t lblkno)
{
	struct buf *bp;

	GIANT_REQUIRED;

	ASSERT_VI_LOCKED(vp, "gbincore");
	if ((bp = vp->v_cleanblkroot) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = vp->v_dirtyblkroot) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = vp->v_cleanblkroot) != NULL) {
		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	if ((bp = vp->v_dirtyblkroot) != NULL) {
		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	return (NULL);
}
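/*
 * Usage sketch (illustrative only): callers look up a buffer by
 * (vnode, logical block) with the vnode interlock held.  The returned
 * buffer is not locked, so it must be locked before use.
 */
#if 0
	VI_LOCK(vp);
	bp = gbincore(vp, lblkno);
	VI_UNLOCK(vp);
	if (bp != NULL) {
		/* block is cached; BUF_LOCK() it before touching b_data */
	}
#endif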
/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("bgetvp: bp already attached! %p", bp));

	ASSERT_VI_LOCKED(vp, "bgetvp");
	vholdl(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	buf_vlist_add(bp, vp, BX_VNCLEAN);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	VI_LOCK(vp);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_iflag &= ~VI_ONWORKLST;
		mtx_lock(&sync_mtx);
		LIST_REMOVE(vp, v_synclist);
		mtx_unlock(&sync_mtx);
	}
	vdropl(vp);
	VI_UNLOCK(vp);
	bp->b_vp = (struct vnode *) 0;
	if (bp->b_object)
		bp->b_object = NULL;
	splx(s);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();
	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");

	mtx_lock(&sync_mtx);
	if (vp->v_iflag & VI_ONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	else
		vp->v_iflag |= VI_ONWORKLST;

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	mtx_unlock(&sync_mtx);

	splx(s);
}

struct proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
static void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	struct mount *mp;
	long starttime;
	int s;
	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);  /* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kthread_suspend_check(td->td_proc);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		mtx_lock(&sync_mtx);
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			mtx_unlock(&sync_mtx);
			if (VOP_ISLOCKED(vp, NULL) == 0 &&
			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
				(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
				VOP_UNLOCK(vp, 0, td);
				vn_finished_write(mp);
			}
			s = splbio();
			mtx_lock(&sync_mtx);
			if (LIST_FIRST(slp) == vp) {
				mtx_unlock(&sync_mtx);
				/*
				 * Note: VFS vnodes can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
				VI_LOCK(vp);
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp, NULL)) {
					panic("sched_sync: fsync failed "
					      "vp %p tag %s", vp, vp->v_tag);
				}
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
				VI_UNLOCK(vp);
				mtx_lock(&sync_mtx);
			}
			splx(s);
		}
		mtx_unlock(&sync_mtx);

		/*
		 * Do soft update processing.
		 */
		if (softdep_process_worklist_hook != NULL)
			(*softdep_process_worklist_hook)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		mtx_lock(&sync_mtx);
		if (rushjob > 0) {
			rushjob -= 1;
			mtx_unlock(&sync_mtx);
			continue;
		}
		mtx_unlock(&sync_mtx);
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 * XXXKSE  only one update?
 */
int
speedup_syncer()
{
	struct thread *td;
	int ret = 0;

	td = FIRST_THREAD_IN_PROC(updateproc);
	mtx_lock_spin(&sched_lock);
	if (td->td_wchan == &lbolt) {
		unsleep(td);
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	}
	mtx_unlock_spin(&sched_lock);
	mtx_lock(&sync_mtx);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		ret = 1;
	}
	mtx_unlock(&sync_mtx);
	return (ret);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	/* XXX REMOVE ME */
	VI_LOCK(bp->b_vp);
	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
	VI_UNLOCK(bp->b_vp);
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}
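/*
 * Note (illustrative): p-buffers are the transient buffers the pagers
 * use for I/O.  A pager brackets each transfer with the pair above,
 * roughly:
 *
 *	pbgetvp(vp, bp);
 *	... issue and await the I/O ...
 *	pbrelvp(bp);
 *
 * No hold count is taken on the vnode, which is why B_PAGING marks the
 * buffer as only loosely associated with it.
 */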
/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	struct vnode *vp;
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.  Unlock via the saved
	 * pointer, since bp->b_vp may be cleared below.
	 */
	vp = bp->b_vp;
	VI_LOCK(vp);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
		buf_vlist_remove(bp);
		if (bp->b_vp != newvp) {
			vdropl(bp->b_vp);
			bp->b_vp = NULL;	/* for clarification */
		}
	}
	VI_UNLOCK(vp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	VI_LOCK(newvp);
	if (bp->b_flags & B_DELWRI) {
		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
				if (newvp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		buf_vlist_add(bp, newvp, BX_VNDIRTY);
	} else {
		buf_vlist_add(bp, newvp, BX_VNCLEAN);

		if ((newvp->v_iflag & VI_ONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			mtx_lock(&sync_mtx);
			LIST_REMOVE(newvp, v_synclist);
			mtx_unlock(&sync_mtx);
			newvp->v_iflag &= ~VI_ONWORKLST;
		}
	}
	if (bp->b_vp != newvp) {
		bp->b_vp = newvp;
		vholdl(bp->b_vp);
	}
	VI_UNLOCK(newvp);
	splx(s);
}

/*
 * Create a vnode for a device.
 * Used for mounting the root filesystem.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	if (vfinddev(dev, VCHR, vpp))
		return (0);
	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
	addalias(vp, dev);
	*vpp = vp;
	return (0);
}

static void
v_incr_usecount(struct vnode *vp, int delta)
{
	vp->v_usecount += delta;
	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
		mtx_lock(&spechash_mtx);
		vp->v_rdev->si_usecount += delta;
		mtx_unlock(&spechash_mtx);
	}
}
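/*
 * Illustrative note: keeping si_usecount in step with v_usecount here
 * is what lets vcount() report the total number of references to a
 * device across all of its aliased vnodes, as described before
 * addaliasu() below.
 */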
	 * If so, we want to associate the filesystem of
	 * the newly created vnode with the bdevvp vnode and
	 * discard the new vnode rather than leaving the
	 * bdevvp vnode lying around with no associated filesystem.
	 */
	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
		addalias(nvp, dev);
		return (nvp);
	}
	/*
	 * Discard unneeded vnode, but save its node specific data.
	 * Note that if there is a lock, it is carried over in the
	 * node specific data to the replacement vnode.
	 */
	vref(ovp);
	ovp->v_data = nvp->v_data;
	ovp->v_tag = nvp->v_tag;
	nvp->v_data = NULL;
	lockdestroy(ovp->v_vnlock);
	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
	ops = ovp->v_op;
	ovp->v_op = nvp->v_op;
	if (VOP_ISLOCKED(nvp, curthread)) {
		VOP_UNLOCK(nvp, 0, curthread);
		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
	}
	nvp->v_op = ops;
	insmntque(ovp, nvp->v_mount);
	vrele(nvp);
	vgone(nvp);
	return (ovp);
}

/*
 * This is a local helper function that does the same as addaliasu(),
 * but for a dev_t instead of a udev_t.
 */
static void
addalias(nvp, dev)
	struct vnode *nvp;
	dev_t dev;
{

	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
	nvp->v_rdev = dev;
	VI_LOCK(nvp);
	mtx_lock(&spechash_mtx);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
	dev->si_usecount += nvp->v_usecount;
	mtx_unlock(&spechash_mtx);
	VI_UNLOCK(nvp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  The vnode lock bit is set if the
 * vnode is being eliminated in vgone.  The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new filesystem type).
 */
int
vget(vp, flags, td)
	register struct vnode *vp;
	int flags;
	struct thread *td;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VI_XLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		VI_LOCK(vp);
	if (vp->v_iflag & VI_XLOCK && vp->v_vxproc != curthread) {
		vp->v_iflag |= VI_XWANT;
		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
		return (ENOENT);
	}

	v_incr_usecount(vp, 1);

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active.  We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			VI_LOCK(vp);
			v_incr_usecount(vp, -1);
			if (VSHOULDFREE(vp))
				vfree(vp);
			else
				vlruvp(vp);
			VI_UNLOCK(vp);
		}
		return (error);
	}
	VI_UNLOCK(vp);
	return (0);
}
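/*
 * Illustrative sketch (not part of this file): the usual vget() calling
 * pattern described above.  "example_grab_vnode" is a hypothetical
 * consumer; the vnode is assumed to have been found via some lookup
 * structure.  vget() can fail with ENOENT when the vnode is being
 * reclaimed (VI_XLOCK set), in which case the lookup must be retried.
 */
#if 0
static int
example_grab_vnode(struct vnode *vp, struct thread *td)
{
	int error;

	/* Take a reference and an exclusive vnode lock in one step. */
	error = vget(vp, LK_EXCLUSIVE, td);
	if (error != 0)
		return (error);	/* e.g. ENOENT: vnode was reclaimed */

	/* ... operate on the locked, referenced vnode here ... */

	vput(vp);		/* drops both the lock and the reference */
	return (0);
}
#endif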
/*
 * Increase the reference count of a vnode.
 */
void
vref(struct vnode *vp)
{
	VI_LOCK(vp);
	v_incr_usecount(vp, 1);
	VI_UNLOCK(vp);
}

/*
 * Return reference count of a vnode.
 *
 * The results of this call are only guaranteed when some mechanism other
 * than the VI lock is used to stop other processes from gaining references
 * to the vnode.  This may be the case if the caller holds the only reference.
 * This is also useful when stale data is acceptable as race conditions may
 * be accounted for by some other means.
 */
int
vrefcnt(struct vnode *vp)
{
	int usecnt;

	VI_LOCK(vp);
	usecnt = vp->v_usecount;
	VI_UNLOCK(vp);

	return (usecnt);
}


/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	VI_LOCK(vp);

	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vrele: missed vn_close"));

	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
	    vp->v_usecount == 1)) {
		v_incr_usecount(vp, -1);
		VI_UNLOCK(vp);

		return;
	}

	if (vp->v_usecount == 1) {
		v_incr_usecount(vp, -1);
		/*
		 * We must call VOP_INACTIVE with the node locked.  Mark
		 * as VI_DOINGINACT to avoid recursion.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
			VI_LOCK(vp);
			vp->v_iflag |= VI_DOINGINACT;
			VI_UNLOCK(vp);
			VOP_INACTIVE(vp, td);
			VI_LOCK(vp);
			KASSERT(vp->v_iflag & VI_DOINGINACT,
			    ("vrele: lost VI_DOINGINACT"));
			vp->v_iflag &= ~VI_DOINGINACT;
			VI_UNLOCK(vp);
		}
		VI_LOCK(vp);
		if (VSHOULDFREE(vp))
			vfree(vp);
		else
			vlruvp(vp);
		VI_UNLOCK(vp);

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
#endif
		VI_UNLOCK(vp);
		panic("vrele: negative ref cnt");
	}
}

/*
 * Release an already locked vnode.  This gives the same effect as
 * unlock+vrele(), but takes less time and avoids releasing and
 * re-acquiring the lock (as vrele() acquires the lock internally.)
 */
void
vput(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	GIANT_REQUIRED;

	KASSERT(vp != NULL, ("vput: null vp"));
	VI_LOCK(vp);
	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vput: missed vn_close"));

	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
	    vp->v_usecount == 1)) {
		v_incr_usecount(vp, -1);
		VOP_UNLOCK(vp, LK_INTERLOCK, td);
		return;
	}

	if (vp->v_usecount == 1) {
		v_incr_usecount(vp, -1);
		/*
		 * We must call VOP_INACTIVE with the node locked, so
		 * we just need to release the vnode mutex.  Mark as
		 * VI_DOINGINACT to avoid recursion.
		 */
		vp->v_iflag |= VI_DOINGINACT;
		VI_UNLOCK(vp);
		VOP_INACTIVE(vp, td);
		VI_LOCK(vp);
		KASSERT(vp->v_iflag & VI_DOINGINACT,
		    ("vput: lost VI_DOINGINACT"));
		vp->v_iflag &= ~VI_DOINGINACT;
		if (VSHOULDFREE(vp))
			vfree(vp);
		else
			vlruvp(vp);
		VI_UNLOCK(vp);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}
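/*
 * Illustrative sketch (not part of this file): vput() versus vrele().
 * The two branches below are equivalent, but vput() saves re-acquiring
 * the interlock.  "example_drop_vnode" is a hypothetical helper.
 */
#if 0
static void
example_drop_vnode(struct vnode *vp, struct thread *td, int locked)
{
	if (locked)
		vput(vp);		/* unlock + release in one call */
	else
		vrele(vp);		/* release only; lock is not held */
}
#endif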
/*
 * Somebody doesn't want the vnode recycled.
 */
void
vhold(struct vnode *vp)
{
	VI_LOCK(vp);
	vholdl(vp);
	VI_UNLOCK(vp);
}

void
vholdl(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	vp->v_holdcnt++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	splx(s);
}

/*
 * Note that there is one less party that cares about this vnode;
 * vdrop() is the opposite of vhold().
 */
void
vdrop(struct vnode *vp)
{
	VI_LOCK(vp);
	vdropl(vp);
	VI_UNLOCK(vp);
}

void
vdropl(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	if (vp->v_holdcnt <= 0)
		panic("vdrop: holdcnt");
	vp->v_holdcnt--;
	if (VSHOULDFREE(vp))
		vfree(vp);
	else
		vlruvp(vp);
	splx(s);
}
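/*
 * Illustrative sketch (not part of this file): vhold()/vdrop() pin a
 * vnode against recycling without granting a user reference, e.g. while
 * kernel-internal state (such as a buffer, see reassignbuf() above)
 * still points at it.  "example_pin_vnode" is a hypothetical helper.
 */
#if 0
static void
example_pin_vnode(struct vnode *vp)
{
	vhold(vp);	/* v_holdcnt++; vnode will not be recycled */

	/* ... internal state may safely refer to vp here ... */

	vdrop(vp);	/* v_holdcnt--; vnode may be freed again */
}
#endif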
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * an error is returned if any are found (nb: this is a user error, not
 * a system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_usecount exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, rootrefs, flags)
	struct mount *mp;
	int rootrefs;
	int flags;
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp, *nvp, *rootvp = NULL;
	struct vattr vattr;
	int busy = 0, error;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
			return (error);
		vput(rootvp);

	}
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);

		VI_LOCK(vp);
		mtx_unlock(&mntvnode_mtx);
		vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, td);
		/*
		 * This vnode could have been reclaimed while we were
		 * waiting for the lock since we are not holding a
		 * reference.
		 * Start over if the vnode was reclaimed.
		 */
		if (vp->v_mount != mp) {
			VOP_UNLOCK(vp, 0, td);
			mtx_lock(&mntvnode_mtx);
			goto loop;
		}
		/*
		 * Skip over vnodes marked VV_SYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
			VOP_UNLOCK(vp, 0, td);
			mtx_lock(&mntvnode_mtx);
			continue;
		}
		/*
		 * If WRITECLOSE is set, flush out unlinked but still open
		 * files (even if open only for reading) and regular file
		 * vnodes open for writing.
		 */
		if (flags & WRITECLOSE) {
			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
			VI_LOCK(vp);

			if ((vp->v_type == VNON ||
			    (error == 0 && vattr.va_nlink > 0)) &&
			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
				VOP_UNLOCK(vp, LK_INTERLOCK, td);
				mtx_lock(&mntvnode_mtx);
				continue;
			}
		} else
			VI_LOCK(vp);

		VOP_UNLOCK(vp, 0, td);

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, td);
			mtx_lock(&mntvnode_mtx);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VCHR) {
				vgonel(vp, td);
			} else {
				vclean(vp, 0, td);
				VI_UNLOCK(vp);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			mtx_lock(&mntvnode_mtx);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		VI_UNLOCK(vp);
		mtx_lock(&mntvnode_mtx);
		busy++;
	}
	mtx_unlock(&mntvnode_mtx);
	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		VI_LOCK(rootvp);
		KASSERT(busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
		if (busy == 1 && rootvp->v_usecount == rootrefs) {
			vgonel(rootvp, td);
			busy = 0;
		} else
			VI_UNLOCK(rootvp);
	}
	if (busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * This moves a now (likely recyclable) vnode to the end of the
 * mountlist.  XXX However, it is temporarily disabled until we
 * can clean up ffs_sync() and friends, which have loop restart
 * conditions which this code causes to operate O(N^2).
 */
static void
vlruvp(struct vnode *vp)
{
#if 0
	struct mount *mp;

	if ((mp = vp->v_mount) != NULL) {
		mtx_lock(&mntvnode_mtx);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		mtx_unlock(&mntvnode_mtx);
	}
#endif
}

/*
 * Disassociate the underlying filesystem from a vnode.
2527 */ 2528static void 2529vclean(vp, flags, td) 2530 struct vnode *vp; 2531 int flags; 2532 struct thread *td; 2533{ 2534 int active; 2535 2536 ASSERT_VI_LOCKED(vp, "vclean"); 2537 /* 2538 * Check to see if the vnode is in use. If so we have to reference it 2539 * before we clean it out so that its count cannot fall to zero and 2540 * generate a race against ourselves to recycle it. 2541 */ 2542 if ((active = vp->v_usecount)) 2543 v_incr_usecount(vp, 1); 2544 2545 /* 2546 * Prevent the vnode from being recycled or brought into use while we 2547 * clean it out. 2548 */ 2549 if (vp->v_iflag & VI_XLOCK) 2550 panic("vclean: deadlock"); 2551 vp->v_iflag |= VI_XLOCK; 2552 vp->v_vxproc = curthread; 2553 /* 2554 * Even if the count is zero, the VOP_INACTIVE routine may still 2555 * have the object locked while it cleans it out. The VOP_LOCK 2556 * ensures that the VOP_INACTIVE routine is done with its work. 2557 * For active vnodes, it ensures that no other activity can 2558 * occur while the underlying object is being cleaned out. 2559 */ 2560 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td); 2561 2562 /* 2563 * Clean out any buffers associated with the vnode. 2564 * If the flush fails, just toss the buffers. 2565 */ 2566 if (flags & DOCLOSE) { 2567 struct buf *bp; 2568 VI_LOCK(vp); 2569 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 2570 VI_UNLOCK(vp); 2571 if (bp != NULL) 2572 (void) vn_write_suspend_wait(vp, NULL, V_WAIT); 2573 if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0) 2574 vinvalbuf(vp, 0, NOCRED, td, 0, 0); 2575 } 2576 2577 VOP_DESTROYVOBJECT(vp); 2578 2579 /* 2580 * Any other processes trying to obtain this lock must first 2581 * wait for VXLOCK to clear, then call the new lock operation. 2582 */ 2583 VOP_UNLOCK(vp, 0, td); 2584 2585 /* 2586 * If purging an active vnode, it must be closed and 2587 * deactivated before being reclaimed. Note that the 2588 * VOP_INACTIVE will unlock the vnode. 2589 */ 2590 if (active) { 2591 if (flags & DOCLOSE) 2592 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2593 VI_LOCK(vp); 2594 if ((vp->v_iflag & VI_DOINGINACT) == 0) { 2595 vp->v_iflag |= VI_DOINGINACT; 2596 VI_UNLOCK(vp); 2597 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0) 2598 panic("vclean: cannot relock."); 2599 VOP_INACTIVE(vp, td); 2600 VI_LOCK(vp); 2601 KASSERT(vp->v_iflag & VI_DOINGINACT, 2602 ("vclean: lost VI_DOINGINACT")); 2603 vp->v_iflag &= ~VI_DOINGINACT; 2604 } 2605 VI_UNLOCK(vp); 2606 } 2607 2608 /* 2609 * Reclaim the vnode. 2610 */ 2611 if (VOP_RECLAIM(vp, td)) 2612 panic("vclean: cannot reclaim"); 2613 2614 if (active) { 2615 /* 2616 * Inline copy of vrele() since VOP_INACTIVE 2617 * has already been called. 2618 */ 2619 VI_LOCK(vp); 2620 v_incr_usecount(vp, -1); 2621 if (vp->v_usecount <= 0) { 2622#ifdef DIAGNOSTIC 2623 if (vp->v_usecount < 0 || vp->v_writecount != 0) { 2624 vprint("vclean: bad ref count", vp); 2625 panic("vclean: ref cnt"); 2626 } 2627#endif 2628 vfree(vp); 2629 } 2630 VI_UNLOCK(vp); 2631 } 2632 2633 cache_purge(vp); 2634 VI_LOCK(vp); 2635 if (VSHOULDFREE(vp)) 2636 vfree(vp); 2637 2638 /* 2639 * Done with purge, reset to the standard lock and 2640 * notify sleepers of the grim news. 
2641 */ 2642 vp->v_vnlock = &vp->v_lock; 2643 vp->v_op = dead_vnodeop_p; 2644 if (vp->v_pollinfo != NULL) 2645 vn_pollgone(vp); 2646 vp->v_tag = "none"; 2647 vp->v_iflag &= ~VI_XLOCK; 2648 vp->v_vxproc = NULL; 2649 if (vp->v_iflag & VI_XWANT) { 2650 vp->v_iflag &= ~VI_XWANT; 2651 wakeup(vp); 2652 } 2653} 2654 2655/* 2656 * Eliminate all activity associated with the requested vnode 2657 * and with all vnodes aliased to the requested vnode. 2658 */ 2659int 2660vop_revoke(ap) 2661 struct vop_revoke_args /* { 2662 struct vnode *a_vp; 2663 int a_flags; 2664 } */ *ap; 2665{ 2666 struct vnode *vp, *vq; 2667 dev_t dev; 2668 2669 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke")); 2670 vp = ap->a_vp; 2671 KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR")); 2672 2673 VI_LOCK(vp); 2674 /* 2675 * If a vgone (or vclean) is already in progress, 2676 * wait until it is done and return. 2677 */ 2678 if (vp->v_iflag & VI_XLOCK) { 2679 vp->v_iflag |= VI_XWANT; 2680 msleep(vp, VI_MTX(vp), PINOD | PDROP, 2681 "vop_revokeall", 0); 2682 return (0); 2683 } 2684 VI_UNLOCK(vp); 2685 dev = vp->v_rdev; 2686 for (;;) { 2687 mtx_lock(&spechash_mtx); 2688 vq = SLIST_FIRST(&dev->si_hlist); 2689 mtx_unlock(&spechash_mtx); 2690 if (!vq) 2691 break; 2692 vgone(vq); 2693 } 2694 return (0); 2695} 2696 2697/* 2698 * Recycle an unused vnode to the front of the free list. 2699 * Release the passed interlock if the vnode will be recycled. 2700 */ 2701int 2702vrecycle(vp, inter_lkp, td) 2703 struct vnode *vp; 2704 struct mtx *inter_lkp; 2705 struct thread *td; 2706{ 2707 2708 VI_LOCK(vp); 2709 if (vp->v_usecount == 0) { 2710 if (inter_lkp) { 2711 mtx_unlock(inter_lkp); 2712 } 2713 vgonel(vp, td); 2714 return (1); 2715 } 2716 VI_UNLOCK(vp); 2717 return (0); 2718} 2719 2720/* 2721 * Eliminate all activity associated with a vnode 2722 * in preparation for reuse. 2723 */ 2724void 2725vgone(vp) 2726 register struct vnode *vp; 2727{ 2728 struct thread *td = curthread; /* XXX */ 2729 2730 VI_LOCK(vp); 2731 vgonel(vp, td); 2732} 2733 2734/* 2735 * vgone, with the vp interlock held. 2736 */ 2737void 2738vgonel(vp, td) 2739 struct vnode *vp; 2740 struct thread *td; 2741{ 2742 int s; 2743 2744 /* 2745 * If a vgone (or vclean) is already in progress, 2746 * wait until it is done and return. 2747 */ 2748 ASSERT_VI_LOCKED(vp, "vgonel"); 2749 if (vp->v_iflag & VI_XLOCK) { 2750 vp->v_iflag |= VI_XWANT; 2751 msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0); 2752 return; 2753 } 2754 2755 /* 2756 * Clean out the filesystem specific data. 2757 */ 2758 vclean(vp, DOCLOSE, td); 2759 VI_UNLOCK(vp); 2760 2761 /* 2762 * Delete from old mount point vnode list, if on one. 2763 */ 2764 if (vp->v_mount != NULL) 2765 insmntque(vp, (struct mount *)0); 2766 /* 2767 * If special device, remove it from special device alias list 2768 * if it is on one. 2769 */ 2770 if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) { 2771 VI_LOCK(vp); 2772 mtx_lock(&spechash_mtx); 2773 SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext); 2774 vp->v_rdev->si_usecount -= vp->v_usecount; 2775 mtx_unlock(&spechash_mtx); 2776 VI_UNLOCK(vp); 2777 vp->v_rdev = NULL; 2778 } 2779 2780 /* 2781 * If it is on the freelist and not already at the head, 2782 * move it to the head of the list. The test of the 2783 * VDOOMED flag and the reference count of zero is because 2784 * it will be removed from the free list by getnewvnode, 2785 * but will not have its reference count incremented until 2786 * after calling vgone. 
If the reference count were 2787 * incremented first, vgone would (incorrectly) try to 2788 * close the previous instance of the underlying object. 2789 */ 2790 VI_LOCK(vp); 2791 if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) { 2792 s = splbio(); 2793 mtx_lock(&vnode_free_list_mtx); 2794 if (vp->v_iflag & VI_FREE) { 2795 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2796 } else { 2797 vp->v_iflag |= VI_FREE; 2798 freevnodes++; 2799 } 2800 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2801 mtx_unlock(&vnode_free_list_mtx); 2802 splx(s); 2803 } 2804 2805 vp->v_type = VBAD; 2806 VI_UNLOCK(vp); 2807} 2808 2809/* 2810 * Lookup a vnode by device number. 2811 */ 2812int 2813vfinddev(dev, type, vpp) 2814 dev_t dev; 2815 enum vtype type; 2816 struct vnode **vpp; 2817{ 2818 struct vnode *vp; 2819 2820 mtx_lock(&spechash_mtx); 2821 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 2822 if (type == vp->v_type) { 2823 *vpp = vp; 2824 mtx_unlock(&spechash_mtx); 2825 return (1); 2826 } 2827 } 2828 mtx_unlock(&spechash_mtx); 2829 return (0); 2830} 2831 2832/* 2833 * Calculate the total number of references to a special device. 2834 */ 2835int 2836vcount(vp) 2837 struct vnode *vp; 2838{ 2839 int count; 2840 2841 mtx_lock(&spechash_mtx); 2842 count = vp->v_rdev->si_usecount; 2843 mtx_unlock(&spechash_mtx); 2844 return (count); 2845} 2846 2847/* 2848 * Same as above, but using the dev_t as argument 2849 */ 2850int 2851count_dev(dev) 2852 dev_t dev; 2853{ 2854 struct vnode *vp; 2855 2856 vp = SLIST_FIRST(&dev->si_hlist); 2857 if (vp == NULL) 2858 return (0); 2859 return(vcount(vp)); 2860} 2861 2862/* 2863 * Print out a description of a vnode. 2864 */ 2865static char *typename[] = 2866{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 2867 2868void 2869vprint(label, vp) 2870 char *label; 2871 struct vnode *vp; 2872{ 2873 char buf[96]; 2874 2875 if (label != NULL) 2876 printf("%s: %p: ", label, (void *)vp); 2877 else 2878 printf("%p: ", (void *)vp); 2879 printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,", 2880 vp->v_tag, typename[vp->v_type], vp->v_usecount, 2881 vp->v_writecount, vp->v_holdcnt); 2882 buf[0] = '\0'; 2883 if (vp->v_vflag & VV_ROOT) 2884 strcat(buf, "|VV_ROOT"); 2885 if (vp->v_vflag & VV_TEXT) 2886 strcat(buf, "|VV_TEXT"); 2887 if (vp->v_vflag & VV_SYSTEM) 2888 strcat(buf, "|VV_SYSTEM"); 2889 if (vp->v_iflag & VI_XLOCK) 2890 strcat(buf, "|VI_XLOCK"); 2891 if (vp->v_iflag & VI_XWANT) 2892 strcat(buf, "|VI_XWANT"); 2893 if (vp->v_iflag & VI_BWAIT) 2894 strcat(buf, "|VI_BWAIT"); 2895 if (vp->v_iflag & VI_DOOMED) 2896 strcat(buf, "|VI_DOOMED"); 2897 if (vp->v_iflag & VI_FREE) 2898 strcat(buf, "|VI_FREE"); 2899 if (vp->v_vflag & VV_OBJBUF) 2900 strcat(buf, "|VV_OBJBUF"); 2901 if (buf[0] != '\0') 2902 printf(" flags (%s),", &buf[1]); 2903 lockmgr_printinfo(vp->v_vnlock); 2904 printf("\n"); 2905 if (vp->v_data != NULL) 2906 VOP_PRINT(vp); 2907} 2908 2909#ifdef DDB 2910#include <ddb/ddb.h> 2911/* 2912 * List all of the locked vnodes in the system. 2913 * Called when debugging the kernel. 2914 */ 2915DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2916{ 2917 struct mount *mp, *nmp; 2918 struct vnode *vp; 2919 2920 /* 2921 * Note: because this is DDB, we can't obey the locking semantics 2922 * for these structures, which means we could catch an inconsistent 2923 * state and dereference a nasty pointer. Not much to be done 2924 * about that. 
 */
	printf("Locked vnodes\n");
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		nmp = TAILQ_NEXT(mp, mnt_list);
		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
			if (VOP_ISLOCKED(vp, NULL))
				vprint(NULL, vp);
		}
	}
}
#endif

/*
 * Fill in a struct xvfsconf based on a struct vfsconf.
 */
static void
vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
{

	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
	xvfsp->vfc_typenum = vfsp->vfc_typenum;
	xvfsp->vfc_refcount = vfsp->vfc_refcount;
	xvfsp->vfc_flags = vfsp->vfc_flags;
	/*
	 * These are unused in userland, we keep them
	 * to not break binary compatibility.
	 */
	xvfsp->vfc_vfsops = NULL;
	xvfsp->vfc_next = NULL;
}

static int
sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
{
	struct vfsconf *vfsp;
	struct xvfsconf *xvfsp;
	int cnt, error, i;

	cnt = 0;
	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
		cnt++;
	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
	/*
	 * Handle the race we will have here when struct vfsconf is
	 * locked down, by using both cnt and checking vfc_next against
	 * NULL to determine the end of the loop.  The race will happen
	 * because we will have to unlock before calling malloc().
	 * We are protected by Giant for now.
	 */
	i = 0;
	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
		vfsconf2x(vfsp, xvfsp + i);
		i++;
	}
	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
	free(xvfsp, M_TEMP);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
    "S,xvfsconf", "List of all configured filesystems");
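/*
 * Illustrative userland sketch (not part of this file): consuming the
 * vfs.conflist sysctl exported above.  Error handling is abbreviated,
 * and a robust consumer would allow for the list growing between the
 * size probe and the data fetch.
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct xvfsconf *xvfsp;
	size_t buflen, cnt, i;

	/* The first call sizes the buffer, the second call fills it. */
	if (sysctlbyname("vfs.conflist", NULL, &buflen, NULL, 0) < 0)
		return (1);
	if ((xvfsp = malloc(buflen)) == NULL)
		return (1);
	if (sysctlbyname("vfs.conflist", xvfsp, &buflen, NULL, 0) < 0)
		return (1);
	cnt = buflen / sizeof(struct xvfsconf);
	for (i = 0; i < cnt; i++)
		printf("%s (refcount %d)\n", xvfsp[i].vfc_name,
		    xvfsp[i].vfc_refcount);
	free(xvfsp);
	return (0);
}
#endif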
/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	struct xvfsconf xvfsp;

	printf("WARNING: userland calling deprecated sysctl, "
	    "please rebuild world\n");

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		vfsconf2x(vfsp, &xvfsp);
		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
    "Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct vfsconf *vfsp;
	struct ovfsconf ovfs;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
		strcpy(ovfs.vfc_name, vfsp->vfc_name);
		ovfs.vfc_index = vfsp->vfc_typenum;
		ovfs.vfc_refcount = vfsp->vfc_refcount;
		ovfs.vfc_flags = vfsp->vfc_flags;
		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
		if (error)
			return (error);
	}
	return (0);
}

#endif /* 1 || COMPAT_PRELITE2 */

#define KINFO_VNODESLOP		10
#ifdef notyet
/*
 * Dump vnode list (via sysctl).
 */
/* ARGSUSED */
static int
sysctl_vnode(SYSCTL_HANDLER_ARGS)
{
	struct xvnode *xvn;
	struct thread *td = req->td;
	struct mount *mp;
	struct vnode *vp;
	int error, len, n;

	/*
	 * Stale numvnodes access is not fatal here.
	 */
	req->lock = 0;
	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
	if (!req->oldptr)
		/* Make an estimate */
		return (SYSCTL_OUT(req, 0, len));

	sysctl_wire_old_buffer(req, 0);
	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
	n = 0;
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
			continue;
		mtx_lock(&mntvnode_mtx);
		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
			if (n == len)
				break;
			vref(vp);
			xvn[n].xv_size = sizeof *xvn;
			xvn[n].xv_vnode = vp;
#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
			XV_COPY(usecount);
			XV_COPY(writecount);
			XV_COPY(holdcnt);
			XV_COPY(id);
			XV_COPY(mount);
			XV_COPY(numoutput);
			XV_COPY(type);
#undef XV_COPY
			xvn[n].xv_flag = vp->v_vflag;

			switch (vp->v_type) {
			case VREG:
			case VDIR:
			case VLNK:
				xvn[n].xv_dev = vp->v_cachedfs;
				xvn[n].xv_ino = vp->v_cachedid;
				break;
			case VBLK:
			case VCHR:
				if (vp->v_rdev == NULL) {
					vrele(vp);
					continue;
				}
				xvn[n].xv_dev = dev2udev(vp->v_rdev);
				break;
			case VSOCK:
				xvn[n].xv_socket = vp->v_socket;
				break;
			case VFIFO:
				xvn[n].xv_fifo = vp->v_fifoinfo;
				break;
			case VNON:
			case VBAD:
			default:
				/* shouldn't happen? */
				vrele(vp);
				continue;
			}
			vrele(vp);
			++n;
		}
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&mountlist_mtx);
		vfs_unbusy(mp, td);
		if (n == len)
			break;
	}
	mtx_unlock(&mountlist_mtx);

	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
	free(xvn, M_TEMP);
	return (error);
}

SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_vnode, "S,xvnode", "");
#endif

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{

	if (vp->v_rdev->si_mountpoint != NULL)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
void
vfs_unmountall()
{
	struct mount *mp;
	struct thread *td;
	int error;

	if (curthread != NULL)
		td = curthread;
	else
		td = FIRST_THREAD_IN_PROC(initproc);	/* XXX XXX proc0? */
	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	while (!TAILQ_EMPTY(&mountlist)) {
		mp = TAILQ_LAST(&mountlist, mntlist);
		error = dounmount(mp, MNT_FORCE, td);
		if (error) {
			TAILQ_REMOVE(&mountlist, mp, mnt_list);
			printf("unmount of %s failed (",
			    mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		} else {
			/* The unmount has removed mp from the mountlist */
		}
	}
}

/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp, *nvp;
	struct vm_object *obj;
	int tries;

	GIANT_REQUIRED;

	tries = 5;
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp) {
			if (--tries > 0)
				goto loop;
			break;
		}
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);

		VI_LOCK(vp);
		if (vp->v_iflag & VI_XLOCK) {	/* XXX: what if MNT_WAIT? */
			VI_UNLOCK(vp);
			continue;
		}

		if ((vp->v_iflag & VI_OBJDIRTY) &&
		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
			mtx_unlock(&mntvnode_mtx);
			if (!vget(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
			    curthread)) {
				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
					vput(vp);
					mtx_lock(&mntvnode_mtx);
					continue;
				}

				if (VOP_GETVOBJECT(vp, &obj) == 0) {
					VM_OBJECT_LOCK(obj);
					vm_object_page_clean(obj, 0, 0,
					    flags == MNT_WAIT ?
					    OBJPC_SYNC : OBJPC_NOSYNC);
					VM_OBJECT_UNLOCK(obj);
				}
				vput(vp);
			}
			mtx_lock(&mntvnode_mtx);
			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
				if (--tries > 0)
					goto loop;
				break;
			}
		} else
			VI_UNLOCK(vp);
	}
	mtx_unlock(&mntvnode_mtx);
}
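/*
 * Illustrative sketch (not part of this file): choosing the vfs_msync()
 * flag.  MNT_NOWAIT, as used by the syncer in sync_fsync() below, only
 * flushes dirty-object vnodes it can examine without blocking; MNT_WAIT
 * is for callers (e.g. at unmount time) that must catch every vnode.
 * "example_msync_mount" is a hypothetical helper.
 */
#if 0
static void
example_msync_mount(struct mount *mp, int blocking)
{
	vfs_msync(mp, blocking ? MNT_WAIT : MNT_NOWAIT);
}
#endif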
/*
 * Create the VM object needed for VMIO and mmap support.  This
 * is done for all VREG files in the system.  Some filesystems might
 * afford the additional metadata buffering capability of the
 * VMIO code by making the device node be VMIO mode also.
 *
 * vp must be locked when vfs_object_create is called.
 */
int
vfs_object_create(vp, td, cred)
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
{
	GIANT_REQUIRED;
	return (VOP_CREATEVOBJECT(vp, cred, td));
}

/*
 * Mark a vnode as free, putting it up for recycling.
 */
void
vfree(vp)
	struct vnode *vp;
{
	int s;

	ASSERT_VI_LOCKED(vp, "vfree");
	s = splbio();
	mtx_lock(&vnode_free_list_mtx);
	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
	if (vp->v_iflag & VI_AGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_iflag &= ~VI_AGE;
	vp->v_iflag |= VI_FREE;
	splx(s);
}

/*
 * Opposite of vfree() - mark a vnode as in use.
 */
void
vbusy(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	ASSERT_VI_LOCKED(vp, "vbusy");
	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));

	mtx_lock(&vnode_free_list_mtx);
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	mtx_unlock(&vnode_free_list_mtx);

	vp->v_iflag &= ~(VI_FREE|VI_AGE);
	splx(s);
}

/*
 * Record a process's interest in events which might happen to
 * a vnode.  Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions.  (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(vp, td, events)
	struct vnode *vp;
	struct thread *td;
	short events;
{

	if (vp->v_pollinfo == NULL)
		v_addpollinfo(vp);
	mtx_lock(&vp->v_pollinfo->vpi_lock);
	if (vp->v_pollinfo->vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo->vpi_revents;
		vp->v_pollinfo->vpi_revents &= ~events;

		mtx_unlock(&vp->v_pollinfo->vpi_lock);
		return (events);
	}
	vp->v_pollinfo->vpi_events |= events;
	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
	mtx_unlock(&vp->v_pollinfo->vpi_lock);
	return (0);
}

/*
 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(vp, events)
	struct vnode *vp;
	short events;
{

	if (vp->v_pollinfo == NULL)
		v_addpollinfo(vp);
	mtx_lock(&vp->v_pollinfo->vpi_lock);
	if (vp->v_pollinfo->vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened.  This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested.  However, it does
		 * mean that only one event can be noticed at
		 * a time.  (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ???
*/ 3395 vp->v_pollinfo->vpi_revents |= events; 3396 selwakeup(&vp->v_pollinfo->vpi_selinfo); 3397 } 3398 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3399} 3400 3401/* 3402 * Wake up anyone polling on vp because it is being revoked. 3403 * This depends on dead_poll() returning POLLHUP for correct 3404 * behavior. 3405 */ 3406void 3407vn_pollgone(vp) 3408 struct vnode *vp; 3409{ 3410 3411 mtx_lock(&vp->v_pollinfo->vpi_lock); 3412 VN_KNOTE(vp, NOTE_REVOKE); 3413 if (vp->v_pollinfo->vpi_events) { 3414 vp->v_pollinfo->vpi_events = 0; 3415 selwakeup(&vp->v_pollinfo->vpi_selinfo); 3416 } 3417 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3418} 3419 3420 3421 3422/* 3423 * Routine to create and manage a filesystem syncer vnode. 3424 */ 3425#define sync_close ((int (*)(struct vop_close_args *))nullop) 3426static int sync_fsync(struct vop_fsync_args *); 3427static int sync_inactive(struct vop_inactive_args *); 3428static int sync_reclaim(struct vop_reclaim_args *); 3429 3430static vop_t **sync_vnodeop_p; 3431static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 3432 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 3433 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 3434 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 3435 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 3436 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 3437 { &vop_lock_desc, (vop_t *) vop_stdlock }, /* lock */ 3438 { &vop_unlock_desc, (vop_t *) vop_stdunlock }, /* unlock */ 3439 { &vop_islocked_desc, (vop_t *) vop_stdislocked }, /* islocked */ 3440 { NULL, NULL } 3441}; 3442static struct vnodeopv_desc sync_vnodeop_opv_desc = 3443 { &sync_vnodeop_p, sync_vnodeop_entries }; 3444 3445VNODEOP_SET(sync_vnodeop_opv_desc); 3446 3447/* 3448 * Create a new filesystem syncer vnode for the specified mount point. 3449 */ 3450int 3451vfs_allocate_syncvnode(mp) 3452 struct mount *mp; 3453{ 3454 struct vnode *vp; 3455 static long start, incr, next; 3456 int error; 3457 3458 /* Allocate a new vnode */ 3459 if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) { 3460 mp->mnt_syncer = NULL; 3461 return (error); 3462 } 3463 vp->v_type = VNON; 3464 /* 3465 * Place the vnode onto the syncer worklist. We attempt to 3466 * scatter them about on the list so that they will go off 3467 * at evenly distributed times even if all the filesystems 3468 * are mounted at once. 3469 */ 3470 next += incr; 3471 if (next == 0 || next > syncer_maxdelay) { 3472 start /= 2; 3473 incr /= 2; 3474 if (start == 0) { 3475 start = syncer_maxdelay / 2; 3476 incr = syncer_maxdelay; 3477 } 3478 next = start; 3479 } 3480 VI_LOCK(vp); 3481 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 3482 VI_UNLOCK(vp); 3483 mp->mnt_syncer = vp; 3484 return (0); 3485} 3486 3487/* 3488 * Do a lazy sync of the filesystem. 3489 */ 3490static int 3491sync_fsync(ap) 3492 struct vop_fsync_args /* { 3493 struct vnode *a_vp; 3494 struct ucred *a_cred; 3495 int a_waitfor; 3496 struct thread *a_td; 3497 } */ *ap; 3498{ 3499 struct vnode *syncvp = ap->a_vp; 3500 struct mount *mp = syncvp->v_mount; 3501 struct thread *td = ap->a_td; 3502 int error, asyncflag; 3503 3504 /* 3505 * We only need to do something if this is a lazy evaluation. 3506 */ 3507 if (ap->a_waitfor != MNT_LAZY) 3508 return (0); 3509 3510 /* 3511 * Move ourselves to the back of the sync list. 
 */
	VI_LOCK(syncvp);
	vn_syncer_add_to_worklist(syncvp, syncdelay);
	VI_UNLOCK(syncvp);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	mtx_lock(&mountlist_mtx);
	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
		mtx_unlock(&mountlist_mtx);
		return (0);
	}
	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
		vfs_unbusy(mp, td);
		return (0);
	}
	asyncflag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	vfs_msync(mp, MNT_NOWAIT);
	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
	if (asyncflag)
		mp->mnt_flag |= MNT_ASYNC;
	vn_finished_write(mp);
	vfs_unbusy(mp, td);
	return (error);
}

/*
 * The syncer vnode is no longer referenced.
 */
static int
sync_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	vgone(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 *
 * Modifications to the worklist must be protected at splbio().
 */
static int
sync_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int s;

	s = splbio();
	vp->v_mount->mnt_syncer = NULL;
	VI_LOCK(vp);
	if (vp->v_iflag & VI_ONWORKLST) {
		mtx_lock(&sync_mtx);
		LIST_REMOVE(vp, v_synclist);
		mtx_unlock(&sync_mtx);
		vp->v_iflag &= ~VI_ONWORKLST;
	}
	VI_UNLOCK(vp);
	splx(s);

	return (0);
}

/*
 * Extract the dev_t from a VCHR vnode.
 */
dev_t
vn_todev(vp)
	struct vnode *vp;
{
	if (vp->v_type != VCHR)
		return (NODEV);
	return (vp->v_rdev);
}

/*
 * Check if a vnode represents a disk device.
 */
int
vn_isdisk(vp, errp)
	struct vnode *vp;
	int *errp;
{
	struct cdevsw *cdevsw;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (vp->v_rdev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	cdevsw = devsw(vp->v_rdev);
	if (cdevsw == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (!(cdevsw->d_flags & D_DISK)) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

/*
 * Free data allocated by namei(); see namei(9) for details.
3636 */ 3637void 3638NDFREE(ndp, flags) 3639 struct nameidata *ndp; 3640 const uint flags; 3641{ 3642 if (!(flags & NDF_NO_FREE_PNBUF) && 3643 (ndp->ni_cnd.cn_flags & HASBUF)) { 3644 uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); 3645 ndp->ni_cnd.cn_flags &= ~HASBUF; 3646 } 3647 if (!(flags & NDF_NO_DVP_UNLOCK) && 3648 (ndp->ni_cnd.cn_flags & LOCKPARENT) && 3649 ndp->ni_dvp != ndp->ni_vp) 3650 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread); 3651 if (!(flags & NDF_NO_DVP_RELE) && 3652 (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) { 3653 vrele(ndp->ni_dvp); 3654 ndp->ni_dvp = NULL; 3655 } 3656 if (!(flags & NDF_NO_VP_UNLOCK) && 3657 (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp) 3658 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread); 3659 if (!(flags & NDF_NO_VP_RELE) && 3660 ndp->ni_vp) { 3661 vrele(ndp->ni_vp); 3662 ndp->ni_vp = NULL; 3663 } 3664 if (!(flags & NDF_NO_STARTDIR_RELE) && 3665 (ndp->ni_cnd.cn_flags & SAVESTART)) { 3666 vrele(ndp->ni_startdir); 3667 ndp->ni_startdir = NULL; 3668 } 3669} 3670 3671/* 3672 * Common filesystem object access control check routine. Accepts a 3673 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3674 * and optional call-by-reference privused argument allowing vaccess() 3675 * to indicate to the caller whether privilege was used to satisfy the 3676 * request (obsoleted). Returns 0 on success, or an errno on failure. 3677 */ 3678int 3679vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused) 3680 enum vtype type; 3681 mode_t file_mode; 3682 uid_t file_uid; 3683 gid_t file_gid; 3684 mode_t acc_mode; 3685 struct ucred *cred; 3686 int *privused; 3687{ 3688 mode_t dac_granted; 3689#ifdef CAPABILITIES 3690 mode_t cap_granted; 3691#endif 3692 3693 /* 3694 * Look for a normal, non-privileged way to access the file/directory 3695 * as requested. If it exists, go with that. 3696 */ 3697 3698 if (privused != NULL) 3699 *privused = 0; 3700 3701 dac_granted = 0; 3702 3703 /* Check the owner. */ 3704 if (cred->cr_uid == file_uid) { 3705 dac_granted |= VADMIN; 3706 if (file_mode & S_IXUSR) 3707 dac_granted |= VEXEC; 3708 if (file_mode & S_IRUSR) 3709 dac_granted |= VREAD; 3710 if (file_mode & S_IWUSR) 3711 dac_granted |= (VWRITE | VAPPEND); 3712 3713 if ((acc_mode & dac_granted) == acc_mode) 3714 return (0); 3715 3716 goto privcheck; 3717 } 3718 3719 /* Otherwise, check the groups (first match) */ 3720 if (groupmember(file_gid, cred)) { 3721 if (file_mode & S_IXGRP) 3722 dac_granted |= VEXEC; 3723 if (file_mode & S_IRGRP) 3724 dac_granted |= VREAD; 3725 if (file_mode & S_IWGRP) 3726 dac_granted |= (VWRITE | VAPPEND); 3727 3728 if ((acc_mode & dac_granted) == acc_mode) 3729 return (0); 3730 3731 goto privcheck; 3732 } 3733 3734 /* Otherwise, check everyone else. */ 3735 if (file_mode & S_IXOTH) 3736 dac_granted |= VEXEC; 3737 if (file_mode & S_IROTH) 3738 dac_granted |= VREAD; 3739 if (file_mode & S_IWOTH) 3740 dac_granted |= (VWRITE | VAPPEND); 3741 if ((acc_mode & dac_granted) == acc_mode) 3742 return (0); 3743 3744privcheck: 3745 if (!suser_cred(cred, PRISON_ROOT)) { 3746 /* XXX audit: privilege used */ 3747 if (privused != NULL) 3748 *privused = 1; 3749 return (0); 3750 } 3751 3752#ifdef CAPABILITIES 3753 /* 3754 * Build a capability mask to determine if the set of capabilities 3755 * satisfies the requirements when combined with the granted mask 3756 * from above. 3757 * For each capability, if the capability is required, bitwise 3758 * or the request type onto the cap_granted mask. 
 */
	cap_granted = 0;

	if (type == VDIR) {
		/*
		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
		 */
		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
			cap_granted |= VEXEC;
	} else {
		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
			cap_granted |= VEXEC;
	}

	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
		cap_granted |= VREAD;

	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
		cap_granted |= (VWRITE | VAPPEND);

	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
		cap_granted |= VADMIN;

	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
		/* XXX audit: privilege used */
		if (privused != NULL)
			*privused = 1;
		return (0);
	}
#endif

	return ((acc_mode & VADMIN) ? EPERM : EACCES);
}

/*
 * Credential check based on process requesting service, and per-attribute
 * permissions.
 */
int
extattr_check_cred(struct vnode *vp, int attrnamespace,
    struct ucred *cred, struct thread *td, int access)
{

	/*
	 * Kernel-invoked requests always succeed.
	 */
	if (cred == NOCRED)
		return (0);

	/*
	 * Do not allow privileged processes in jail to directly
	 * manipulate system attributes.
	 *
	 * XXX What capability should apply here?
	 * Probably CAP_SYS_SETFFLAG.
	 */
	switch (attrnamespace) {
	case EXTATTR_NAMESPACE_SYSTEM:
		/* Potentially should be: return (EPERM); */
		return (suser_cred(cred, 0));
	case EXTATTR_NAMESPACE_USER:
		return (VOP_ACCESS(vp, access, cred, td));
	default:
		return (EPERM);
	}
}
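/*
 * Illustrative sketch (not part of this file): driving the vaccess()
 * DAC logic above from attributes fetched off a vnode.  For the owner
 * of a mode 0644 VREG file, dac_granted becomes
 * VADMIN | VREAD | VWRITE | VAPPEND, so this VREAD request succeeds
 * without ever consulting suser_cred(); a VEXEC request would instead
 * fall through to the privilege check.  "example_check_owner_read" is
 * a hypothetical helper.
 */
#if 0
static int
example_check_owner_read(struct vnode *vp, struct ucred *cred,
    struct thread *td)
{
	struct vattr va;
	int error;

	error = VOP_GETATTR(vp, &va, cred, td);
	if (error != 0)
		return (error);
	return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid,
	    VREAD, cred, NULL));
}
#endif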