vfs_export.c revision 72200
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: head/sys/kern/vfs_export.c 72200 2001-02-09 06:11:45Z bmilekic $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_ffs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias __P((struct vnode *vp, dev_t nvp_rdev));
static void	insmntque __P((struct vnode *vp, struct mount *mp));
static void	vclean __P((struct vnode *vp, int flags, struct proc *p));

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long	numvnodes;
SYSCTL_LONG(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If there are fewer than this many free
 * vnodes, getnewvnode() will return a newly allocated vnode.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes = 0;
SYSCTL_LONG(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
/* Set to 0 for old insertion-sort based reassignbuf, 1 for modern method. */
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");

#ifdef ENABLE_VFS_IOOPT
/* See NOTES for a description of this setting. */
int vfs_ioopt = 0;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

/* List of mounted filesystems. */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);

/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;

/* For any iteration/modification of mnt_vnodelist */
struct mtx mntvnode_mtx;

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/* For any iteration/modification of vnode_free_list */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static vm_zone_t vnode_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive = 0;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystems mounted on
 * block devices are delayed only about half the time that file data
 * is delayed.  Similarly, directory updates are more critical, so are
 * only delayed about a third the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at a
 * rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
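
/*
 * Illustrative sketch (not part of the original source): how a delay in
 * seconds maps to a slot in syncer_workitem_pending[].  This mirrors the
 * computation done in vn_syncer_add_to_worklist() below; the helper name
 * is hypothetical and the block is compiled out.
 */
#ifdef notdef
static int
syncer_slot_for_delay(int delay)
{

	/* Clamp so a large delay cannot wrap past the current slot. */
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	/* syncer_mask is (table size - 1), so the AND wraps the index. */
	return ((syncer_delayno + delay) & syncer_mask);
}
#endif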

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));

/*
 * Initialize the vnode management data structures.
 */
static void
vntblinit(void *dummy __unused)
{

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
	mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
	mtx_init(&mntid_mtx, "mntid", MTX_DEF);
	mtx_init(&spechash_mtx, "spechash", MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, p)
	struct mount *mp;
	int flags;
	struct mtx *interlkp;
	struct proc *p;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		msleep((caddr_t)mp, interlkp, PVFS, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, p)
	struct mount *mp;
	struct proc *p;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}
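
/*
 * Illustrative sketch (not part of the original source): the usual
 * vfs_busy()/vfs_unbusy() pairing when walking the mount list, as also
 * done by the DDB "lockedvnodes" command further down in this file.
 * Compiled out; mp, nmp and p are assumed locals.
 */
#ifdef notdef
	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
			/* Being unmounted; skip it, interlock still held. */
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		/* ... use mp; the busy lock delays unmounting ... */
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	mtx_unlock(&mountlist_mtx);
#endif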

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root.  If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
#ifdef notdef	/* XXX JH */
int
lite2_vfs_mountroot()
{
	struct vfsconf *vfsp;
	extern int (*lite2_mountroot) __P((void));
	int error;

	if (lite2_mountroot != NULL)
		return ((*lite2_mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}
#endif

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
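
/*
 * Worked example (illustrative, with assumed values): for vfc_typenum 0x05
 * and mntid_base 0x1234, the minor passed to makeudev() above is
 * 0x05000000 | 0x00120000 | 0x00000034.  Only the low byte of mntid_base
 * lands in the low bits of val[0], so the low 16 bits repeat every 2^8
 * calls - which is why the comment above promises uniqueness mod 2^16
 * only for the first 2^8 calls.
 */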

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s, count;
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp = NULL;
	struct mount *vnmp;
	vm_object_t object;

	/*
	 * We take the least recently used vnode from the freelist
	 * if we can get it and it has no cached pages, and no
	 * namecache entries are relative to it.
	 * Otherwise we allocate a new vnode.
	 */

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
		/*
		 * XXX: this is only here to be backwards compatible
		 */
		vp = NULL;
	} else for (count = 0; count < freevnodes; count++) {
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vp == NULL || vp->v_usecount)
			panic("getnewvnode: free vnode isn't");
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/*
		 * Don't recycle if active in the namecache or
		 * if it still has cached pages or we cannot get
		 * its interlock.
		 */
		if (LIST_FIRST(&vp->v_cache_src) != NULL ||
		    (VOP_GETVOBJECT(vp, &object) == 0 &&
		     (object->resident_page_count || object->ref_count)) ||
		    !mtx_trylock(&vp->v_interlock)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			vp = NULL;
			continue;
		}
		/*
		 * Skip over it if its filesystem is being suspended.
		 */
		if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
			break;
		mtx_unlock(&vp->v_interlock);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		vp = NULL;
	}
	if (vp) {
		vp->v_flag |= VDOOMED;
		vp->v_flag &= ~VFREE;
		freevnodes--;
		mtx_unlock(&vnode_free_list_mtx);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD) {
			vgonel(vp, p);
		} else {
			mtx_unlock(&vp->v_interlock);
		}
		vn_finished_write(vnmp);

#ifdef INVARIANTS
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
			if (vp->v_writecount != 0)
				panic("Non-zero write count");
		}
#endif
		vp->v_flag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	} else {
		mtx_unlock(&vnode_free_list_mtx);
		vp = (struct vnode *) zalloc(vnode_zone);
		bzero((char *) vp, sizeof *vp);
		mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
		vp->v_dd = vp;
		mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	lockinit(&vp->v_lock, PVFS, "vnlock", 0, LK_NOPAUSE);
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	splx(s);

	vfs_object_create(vp, p, p->p_ucred);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	mtx_lock(&mntvnode_mtx);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		mtx_unlock(&mntvnode_mtx);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	for (;;) {
		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
		if (!blist)
			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
				error = BUF_TIMELOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL,
				    "vinvalbuf", slpflag, slptimeo);
				if (error == ENOLCK)
					break;
				splx(s);
				return (error);
			}
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.  Note that vfs_bio_awrite expects
			 * buffers to reside on a queue, while VOP_BWRITE and
			 * brelse do not.
			 */
			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
			    (flags & V_SAVE)) {

				if (bp->b_vp == vp) {
					if (bp->b_flags & B_CLUSTEROK) {
						BUF_UNLOCK(bp);
						vfs_bio_awrite(bp);
					} else {
						bremfree(bp);
						bp->b_flags |= B_ASYNC;
						BUF_WRITE(bp);
					}
				} else {
					bremfree(bp);
					(void) BUF_WRITE(bp);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	mtx_lock(&vp->v_interlock);
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		vm_object_page_remove(object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	mtx_unlock(&vp->v_interlock);

	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
		panic("vinvalbuf: flush failed");
	return (0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, p, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct proc *p;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
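	/*
	 * Worked example (illustrative, with assumed values): with blksize
	 * 512, a length of 1 gives trunclbn = 1, so block 0 (holding the
	 * surviving byte) is kept and blocks 1 and up are invalidated; a
	 * length of 0 gives trunclbn = 0, invalidating everything.
	 */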
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
restart:
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					if (bp->b_vp == vp) {
						bp->b_flags |= B_ASYNC;
					} else {
						bp->b_flags &= ~B_ASYNC;
					}
					BUF_WRITE(bp);
				}
				goto restartsync;
			}

		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
	}

	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	vhold(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bp->b_xflags |= BX_VNCLEAN;
	bp->b_xflags &= ~BX_VNDIRTY;
	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	struct buflists *listheadp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
		if (bp->b_xflags & BX_VNDIRTY)
			listheadp = &vp->v_dirtyblkhd;
		else
			listheadp = &vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
	}
	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	splx(s);
}

struct proc *updateproc;
static void sched_sync __P((void));
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	struct mount *mp;
	long starttime;
	int s;
	struct proc *p = updateproc;

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kthread_suspend_check(p);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (VOP_ISLOCKED(vp, NULL) == 0 &&
			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
				(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
				VOP_UNLOCK(vp, 0, p);
				vn_finished_write(mp);
			}
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: v_tag VT_VFS vps can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp, NULL))
					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			splx(s);
		}

		/*
		 * Do soft update processing.
		 */
#ifdef SOFTUPDATES
		softdep_process_worklist(NULL);
#endif

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
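		/*
		 * rushjob is bumped by speedup_syncer() below; each queued
		 * request makes this loop skip the one-second sleep that
		 * follows once, draining one extra slot as soon as possible.
		 */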
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer()
{

	mtx_lock_spin(&sched_lock);
	if (updateproc->p_wchan == &lbolt)
		setrunnable(updateproc);
	mtx_unlock_spin(&sched_lock);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	/* XXX REMOVE ME */
	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Change the vnode a pager buffer is associated with.
 */
void
pbreassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{

	KASSERT(bp->b_flags & B_PAGING,
	    ("pbreassignbuf() on non phys bp %p", bp));
	bp->b_vp = newvp;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
		if (bp->b_xflags & BX_VNDIRTY)
			listheadp = &bp->b_vp->v_dirtyblkhd;
		else
			listheadp = &bp->b_vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
		if (bp->b_vp != newvp) {
			vdrop(bp->b_vp);
			bp->b_vp = NULL;	/* for clarification */
		}
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
				if (newvp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		bp->b_xflags |= BX_VNDIRTY;
		tbp = TAILQ_FIRST(listheadp);
		if (tbp == NULL ||
		    bp->b_lblkno == 0 ||
		    (bp->b_lblkno > 0 && tbp->b_lblkno < 0) ||
		    (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) {
			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
			++reassignbufsortgood;
		} else if (bp->b_lblkno < 0) {
			TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
			++reassignbufsortgood;
		} else if (reassignbufmethod == 1) {
			/*
			 * New sorting algorithm, only handle sequential case,
			 * otherwise append to end (but before metadata)
			 */
			if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL &&
			    (tbp->b_xflags & BX_VNDIRTY)) {
				/*
				 * Found the best place to insert the buffer
				 */
				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
				++reassignbufsortgood;
			} else {
				/*
				 * Missed, append to end, but before meta-data.
				 * We know that the head buffer in the list is
				 * not meta-data due to prior conditionals.
				 *
				 * Indirect effects: NFS second stage write
				 * tends to wind up here, giving maximum
				 * distance between the unstable write and the
				 * commit rpc.
				 */
				tbp = TAILQ_LAST(listheadp, buflists);
				while (tbp && tbp->b_lblkno < 0)
					tbp = TAILQ_PREV(tbp, buflists, b_vnbufs);
				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
				++reassignbufsortbad;
			}
		} else {
			/*
			 * Old sorting algorithm, scan queue and insert
			 */
			struct buf *ttbp;
			while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
			    (ttbp->b_lblkno < bp->b_lblkno)) {
				++reassignbufloops;
				tbp = ttbp;
			}
			TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
		}
	} else {
		bp->b_xflags |= BX_VNCLEAN;
		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
		if ((newvp->v_flag & VONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	}
	if (bp->b_vp != newvp) {
		bp->b_vp = newvp;
		vhold(bp->b_vp);
	}
	splx(s);
}

/*
 * Create a vnode for a device.
 * Used for mounting the root file system.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	if (vfinddev(dev, VCHR, vpp))
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
	addalias(vp, dev);
	*vpp = vp;
	return (0);
}

/*
 * Add vnode to the alias list hung off the dev_t.
 *
 * The reason for this gunk is that multiple vnodes can reference
 * the same physical device, so checking vp->v_usecount to see
 * how many users there are is inadequate; the v_usecount values
 * for all of the aliased vnodes need to be accumulated.  vcount()
 * does that.
 */
struct vnode *
addaliasu(nvp, nvp_rdev)
	struct vnode *nvp;
	udev_t nvp_rdev;
{
	struct vnode *ovp;
	vop_t **ops;
	dev_t dev;

	if (nvp->v_type == VBLK)
		return (nvp);
	if (nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	dev = udev2dev(nvp_rdev, 0);
	/*
	 * Check to see if we have a bdevvp vnode with no associated
	 * filesystem.  If so, we want to associate the filesystem of
	 * the newly instigated vnode with the bdevvp vnode and
	 * discard the newly created vnode rather than leaving the
	 * bdevvp vnode lying around with no associated filesystem.
	 */
	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
		addalias(nvp, dev);
		return (nvp);
	}
	/*
	 * Discard unneeded vnode, but save its node specific data.
	 * Note that if there is a lock, it is carried over in the
	 * node specific data to the replacement vnode.
	 */
	vref(ovp);
	ovp->v_data = nvp->v_data;
	ovp->v_tag = nvp->v_tag;
	nvp->v_data = NULL;
	lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg,
	    nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK);
	if (nvp->v_vnlock)
		ovp->v_vnlock = &ovp->v_lock;
	ops = ovp->v_op;
	ovp->v_op = nvp->v_op;
	if (VOP_ISLOCKED(nvp, curproc)) {
		VOP_UNLOCK(nvp, 0, curproc);
		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curproc);
	}
	nvp->v_op = ops;
	insmntque(ovp, nvp->v_mount);
	vrele(nvp);
	vgone(nvp);
	return (ovp);
}

/*
 * This is a local helper function that does the same as addaliasu, but
 * for a dev_t instead of a udev_t.
 */
static void
addalias(nvp, dev)
	struct vnode *nvp;
	dev_t dev;
{

	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
	nvp->v_rdev = dev;
	mtx_lock(&spechash_mtx);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
	mtx_unlock(&spechash_mtx);
}
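
/*
 * Example of the merge above (illustrative): bdevvp() creates a bare VCHR
 * vnode for the root device early in boot; when the root filesystem later
 * produces its own vnode for the same device, addaliasu() grafts the
 * filesystem's v_data onto the pre-existing bdevvp vnode and discards the
 * new vnode, so only one vnode ends up referencing the device.
 */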

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  The vnode lock bit is set if the
 * vnode is being eliminated in vgone.  The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		mtx_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (vp->v_vxproc == curproc) {
			printf("VXLOCK interlock avoided\n");
		} else {
			vp->v_flag |= VXWANT;
			msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
			    "vget", 0);
			return (ENOENT);
		}
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active.  We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			mtx_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			mtx_unlock(&vp->v_interlock);
		}
		return (error);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Increase the reference count of a vnode.
 */
void
vref(struct vnode *vp)
{
	mtx_lock(&vp->v_interlock);
	vp->v_usecount++;
	mtx_unlock(&vp->v_interlock);
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	mtx_lock(&vp->v_interlock);

	KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		mtx_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we
		 * must call VOP_INACTIVE with the node locked.  So, in the
		 * case of vrele, we explicitly lock the vnode before calling
		 * VOP_INACTIVE.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
			VOP_INACTIVE(vp, p);
		}

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		mtx_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

/*
 * Release an already locked vnode.  This gives the same effect as
 * unlock+vrele(), but takes less time and avoids releasing and
 * re-acquiring the lock (as vrele() acquires the lock internally.)
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vput: null vp"));
	mtx_lock(&vp->v_interlock);
	KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		VOP_UNLOCK(vp, LK_INTERLOCK, p);
		return;

	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we
		 * must call VOP_INACTIVE with the node locked.  So, in the
		 * case of vrele, we explicitly lock the vnode before calling
		 * VOP_INACTIVE.
		 */
		mtx_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, p);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}

/*
 * Somebody doesn't want the vnode recycled.
 */
void
vhold(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	vp->v_holdcnt++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	splx(s);
}

/*
 * Note that there is one less who cares about this vnode.  vdrop() is the
 * opposite of vhold().
 */
void
vdrop(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	if (vp->v_holdcnt <= 0)
		panic("vdrop: holdcnt");
	vp->v_holdcnt--;
	if (VSHOULDFREE(vp))
		vfree(vp);
	splx(s);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;	/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;

		mtx_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			mtx_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			mtx_unlock(&vp->v_interlock);
			continue;
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			mtx_unlock(&mntvnode_mtx);
			vgonel(vp, p);
			mtx_lock(&mntvnode_mtx);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode.  For block
		 * or character devices, revert to an anonymous device.  For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			mtx_unlock(&mntvnode_mtx);
			if (vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			mtx_lock(&mntvnode_mtx);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		mtx_unlock(&vp->v_interlock);
		busy++;
	}
	mtx_unlock(&mntvnode_mtx);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
static void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	vp->v_vxproc = curproc;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out.  The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * Clean out any buffers associated with the vnode.
	 * If the flush fails, just toss the buffers.
	 */
	if (flags & DOCLOSE) {
		if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL)
			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
		if (vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0) != 0)
			vinvalbuf(vp, 0, NOCRED, p, 0, 0);
	}

	VOP_DESTROYVOBJECT(vp);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");

	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		mtx_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			vfree(vp);
		}
		mtx_unlock(&vp->v_interlock);
	}

	cache_purge(vp);
	vp->v_vnlock = NULL;
	lockdestroy(&vp->v_lock);

	if (VSHOULDFREE(vp))
		vfree(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	vp->v_vxproc = NULL;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp, *vq;
	dev_t dev;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));

	vp = ap->a_vp;
	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
		    "vop_revokeall", 0);
		return (0);
	}
	dev = vp->v_rdev;
	for (;;) {
		mtx_lock(&spechash_mtx);
		vq = SLIST_FIRST(&dev->si_hlist);
		mtx_unlock(&spechash_mtx);
		if (!vq)
			break;
		vgone(vq);
	}
	return (0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct mtx *inter_lkp;
	struct proc *p;
{

	mtx_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
			mtx_unlock(inter_lkp);
		}
		vgonel(vp, p);
		return (1);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	mtx_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	int s;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
		    "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	mtx_lock(&vp->v_interlock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
		mtx_lock(&spechash_mtx);
		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
		freedev(vp->v_rdev);
		mtx_unlock(&spechash_mtx);
		vp->v_rdev = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.  The test of the
	 * VDOOMED flag and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone.  If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
		mtx_lock(&vnode_free_list_mtx);
		if (vp->v_flag & VFREE)
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		mtx_unlock(&vnode_free_list_mtx);
		splx(s);
	}

	vp->v_type = VBAD;
	mtx_unlock(&vp->v_interlock);
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;

	mtx_lock(&spechash_mtx);
	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			mtx_unlock(&spechash_mtx);
			return (1);
		}
	}
	mtx_unlock(&spechash_mtx);
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int count;

	count = 0;
	mtx_lock(&spechash_mtx);
	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
		count += vq->v_usecount;
	mtx_unlock(&spechash_mtx);
	return (count);
}

/*
 * Same as above, but using the dev_t as argument
 */
int
count_dev(dev)
	dev_t dev;
{
	struct vnode *vp;

	vp = SLIST_FIRST(&dev->si_hlist);
	if (vp == NULL)
		return (0);
	return (vcount(vp));
}
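
/*
 * Illustrative note (editorial): if a device is open both through a
 * bdevvp() vnode and through a filesystem vnode, each alias carries its
 * own v_usecount; vcount() above walks the device's si_hlist of aliases
 * and sums them, which is why it, rather than vp->v_usecount, answers
 * how many users the device really has.
 */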

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	struct vnode *vp;
{
	char buf[96];

	if (label != NULL)
		printf("%s: %p: ", label, (void *)vp);
	else
		printf("%p: ", (void *)vp);
	printf("type %s, usecount %d, writecount %d, refcount %d,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VDOOMED)
		strcat(buf, "|VDOOMED");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
#include <ddb/ddb.h>
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	struct proc *p = curproc;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp, NULL))
				vprint((char *)0, vp);
		}
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	mtx_unlock(&mountlist_mtx);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf __P((SYSCTL_HANDLER_ARGS));

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

	/* XXX the below code does not compile; vfs_sysctl does not exist. */

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf __P((SYSCTL_HANDLER_ARGS));

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

	/* XXX the below code does not compile; vfs_sysctl does not exist. */
#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct vfsconf *vfsp;
	struct ovfsconf ovfs;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
		strcpy(ovfs.vfc_name, vfsp->vfc_name);
		ovfs.vfc_index = vfsp->vfc_typenum;
		ovfs.vfc_refcount = vfsp->vfc_refcount;
		ovfs.vfc_flags = vfsp->vfc_flags;
		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
		if (error)
			return error;
	}
	return 0;
}

#endif /* 1 || COMPAT_PRELITE2 */
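
/*
 * Example (an illustrative sketch): the node above is normally consumed
 * from userland through sysctl(3).  A hypothetical program could fetch
 * the highest filesystem type number like this:
 *
 *	int mib[3], maxtype;
 *	size_t len = sizeof(maxtype);
 *
 *	mib[0] = CTL_VFS;
 *	mib[1] = VFS_GENERIC;
 *	mib[2] = VFS_MAXTYPENUM;
 *	if (sysctl(mib, 3, &maxtype, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */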

#if COMPILING_LINT
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode(SYSCTL_HANDLER_ARGS)
{
	struct proc *p = curproc;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
		    (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
again:
		mtx_lock(&mntvnode_mtx);
		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				mtx_unlock(&mntvnode_mtx);
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			mtx_unlock(&mntvnode_mtx);
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
				return (error);
			mtx_lock(&mntvnode_mtx);
		}
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	mtx_unlock(&mountlist_mtx);

	return (0);
}

/*
 * XXX
 * Exporting the vnode list on large systems causes them to crash.
 * Exporting the vnode list on medium systems causes sysctl to coredump.
 */
SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");
#endif

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{

	if (vp->v_rdev->si_mountpoint != NULL)
		return (EBUSY);
	return (0);
}
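
/*
 * Example (an illustrative sketch): a filesystem's mount routine will
 * typically reject a device that already carries a mounted filesystem.
 * The "devvp" device vnode below is hypothetical:
 *
 *	if ((error = vfs_mountedon(devvp)) != 0)
 *		return (error);
 */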

/*
 * Unmount all filesystems.  The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
void
vfs_unmountall()
{
	struct mount *mp;
	struct proc *p;
	int error;

	if (curproc != NULL)
		p = curproc;
	else
		p = initproc;	/* XXX XXX should this be proc0? */
	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	while (!TAILQ_EMPTY(&mountlist)) {
		mp = TAILQ_LAST(&mountlist, mntlist);
		error = dounmount(mp, MNT_FORCE, p);
		if (error) {
			TAILQ_REMOVE(&mountlist, mp, mnt_list);
			printf("unmount of %s failed (",
			    mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		} else {
			/* The unmount has removed mp from the mountlist */
		}
	}
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * It seems silly to initialize every AF when most are not
		 * used; do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* Helper for vfs_free_addrlist. */
/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

/*
 * High level function to manipulate export options on a mount point
 * and the passed in netexport.
 * Struct export_args *argp is the variable used to twiddle options;
 * the structure is described in sys/mount.h.
 */
int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
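
/*
 * Example (an illustrative sketch): a filesystem that supports NFS
 * exporting forwards the export_args it received from mount(2) down to
 * vfs_export().  The "ump" per-mount data and "args" mount arguments
 * below are hypothetical:
 *
 *	if (args.fspec == 0)
 *		return (vfs_export(mp, &ump->um_export, &args.export));
 */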

/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported.  May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

/*
 * Used by the filesystems to determine if a given network address
 * (passed in 'nam') is present in their exports list; returns a pointer
 * to struct netcred so that the filesystem can examine it for
 * access rights (read/write/etc).
 */
struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct sockaddr *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
					rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}
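
/*
 * Example (an illustrative sketch): an NFS-exporting filesystem checks
 * a client address in its export-check path (e.g. a VFS_CHECKEXP or
 * fhtovp implementation) roughly as follows; "ump" is hypothetical
 * per-mount data:
 *
 *	struct netcred *np;
 *
 *	np = vfs_export_lookup(mp, &ump->um_export, nam);
 *	if (np == NULL)
 *		return (EACCES);
 *	...then inspect np->netc_exflags and np->netc_anon...
 */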

/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp, *nvp;
	struct vm_object *obj;
	int anyio, tries;

	tries = 5;
loop:
	anyio = 0;
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {

		nvp = LIST_NEXT(vp, v_mntvnodes);

		if (vp->v_mount != mp) {
			goto loop;
		}

		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
			continue;

		if (flags != MNT_WAIT) {
			if (VOP_GETVOBJECT(vp, &obj) != 0 ||
			    (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
				continue;
			if (VOP_ISLOCKED(vp, NULL))
				continue;
		}

		mtx_lock(&vp->v_interlock);
		if (VOP_GETVOBJECT(vp, &obj) == 0 &&
		    (obj->flags & OBJ_MIGHTBEDIRTY)) {
			if (!vget(vp,
			    LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
				if (VOP_GETVOBJECT(vp, &obj) == 0) {
					vm_object_page_clean(obj, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
					anyio = 1;
				}
				vput(vp);
			}
		} else {
			mtx_unlock(&vp->v_interlock);
		}
	}
	if (anyio && (--tries > 0))
		goto loop;
}

/*
 * Create the VM object needed for VMIO and mmap support.  This
 * is done for all VREG files in the system.  Some filesystems can
 * take advantage of the additional metadata buffering provided by
 * the VMIO code by making the device node VMIO mode as well.
 *
 * vp must be locked when vfs_object_create is called.
 */
int
vfs_object_create(vp, p, cred)
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
{
	return (VOP_CREATEVOBJECT(vp, cred, p));
}

/*
 * Mark a vnode as free, putting it up for recycling.
 */
void
vfree(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);
	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
	splx(s);
}

/*
 * Opposite of vfree() - mark a vnode as in use.
 */
void
vbusy(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);
	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_flag &= ~(VFREE|VAGE);
	splx(s);
}

/*
 * Record a process's interest in events which might happen to
 * a vnode.  Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions.  (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(vp, p, events)
	struct vnode *vp;
	struct proc *p;
	short events;
{
	mtx_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

		mtx_unlock(&vp->v_pollinfo.vpi_lock);
		return events;
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
	mtx_unlock(&vp->v_pollinfo.vpi_lock);
	return 0;
}

/*
 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(vp, events)
	struct vnode *vp;
	short events;
{
	mtx_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened.  This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested.  However, it does
		 * mean that only one event can be noticed at
		 * a time.  (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	mtx_unlock(&vp->v_pollinfo.vpi_lock);
}
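
/*
 * Example (an illustrative sketch): a filesystem's VOP_POLL
 * implementation and its event producers pair up through these
 * routines.  The poll side records interest:
 *
 *	return (vn_pollrecord(vp, ap->a_p, ap->a_events));
 *
 * while code that changes the vnode (a write, an attribute change)
 * reports it, typically via the wrapper macro:
 *
 *	VN_POLLEVENT(vp, POLLIN | POLLRDNORM);
 */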

/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(vp)
	struct vnode *vp;
{
	mtx_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	mtx_unlock(&vp->v_pollinfo.vpi_lock);
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close ((int (*) __P((struct vop_close_args *)))nullop)
static int	sync_fsync __P((struct vop_fsync_args *));
static int	sync_inactive __P((struct vop_inactive_args *));
static int	sync_reclaim __P((struct vop_reclaim_args *));
#define sync_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define sync_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
static int	sync_print __P((struct vop_print_args *));
#define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)

static vop_t **sync_vnodeop_p;
static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
	{ &vop_lock_desc,	(vop_t *) sync_lock },		/* lock */
	{ &vop_unlock_desc,	(vop_t *) sync_unlock },	/* unlock */
	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
	{ &vop_islocked_desc,	(vop_t *) sync_islocked },	/* islocked */
	{ NULL, NULL }
};
static struct vnodeopv_desc sync_vnodeop_opv_desc =
	{ &sync_vnodeop_p, sync_vnodeop_entries };

VNODEOP_SET(sync_vnodeop_opv_desc);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(mp)
	struct mount *mp;
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
	mp->mnt_syncer = vp;
	return (0);
}
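
/*
 * Example (an illustrative sketch): the generic mount path creates the
 * syncer vnode for a freshly mounted read-write filesystem, roughly:
 *
 *	if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
 *		error = vfs_allocate_syncvnode(mp);
 */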

/*
 * Do a lazy sync of the filesystem.
 */
static int
sync_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	struct proc *p = ap->a_p;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	mtx_lock(&mountlist_mtx);
	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
		mtx_unlock(&mountlist_mtx);
		return (0);
	}
	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
		vfs_unbusy(mp, p);
		return (0);
	}
	asyncflag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	vfs_msync(mp, MNT_NOWAIT);
	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p);
	if (asyncflag)
		mp->mnt_flag |= MNT_ASYNC;
	vn_finished_write(mp);
	vfs_unbusy(mp, p);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 */
static int
sync_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	vgone(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 *
 * Modifications to the worklist must be protected at splbio().
 */
static int
sync_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int s;

	s = splbio();
	vp->v_mount->mnt_syncer = NULL;
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vp->v_flag &= ~VONWORKLST;
	}
	splx(s);

	return (0);
}

/*
 * Print out a syncer vnode.
 */
static int
sync_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	if (vp->v_vnlock != NULL)
		lockmgr_printinfo(vp->v_vnlock);
	printf("\n");
	return (0);
}

/*
 * Extract the dev_t from a VCHR vnode.
 */
dev_t
vn_todev(vp)
	struct vnode *vp;
{
	if (vp->v_type != VCHR)
		return (NODEV);
	return (vp->v_rdev);
}

/*
 * Check if vnode represents a disk device.
 */
int
vn_isdisk(vp, errp)
	struct vnode *vp;
	int *errp;
{
	struct cdevsw *cdevsw;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (vp->v_rdev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	cdevsw = devsw(vp->v_rdev);
	if (cdevsw == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (!(cdevsw->d_flags & D_DISK)) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}
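
/*
 * Example (an illustrative sketch): disk-based filesystems use
 * vn_isdisk() to validate the device vnode handed to their mount
 * routine; "devvp" is hypothetical:
 *
 *	int error;
 *
 *	if (!vn_isdisk(devvp, &error))
 *		return (error);
 */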

/*
 * Free data allocated by namei(); see namei(9) for details.
 */
void
NDFREE(ndp, flags)
	struct nameidata *ndp;
	const uint flags;
{
	if (!(flags & NDF_NO_FREE_PNBUF) &&
	    (ndp->ni_cnd.cn_flags & HASBUF)) {
		zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
		ndp->ni_cnd.cn_flags &= ~HASBUF;
	}
	if (!(flags & NDF_NO_DVP_UNLOCK) &&
	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
	    ndp->ni_dvp != ndp->ni_vp)
		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_proc);
	if (!(flags & NDF_NO_DVP_RELE) &&
	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
		vrele(ndp->ni_dvp);
		ndp->ni_dvp = NULL;
	}
	if (!(flags & NDF_NO_VP_UNLOCK) &&
	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_proc);
	if (!(flags & NDF_NO_VP_RELE) &&
	    ndp->ni_vp) {
		vrele(ndp->ni_vp);
		ndp->ni_vp = NULL;
	}
	if (!(flags & NDF_NO_STARTDIR_RELE) &&
	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
		vrele(ndp->ni_startdir);
		ndp->ni_startdir = NULL;
	}
}
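
/*
 * Example (an illustrative sketch): a typical caller pairs namei()
 * with NDFREE() to release whatever the lookup left behind; "path"
 * and the surrounding code are hypothetical:
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	...use nd.ni_vp, then vput() it...
 */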

/*
 * Common file system object access control check routine.  Accepts a
 * vnode's type, "mode", uid and gid, requested access mode, credentials,
 * and optional call-by-reference privused argument allowing vaccess()
 * to indicate to the caller whether privilege was used to satisfy the
 * request.  Returns 0 on success, or an errno on failure.
 */
int
vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
	enum vtype type;
	mode_t file_mode;
	uid_t file_uid;
	gid_t file_gid;
	mode_t acc_mode;
	struct ucred *cred;
	int *privused;
{
	mode_t dac_granted;
#ifdef CAPABILITIES
	mode_t cap_granted;
#endif

	/*
	 * Look for a normal, non-privileged way to access the file/directory
	 * as requested.  If it exists, go with that.
	 */

	if (privused != NULL)
		*privused = 0;

	dac_granted = 0;

	/* Check the owner. */
	if (cred->cr_uid == file_uid) {
		dac_granted |= VADMIN;
		if (file_mode & S_IXUSR)
			dac_granted |= VEXEC;
		if (file_mode & S_IRUSR)
			dac_granted |= VREAD;
		if (file_mode & S_IWUSR)
			dac_granted |= VWRITE;

		if ((acc_mode & dac_granted) == acc_mode)
			return (0);

		goto privcheck;
	}

	/* Otherwise, check the groups (first match) */
	if (groupmember(file_gid, cred)) {
		if (file_mode & S_IXGRP)
			dac_granted |= VEXEC;
		if (file_mode & S_IRGRP)
			dac_granted |= VREAD;
		if (file_mode & S_IWGRP)
			dac_granted |= VWRITE;

		if ((acc_mode & dac_granted) == acc_mode)
			return (0);

		goto privcheck;
	}

	/* Otherwise, check everyone else. */
	if (file_mode & S_IXOTH)
		dac_granted |= VEXEC;
	if (file_mode & S_IROTH)
		dac_granted |= VREAD;
	if (file_mode & S_IWOTH)
		dac_granted |= VWRITE;
	if ((acc_mode & dac_granted) == acc_mode)
		return (0);

privcheck:
	if (!suser_xxx(cred, NULL, PRISON_ROOT)) {
		/* XXX audit: privilege used */
		if (privused != NULL)
			*privused = 1;
		return (0);
	}

#ifdef CAPABILITIES
	/*
	 * Build a capability mask to determine if the set of capabilities
	 * satisfies the requirements when combined with the granted mask
	 * from above.
	 * For each capability, if the capability is required, bitwise-or
	 * the requested access mode onto the cap_granted mask.
	 */
	cap_granted = 0;
	if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
		cap_granted |= VEXEC;

	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
		cap_granted |= VREAD;

	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
		cap_granted |= VWRITE;

	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_FOWNER, PRISON_ROOT))
		cap_granted |= VADMIN;

	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
		/* XXX audit: privilege used */
		if (privused != NULL)
			*privused = 1;
		return (0);
	}
#endif

	return ((acc_mode & VADMIN) ? EPERM : EACCES);
}
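
/*
 * Example (an illustrative sketch): a filesystem's VOP_ACCESS
 * implementation typically ends by delegating the mode bit check to
 * vaccess().  ufs_access() does, in essence:
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_mode, ap->a_cred, NULL));
 */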