vfs_export.c revision 44150
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $Id: vfs_subr.c,v 1.186 1999/02/04 18:25:39 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/domain.h>
#include <sys/dirent.h>
#include <sys/vmmeter.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	insmntque __P((struct vnode *vp, struct mount *mp));
static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
static void	vfree __P((struct vnode *));
static void	vgonel __P((struct vnode *vp, struct proc *p));
static unsigned long	numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
struct tobefreelist vnode_tobefree_list;	/* vnode free list */

static u_long wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
static u_long freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

int vfs_ioopt = 0;
#ifdef ENABLE_VFS_IOOPT
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

struct mntlist mountlist;	/* mounted filesystem list */
struct simplelock mountlist_slock;
struct simplelock mntvnode_slock;
int	nfs_mount_type = -1;
#ifndef NULL_SIMPLELOCKS
static struct simplelock mntid_slock;
static struct simplelock vnode_free_list_slock;
static struct simplelock spechash_slock;
#endif
struct nfs_public nfs_pub;	/* publicly exported FS */
static vm_zone_t vnode_zone;

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;
int rushjob;			/* number of slots to run ASAP */

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, &desiredvnodes, 0, "");

static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_tobefree_list);
	simple_lock_init(&vnode_free_list_slock);
	CIRCLEQ_INIT(&mountlist);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, p)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		if (interlkp) {
			simple_unlock(interlkp);
		}
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		if (interlkp) {
			simple_lock(interlkp);
		}
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, p)
	struct mount *mp;
	struct proc *p;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}
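
#if 0
/*
 * Illustrative sketch, not part of the original file: the canonical
 * pattern for walking the mount list with vfs_busy()/vfs_unbusy(), as
 * used by sched_sync() and the sysctl handlers later in this file.
 * vfs_busy() synchronizes with unmount and drops the passed interlock
 * on success, so the list lock must be re-taken before advancing.
 */
static void
example_mountlist_walk(struct proc *p)
{
	struct mount *mp, *nmp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			/* interlock still held on failure */
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		/* ... operate on the busied mount point ... */
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
}
#endif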

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
#ifdef notdef	/* XXX JH */
int
lite2_vfs_mountroot()
{
	struct vfsconf *vfsp;
	extern int (*lite2_mountroot) __P((void));
	int error;

	if (lite2_mountroot != NULL)
		return ((*lite2_mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}
#endif

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
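
#if 0
/*
 * Illustrative sketch, not part of the original file: callers prime a
 * vattr with VNOVAL so that only the fields they explicitly set are
 * acted on, e.g. truncating a file via VOP_SETATTR.
 */
static int
example_truncate(struct vnode *vp, off_t len, struct ucred *cred,
    struct proc *p)
{
	struct vattr va;

	vattr_null(&va);	/* every field "unspecified"... */
	va.va_size = len;	/* ...except the size */
	return (VOP_SETATTR(vp, &va, cred, p));
}
#endif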

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s;
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *tvp, *nvp;
	vm_object_t object;
	TAILQ_HEAD(freelst, vnode) vnode_tmp_list;

	/*
	 * We take the least recently used vnode from the freelist
	 * if we can get it and it has no cached pages, and no
	 * namecache entries are relative to it.
	 * Otherwise we allocate a new vnode
	 */

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	TAILQ_INIT(&vnode_tmp_list);

	for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_freelist);
		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
		if (vp->v_flag & VAGE) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		}
		vp->v_flag &= ~(VTBFREE|VAGE);
		vp->v_flag |= VFREE;
		if (vp->v_usecount)
			panic("tobe free vnode isn't");
		freevnodes++;
	}

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
		/*
		 * XXX: this is only here to be backwards compatible
		 */
		vp = NULL;
	} else {
		for (vp = TAILQ_FIRST(&vnode_free_list); vp; vp = nvp) {
			nvp = TAILQ_NEXT(vp, v_freelist);
			if (!simple_lock_try(&vp->v_interlock))
				continue;
			if (vp->v_usecount)
				panic("free vnode isn't");

			object = vp->v_object;
			if (object && (object->resident_page_count || object->ref_count)) {
				printf("object inconsistent state: RPC: %d, RC: %d\n",
				    object->resident_page_count, object->ref_count);
				/* Don't recycle if it's caching some pages */
				TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
				TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist);
				continue;
			} else if (LIST_FIRST(&vp->v_cache_src)) {
				/* Don't recycle if active in the namecache */
				simple_unlock(&vp->v_interlock);
				continue;
			} else {
				break;
			}
		}
	}

	for (tvp = TAILQ_FIRST(&vnode_tmp_list); tvp; tvp = nvp) {
		nvp = TAILQ_NEXT(tvp, v_freelist);
		TAILQ_REMOVE(&vnode_tmp_list, tvp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, tvp, v_freelist);
		simple_unlock(&tvp->v_interlock);
	}

	if (vp) {
		vp->v_flag |= VDOOMED;
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
		simple_unlock(&vnode_free_list_slock);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD) {
			vgonel(vp, p);
		} else {
			simple_unlock(&vp->v_interlock);
		}

#ifdef INVARIANTS
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
		vp->v_maxio = 0;
	} else {
		simple_unlock(&vnode_free_list_slock);
		vp = (struct vnode *) zalloc(vnode_zone);
		bzero((char *) vp, sizeof *vp);
		simple_lock_init(&vp->v_interlock);
		vp->v_dd = vp;
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	splx(s);

	vfs_object_create(vp, p, p->p_ucred);
	return (0);
}
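
#if 0
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * vnode-allocation path obtains a fresh vnode with getnewvnode(),
 * which also places it on mp's vnode list, and then hangs its private
 * data off v_data.  The "myfs" names here are hypothetical.
 */
static int
example_myfs_newvnode(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_NON, mp, myfs_vnodeop_p, &vp);
	if (error)
		return (error);
	vp->v_type = VREG;
	vp->v_data = myfs_alloc_node();	/* hypothetical helper */
	*vpp = vp;
	return (0);
}
#endif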

/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		simple_unlock(&mntvnode_slock);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	for (;;) {
		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
		if (!blist)
			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 4), "vinvalbuf",
				    slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.  Note that vfs_bio_awrite expects
			 * buffers to reside on a queue, while VOP_BWRITE and
			 * brelse do not.
			 */
			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
			    (flags & V_SAVE)) {

				if (bp->b_vp == vp) {
					if (bp->b_flags & B_CLUSTEROK) {
						vfs_bio_awrite(bp);
					} else {
						bremfree(bp);
						bp->b_flags |= (B_BUSY | B_ASYNC);
						VOP_BWRITE(bp);
					}
				} else {
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					(void) VOP_BWRITE(bp);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF | B_BUSY);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	simple_lock(&vp->v_interlock);
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	simple_unlock(&vp->v_interlock);

	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
		panic("vinvalbuf: flush failed");
	return (0);
}
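
#if 0
/*
 * Illustrative sketch, not part of the original file: V_SAVE writes
 * dirty buffers to disk before invalidating, while a flags value of 0
 * simply discards everything; a slpflag of PCATCH would let the sleeps
 * above be interrupted by signals.
 */
static int
example_discard_buffers(struct vnode *vp, struct ucred *cred,
    struct proc *p, int save)
{
	return (vinvalbuf(vp, save ? V_SAVE : 0, cred, p, 0, 0));
}
#endif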

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, p, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct proc *p;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
restart:
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (bp->b_flags & B_BUSY) {
					bp->b_flags |= B_WANTED;
					tsleep(bp, PRIBIO + 4, "vtrb1", 0);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_BUSY | B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp && (((nbp->b_xflags & B_VNCLEAN) == 0)||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (bp->b_flags & B_BUSY) {
					bp->b_flags |= B_WANTED;
					tsleep(bp, PRIBIO + 4, "vtrb2", 0);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_BUSY | B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp && (((nbp->b_xflags & B_VNDIRTY) == 0)||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
				if (bp->b_flags & B_BUSY) {
					bp->b_flags |= B_WANTED;
					tsleep(bp, PRIBIO, "vtrb3", 0);
				} else {
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					if (bp->b_vp == vp) {
						bp->b_flags |= B_ASYNC;
					} else {
						bp->b_flags &= ~B_ASYNC;
					}
					VOP_BWRITE(bp);
				}
				goto restartsync;
			}

		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
	}

	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bp->b_xflags |= B_VNCLEAN;
	bp->b_xflags &= ~B_VNDIRTY;
	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	struct buflists *listheadp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	if (bp->b_xflags & (B_VNDIRTY|B_VNCLEAN)) {
		if (bp->b_xflags & B_VNDIRTY)
			listheadp = &vp->v_dirtyblkhd;
		else
			listheadp = &vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(B_VNDIRTY|B_VNCLEAN);
	}
	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed. Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process). The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
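
/*
 * Illustrative example, not part of the original comment: hashinit()
 * rounds syncer_maxdelay up to a power of two and returns that size
 * minus one in syncer_mask, so with the default SYNCER_MAXDELAY of 32,
 * syncer_mask is 31.  A request issued while syncer_delayno == 20 with
 * a delay of 15 therefore lands in slot (20 + 15) & 31 == 3; the "&"
 * is a cheap modulo over the circular array of queues.
 */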

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	splx(s);
}

static void sched_sync __P((void));
static struct proc *updateproc;
static const struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT_KT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int s;
	struct proc *p = updateproc;

	for (;;) {
		starttime = time_second;

		/*
		 * Push files whose dirty time has expired. Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
			VOP_UNLOCK(vp, 0, p);
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    vp->v_type != VBLK)
					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			splx(s);
		}

		/*
		 * Do soft update processing.
		 */
		if (bioops.io_sync)
			(*bioops.io_sync)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}
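
#if 0
/*
 * Illustrative sketch, not part of the original file: a subsystem that
 * wants the syncer to work ahead (as the soft updates code does when
 * its dependency memory runs low) simply bumps rushjob; sched_sync()
 * above then consumes one unit per pass instead of sleeping.
 */
static void
example_speedup_syncer(void)
{
	if (rushjob < syncdelay / 2)
		rushjob += 1;
}
#endif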

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

#if !defined(MAX_PERF)
	/* XXX REMOVE ME */
	if (bp->b_vnbufs.tqe_next != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
#endif
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}

void
pbreassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
#if !defined(MAX_PERF)
	if ((bp->b_flags & B_PAGING) == 0) {
		panic(
		    "pbreassignbuf() on non phys bp %p",
		    bp
		);
	}
#endif
	bp->b_vp = newvp;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	struct buflists *listheadp;
	struct vnode *oldvp;
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}

#if !defined(MAX_PERF)
	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");
#endif

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_xflags & (B_VNDIRTY|B_VNCLEAN)) {
		oldvp = bp->b_vp;
		if (bp->b_xflags & B_VNDIRTY)
			listheadp = &oldvp->v_dirtyblkhd;
		else
			listheadp = &oldvp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(B_VNDIRTY|B_VNCLEAN);
		vdrop(oldvp);
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = syncdelay / 3;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = syncdelay / 2;
					break;
				}
				/* fall through */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		bp->b_xflags |= B_VNDIRTY;
		tbp = TAILQ_FIRST(listheadp);
		if (tbp == NULL ||
		    (bp->b_lblkno >= 0 && tbp->b_lblkno > bp->b_lblkno)) {
			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
		} else {
			if (bp->b_lblkno >= 0) {
				struct buf *ttbp;
				while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
				    (ttbp->b_lblkno < bp->b_lblkno)) {
					tbp = ttbp;
				}
				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
			} else {
				TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
			}
		}
	} else {
		bp->b_xflags |= B_VNCLEAN;
		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
		if ((newvp->v_flag & VONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	}
	bp->b_vp = newvp;
	vhold(bp->b_vp);
	splx(s);
}

/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	/* XXX 255 is for mfs. */
	if (dev == NODEV || (major(dev) != 255 && (major(dev) >= nblkdev ||
	    bdevsw[major(dev)] == NULL))) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *)0)) != NULL) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 * Only alias active device nodes.
		 * Not sure why we don't re-use this like we do below.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			/*
			 * It disappeared, and we may have slept.
			 * Restart from the beginning
			 */
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	/*
	 * It would be a lot clearer what is going on here if
	 * this had been expressed as:
	 * if ( vp && (vp->v_tag == VT_NULL))
	 * and the clauses had been swapped.
	 */
	if (vp == NULL || vp->v_tag != VT_NON) {
		/*
		 * Put the new vnode into the hash chain.
		 * and if there was an alias, connect them.
		 */
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	/*
	 * if ( vp && (vp->v_tag == VT_NULL))
	 * We have a vnode alias, but it is trashed.
	 * Make it look like it's newly allocated (by getnewvnode()).
	 * The caller should use this instead.
	 */
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
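
#if 0
/*
 * Illustrative sketch, not part of the original file: after a
 * filesystem initializes a device inode's vnode it calls checkalias();
 * a non-NULL return means an aliased vnode was reclaimed and should be
 * used in place of the vnode just set up (bdevvp() above follows the
 * same pattern).  Exactly how the old vnode is disposed of is up to
 * the caller's reference-counting rules.
 */
static void
example_device_alias(struct vnode **vpp, dev_t rdev, struct mount *mp)
{
	struct vnode *nvp;

	if ((nvp = checkalias(*vpp, rdev, mp)) != NULL) {
		vput(*vpp);	/* release the freshly set-up vnode... */
		*vpp = nvp;	/* ...and adopt the alias instead */
	}
}
#endif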

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0) {
		simple_lock(&vp->v_interlock);
	}
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
vref(struct vnode *vp)
{
	simple_lock(&vp->v_interlock);
	vp->v_usecount++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		simple_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we
		 * must call VOP_INACTIVE with the node locked.  So, in the
		 * case of vrele, we explicitly lock the vnode before calling
		 * VOP_INACTIVE.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
			VOP_INACTIVE(vp, p);
		}

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		simple_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vput: null vp"));

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		VOP_UNLOCK(vp, LK_INTERLOCK, p);
		return;

	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we
		 * must call VOP_INACTIVE with the node locked.  So, in the
		 * case of vrele, we explicitly lock the vnode before calling
		 * VOP_INACTIVE.
		 */
		simple_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, p);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}
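
/*
 * Illustrative note, not part of the original file: the release paths
 * above differ only in lock state.  vrele() is called with the vnode
 * unlocked and must take the lock itself before VOP_INACTIVE; vput()
 * is the equivalent for a vnode the caller already holds locked, and
 * replaces the more expensive VOP_UNLOCK-then-vrele() pair.
 */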

/*
 * Somebody doesn't want the vnode recycled.
 */
void
vhold(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	vp->v_holdcnt++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	splx(s);
}

/*
 * One less who cares about this vnode.
 */
void
vdrop(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	if (vp->v_holdcnt <= 0)
		panic("vdrop: holdcnt");
	vp->v_holdcnt--;
	if (VSHOULDFREE(vp))
		vfree(vp);
	splx(s);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;

		simple_lock(&vp->v_interlock);
		/*
		 * Skip over a vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
static void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;
	vm_object_t obj;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	if ((obj = vp->v_object) != NULL) {
		if (obj->ref_count == 0) {
			/*
			 * This is a normal way of shutting down the
			 * object/vnode association.
			 */
			vm_object_terminate(obj);
		} else {
			/*
			 * Woe to the process that tries to page now :-).
			 */
			vm_pager_deallocate(obj);
		}
	}

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");

	if (active)
		vrele(vp);

	cache_purge(vp);
	if (vp->v_vnlock) {
#if 0 /* This is the only place we have LK_DRAINED in the entire kernel ??? */
#ifdef DIAGNOSTIC
		if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
			vprint("vclean: lock not drained", vp);
#endif
#endif
		FREE(vp->v_vnlock, M_VNODE);
		vp->v_vnlock = NULL;
	}

	if (VSHOULDFREE(vp))
		vfree(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP) {
				simple_unlock(&spechash_slock);
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
		if (vp->v_flag & VXWANT) {
			vp->v_flag &= ~VXWANT;
			wakeup(vp);
		}
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
			simple_unlock(inter_lkp);
		}
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
static void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	int s;
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	simple_lock(&vp->v_interlock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		if (vp->v_flag & VFREE) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		} else if (vp->v_flag & VTBFREE) {
			TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
			vp->v_flag &= ~VTBFREE;
			freevnodes++;
		} else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		splx(s);
	}

	vp->v_type = VBAD;
	simple_unlock(&vp->v_interlock);
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[96];

	if (label != NULL)
		printf("%s: %p: ", label, (void *)vp);
	else
		printf("%p: ", (void *)vp);
	printf("type %s, usecount %d, writecount %d, refcount %d,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (vp->v_flag & VDOOMED)
		strcat(buf, "|VDOOMED");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
#include <ddb/ddb.h>
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	struct proc *p = curproc;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl SYSCTL_HANDLER_ARGS
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf SYSCTL_HANDLER_ARGS
{
	int error;
	struct vfsconf *vfsp;
	struct ovfsconf ovfs;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
		strcpy(ovfs.vfc_name, vfsp->vfc_name);
		ovfs.vfc_index = vfsp->vfc_typenum;
		ovfs.vfc_refcount = vfsp->vfc_refcount;
		ovfs.vfc_flags = vfsp->vfc_flags;
		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
		if (error)
			return error;
	}
	return 0;
}

#endif /* 1 || COMPAT_PRELITE2 */

#if 0
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode SYSCTL_HANDLER_ARGS
{
	struct proc *p = curproc;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
		    (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
again:
		simple_lock(&mntvnode_slock);
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				goto again;
			}
			nvp = vp->v_mntvnodes.le_next;
			simple_unlock(&mntvnode_slock);
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
				return (error);
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);

	return (0);
}
#endif

/*
 * XXX
 * Exporting the vnode list on large systems causes them to crash.
 * Exporting the vnode list on medium systems causes sysctl to coredump.
 */
#if 0
SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");
#endif

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
void
vfs_unmountall()
{
	struct mount *mp, *nmp;
	struct proc *p;
	int error;

	if (curproc != NULL)
		p = curproc;
	else
		p = initproc;	/* XXX XXX should this be proc0? */
	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
		error = dounmount(mp, MNT_FORCE, p);
		if (error) {
			printf("unmount of %s failed (",
			    mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
2225 */ 2226static int 2227vfs_hang_addrlist(mp, nep, argp) 2228 struct mount *mp; 2229 struct netexport *nep; 2230 struct export_args *argp; 2231{ 2232 register struct netcred *np; 2233 register struct radix_node_head *rnh; 2234 register int i; 2235 struct radix_node *rn; 2236 struct sockaddr *saddr, *smask = 0; 2237 struct domain *dom; 2238 int error; 2239 2240 if (argp->ex_addrlen == 0) { 2241 if (mp->mnt_flag & MNT_DEFEXPORTED) 2242 return (EPERM); 2243 np = &nep->ne_defexported; 2244 np->netc_exflags = argp->ex_flags; 2245 np->netc_anon = argp->ex_anon; 2246 np->netc_anon.cr_ref = 1; 2247 mp->mnt_flag |= MNT_DEFEXPORTED; 2248 return (0); 2249 } 2250 i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 2251 np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK); 2252 bzero((caddr_t) np, i); 2253 saddr = (struct sockaddr *) (np + 1); 2254 if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen))) 2255 goto out; 2256 if (saddr->sa_len > argp->ex_addrlen) 2257 saddr->sa_len = argp->ex_addrlen; 2258 if (argp->ex_masklen) { 2259 smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen); 2260 error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen); 2261 if (error) 2262 goto out; 2263 if (smask->sa_len > argp->ex_masklen) 2264 smask->sa_len = argp->ex_masklen; 2265 } 2266 i = saddr->sa_family; if (i > AF_MAX) { /* don't run off ne_rtable[AF_MAX + 1]; cf. vfs_free_addrlist() */ error = EINVAL; goto out; } 2267 if ((rnh = nep->ne_rtable[i]) == 0) { 2268 /* 2269 * It seems silly to initialize every AF when most are not used; 2270 * do so on demand here. 2271 */ 2272 for (dom = domains; dom; dom = dom->dom_next) 2273 if (dom->dom_family == i && dom->dom_rtattach) { 2274 dom->dom_rtattach((void **) &nep->ne_rtable[i], 2275 dom->dom_rtoffset); 2276 break; 2277 } 2278 if ((rnh = nep->ne_rtable[i]) == 0) { 2279 error = ENOBUFS; 2280 goto out; 2281 } 2282 } 2283 rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh, 2284 np->netc_rnodes); 2285 if (rn == 0 || np != (struct netcred *) rn) { /* already exists */ 2286 error = EPERM; 2287 goto out; 2288 } 2289 np->netc_exflags = argp->ex_flags; 2290 np->netc_anon = argp->ex_anon; 2291 np->netc_anon.cr_ref = 1; 2292 return (0); 2293out: 2294 free(np, M_NETADDR); 2295 return (error); 2296} 2297 2298/* ARGSUSED */ 2299static int 2300vfs_free_netcred(rn, w) 2301 struct radix_node *rn; 2302 void *w; 2303{ 2304 register struct radix_node_head *rnh = (struct radix_node_head *) w; 2305 2306 (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh); 2307 free((caddr_t) rn, M_NETADDR); 2308 return (0); 2309} 2310 2311/* 2312 * Free the net address hash lists that are hanging off the mount points.
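 *
 * Each per-address-family radix tree is walked with vfs_free_netcred()
 * to delete and free the individual netcred entries before the tree
 * head itself is freed.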
2313 */ 2314static void 2315vfs_free_addrlist(nep) 2316 struct netexport *nep; 2317{ 2318 register int i; 2319 register struct radix_node_head *rnh; 2320 2321 for (i = 0; i <= AF_MAX; i++) 2322 if ((rnh = nep->ne_rtable[i])) { 2323 (*rnh->rnh_walktree) (rnh, vfs_free_netcred, 2324 (caddr_t) rnh); 2325 free((caddr_t) rnh, M_RTABLE); 2326 nep->ne_rtable[i] = 0; 2327 } 2328} 2329 2330int 2331vfs_export(mp, nep, argp) 2332 struct mount *mp; 2333 struct netexport *nep; 2334 struct export_args *argp; 2335{ 2336 int error; 2337 2338 if (argp->ex_flags & MNT_DELEXPORT) { 2339 if (mp->mnt_flag & MNT_EXPUBLIC) { 2340 vfs_setpublicfs(NULL, NULL, NULL); 2341 mp->mnt_flag &= ~MNT_EXPUBLIC; 2342 } 2343 vfs_free_addrlist(nep); 2344 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 2345 } 2346 if (argp->ex_flags & MNT_EXPORTED) { 2347 if (argp->ex_flags & MNT_EXPUBLIC) { 2348 if ((error = vfs_setpublicfs(mp, nep, argp)) != 0) 2349 return (error); 2350 mp->mnt_flag |= MNT_EXPUBLIC; 2351 } 2352 if ((error = vfs_hang_addrlist(mp, nep, argp))) 2353 return (error); 2354 mp->mnt_flag |= MNT_EXPORTED; 2355 } 2356 return (0); 2357} 2358 2359 2360/* 2361 * Set the publicly exported filesystem (WebNFS). Currently, only 2362 * one public filesystem is possible in the spec (RFC 2054 and 2055). 2363 */ 2364int 2365vfs_setpublicfs(mp, nep, argp) 2366 struct mount *mp; 2367 struct netexport *nep; 2368 struct export_args *argp; 2369{ 2370 int error; 2371 struct vnode *rvp; 2372 char *cp; 2373 2374 /* 2375 * mp == NULL -> invalidate the current info, the FS is 2376 * no longer exported. May be called from either vfs_export 2377 * or unmount, so check if it hasn't already been done. 2378 */ 2379 if (mp == NULL) { 2380 if (nfs_pub.np_valid) { 2381 nfs_pub.np_valid = 0; 2382 if (nfs_pub.np_index != NULL) { 2383 FREE(nfs_pub.np_index, M_TEMP); 2384 nfs_pub.np_index = NULL; 2385 } 2386 } 2387 return (0); 2388 } 2389 2390 /* 2391 * Only one allowed at a time. 2392 */ 2393 if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount) 2394 return (EBUSY); 2395 2396 /* 2397 * Get real filehandle for root of exported FS. 2398 */ 2399 bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle)); 2400 nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid; 2401 2402 if ((error = VFS_ROOT(mp, &rvp))) 2403 return (error); 2404 2405 error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid); 2406 vput(rvp); /* release rvp on the error path too, not just on success */ 2407 if (error) 2408 return (error); 2409 2410 /* 2411 * If an indexfile was specified, pull it in. 2412 */ 2413 if (argp->ex_indexfile != NULL) { 2414 MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP, 2415 M_WAITOK); 2416 error = copyinstr(argp->ex_indexfile, nfs_pub.np_index, 2417 MAXNAMLEN, (size_t *)0); 2418 if (!error) { 2419 /* 2420 * Check for illegal filenames. 2421 */ 2422 for (cp = nfs_pub.np_index; *cp; cp++) { 2423 if (*cp == '/') { 2424 error = EINVAL; 2425 break; 2426 } 2427 } 2428 } 2429 if (error) { 2430 FREE(nfs_pub.np_index, M_TEMP); 2431 return (error); 2432 } 2433 } 2434 2435 nfs_pub.np_mount = mp; 2436 nfs_pub.np_valid = 1; 2437 return (0); 2438} 2439 2440struct netcred * 2441vfs_export_lookup(mp, nep, nam) 2442 register struct mount *mp; 2443 struct netexport *nep; 2444 struct sockaddr *nam; 2445{ 2446 register struct netcred *np; 2447 register struct radix_node_head *rnh; 2448 struct sockaddr *saddr; 2449 2450 np = NULL; 2451 if (mp->mnt_flag & MNT_EXPORTED) { 2452 /* 2453 * Lookup in the export list first.
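		 * A match whose radix node carries RNF_ROOT is one of the
		 * tree's internal root nodes rather than a real export
		 * entry, so it is rejected below.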
2454 */ 2455 if (nam != NULL) { 2456 saddr = nam; 2457 rnh = nep->ne_rtable[saddr->sa_family]; 2458 if (rnh != NULL) { 2459 np = (struct netcred *) 2460 (*rnh->rnh_matchaddr)((caddr_t)saddr, 2461 rnh); 2462 if (np && np->netc_rnodes->rn_flags & RNF_ROOT) 2463 np = NULL; 2464 } 2465 } 2466 /* 2467 * If no address match, use the default if it exists. 2468 */ 2469 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) 2470 np = &nep->ne_defexported; 2471 } 2472 return (np); 2473} 2474 2475/* 2476 * Perform msync on all vnodes under a mount point. 2477 * The mount point must be locked. 2478 */ 2479void 2480vfs_msync(struct mount *mp, int flags) { 2481 struct vnode *vp, *nvp; 2482 struct vm_object *obj; 2483 int anyio, tries; 2484 2485 tries = 5; 2486loop: 2487 anyio = 0; 2488 for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { 2489 2490 nvp = vp->v_mntvnodes.le_next; 2491 2492 if (vp->v_mount != mp) { 2493 goto loop; 2494 } 2495 2496 if (vp->v_flag & VXLOCK) /* XXX: what if MNT_WAIT? */ 2497 continue; 2498 2499 if (flags != MNT_WAIT) { 2500 obj = vp->v_object; 2501 if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0) 2502 continue; 2503 if (VOP_ISLOCKED(vp)) 2504 continue; 2505 } 2506 2507 simple_lock(&vp->v_interlock); 2508 if (vp->v_object && 2509 (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) { 2510 if (!vget(vp, 2511 LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) { 2512 if (vp->v_object) { 2513 vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : 0); 2514 anyio = 1; 2515 } 2516 vput(vp); 2517 } 2518 } else { 2519 simple_unlock(&vp->v_interlock); 2520 } 2521 } 2522 if (anyio && (--tries > 0)) 2523 goto loop; 2524} 2525 2526/* 2527 * Create the VM object needed for VMIO and mmap support. This 2528 * is done for all VREG files in the system. Some filesystems might 2529 * take advantage of the additional metadata buffering capability of the 2530 * VMIO code by making the device node be VMIO mode also. 2531 * 2532 * vp must be locked when vfs_object_create is called. 2533 */ 2534int 2535vfs_object_create(vp, p, cred) 2536 struct vnode *vp; 2537 struct proc *p; 2538 struct ucred *cred; 2539{ 2540 struct vattr vat; 2541 vm_object_t object; 2542 int error = 0; 2543 2544 if ((vp->v_type != VREG) && (vp->v_type != VBLK)) 2545 return 0; 2546 2547retry: 2548 if ((object = vp->v_object) == NULL) { 2549 if (vp->v_type == VREG) { 2550 if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0) 2551 goto retn; 2552 object = vnode_pager_alloc(vp, vat.va_size, 0, 0); 2553 } else if (major(vp->v_rdev) < nblkdev && 2554 bdevsw[major(vp->v_rdev)] != NULL) { 2555 /* 2556 * This simply allocates the biggest object possible 2557 * for a VBLK vnode. This should be fixed, but doesn't 2558 * cause any problems (yet). 2559 */ 2560 object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0); 2561 } else { 2562 goto retn; 2563 } 2564 /* 2565 * Dereference the reference we just created. This assumes 2566 * that the object is associated with the vp.
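		 * vnode_pager_alloc() returns the object with a reference
		 * added and the vnode's use count bumped; the association
		 * itself keeps the object alive, so both extra counts are
		 * dropped again here.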
2567 */ 2568 object->ref_count--; 2569 vp->v_usecount--; 2570 } else { 2571 if (object->flags & OBJ_DEAD) { 2572 VOP_UNLOCK(vp, 0, p); 2573 tsleep(object, PVM, "vodead", 0); 2574 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 2575 goto retry; 2576 } 2577 } 2578 2579 KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object")); 2580 vp->v_flag |= VOBJBUF; 2581 2582retn: 2583 return error; 2584} 2585 /* Move a vnode onto the free list; VAGE'd vnodes go to the head so they are recycled first. */ 2586static void 2587vfree(vp) 2588 struct vnode *vp; 2589{ 2590 int s; 2591 2592 s = splbio(); 2593 simple_lock(&vnode_free_list_slock); 2594 if (vp->v_flag & VTBFREE) { 2595 TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist); 2596 vp->v_flag &= ~VTBFREE; 2597 } 2598 if (vp->v_flag & VAGE) { 2599 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2600 } else { 2601 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2602 } 2603 freevnodes++; 2604 simple_unlock(&vnode_free_list_slock); 2605 vp->v_flag &= ~VAGE; 2606 vp->v_flag |= VFREE; 2607 splx(s); 2608} 2609 /* Take a vnode back off the free (or to-be-freed) list as it becomes active again. */ 2610void 2611vbusy(vp) 2612 struct vnode *vp; 2613{ 2614 int s; 2615 2616 s = splbio(); 2617 simple_lock(&vnode_free_list_slock); 2618 if (vp->v_flag & VTBFREE) { 2619 TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist); 2620 vp->v_flag &= ~VTBFREE; 2621 } else { 2622 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2623 freevnodes--; 2624 } 2625 simple_unlock(&vnode_free_list_slock); 2626 vp->v_flag &= ~(VFREE|VAGE); 2627 splx(s); 2628} 2629 2630/* 2631 * Record a process's interest in events which might happen to 2632 * a vnode. Because poll uses the historic select-style interface 2633 * internally, this routine serves as both the ``check for any 2634 * pending events'' and the ``record my interest in future events'' 2635 * functions. (These are done together, while the lock is held, 2636 * to avoid race conditions.) 2637 */ 2638int 2639vn_pollrecord(vp, p, events) 2640 struct vnode *vp; 2641 struct proc *p; 2642 short events; 2643{ 2644 simple_lock(&vp->v_pollinfo.vpi_lock); 2645 if (vp->v_pollinfo.vpi_revents & events) { 2646 /* 2647 * This leaves events we are not interested 2648 * in available for the other process which 2649 * presumably had requested them 2650 * (otherwise they would never have been 2651 * recorded). 2652 */ 2653 events &= vp->v_pollinfo.vpi_revents; 2654 vp->v_pollinfo.vpi_revents &= ~events; 2655 2656 simple_unlock(&vp->v_pollinfo.vpi_lock); 2657 return events; 2658 } 2659 vp->v_pollinfo.vpi_events |= events; 2660 selrecord(p, &vp->v_pollinfo.vpi_selinfo); 2661 simple_unlock(&vp->v_pollinfo.vpi_lock); 2662 return 0; 2663} 2664 2665/* 2666 * Note the occurrence of an event. If the VN_POLLEVENT macro is used, 2667 * it is possible for us to miss an event due to race conditions, but 2668 * that condition is expected to be rare, so for the moment it is the 2669 * preferred interface. 2670 */ 2671void 2672vn_pollevent(vp, events) 2673 struct vnode *vp; 2674 short events; 2675{ 2676 simple_lock(&vp->v_pollinfo.vpi_lock); 2677 if (vp->v_pollinfo.vpi_events & events) { 2678 /* 2679 * We clear vpi_events so that we don't 2680 * call selwakeup() twice if two events are 2681 * posted before the polling process(es) is 2682 * awakened. This also ensures that we take at 2683 * most one selwakeup() if the polling process 2684 * is no longer interested. However, it does 2685 * mean that only one event can be noticed at 2686 * a time. (Perhaps we should only clear those 2687 * event bits which we note?) XXX 2688 */ 2689 vp->v_pollinfo.vpi_events = 0; /* &= ~events ???
*/ 2690 vp->v_pollinfo.vpi_revents |= events; 2691 selwakeup(&vp->v_pollinfo.vpi_selinfo); 2692 } 2693 simple_unlock(&vp->v_pollinfo.vpi_lock); 2694} 2695 2696/* 2697 * Wake up anyone polling on vp because it is being revoked. 2698 * This depends on dead_poll() returning POLLHUP for correct 2699 * behavior. 2700 */ 2701void 2702vn_pollgone(vp) 2703 struct vnode *vp; 2704{ 2705 simple_lock(&vp->v_pollinfo.vpi_lock); 2706 if (vp->v_pollinfo.vpi_events) { 2707 vp->v_pollinfo.vpi_events = 0; 2708 selwakeup(&vp->v_pollinfo.vpi_selinfo); 2709 } 2710 simple_unlock(&vp->v_pollinfo.vpi_lock); 2711} 2712 2713 2714 2715/* 2716 * Routine to create and manage a filesystem syncer vnode. 2717 */ 2718#define sync_close ((int (*) __P((struct vop_close_args *)))nullop) 2719static int sync_fsync __P((struct vop_fsync_args *)); 2720static int sync_inactive __P((struct vop_inactive_args *)); 2721static int sync_reclaim __P((struct vop_reclaim_args *)); 2722#define sync_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock) 2723#define sync_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock) 2724static int sync_print __P((struct vop_print_args *)); 2725#define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked) 2726 2727static vop_t **sync_vnodeop_p; 2728static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 2729 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 2730 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 2731 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 2732 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 2733 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 2734 { &vop_lock_desc, (vop_t *) sync_lock }, /* lock */ 2735 { &vop_unlock_desc, (vop_t *) sync_unlock }, /* unlock */ 2736 { &vop_print_desc, (vop_t *) sync_print }, /* print */ 2737 { &vop_islocked_desc, (vop_t *) sync_islocked }, /* islocked */ 2738 { NULL, NULL } 2739}; 2740static struct vnodeopv_desc sync_vnodeop_opv_desc = 2741 { &sync_vnodeop_p, sync_vnodeop_entries }; 2742 2743VNODEOP_SET(sync_vnodeop_opv_desc); 2744 2745/* 2746 * Create a new filesystem syncer vnode for the specified mount point. 2747 */ 2748int 2749vfs_allocate_syncvnode(mp) 2750 struct mount *mp; 2751{ 2752 struct vnode *vp; 2753 static long start, incr, next; 2754 int error; 2755 2756 /* Allocate a new vnode */ 2757 if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) { 2758 mp->mnt_syncer = NULL; 2759 return (error); 2760 } 2761 vp->v_type = VNON; 2762 /* 2763 * Place the vnode onto the syncer worklist. We attempt to 2764 * scatter them about on the list so that they will go off 2765 * at evenly distributed times even if all the filesystems 2766 * are mounted at once. 2767 */ 2768 next += incr; 2769 if (next == 0 || next > syncer_maxdelay) { 2770 start /= 2; 2771 incr /= 2; 2772 if (start == 0) { 2773 start = syncer_maxdelay / 2; 2774 incr = syncer_maxdelay; 2775 } 2776 next = start; 2777 } 2778 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 2779 mp->mnt_syncer = vp; 2780 return (0); 2781} 2782 2783/* 2784 * Do a lazy sync of the filesystem. 2785 */ 2786static int 2787sync_fsync(ap) 2788 struct vop_fsync_args /* { 2789 struct vnode *a_vp; 2790 struct ucred *a_cred; 2791 int a_waitfor; 2792 struct proc *a_p; 2793 } */ *ap; 2794{ 2795 struct vnode *syncvp = ap->a_vp; 2796 struct mount *mp = syncvp->v_mount; 2797 struct proc *p = ap->a_p; 2798 int asyncflag; 2799 2800 /* 2801 * We only need to do something if this is a lazy evaluation. 
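 * MNT_WAIT and MNT_NOWAIT fsyncs of the syncer vnode are deliberate
 * no-ops; only the MNT_LAZY pass made by the periodic syncer triggers
 * the requeue and per-mount flush below.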
2802 */ 2803 if (ap->a_waitfor != MNT_LAZY) 2804 return (0); 2805 2806 /* 2807 * Move ourselves to the back of the sync list. 2808 */ 2809 vn_syncer_add_to_worklist(syncvp, syncdelay); 2810 2811 /* 2812 * Walk the list of vnodes pushing all that are dirty and 2813 * not already on the sync list. 2814 */ 2815 simple_lock(&mountlist_slock); 2816 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) { 2817 simple_unlock(&mountlist_slock); 2818 return (0); 2819 } 2820 asyncflag = mp->mnt_flag & MNT_ASYNC; 2821 mp->mnt_flag &= ~MNT_ASYNC; 2822 vfs_msync(mp, MNT_NOWAIT); 2823 VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p); 2824 if (asyncflag) 2825 mp->mnt_flag |= MNT_ASYNC; 2826 vfs_unbusy(mp, p); 2827 return (0); 2828} 2829 2830/* 2831 * The syncer vnode is no longer referenced. 2832 */ 2833static int 2834sync_inactive(ap) 2835 struct vop_inactive_args /* { 2836 struct vnode *a_vp; 2837 struct proc *a_p; 2838 } */ *ap; 2839{ 2840 2841 vgone(ap->a_vp); 2842 return (0); 2843} 2844 2845/* 2846 * The syncer vnode is no longer needed and is being decommissioned. 2847 * 2848 * Modifications to the worklist must be protected at splbio(). 2849 */ 2850static int 2851sync_reclaim(ap) 2852 struct vop_reclaim_args /* { 2853 struct vnode *a_vp; 2854 } */ *ap; 2855{ 2856 struct vnode *vp = ap->a_vp; 2857 int s; 2858 2859 s = splbio(); 2860 vp->v_mount->mnt_syncer = NULL; 2861 if (vp->v_flag & VONWORKLST) { 2862 LIST_REMOVE(vp, v_synclist); 2863 vp->v_flag &= ~VONWORKLST; 2864 } 2865 splx(s); 2866 2867 return (0); 2868} 2869 2870/* 2871 * Print out a syncer vnode. 2872 */ 2873static int 2874sync_print(ap) 2875 struct vop_print_args /* { 2876 struct vnode *a_vp; 2877 } */ *ap; 2878{ 2879 struct vnode *vp = ap->a_vp; 2880 2881 printf("syncer vnode"); 2882 if (vp->v_vnlock != NULL) 2883 lockmgr_printinfo(vp->v_vnlock); 2884 printf("\n"); 2885 return (0); 2886} 2887
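
/*
 * Usage sketch (illustrative only, not part of this file's interfaces):
 * a filesystem's mount update path hands its struct netexport to
 * vfs_export() above, and the NFS server consults the result with
 * vfs_export_lookup().  The ufsmount/um_export names below follow the
 * ufs convention; other filesystems keep their own struct netexport.
 *
 *	static int
 *	example_update_export(mp, argp)
 *		struct mount *mp;
 *		struct export_args *argp;
 *	{
 *		struct ufsmount *ump = VFSTOUFS(mp);
 *
 *		return (vfs_export(mp, &ump->um_export, argp));
 *	}
 *
 * At request time the server checks the client's address, e.g.:
 *
 *	np = vfs_export_lookup(mp, &ump->um_export, nam);
 *	if (np == NULL)
 *		error = EACCES;		(address not exported to)
 */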