vfs_subr.c revision 1.228
/*	$OpenBSD: vfs_subr.c,v 1.228 2015/01/09 05:01:56 tedu Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/acct.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>
#include <sys/specdev.h>

#include <netinet/in.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_vnode.h>

#include "softraid.h"

void sr_shutdown(void);

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);

void	insmntque(struct vnode *, struct mount *);
int	getdevvp(dev_t, struct vnode **, enum vtype);

int	vfs_hang_addrlist(struct mount *, struct netexport *,
	    struct export_args *);
int	vfs_free_netcred(struct radix_node *, void *, u_int);
void	vfs_free_addrlist(struct netexport *);
void	vputonfreelist(struct vnode *);

int	vflush_vnode(struct vnode *, void *);
int	maxvnodes;

#ifdef DEBUG
void	printlockedvnodes(void);
#endif

struct pool vnode_pool;
struct pool uvm_vnode_pool;

static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return(-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return(1);
	return(0);
}
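/*
 * Illustrative sketch, not compiled: the two tables above are normally
 * consumed through the IFTOVT()/VTTOIF() macros from <sys/vnode.h>,
 * which index them by a mode_t's file type bits and by an enum vtype
 * respectively.  example_mode_to_vtype() is a hypothetical helper.
 */
#if 0
static enum vtype
example_mode_to_vtype(mode_t mode)
{
	/* (S_IFDIR & S_IFMT) >> 12 == 4, and iftovt_tab[4] == VDIR */
	return (IFTOVT(mode));
}
#endif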
/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = 2 * initialvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, PR_WAITOK,
	    "vnodes", NULL);
	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, 0, PR_WAITOK,
	    "uvmvnodes", NULL);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
	*mpp = mp;
	return (0);
}
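/*
 * Illustrative sketch, not compiled: the usual vfs_busy()/vfs_unbusy()
 * bracket used by code that must keep a mount point from being
 * unmounted while it works on it.  example_inspect_mount() is a
 * hypothetical caller.
 */
#if 0
static int
example_inspect_mount(struct mount *mp)
{
	struct vnode *vp;

	if (vfs_busy(mp, VB_READ|VB_NOWAIT))
		return (EBUSY);
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		/* mp cannot be unmounted while we hold it busy */
	}
	vfs_unbusy(mp);
	return (0);
}
#endif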
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!TAILQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
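/*
 * Illustrative sketch, not compiled: callers building a VOP_SETATTR()
 * request reset every field to VNOVAL with vattr_null() first and then
 * fill in only what they mean to change, so a filesystem can tell
 * "not requested" apart from a real value.  example_chmod() is
 * hypothetical.
 */
#if 0
static int
example_chmod(struct vnode *vp, mode_t mode, struct ucred *cred,
    struct proc *p)
{
	struct vattr va;

	vattr_null(&va);
	va.va_mode = mode;		/* the only attribute being set */
	return (VOP_SETATTR(vp, &va, cred, p));
}
#endif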
/*
 * Routines having to do with the management of the vnode table.
 */
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * allow maxvnodes to increase if the buffer cache itself
	 * is big enough to justify it. (we don't shrink it ever)
	 */
	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
	    : maxvnodes;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes / 2 > maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm->u_vnode = vp;
		RB_INIT(&vp->v_bufs_tree);
		RB_INIT(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	cache_purge(vp);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
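/*
 * Illustrative sketch, not compiled: how a filesystem typically obtains
 * a vnode from getnewvnode() and finishes the setup itself.  The tag
 * VT_NON and the example_vops table stand in for a real filesystem's
 * tag and vops; example_new_vnode() is hypothetical.
 */
#if 0
extern struct vops example_vops;

static int
example_new_vnode(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_NON, mp, &example_vops, &vp);
	if (error)
		return (error);
	/* the caller owns the single reference and fills in the rest */
	vp->v_type = VREG;
	vp->v_data = NULL;
	*vpp = vp;
	return (0);
}
#endif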
/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
		    M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		memset(nvp->v_specbitmap, 0, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}
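/*
 * Illustrative sketch, not compiled: the vget()/vput() pairing used
 * when picking up a vnode that may be sitting on a free list.  A
 * failing vget() means the vnode is being reclaimed and must not be
 * touched; example_use_vnode() is hypothetical.
 */
#if 0
static int
example_use_vnode(struct vnode *vp, struct proc *p)
{
	if (vget(vp, LK_EXCLUSIVE, p))
		return (ENOENT);	/* being cleaned; caller retries */
	/* vnode is referenced and locked here */
	vput(vp);			/* unlock and drop the reference */
	return (0);
}
#endif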
/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg)
{
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = &spec_vops;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}
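/*
 * Illustrative sketch, not compiled: vfs_mount_foreach_vnode() accepts
 * any callback shaped like vflush_vnode() above; a non-zero return
 * stops the walk and is handed back to the caller.  The example_*
 * names are hypothetical.
 */
#if 0
struct example_count_args {
	int n;
};

static int
example_count_vnode(struct vnode *vp, void *arg)
{
	struct example_count_args *ca = arg;

	ca->n++;
	return (0);		/* keep walking */
}

static int
example_count_mount(struct mount *mp)
{
	struct example_count_args ca = { 0 };

	vfs_mount_foreach_vnode(mp, example_count_vnode, &ca);
	return (ca.n);
}
#endif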
/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = &dead_vops;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}
/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
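/*
 * Illustrative sketch, not compiled: a driver's detach path typically
 * calls vdevgone() for both of its majors so that any vnodes naming
 * the departing device are revoked.  The majors and the
 * 16-minors-per-unit layout here are hypothetical.
 */
#if 0
static void
example_detach(int blkmajor, int chrmajor, int unit)
{
	vdevgone(blkmajor, unit * 16, unit * 16 + 15, VBLK);
	vdevgone(chrmajor, unit * 16, unit * 16 + 15, VCHR);
}
#endif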
#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
	    vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	TAILQ_FOREACH_SAFE(mp, &mountlist, mnt_list, nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
			continue;
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		vfs_unbusy(mp);
	}
}
#endif
/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return(ret);
	}
	return (EOPNOTSUPP);
}
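/*
 * Illustrative sketch, not compiled (userland, not kernel): the
 * VFS_GENERIC node handled above is reached from sysctl(2), e.g. to
 * read the highest filesystem type number:
 *
 *	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM };
 *	int max;
 *	size_t len = sizeof(max);
 *
 *	if (sysctl(mib, 3, &max, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */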
/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int nplen, i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		goto finish;
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	switch (i) {
	case AF_INET:
		if ((rnh = nep->ne_rtable_inet) == NULL) {
			if (!rn_inithead((void **)&nep->ne_rtable_inet,
			    offsetof(struct sockaddr_in, sin_addr) * 8)) {
				error = ENOBUFS;
				goto out;
			}
			rnh = nep->ne_rtable_inet;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
finish:
	np->netc_exflags = argp->ex_flags;
	/* fill in the kernel's ucred from userspace's xucred */
	crfromxucred(&np->netc_anon, &argp->ex_anon);
	return (0);
out:
	free(np, M_NETADDR, nplen);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR, 0);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	struct radix_node_head *rnh;

	if ((rnh = nep->ne_rtable_inet) != NULL) {
		(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
		free(rnh, M_RTABLE, 0);
		nep->ne_rtable_inet = NULL;
	}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			switch(saddr->sa_family) {
			case AF_INET:
				rnh = nep->ne_rtable_inet;
				break;
			default:
				rnh = NULL;
				break;
			}
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
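/*
 * Illustrative sketch, not compiled: a filesystem's VOP_ACCESS()
 * implementation usually reduces to a vaccess() call on the file's
 * cached attributes.  struct example_inode and its i_mode/i_uid/i_gid
 * fields are hypothetical stand-ins for a real in-core inode.
 */
#if 0
static int
example_access(struct vnode *vp, int acc_mode, struct ucred *cred)
{
	struct example_inode *ip = vp->v_data;

	return (vaccess(vp->v_type, ip->i_mode & ALLPERMS,
	    ip->i_uid, ip->i_gid, acc_mode, cred));
}
#endif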
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

 retry:
	allerror = 0;
	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");

#if NSOFTRAID > 0
	sr_shutdown();
#endif
}

/*
 * perform sync() operation and wait for buffers to flush.
 * assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}
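/*
 * Illustrative sketch, not compiled: the v_numoutput protocol that
 * vwaitforio()/vwakeup() police.  Whoever starts an async write bumps
 * the count at splbio(); the I/O completion path calls vwakeup() to
 * drop it and rouse any vwaitforio() sleeper.  example_start_write()
 * is hypothetical.
 */
#if 0
static void
example_start_write(struct vnode *vp, struct buf *bp)
{
	int s;

	s = splbio();
	vp->v_numoutput++;	/* balanced by vwakeup() at biodone time */
	splx(s);
	VOP_STRATEGY(bp);
}
#endif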
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				buf_acquire(bp);
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			buf_acquire_nomap(bp);
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}
/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);


	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}
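/*
 * Illustrative sketch, not compiled: what a dynamically attached
 * filesystem might hand to vfs_register() below.  example_vfsops, the
 * name and the type number are all hypothetical.
 */
#if 0
extern struct vfsops example_vfsops;

static struct vfsconf example_vfsconf = {
	.vfc_vfsops = &example_vfsops,
	.vfc_name = "example",
	.vfc_typenum = 99,
};

static int
example_fs_attach(void)
{
	return (vfs_register(&example_vfsconf));
}
#endif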
int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}
/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(void *b, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct buf *bp = b;

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	    "  proc %p error %d flags %lb\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
	    "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
	    bp->b_data, bp->b_saveaddr,
	    LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    vp->v_tag > nitems(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
	    vp->v_type > nitems(vtypes)? "<unk>":vtypes[vp->v_type],
	    vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
	memcpy(&sbp->mount_info.ufs_args, &mp->mnt_stat.mount_info.ufs_args,
	    sizeof(struct ufs_args));
}
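/*
 * Illustrative sketch, not compiled: a filesystem's vfs_statfs entry
 * computes the block and file counts itself and lets copy_statfs_info()
 * fill in the identity fields cached in mnt_stat.  example_statfs() is
 * hypothetical.
 */
#if 0
static int
example_statfs(struct mount *mp, struct statfs *sbp, struct proc *p)
{
	sbp->f_bsize = 512;
	sbp->f_iosize = MAXBSIZE;
	sbp->f_blocks = sbp->f_bfree = sbp->f_bavail = 0;
	sbp->f_files = sbp->f_ffree = 0;
	copy_statfs_info(sbp, mp);
	return (0);
}
#endif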