/*	$OpenBSD: vfs_subr.c,v 1.222 2014/11/19 18:04:54 tedu Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/acct.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>
#include <sys/specdev.h>

#include <netinet/in.h>

#include "softraid.h"

void sr_shutdown(void);

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
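 *
 * (bufinsvn() puts a buffer at the head of a vnode's clean or dirty
 * buffer list; bufremvn() unlinks it and marks it as being on no list
 * by setting its forward pointer to NOLIST, which is what brelvp() and
 * reassignbuf() below test before removing.)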
 */
#define bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
    struct export_args *);
int vfs_free_netcred(struct radix_node *, void *, u_int);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;

static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return(-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return(1);
	return(0);
}

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = 2 * desiredvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
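 *
 * (Typical boot-time use, a sketch only:
 *
 *	struct mount *mp;
 *
 *	if (vfs_rootmountalloc("ffs", devname, &mp) != 0)
 *		...;
 *
 * a filesystem's mountroot routine, e.g. ffs_mountroot(), is the sort
 * of caller intended here; the mount comes back read-only, vfs_busy'd,
 * with f_mntonname preset to "/".)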
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!TAILQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * allow maxvnodes to increase if the buffer cache itself
	 * is big enough to justify it. (we don't shrink it ever)
	 */
	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
	    : maxvnodes;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes / 2 > maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		RB_INIT(&vp->v_bufs_tree);
		RB_INIT(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	cache_purge(vp);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
		    M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		memset(nvp->v_specbitmap, 0, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
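	 *
	 * (Callers that scan a list, e.g. the checkalias() loop above,
	 * typically restart from the top when vget() fails, since the
	 * vnode may have been reused by then.)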
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}


/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
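	 *
	 * (v_holdcnt counts buffer and page references: bgetvp() calls
	 * vhold() and brelvp() calls vdrop(), so a free vnode that still
	 * has cached buffers sits on vnode_hold_list, where getnewvnode()
	 * is reluctant to recycle it.)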
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg) {
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = &spec_vops;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = &dead_vops;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
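	 *
	 * (All vnodes for the same device hang off a speclisth[] hash
	 * chain via v_specnext; when only one alias for this rdev is
	 * left, the walk below also clears VALIASED on that survivor.)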
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
	    vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	TAILQ_FOREACH_SAFE(mp, &mountlist, mnt_list, nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
			continue;
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		vfs_unbusy(mp);
	}
}
#endif

/*
 * Top level filesystem related information gathering.
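 *
 * (Name layout, as dispatched below: name[0] is either a filesystem
 * type number, which forwards the remaining names to that filesystem's
 * own vfs_sysctl handler, or VFS_GENERIC, in which case name[1]
 * selects VFS_MAXTYPENUM, VFS_CONF or VFS_BCACHESTAT.)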
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return (ret);
	}
	return (EOPNOTSUPP);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
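 *
 * (Each export entry is a single allocation: a struct netcred followed
 * immediately by the copied-in address and, if given, the mask; hence
 * the (struct sockaddr *)(np + 1) arithmetic below.)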
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int nplen, i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		goto finish;
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	switch (i) {
	case AF_INET:
		if ((rnh = nep->ne_rtable_inet) == NULL) {
			if (!rn_inithead((void **)&nep->ne_rtable_inet,
			    offsetof(struct sockaddr_in, sin_addr) * 8)) {
				error = ENOBUFS;
				goto out;
			}
			rnh = nep->ne_rtable_inet;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
finish:
	np->netc_exflags = argp->ex_flags;
	/* fill in the kernel's ucred from userspace's xucred */
	crfromxucred(&np->netc_anon, &argp->ex_anon);
	return (0);
out:
	free(np, M_NETADDR, nplen);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR, 0);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	struct radix_node_head *rnh;

	if ((rnh = nep->ne_rtable_inet) != NULL) {
		(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
		free(rnh, M_RTABLE, 0);
		nep->ne_rtable_inet = NULL;
	}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
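		 * (A radix-tree match on the client's address, with the
		 * RNF_ROOT check rejecting the tree's internal root node.)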
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			switch (saddr->sa_family) {
			case AF_INET:
				rnh = nep->ne_rtable_inet;
				break;
			default:
				rnh = NULL;
				break;
			}
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

retry:
	allerror = 0;
	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");

#if NSOFTRAID > 0
	sr_shutdown();
#endif
}

/*
 * perform sync() operation and wait for buffers to flush.
 * assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
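 *
 * (Flag summary, per the code below: V_SAVE writes out any dirty
 * buffers via VOP_FSYNC() before invalidating; V_SAVEMETA leaves
 * indirect-block buffers, identified by a negative b_lblkno, attached
 * to the vnode.)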
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				buf_acquire(bp);
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			buf_acquire_nomap(bp);
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
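 *
 * (The buffer starts on the vnode's clean list; reassignbuf() moves it
 * to the dirty list once it is marked B_DELWRI. The vhold() reference
 * taken here is released in brelvp().)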
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);


	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(void *b, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct buf *bp = b;

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      "  proc %p error %d flags %lb\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
	      "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
	    bp->b_data, bp->b_saveaddr,
	    LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    vp->v_tag > nitems(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
	    vp->v_type > nitems(vtypes)? "<unk>":vtypes[vp->v_type],
	    vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	bcopy(mp->mnt_stat.f_mntfromspec, sbp->f_mntfromspec, MNAMELEN);
	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
	    sizeof(struct ufs_args));
}