/*	$OpenBSD: vfs_subr.c,v 1.207 2013/10/01 20:15:56 sf Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/acct.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>
#include <sys/specdev.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
    struct export_args *);
int vfs_free_netcred(struct radix_node *, void *, u_int);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;

static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return(-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return(1);
	return(0);
}

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = 2 * desiredvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}
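/*
 * Illustrative sketch (not part of the original file): callers bracket
 * work on a mount point with vfs_busy()/vfs_unbusy(), typically while
 * walking the mountlist.  A hypothetical traversal:
 *
 *	struct mount *mp;
 *
 *	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
 *		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *			continue;	(being unmounted; skip it)
 *		... operate on mp ...
 *		vfs_unbusy(mp);
 *	}
 *
 * printlockedvnodes() and sysctl_vnode() later in this file use this
 * pattern with explicit first/next iteration.
 */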
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * allow maxvnodes to increase if the buffer cache itself
	 * is big enough to justify it. (we don't shrink it ever)
	 */
	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
	    : maxvnodes;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list.
	 * If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes / 2 > maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		RB_INIT(&vp->v_bufs_tree);
		RB_INIT(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}
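/*
 * Illustrative sketch (not from this file): a filesystem's mountroot
 * routine typically obtains the vnode for the root device roughly as
 * follows (the error handling shown is hypothetical):
 *
 *	struct vnode *rootvp;
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot set up root device vnode");
 *	... rootvp is now a VBLK vnode backed by spec_vops ...
 *
 * ffs_mountroot() creates the root partition's device vnode this way,
 * which is why checkalias() below must cope with bdevvp-created vnodes.
 */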
/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
		    M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}


/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}
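/*
 * Illustrative sketch (not from this file): the reference/lock
 * lifecycle as a typical caller sees it.  A hypothetical lookup path
 * might do:
 *
 *	struct vnode *vp;
 *
 *	if (vget(vp, LK_EXCLUSIVE, p))		(take ref + lock)
 *		return (ENOENT);
 *	... use the locked vnode ...
 *	vput(vp);				(unlock + drop ref)
 *
 * A caller holding an unlocked reference instead pairs vref() with
 * vrele().  When the last reference goes away, VOP_INACTIVE() runs
 * and the vnode is placed on a free list by vputonfreelist().
 */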
/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg) {
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = &spec_vops;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = &dead_vops;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}
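/*
 * Illustrative note (not from this file): when two vnodes refer to the
 * same dev_t, both sit on the same SPECHASH chain and carry VALIASED.
 * A hypothetical walk of one chain:
 *
 *	struct vnode *vq;
 *
 *	for (vq = speclisth[SPECHASH(dev)]; vq; vq = vq->v_specnext)
 *		if (vq->v_rdev == dev && vq->v_type == type)
 *			... vq is one alias of the device ...
 *
 * vcount() above sums v_usecount across exactly that set, which lets
 * device close logic detect the last close of an aliased device.
 */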
#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
	    vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP);
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return(ret);
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(char *where, size_t *sizep, struct proc *p)
{
	struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + sizeof(struct e_vnode) > ewhere) {
				*sizep = bp - where;
				vfs_unbusy(mp);
				return (ENOMEM);
			}
			if ((error = copyout(&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			    (error = copyout(vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp);
				return (error);
			}
			bp += sizeof(struct e_vnode);
		}

		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

	*sizep = bp - where;

	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
	    np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr)((caddr_t)saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
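/*
 * Worked example (illustrative, not from the original file): for a
 * file with mode 0640, owner uid 100, group gid 10, a request of
 * VREAD|VWRITE by a non-owner caller in group 10 builds
 * mask = S_IRGRP|S_IWGRP = 0060.  Since (0640 & 0060) == 0040 != 0060,
 * vaccess() returns EACCES; a plain VREAD request would succeed
 * because (0640 & S_IRGRP) == S_IRGRP.
 */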
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

 retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * perform sync() operation and wait for buffers to flush.
 * assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
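/*
 * Illustrative sketch (not part of this file): from userland the knob
 * above is reachable as fs.posix.setuid via sysctl(3).  A hypothetical
 * reader would look roughly like:
 *
 *	int mib[3] = { CTL_FS, FS_POSIX, FS_POSIX_SETUID };
 *	int val;
 *	size_t len = sizeof(val);
 *
 *	if (sysctl(mib, 3, &val, &len, NULL, 0) == 0)
 *		printf("suid_clear = %d\n", val);
 *
 * Writing a new value follows the same path and is refused with EPERM
 * at securelevel > 0, as enforced in fs_posix_sysctl() above.
 */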
/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				buf_acquire(bp);
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			buf_acquire_nomap(bp);
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);


	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}
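/*
 * Illustrative sketch (not from this file): callers of the
 * buffer/vnode association routines above bracket them with splbio(),
 * e.g. a hypothetical:
 *
 *	int s;
 *
 *	s = splbio();
 *	bgetvp(vp, bp);		(buffer now holds the vnode via vhold())
 *	... I/O setup ...
 *	splx(s);
 *
 * The splassert(IPL_BIO) in each routine catches callers that forget.
 */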
/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(void *b, int full,
    int (*pr)(const char *, ...)
	__attribute__((__format__(__kprintf__,1,2))))
{
	struct buf *bp = b;

	(*pr)(" vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      " proc %p error %d flags %b\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%llx\n"
	      " data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
	    (long long)bp->b_synctime, bp->b_data, bp->b_saveaddr,
	    LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)(" dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    vp->v_tag > nitems(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
	    vp->v_type > nitems(vtypes)? "<unk>":vtypes[vp->v_type],
	    vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full,
    int (*pr)(const char *, ...)
	__attribute__((__format__(__kprintf__,1,2))))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	bcopy(mp->mnt_stat.f_mntfromspec, sbp->f_mntfromspec, MNAMELEN);
	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
	    sizeof(struct ufs_args));
}