/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 114378 2003-05-01 03:51:05Z alc $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	vop_nospecstrategy(struct vop_specstrategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_specstrategy_desc,	(vop_t *) vop_nospecstrategy },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
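/*
 * Editor's illustrative sketch (not part of the original file): a
 * filesystem registers its own operation vector the same way, listing
 * only the operations it implements and routing everything else to the
 * table above via vop_defaultop.  The "myfs" names are hypothetical.
 *
 *	static vop_t **myfs_vnodeop_p;
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_lookup_desc,	(vop_t *) myfs_lookup },
 *		{ &vop_read_desc,	(vop_t *) myfs_read },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
 *		{ &myfs_vnodeop_p, myfs_vnodeop_entries };
 *	VNODEOP_SET(myfs_vnodeop_opv_desc);
 */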
/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
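/*
 * Editor's illustrative sketch (not part of the original file): a
 * filesystem can wire an operation that should never reach it to
 * vop_panic, so a layering or dispatch bug is caught immediately
 * instead of being silently turned into an error return, e.g.:
 *
 *	{ &vop_strategy_desc,	(vop_t *) vop_panic },
 */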
/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, can
 * be found in the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	vprint("device vnode", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_nospecstrategy:
 *
 *	This shouldn't happen.  VOP_SPECSTRATEGY should always have a VCHR
 *	argument vnode, and those have a method for specstrategy over in
 *	specfs, so we only ever get here if somebody botched it.
 *	Pass the call to VOP_STRATEGY() and get on with life.
 *	The first time through we print some info useful for debugging.
 */

static int
vop_nospecstrategy (struct vop_specstrategy_args *ap)
{
	static int once;

	if (!once) {
		vprint("VOP_SPECSTRATEGY on non-VCHR", ap->a_vp);
		backtrace();
		once++;
	}
	return VOP_STRATEGY(ap->a_vp, ap->a_bp);
}

/*
 * vop_stdpathconf:
 *
 *	Standard implementation of POSIX pathconf, to get information about
 *	limits for a filesystem.
 *	Override per filesystem for the case where the filesystem has smaller
 *	limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
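/*
 * Editor's illustrative sketch (not part of the original file): a
 * filesystem with a smaller limit overrides only the names it cares
 * about and falls back to vop_stdpathconf for the rest.  The
 * "myfs_pathconf" name and the limit value are hypothetical.
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_LINK_MAX) {
 *			*ap->a_retval = 1;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */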
/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
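/*
 * Editor's illustrative note (an assumption, not from the original
 * file): a filesystem using vop_stdpoll records the poller via
 * vn_pollrecord() above, and is then expected to wake sleeping pollers
 * when the vnode becomes ready, typically by calling vn_pollevent()
 * from the code path that makes new data available, roughly:
 *
 *	vn_pollevent(vp, POLLIN | POLLRDNORM);
 */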
/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Return whether or not the node is locked.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}
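/*
 * Editor's illustrative note (an assumption, not from the original
 * file): the write-suspension code uses VOP_GETWRITEMOUNT() to find the
 * mount point to charge a pending write against, roughly:
 *
 *	struct mount *mp;
 *
 *	if (VOP_GETWRITEMOUNT(vp, &mp) == 0 && mp != NULL)
 *		account the pending write against mp;
 */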
/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but
			 * doesn't cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		VM_OBJECT_LOCK(object);
		object->ref_count--;
		VM_OBJECT_UNLOCK(object);
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (obj == NULL)
		return (0);
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		VM_OBJECT_UNLOCK(obj);
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
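/*
 * Editor's worked example for vop_stdbmap (illustrative, not from the
 * original file): with the common f_iosize of 8192 bytes, btodb(8192)
 * yields 16 DEV_BSIZE (512-byte) blocks per filesystem block, so
 * logical block 3 maps to device block 3 * 16 = 48 on the vnode itself
 * (*a_vpp = a_vp), with no read-ahead (a_runp) or read-behind (a_runb)
 * clustering advertised.
 */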
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s, error = 0;
	int maxretry = 100;	/* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	s = splbio();
	TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}
	splx(s);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop2:
	s = splbio();
	for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_vflag & VV_OBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
			splx(s);
		} else {
			bremfree(bp);
			splx(s);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp),
			    PRIBIO + 1, "fsync", 0);
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0) {
				splx(s);
				goto loop1;
			}
			vprint("fsync: giving up on dirty", vp);
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	splx(s);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}
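/*
 * Editor's illustrative note (an assumption, not from the original
 * file): the generic vnode pager synthesizes paged I/O out of
 * VOP_BMAP() and buffer I/O, so a filesystem whose blocks are
 * addressable through VOP_BMAP() can typically support mmap simply by
 * taking the defaults:
 *
 *	{ &vop_getpages_desc,	(vop_t *) vop_stdgetpages },
 *	{ &vop_putpages_desc,	(vop_t *) vop_stdputpages },
 */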
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	struct vnode *vp, *nvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;

		nvp = TAILQ_NEXT(vp, v_nmntvnodes);

		VI_LOCK(vp);
		if (TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			VI_UNLOCK(vp);
			continue;
		}
		mtx_unlock(&mntvnode_mtx);

		if ((error = vget(vp, lockreq, td)) != 0) {
			/*
			 * Reacquire the mutex dropped above; both the
			 * "goto loop" and the "continue" paths resume
			 * with mntvnode_mtx held.
			 */
			mtx_lock(&mntvnode_mtx);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, cred, waitfor, td);
		if (error)
			allerror = error;

		mtx_lock(&mntvnode_mtx);
		if (nvp != TAILQ_NEXT(vp, v_nmntvnodes)) {
			vput(vp);
			goto loop;
		}
		vput(vp);
	}
	mtx_unlock(&mntvnode_mtx);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

/* end of vfs default ops */
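/*
 * Editor's illustrative sketch (not part of the original file): a
 * filesystem's vfsops table can point any VFS-level operation it does
 * not care about at these defaults, along the lines of the following
 * (field order abbreviated; the "myfs" names are hypothetical):
 *
 *	static struct vfsops myfs_vfsops = {
 *		myfs_mount,
 *		vfs_stdstart,
 *		myfs_unmount,
 *		myfs_root,
 *		vfs_stdquotactl,
 *		myfs_statfs,
 *		vfs_stdsync,
 *		vfs_stdvget,
 *		vfs_stdfhtovp,
 *		...
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */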