vfs_default.c revision 108686
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 108686 2003-01-04 22:10:36Z phk $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	vop_nospecstrategy(struct vop_specstrategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_specstrategy_desc,	(vop_t *) vop_nospecstrategy },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

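/*
 * Illustrative sketch, not part of this file: roughly how a filesystem
 * plugs into the table above.  The "example_*" names are hypothetical.
 * A filesystem supplies its own vnodeopv_desc and routes anything it
 * does not implement to vop_defaultop(), which re-dispatches the call
 * through default_vnodeop_p:
 *
 *	vop_t **example_vnodeop_p;
 *	static struct vnodeopv_entry_desc example_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_lookup_desc,	(vop_t *) example_lookup },
 *		{ &vop_pathconf_desc,	(vop_t *) vop_stdpathconf },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc example_vnodeop_opv_desc =
 *		{ &example_vnodeop_p, example_vnodeop_entries };
 *	VNODEOP_SET(example_vnodeop_opv_desc);
 *
 * Any VOP_*() that misses example_vnodeop_entries then ends up in the
 * default table above, and ultimately in vop_eopnotsupp() if no default
 * exists either.
 */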

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_nospecstrategy:
 *
 *	This shouldn't happen.  VOP_SPECSTRATEGY should always have a VCHR
 *	argument vnode, and those have a method for specstrategy over in
 *	specfs, so we only ever get here if somebody botched it.
 *	Pass the call to VOP_STRATEGY() and get on with life.
 *	The first time we print some info useful for debugging.
 */

static int
vop_nospecstrategy (struct vop_specstrategy_args *ap)
{
	static int once;

	if (!once) {
		vprint("\nVOP_SPECSTRATEGY on non-VCHR\n", ap->a_vp);
		backtrace();
		once++;
	}
	return VOP_STRATEGY(ap->a_vp, ap->a_bp);
}

/*
 * vop_stdpathconf:
 *
 *	Standard implementation of POSIX pathconf, to get information about
 *	limits for a filesystem.
 *	Override per filesystem for the case where the filesystem has smaller
 *	limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
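/*
 * Illustrative sketch, not part of this file ("example_*" names are
 * hypothetical): a filesystem with a smaller limit overrides only the
 * name it cares about and hands everything else back to vop_stdpathconf():
 *
 *	static int
 *	example_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_LINK_MAX) {
 *			*ap->a_retval = EXAMPLE_LINK_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */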

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}
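/*
 * Illustrative sketch, not part of this file: for a filesystem that uses
 * these defaults, the usual caller-side pattern
 *
 *	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	...
 *	VOP_UNLOCK(vp, 0, td);
 *
 * resolves to vop_stdlock() and vop_stdunlock() above, i.e. to lockmgr(9)
 * operating on vp->v_vnlock, with the vnode interlock handed to lockmgr
 * via VI_MTX(vp).
 */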

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable-storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}
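/*
 * Illustrative sketch of the pattern described above, not part of this
 * file: the syncer-style "might it be dirty?" probe, done without the
 * interlock, looks roughly like
 *
 *	vm_object_t obj;
 *
 *	if (VOP_GETVOBJECT(vp, &obj) == 0 &&
 *	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
 *		(go heavy-weight and flush the object's pages)
 *	}
 */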

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
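/*
 * Worked example for the mapping above (figures are illustrative): with
 * mnt_stat.f_iosize = 8192, btodb(8192) = 16 DEV_BSIZE (512 byte) units
 * per filesystem block, so logical block a_bn = 5 comes back as
 * *a_bnp = 80 on the vnode itself, with no read-ahead or read-behind
 * (*a_runp = *a_runb = 0).
 */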

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}



/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return(0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{
	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return(EOPNOTSUPP);
}

/* end of vfs default ops */