/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * 39 * $FreeBSD: head/sys/kern/vfs_default.c 92462 2002-03-17 01:25:47Z mckusick $ 40 */ 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/bio.h> 45#include <sys/buf.h> 46#include <sys/conf.h> 47#include <sys/kernel.h> 48#include <sys/lock.h> 49#include <sys/malloc.h> 50#include <sys/mount.h> 51#include <sys/mutex.h> 52#include <sys/unistd.h> 53#include <sys/vnode.h> 54#include <sys/poll.h> 55 56#include <machine/limits.h> 57 58#include <vm/vm.h> 59#include <vm/vm_object.h> 60#include <vm/vm_extern.h> 61#include <vm/pmap.h> 62#include <vm/vm_map.h> 63#include <vm/vm_page.h> 64#include <vm/vm_pager.h> 65#include <vm/vnode_pager.h> 66#include <vm/vm_zone.h> 67 68static int vop_nolookup __P((struct vop_lookup_args *)); 69static int vop_nostrategy __P((struct vop_strategy_args *)); 70 71/* 72 * This vnode table stores what we want to do if the filesystem doesn't 73 * implement a particular VOP. 74 * 75 * If there is no specific entry here, we will return EOPNOTSUPP. 
76 * 77 */ 78 79vop_t **default_vnodeop_p; 80static struct vnodeopv_entry_desc default_vnodeop_entries[] = { 81 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 82 { &vop_advlock_desc, (vop_t *) vop_einval }, 83 { &vop_bmap_desc, (vop_t *) vop_stdbmap }, 84 { &vop_close_desc, (vop_t *) vop_null }, 85 { &vop_createvobject_desc, (vop_t *) vop_stdcreatevobject }, 86 { &vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject }, 87 { &vop_fsync_desc, (vop_t *) vop_null }, 88 { &vop_getpages_desc, (vop_t *) vop_stdgetpages }, 89 { &vop_getvobject_desc, (vop_t *) vop_stdgetvobject }, 90 { &vop_inactive_desc, (vop_t *) vop_stdinactive }, 91 { &vop_ioctl_desc, (vop_t *) vop_enotty }, 92 { &vop_islocked_desc, (vop_t *) vop_noislocked }, 93 { &vop_lease_desc, (vop_t *) vop_null }, 94 { &vop_lock_desc, (vop_t *) vop_nolock }, 95 { &vop_lookup_desc, (vop_t *) vop_nolookup }, 96 { &vop_open_desc, (vop_t *) vop_null }, 97 { &vop_pathconf_desc, (vop_t *) vop_einval }, 98 { &vop_putpages_desc, (vop_t *) vop_stdputpages }, 99 { &vop_poll_desc, (vop_t *) vop_nopoll }, 100 { &vop_readlink_desc, (vop_t *) vop_einval }, 101 { &vop_revoke_desc, (vop_t *) vop_revoke }, 102 { &vop_strategy_desc, (vop_t *) vop_nostrategy }, 103 { &vop_unlock_desc, (vop_t *) vop_nounlock }, 104 { NULL, NULL } 105}; 106 107static struct vnodeopv_desc default_vnodeop_opv_desc = 108 { &default_vnodeop_p, default_vnodeop_entries }; 109 110VNODEOP_SET(default_vnodeop_opv_desc); 111 112/* 113 * Series of placeholder functions for various error returns for 114 * VOPs. 
115 */ 116 117int 118vop_eopnotsupp(struct vop_generic_args *ap) 119{ 120 /* 121 printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 122 */ 123 124 return (EOPNOTSUPP); 125} 126 127int 128vop_ebadf(struct vop_generic_args *ap) 129{ 130 131 return (EBADF); 132} 133 134int 135vop_enotty(struct vop_generic_args *ap) 136{ 137 138 return (ENOTTY); 139} 140 141int 142vop_einval(struct vop_generic_args *ap) 143{ 144 145 return (EINVAL); 146} 147 148int 149vop_null(struct vop_generic_args *ap) 150{ 151 152 return (0); 153} 154 155/* 156 * Used to make a defined VOP fall back to the default VOP. 157 */ 158int 159vop_defaultop(struct vop_generic_args *ap) 160{ 161 162 return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap)); 163} 164 165/* 166 * Helper function to panic on some bad VOPs in some filesystems. 167 */ 168int 169vop_panic(struct vop_generic_args *ap) 170{ 171 172 panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name); 173} 174 175/* 176 * vop_std<something> and vop_no<something> are default functions for use by 177 * filesystems that need the "default reasonable" implementation for a 178 * particular operation. 179 * 180 * The documentation for the operations they implement exists (if it exists) 181 * in the VOP_<SOMETHING>(9) manpage (all uppercase). 182 */ 183 184/* 185 * Default vop for filesystems that do not support name lookup 186 */ 187static int 188vop_nolookup(ap) 189 struct vop_lookup_args /* { 190 struct vnode *a_dvp; 191 struct vnode **a_vpp; 192 struct componentname *a_cnp; 193 } */ *ap; 194{ 195 196 *ap->a_vpp = NULL; 197 return (ENOTDIR); 198} 199 200/* 201 * vop_nostrategy: 202 * 203 * Strategy routine for VFS devices that have none. 204 * 205 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy 206 * routine. Typically this is done for a BIO_READ strategy call. 
207 * Typically B_INVAL is assumed to already be clear prior to a write 208 * and should not be cleared manually unless you just made the buffer 209 * invalid. BIO_ERROR should be cleared either way. 210 */ 211 212static int 213vop_nostrategy (struct vop_strategy_args *ap) 214{ 215 printf("No strategy for buffer at %p\n", ap->a_bp); 216 vprint("", ap->a_vp); 217 vprint("", ap->a_bp->b_vp); 218 ap->a_bp->b_ioflags |= BIO_ERROR; 219 ap->a_bp->b_error = EOPNOTSUPP; 220 bufdone(ap->a_bp); 221 return (EOPNOTSUPP); 222} 223 224/* 225 * vop_stdpathconf: 226 * 227 * Standard implementation of POSIX pathconf, to get information about limits 228 * for a filesystem. 229 * Override per filesystem for the case where the filesystem has smaller 230 * limits. 231 */ 232int 233vop_stdpathconf(ap) 234 struct vop_pathconf_args /* { 235 struct vnode *a_vp; 236 int a_name; 237 int *a_retval; 238 } */ *ap; 239{ 240 241 switch (ap->a_name) { 242 case _PC_LINK_MAX: 243 *ap->a_retval = LINK_MAX; 244 return (0); 245 case _PC_MAX_CANON: 246 *ap->a_retval = MAX_CANON; 247 return (0); 248 case _PC_MAX_INPUT: 249 *ap->a_retval = MAX_INPUT; 250 return (0); 251 case _PC_PIPE_BUF: 252 *ap->a_retval = PIPE_BUF; 253 return (0); 254 case _PC_CHOWN_RESTRICTED: 255 *ap->a_retval = 1; 256 return (0); 257 case _PC_VDISABLE: 258 *ap->a_retval = _POSIX_VDISABLE; 259 return (0); 260 default: 261 return (EINVAL); 262 } 263 /* NOTREACHED */ 264} 265 266/* 267 * Standard lock, unlock and islocked functions. 268 * 269 * These depend on the lock structure being the first element in the 270 * inode, ie: vp->v_data points to the the lock! 
271 */ 272int 273vop_stdlock(ap) 274 struct vop_lock_args /* { 275 struct vnode *a_vp; 276 int a_flags; 277 struct thread *a_td; 278 } */ *ap; 279{ 280 struct vnode *vp = ap->a_vp; 281 282#ifndef DEBUG_LOCKS 283 return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_td)); 284#else 285 return (debuglockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, 286 ap->a_td, "vop_stdlock", vp->filename, vp->line)); 287#endif 288} 289 290/* See above. */ 291int 292vop_stdunlock(ap) 293 struct vop_unlock_args /* { 294 struct vnode *a_vp; 295 int a_flags; 296 struct thread *a_td; 297 } */ *ap; 298{ 299 struct vnode *vp = ap->a_vp; 300 301 return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock, 302 ap->a_td)); 303} 304 305/* See above. */ 306int 307vop_stdislocked(ap) 308 struct vop_islocked_args /* { 309 struct vnode *a_vp; 310 struct thread *a_td; 311 } */ *ap; 312{ 313 314 return (lockstatus(&ap->a_vp->v_lock, ap->a_td)); 315} 316 317/* Mark the vnode inactive */ 318int 319vop_stdinactive(ap) 320 struct vop_inactive_args /* { 321 struct vnode *a_vp; 322 struct thread *a_td; 323 } */ *ap; 324{ 325 326 VOP_UNLOCK(ap->a_vp, 0, ap->a_td); 327 return (0); 328} 329 330/* 331 * Return true for select/poll. 332 */ 333int 334vop_nopoll(ap) 335 struct vop_poll_args /* { 336 struct vnode *a_vp; 337 int a_events; 338 struct ucred *a_cred; 339 struct thread *a_td; 340 } */ *ap; 341{ 342 /* 343 * Return true for read/write. If the user asked for something 344 * special, return POLLNVAL, so that clients have a way of 345 * determining reliably whether or not the extended 346 * functionality is present without hard-coding knowledge 347 * of specific filesystem implementations. 348 */ 349 if (ap->a_events & ~POLLSTANDARD) 350 return (POLLNVAL); 351 352 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 353} 354 355/* 356 * Implement poll for local filesystems that support it. 
357 */ 358int 359vop_stdpoll(ap) 360 struct vop_poll_args /* { 361 struct vnode *a_vp; 362 int a_events; 363 struct ucred *a_cred; 364 struct thread *a_td; 365 } */ *ap; 366{ 367 if (ap->a_events & ~POLLSTANDARD) 368 return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events)); 369 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 370} 371 372/* 373 * Stubs to use when there is no locking to be done on the underlying object. 374 * A minimal shared lock is necessary to ensure that the underlying object 375 * is not revoked while an operation is in progress. So, an active shared 376 * count is maintained in an auxillary vnode lock structure. 377 */ 378int 379vop_sharedlock(ap) 380 struct vop_lock_args /* { 381 struct vnode *a_vp; 382 int a_flags; 383 struct thread *a_td; 384 } */ *ap; 385{ 386 /* 387 * This code cannot be used until all the non-locking filesystems 388 * (notably NFS) are converted to properly lock and release nodes. 389 * Also, certain vnode operations change the locking state within 390 * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 391 * and symlink). Ideally these operations should not change the 392 * lock state, but should be changed to let the caller of the 393 * function unlock them. Otherwise all intermediate vnode layers 394 * (such as union, umapfs, etc) must catch these functions to do 395 * the necessary locking at their layer. Note that the inactive 396 * and lookup operations also change their lock state, but this 397 * cannot be avoided, so these two operations will always need 398 * to be handled in intermediate layers. 399 */ 400 struct vnode *vp = ap->a_vp; 401 int vnflags, flags = ap->a_flags; 402 403 switch (flags & LK_TYPE_MASK) { 404 case LK_DRAIN: 405 vnflags = LK_DRAIN; 406 break; 407 case LK_EXCLUSIVE: 408#ifdef DEBUG_VFS_LOCKS 409 /* 410 * Normally, we use shared locks here, but that confuses 411 * the locking assertions. 
412 */ 413 vnflags = LK_EXCLUSIVE; 414 break; 415#endif 416 case LK_SHARED: 417 vnflags = LK_SHARED; 418 break; 419 case LK_UPGRADE: 420 case LK_EXCLUPGRADE: 421 case LK_DOWNGRADE: 422 return (0); 423 case LK_RELEASE: 424 default: 425 panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK); 426 } 427 if (flags & LK_INTERLOCK) 428 vnflags |= LK_INTERLOCK; 429#ifndef DEBUG_LOCKS 430 return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td)); 431#else 432 return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td, 433 "vop_sharedlock", vp->filename, vp->line)); 434#endif 435} 436 437/* 438 * Stubs to use when there is no locking to be done on the underlying object. 439 * A minimal shared lock is necessary to ensure that the underlying object 440 * is not revoked while an operation is in progress. So, an active shared 441 * count is maintained in an auxillary vnode lock structure. 442 */ 443int 444vop_nolock(ap) 445 struct vop_lock_args /* { 446 struct vnode *a_vp; 447 int a_flags; 448 struct thread *a_td; 449 } */ *ap; 450{ 451#ifdef notyet 452 /* 453 * This code cannot be used until all the non-locking filesystems 454 * (notably NFS) are converted to properly lock and release nodes. 455 * Also, certain vnode operations change the locking state within 456 * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 457 * and symlink). Ideally these operations should not change the 458 * lock state, but should be changed to let the caller of the 459 * function unlock them. Otherwise all intermediate vnode layers 460 * (such as union, umapfs, etc) must catch these functions to do 461 * the necessary locking at their layer. Note that the inactive 462 * and lookup operations also change their lock state, but this 463 * cannot be avoided, so these two operations will always need 464 * to be handled in intermediate layers. 
465 */ 466 struct vnode *vp = ap->a_vp; 467 int vnflags, flags = ap->a_flags; 468 469 switch (flags & LK_TYPE_MASK) { 470 case LK_DRAIN: 471 vnflags = LK_DRAIN; 472 break; 473 case LK_EXCLUSIVE: 474 case LK_SHARED: 475 vnflags = LK_SHARED; 476 break; 477 case LK_UPGRADE: 478 case LK_EXCLUPGRADE: 479 case LK_DOWNGRADE: 480 return (0); 481 case LK_RELEASE: 482 default: 483 panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK); 484 } 485 if (flags & LK_INTERLOCK) 486 vnflags |= LK_INTERLOCK; 487 return(lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td)); 488#else /* for now */ 489 /* 490 * Since we are not using the lock manager, we must clear 491 * the interlock here. 492 */ 493 if (ap->a_flags & LK_INTERLOCK) 494 mtx_unlock(&ap->a_vp->v_interlock); 495 return (0); 496#endif 497} 498 499/* 500 * Do the inverse of vop_nolock, handling the interlock in a compatible way. 501 */ 502int 503vop_nounlock(ap) 504 struct vop_unlock_args /* { 505 struct vnode *a_vp; 506 int a_flags; 507 struct thread *a_td; 508 } */ *ap; 509{ 510 511 /* 512 * Since we are not using the lock manager, we must clear 513 * the interlock here. 514 */ 515 if (ap->a_flags & LK_INTERLOCK) 516 mtx_unlock(&ap->a_vp->v_interlock); 517 return (0); 518} 519 520/* 521 * Return whether or not the node is in use. 522 */ 523int 524vop_noislocked(ap) 525 struct vop_islocked_args /* { 526 struct vnode *a_vp; 527 struct thread *a_td; 528 } */ *ap; 529{ 530 531 return (0); 532} 533 534/* 535 * Return our mount point, as we will take charge of the writes. 
536 */ 537int 538vop_stdgetwritemount(ap) 539 struct vop_getwritemount_args /* { 540 struct vnode *a_vp; 541 struct mount **a_mpp; 542 } */ *ap; 543{ 544 545 *(ap->a_mpp) = ap->a_vp->v_mount; 546 return (0); 547} 548 549/* Create the VM system backing object for this vnode */ 550int 551vop_stdcreatevobject(ap) 552 struct vop_createvobject_args /* { 553 struct vnode *vp; 554 struct ucred *cred; 555 struct thread *td; 556 } */ *ap; 557{ 558 struct vnode *vp = ap->a_vp; 559 struct ucred *cred = ap->a_cred; 560 struct thread *td = ap->a_td; 561 struct vattr vat; 562 vm_object_t object; 563 int error = 0; 564 565 GIANT_REQUIRED; 566 567 if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE) 568 return (0); 569 570retry: 571 if ((object = vp->v_object) == NULL) { 572 if (vp->v_type == VREG || vp->v_type == VDIR) { 573 if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0) 574 goto retn; 575 object = vnode_pager_alloc(vp, vat.va_size, 0, 0); 576 } else if (devsw(vp->v_rdev) != NULL) { 577 /* 578 * This simply allocates the biggest object possible 579 * for a disk vnode. This should be fixed, but doesn't 580 * cause any problems (yet). 581 */ 582 object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0); 583 } else { 584 goto retn; 585 } 586 /* 587 * Dereference the reference we just created. This assumes 588 * that the object is associated with the vp. 
589 */ 590 object->ref_count--; 591 vp->v_usecount--; 592 } else { 593 if (object->flags & OBJ_DEAD) { 594 VOP_UNLOCK(vp, 0, td); 595 tsleep(object, PVM, "vodead", 0); 596 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 597 goto retry; 598 } 599 } 600 601 KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object")); 602 vp->v_flag |= VOBJBUF; 603 604retn: 605 return (error); 606} 607 608/* Destroy the VM system object associated with this vnode */ 609int 610vop_stddestroyvobject(ap) 611 struct vop_destroyvobject_args /* { 612 struct vnode *vp; 613 } */ *ap; 614{ 615 struct vnode *vp = ap->a_vp; 616 vm_object_t obj = vp->v_object; 617 618 GIANT_REQUIRED; 619 620 if (vp->v_object == NULL) 621 return (0); 622 623 if (obj->ref_count == 0) { 624 /* 625 * vclean() may be called twice. The first time 626 * removes the primary reference to the object, 627 * the second time goes one further and is a 628 * special-case to terminate the object. 629 * 630 * don't double-terminate the object 631 */ 632 if ((obj->flags & OBJ_DEAD) == 0) 633 vm_object_terminate(obj); 634 } else { 635 /* 636 * Woe to the process that tries to page now :-). 637 */ 638 vm_pager_deallocate(obj); 639 } 640 return (0); 641} 642 643/* 644 * Return the underlying VM object. This routine may be called with or 645 * without the vnode interlock held. If called without, the returned 646 * object is not guarenteed to be valid. The syncer typically gets the 647 * object without holding the interlock in order to quickly test whether 648 * it might be dirty before going heavy-weight. vm_object's use zalloc 649 * and thus stable-storage, so this is safe. 650 */ 651int 652vop_stdgetvobject(ap) 653 struct vop_getvobject_args /* { 654 struct vnode *vp; 655 struct vm_object **objpp; 656 } */ *ap; 657{ 658 struct vnode *vp = ap->a_vp; 659 struct vm_object **objpp = ap->a_objpp; 660 661 if (objpp) 662 *objpp = vp->v_object; 663 return (vp->v_object ? 
0 : EINVAL); 664} 665 666/* XXX Needs good comment and VOP_BMAP(9) manpage */ 667int 668vop_stdbmap(ap) 669 struct vop_bmap_args /* { 670 struct vnode *a_vp; 671 daddr_t a_bn; 672 struct vnode **a_vpp; 673 daddr_t *a_bnp; 674 int *a_runp; 675 int *a_runb; 676 } */ *ap; 677{ 678 679 if (ap->a_vpp != NULL) 680 *ap->a_vpp = ap->a_vp; 681 if (ap->a_bnp != NULL) 682 *ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize); 683 if (ap->a_runp != NULL) 684 *ap->a_runp = 0; 685 if (ap->a_runb != NULL) 686 *ap->a_runb = 0; 687 return (0); 688} 689 690/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */ 691int 692vop_stdgetpages(ap) 693 struct vop_getpages_args /* { 694 struct vnode *a_vp; 695 vm_page_t *a_m; 696 int a_count; 697 int a_reqpage; 698 vm_ooffset_t a_offset; 699 } */ *ap; 700{ 701 702 return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, 703 ap->a_count, ap->a_reqpage); 704} 705 706/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */ 707int 708vop_stdputpages(ap) 709 struct vop_putpages_args /* { 710 struct vnode *a_vp; 711 vm_page_t *a_m; 712 int a_count; 713 int a_sync; 714 int *a_rtvals; 715 vm_ooffset_t a_offset; 716 } */ *ap; 717{ 718 719 return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count, 720 ap->a_sync, ap->a_rtvals); 721} 722 723 724 725/* 726 * vfs default ops 727 * used to fill the vfs function table to get reasonable default return values. 
728 */ 729int 730vfs_stdmount (mp, path, data, ndp, td) 731 struct mount *mp; 732 char *path; 733 caddr_t data; 734 struct nameidata *ndp; 735 struct thread *td; 736{ 737 return (0); 738} 739 740int 741vfs_stdunmount (mp, mntflags, td) 742 struct mount *mp; 743 int mntflags; 744 struct thread *td; 745{ 746 return (0); 747} 748 749int 750vfs_stdroot (mp, vpp) 751 struct mount *mp; 752 struct vnode **vpp; 753{ 754 return (EOPNOTSUPP); 755} 756 757int 758vfs_stdstatfs (mp, sbp, td) 759 struct mount *mp; 760 struct statfs *sbp; 761 struct thread *td; 762{ 763 return (EOPNOTSUPP); 764} 765 766int 767vfs_stdvptofh (vp, fhp) 768 struct vnode *vp; 769 struct fid *fhp; 770{ 771 return (EOPNOTSUPP); 772} 773 774int 775vfs_stdstart (mp, flags, td) 776 struct mount *mp; 777 int flags; 778 struct thread *td; 779{ 780 return (0); 781} 782 783int 784vfs_stdquotactl (mp, cmds, uid, arg, td) 785 struct mount *mp; 786 int cmds; 787 uid_t uid; 788 caddr_t arg; 789 struct thread *td; 790{ 791 return (EOPNOTSUPP); 792} 793 794int 795vfs_stdsync (mp, waitfor, cred, td) 796 struct mount *mp; 797 int waitfor; 798 struct ucred *cred; 799 struct thread *td; 800{ 801 return (0); 802} 803 804int 805vfs_stdvget (mp, ino, flags, vpp) 806 struct mount *mp; 807 ino_t ino; 808 int flags; 809 struct vnode **vpp; 810{ 811 return (EOPNOTSUPP); 812} 813 814int 815vfs_stdfhtovp (mp, fhp, vpp) 816 struct mount *mp; 817 struct fid *fhp; 818 struct vnode **vpp; 819{ 820 return (EOPNOTSUPP); 821} 822 823int 824vfs_stdinit (vfsp) 825 struct vfsconf *vfsp; 826{ 827 return (0); 828} 829 830int 831vfs_stduninit (vfsp) 832 struct vfsconf *vfsp; 833{ 834 return(0); 835} 836 837int 838vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td) 839 struct mount *mp; 840 int cmd; 841 struct vnode *filename_vp; 842 int attrnamespace; 843 const char *attrname; 844 struct thread *td; 845{ 846 return(EOPNOTSUPP); 847} 848 849/* end of vfs default ops */ 850