/* vfs_default.c — FreeBSD head revision 58934 (2000-04-02) */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30275970Scy * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31275970Scy * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32275970Scy * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33275970Scy * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34275970Scy * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35275970Scy * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36275970Scy * SUCH DAMAGE. 37275970Scy * 38275970Scy * 39275970Scy * $FreeBSD: head/sys/kern/vfs_default.c 58934 2000-04-02 15:24:56Z phk $ 40275970Scy */ 41275970Scy 42275970Scy#include <sys/param.h> 43275970Scy#include <sys/systm.h> 44275970Scy#include <sys/buf.h> 45275970Scy#include <sys/kernel.h> 46275970Scy#include <sys/lock.h> 47275970Scy#include <sys/malloc.h> 48275970Scy#include <sys/mount.h> 49275970Scy#include <sys/unistd.h> 50275970Scy#include <sys/vnode.h> 51275970Scy#include <sys/poll.h> 52275970Scy 53275970Scystatic int vop_nostrategy __P((struct vop_strategy_args *)); 54275970Scy 55275970Scy/* 56275970Scy * This vnode table stores what we want to do if the filesystem doesn't 57275970Scy * implement a particular VOP. 58275970Scy * 59275970Scy * If there is no specific entry here, we will return EOPNOTSUPP. 
60275970Scy * 61275970Scy */ 62275970Scy 63275970Scyvop_t **default_vnodeop_p; 64275970Scystatic struct vnodeopv_entry_desc default_vnodeop_entries[] = { 65275970Scy { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 66275970Scy { &vop_advlock_desc, (vop_t *) vop_einval }, 67275970Scy { &vop_bwrite_desc, (vop_t *) vop_stdbwrite }, 68275970Scy { &vop_close_desc, (vop_t *) vop_null }, 69275970Scy { &vop_fsync_desc, (vop_t *) vop_null }, 70275970Scy { &vop_ioctl_desc, (vop_t *) vop_enotty }, 71275970Scy { &vop_islocked_desc, (vop_t *) vop_noislocked }, 72275970Scy { &vop_lease_desc, (vop_t *) vop_null }, 73275970Scy { &vop_lock_desc, (vop_t *) vop_nolock }, 74275970Scy { &vop_mmap_desc, (vop_t *) vop_einval }, 75275970Scy { &vop_open_desc, (vop_t *) vop_null }, 76275970Scy { &vop_pathconf_desc, (vop_t *) vop_einval }, 77275970Scy { &vop_poll_desc, (vop_t *) vop_nopoll }, 78275970Scy { &vop_readlink_desc, (vop_t *) vop_einval }, 79275970Scy { &vop_reallocblks_desc, (vop_t *) vop_eopnotsupp }, 80275970Scy { &vop_revoke_desc, (vop_t *) vop_revoke }, 81275970Scy { &vop_strategy_desc, (vop_t *) vop_nostrategy }, 82275970Scy { &vop_unlock_desc, (vop_t *) vop_nounlock }, 83275970Scy { &vop_getacl_desc, (vop_t *) vop_eopnotsupp }, 84275970Scy { &vop_setacl_desc, (vop_t *) vop_eopnotsupp }, 85275970Scy { &vop_aclcheck_desc, (vop_t *) vop_eopnotsupp }, 86275970Scy { &vop_getextattr_desc, (vop_t *) vop_eopnotsupp }, 87275970Scy { &vop_setextattr_desc, (vop_t *) vop_eopnotsupp }, 88275970Scy { NULL, NULL } 89275970Scy}; 90275970Scy 91275970Scystatic struct vnodeopv_desc default_vnodeop_opv_desc = 92275970Scy { &default_vnodeop_p, default_vnodeop_entries }; 93275970Scy 94275970ScyVNODEOP_SET(default_vnodeop_opv_desc); 95275970Scy 96275970Scyint 97275970Scyvop_eopnotsupp(struct vop_generic_args *ap) 98275970Scy{ 99275970Scy /* 100275970Scy printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 101275970Scy */ 102275970Scy 103275970Scy return (EOPNOTSUPP); 104275970Scy} 105275970Scy 
106275970Scyint 107275970Scyvop_ebadf(struct vop_generic_args *ap) 108275970Scy{ 109275970Scy 110275970Scy return (EBADF); 111275970Scy} 112275970Scy 113275970Scyint 114275970Scyvop_enotty(struct vop_generic_args *ap) 115275970Scy{ 116275970Scy 117275970Scy return (ENOTTY); 118275970Scy} 119275970Scy 120275970Scyint 121275970Scyvop_einval(struct vop_generic_args *ap) 122275970Scy{ 123275970Scy 124275970Scy return (EINVAL); 125275970Scy} 126275970Scy 127275970Scyint 128275970Scyvop_null(struct vop_generic_args *ap) 129275970Scy{ 130275970Scy 131275970Scy return (0); 132275970Scy} 133275970Scy 134275970Scyint 135275970Scyvop_defaultop(struct vop_generic_args *ap) 136275970Scy{ 137275970Scy 138275970Scy return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap)); 139275970Scy} 140275970Scy 141275970Scyint 142275970Scyvop_panic(struct vop_generic_args *ap) 143275970Scy{ 144275970Scy 145275970Scy printf("vop_panic[%s]\n", ap->a_desc->vdesc_name); 146275970Scy panic("Filesystem goof"); 147275970Scy return (0); 148275970Scy} 149275970Scy 150275970Scy/* 151275970Scy * vop_nostrategy: 152275970Scy * 153275970Scy * Strategy routine for VFS devices that have none. 154275970Scy * 155275970Scy * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy 156275970Scy * routine. Typically this is done for a BIO_READ strategy call. 157275970Scy * Typically B_INVAL is assumed to already be clear prior to a write 158275970Scy * and should not be cleared manually unless you just made the buffer 159275970Scy * invalid. BIO_ERROR should be cleared either way. 
160275970Scy */ 161275970Scy 162275970Scystatic int 163275970Scyvop_nostrategy (struct vop_strategy_args *ap) 164275970Scy{ 165275970Scy printf("No strategy for buffer at %p\n", ap->a_bp); 166275970Scy vprint("", ap->a_vp); 167275970Scy vprint("", ap->a_bp->b_vp); 168275970Scy ap->a_bp->b_ioflags |= BIO_ERROR; 169275970Scy ap->a_bp->b_error = EOPNOTSUPP; 170275970Scy biodone(ap->a_bp); 171275970Scy return (EOPNOTSUPP); 172275970Scy} 173275970Scy 174275970Scyint 175275970Scyvop_stdpathconf(ap) 176275970Scy struct vop_pathconf_args /* { 177275970Scy struct vnode *a_vp; 178275970Scy int a_name; 179275970Scy int *a_retval; 180275970Scy } */ *ap; 181275970Scy{ 182275970Scy 183275970Scy switch (ap->a_name) { 184275970Scy case _PC_LINK_MAX: 185275970Scy *ap->a_retval = LINK_MAX; 186275970Scy return (0); 187275970Scy case _PC_MAX_CANON: 188275970Scy *ap->a_retval = MAX_CANON; 189275970Scy return (0); 190275970Scy case _PC_MAX_INPUT: 191275970Scy *ap->a_retval = MAX_INPUT; 192275970Scy return (0); 193275970Scy case _PC_PIPE_BUF: 194275970Scy *ap->a_retval = PIPE_BUF; 195275970Scy return (0); 196275970Scy case _PC_CHOWN_RESTRICTED: 197275970Scy *ap->a_retval = 1; 198275970Scy return (0); 199275970Scy case _PC_VDISABLE: 200275970Scy *ap->a_retval = _POSIX_VDISABLE; 201275970Scy return (0); 202275970Scy default: 203275970Scy return (EINVAL); 204275970Scy } 205275970Scy /* NOTREACHED */ 206275970Scy} 207275970Scy 208275970Scy/* 209275970Scy * Standard lock, unlock and islocked functions. 210275970Scy * 211275970Scy * These depend on the lock structure being the first element in the 212275970Scy * inode, ie: vp->v_data points to the the lock! 
213275970Scy */ 214275970Scyint 215275970Scyvop_stdlock(ap) 216275970Scy struct vop_lock_args /* { 217275970Scy struct vnode *a_vp; 218275970Scy int a_flags; 219275970Scy struct proc *a_p; 220275970Scy } */ *ap; 221275970Scy{ 222275970Scy struct lock *l; 223275970Scy 224275970Scy if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 225275970Scy if (ap->a_flags & LK_INTERLOCK) 226275970Scy simple_unlock(&ap->a_vp->v_interlock); 227275970Scy return 0; 228275970Scy } 229275970Scy 230275970Scy#ifndef DEBUG_LOCKS 231275970Scy return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p)); 232275970Scy#else 233275970Scy return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p, 234275970Scy "vop_stdlock", ap->a_vp->filename, ap->a_vp->line)); 235275970Scy#endif 236275970Scy} 237275970Scy 238275970Scyint 239275970Scyvop_stdunlock(ap) 240275970Scy struct vop_unlock_args /* { 241275970Scy struct vnode *a_vp; 242275970Scy int a_flags; 243275970Scy struct proc *a_p; 244275970Scy } */ *ap; 245275970Scy{ 246275970Scy struct lock *l; 247275970Scy 248275970Scy if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 249275970Scy if (ap->a_flags & LK_INTERLOCK) 250275970Scy simple_unlock(&ap->a_vp->v_interlock); 251275970Scy return 0; 252275970Scy } 253275970Scy 254275970Scy return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock, 255275970Scy ap->a_p)); 256275970Scy} 257275970Scy 258275970Scyint 259275970Scyvop_stdislocked(ap) 260275970Scy struct vop_islocked_args /* { 261275970Scy struct vnode *a_vp; 262275970Scy struct proc *a_p; 263275970Scy } */ *ap; 264275970Scy{ 265275970Scy struct lock *l; 266275970Scy 267275970Scy if ((l = (struct lock *)ap->a_vp->v_data) == NULL) 268275970Scy return 0; 269275970Scy 270275970Scy return (lockstatus(l, ap->a_p)); 271275970Scy} 272275970Scy 273275970Scy/* 274275970Scy * Return true for select/poll. 
275275970Scy */ 276275970Scyint 277275970Scyvop_nopoll(ap) 278275970Scy struct vop_poll_args /* { 279275970Scy struct vnode *a_vp; 280275970Scy int a_events; 281275970Scy struct ucred *a_cred; 282275970Scy struct proc *a_p; 283275970Scy } */ *ap; 284275970Scy{ 285275970Scy /* 286275970Scy * Return true for read/write. If the user asked for something 287275970Scy * special, return POLLNVAL, so that clients have a way of 288275970Scy * determining reliably whether or not the extended 289275970Scy * functionality is present without hard-coding knowledge 290275970Scy * of specific filesystem implementations. 291275970Scy */ 292275970Scy if (ap->a_events & ~POLLSTANDARD) 293275970Scy return (POLLNVAL); 294275970Scy 295275970Scy return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 296275970Scy} 297275970Scy 298275970Scy/* 299275970Scy * Implement poll for local filesystems that support it. 300275970Scy */ 301275970Scyint 302275970Scyvop_stdpoll(ap) 303275970Scy struct vop_poll_args /* { 304275970Scy struct vnode *a_vp; 305275970Scy int a_events; 306275970Scy struct ucred *a_cred; 307275970Scy struct proc *a_p; 308275970Scy } */ *ap; 309275970Scy{ 310275970Scy if ((ap->a_events & ~POLLSTANDARD) == 0) 311275970Scy return (ap->a_events & (POLLRDNORM|POLLWRNORM)); 312275970Scy return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events)); 313275970Scy} 314275970Scy 315275970Scyint 316275970Scyvop_stdbwrite(ap) 317275970Scy struct vop_bwrite_args *ap; 318275970Scy{ 319275970Scy return (bwrite(ap->a_bp)); 320275970Scy} 321275970Scy 322275970Scy/* 323275970Scy * Stubs to use when there is no locking to be done on the underlying object. 324275970Scy * A minimal shared lock is necessary to ensure that the underlying object 325275970Scy * is not revoked while an operation is in progress. So, an active shared 326275970Scy * count is maintained in an auxillary vnode lock structure. 
327275970Scy */ 328275970Scyint 329275970Scyvop_sharedlock(ap) 330275970Scy struct vop_lock_args /* { 331275970Scy struct vnode *a_vp; 332275970Scy int a_flags; 333275970Scy struct proc *a_p; 334275970Scy } */ *ap; 335275970Scy{ 336275970Scy /* 337275970Scy * This code cannot be used until all the non-locking filesystems 338275970Scy * (notably NFS) are converted to properly lock and release nodes. 339275970Scy * Also, certain vnode operations change the locking state within 340275970Scy * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 341275970Scy * and symlink). Ideally these operations should not change the 342275970Scy * lock state, but should be changed to let the caller of the 343275970Scy * function unlock them. Otherwise all intermediate vnode layers 344275970Scy * (such as union, umapfs, etc) must catch these functions to do 345275970Scy * the necessary locking at their layer. Note that the inactive 346275970Scy * and lookup operations also change their lock state, but this 347275970Scy * cannot be avoided, so these two operations will always need 348275970Scy * to be handled in intermediate layers. 349275970Scy */ 350275970Scy struct vnode *vp = ap->a_vp; 351275970Scy int vnflags, flags = ap->a_flags; 352275970Scy 353275970Scy if (vp->v_vnlock == NULL) { 354275970Scy if ((flags & LK_TYPE_MASK) == LK_DRAIN) 355275970Scy return (0); 356275970Scy MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 357275970Scy M_VNODE, M_WAITOK); 358275970Scy lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE); 359275970Scy } 360275970Scy switch (flags & LK_TYPE_MASK) { 361275970Scy case LK_DRAIN: 362275970Scy vnflags = LK_DRAIN; 363275970Scy break; 364275970Scy case LK_EXCLUSIVE: 365275970Scy#ifdef DEBUG_VFS_LOCKS 366275970Scy /* 367275970Scy * Normally, we use shared locks here, but that confuses 368275970Scy * the locking assertions. 
369275970Scy */ 370275970Scy vnflags = LK_EXCLUSIVE; 371282408Scy break; 372282408Scy#endif 373282408Scy case LK_SHARED: 374282408Scy vnflags = LK_SHARED; 375282408Scy break; 376282408Scy case LK_UPGRADE: 377282408Scy case LK_EXCLUPGRADE: 378282408Scy case LK_DOWNGRADE: 379282408Scy return (0); 380282408Scy case LK_RELEASE: 381282408Scy default: 382282408Scy panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK); 383282408Scy } 384282408Scy if (flags & LK_INTERLOCK) 385275970Scy vnflags |= LK_INTERLOCK; 386275970Scy#ifndef DEBUG_LOCKS 387275970Scy return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 388275970Scy#else 389275970Scy return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p, 390275970Scy "vop_sharedlock", vp->filename, vp->line)); 391275970Scy#endif 392275970Scy} 393275970Scy 394275970Scy/* 395275970Scy * Stubs to use when there is no locking to be done on the underlying object. 396275970Scy * A minimal shared lock is necessary to ensure that the underlying object 397275970Scy * is not revoked while an operation is in progress. So, an active shared 398275970Scy * count is maintained in an auxillary vnode lock structure. 399275970Scy */ 400275970Scyint 401275970Scyvop_nolock(ap) 402275970Scy struct vop_lock_args /* { 403275970Scy struct vnode *a_vp; 404275970Scy int a_flags; 405275970Scy struct proc *a_p; 406275970Scy } */ *ap; 407275970Scy{ 408275970Scy#ifdef notyet 409275970Scy /* 410275970Scy * This code cannot be used until all the non-locking filesystems 411275970Scy * (notably NFS) are converted to properly lock and release nodes. 412275970Scy * Also, certain vnode operations change the locking state within 413275970Scy * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 414275970Scy * and symlink). Ideally these operations should not change the 415275970Scy * lock state, but should be changed to let the caller of the 416275970Scy * function unlock them. 
Otherwise all intermediate vnode layers 417275970Scy * (such as union, umapfs, etc) must catch these functions to do 418275970Scy * the necessary locking at their layer. Note that the inactive 419275970Scy * and lookup operations also change their lock state, but this 420275970Scy * cannot be avoided, so these two operations will always need 421275970Scy * to be handled in intermediate layers. 422275970Scy */ 423275970Scy struct vnode *vp = ap->a_vp; 424275970Scy int vnflags, flags = ap->a_flags; 425275970Scy 426275970Scy if (vp->v_vnlock == NULL) { 427275970Scy if ((flags & LK_TYPE_MASK) == LK_DRAIN) 428275970Scy return (0); 429275970Scy MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 430275970Scy M_VNODE, M_WAITOK); 431275970Scy lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE); 432275970Scy } 433275970Scy switch (flags & LK_TYPE_MASK) { 434275970Scy case LK_DRAIN: 435275970Scy vnflags = LK_DRAIN; 436275970Scy break; 437275970Scy case LK_EXCLUSIVE: 438275970Scy case LK_SHARED: 439275970Scy vnflags = LK_SHARED; 440275970Scy break; 441275970Scy case LK_UPGRADE: 442275970Scy case LK_EXCLUPGRADE: 443275970Scy case LK_DOWNGRADE: 444275970Scy return (0); 445275970Scy case LK_RELEASE: 446275970Scy default: 447275970Scy panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK); 448275970Scy } 449275970Scy if (flags & LK_INTERLOCK) 450275970Scy vnflags |= LK_INTERLOCK; 451275970Scy return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 452275970Scy#else /* for now */ 453275970Scy /* 454275970Scy * Since we are not using the lock manager, we must clear 455275970Scy * the interlock here. 456275970Scy */ 457275970Scy if (ap->a_flags & LK_INTERLOCK) 458275970Scy simple_unlock(&ap->a_vp->v_interlock); 459275970Scy return (0); 460275970Scy#endif 461275970Scy} 462275970Scy 463275970Scy/* 464275970Scy * Do the inverse of vop_nolock, handling the interlock in a compatible way. 
465275970Scy */ 466275970Scyint 467275970Scyvop_nounlock(ap) 468275970Scy struct vop_unlock_args /* { 469275970Scy struct vnode *a_vp; 470275970Scy int a_flags; 471275970Scy struct proc *a_p; 472275970Scy } */ *ap; 473275970Scy{ 474275970Scy struct vnode *vp = ap->a_vp; 475275970Scy 476275970Scy if (vp->v_vnlock == NULL) { 477275970Scy if (ap->a_flags & LK_INTERLOCK) 478275970Scy simple_unlock(&ap->a_vp->v_interlock); 479275970Scy return (0); 480275970Scy } 481275970Scy return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags, 482275970Scy &ap->a_vp->v_interlock, ap->a_p)); 483275970Scy} 484275970Scy 485275970Scy/* 486275970Scy * Return whether or not the node is in use. 487275970Scy */ 488275970Scyint 489275970Scyvop_noislocked(ap) 490275970Scy struct vop_islocked_args /* { 491275970Scy struct vnode *a_vp; 492275970Scy struct proc *a_p; 493275970Scy } */ *ap; 494275970Scy{ 495275970Scy struct vnode *vp = ap->a_vp; 496275970Scy 497275970Scy if (vp->v_vnlock == NULL) 498275970Scy return (0); 499275970Scy return (lockstatus(vp->v_vnlock, ap->a_p)); 500275970Scy} 501275970Scy 502275970Scy/* 503275970Scy * vfs default ops 504275970Scy * used to fill the vfs fucntion table to get reasonable default return values. 
505275970Scy */ 506275970Scyint 507275970Scyvfs_stdmount (mp, path, data, ndp, p) 508275970Scy struct mount *mp; 509275970Scy char *path; 510275970Scy caddr_t data; 511275970Scy struct nameidata *ndp; 512275970Scy struct proc *p; 513275970Scy{ 514275970Scy return (0); 515275970Scy} 516275970Scy 517275970Scyint 518275970Scyvfs_stdunmount (mp, mntflags, p) 519275970Scy struct mount *mp; 520275970Scy int mntflags; 521275970Scy struct proc *p; 522275970Scy{ 523275970Scy return (0); 524275970Scy} 525275970Scy 526275970Scyint 527275970Scyvfs_stdroot (mp, vpp) 528275970Scy struct mount *mp; 529275970Scy struct vnode **vpp; 530275970Scy{ 531275970Scy return (EOPNOTSUPP); 532275970Scy} 533275970Scy 534275970Scyint 535275970Scyvfs_stdstatfs (mp, sbp, p) 536275970Scy struct mount *mp; 537275970Scy struct statfs *sbp; 538275970Scy struct proc *p; 539275970Scy{ 540275970Scy return (EOPNOTSUPP); 541275970Scy} 542275970Scy 543275970Scyint 544275970Scyvfs_stdvptofh (vp, fhp) 545275970Scy struct vnode *vp; 546275970Scy struct fid *fhp; 547275970Scy{ 548275970Scy return (EOPNOTSUPP); 549275970Scy} 550275970Scy 551275970Scyint 552275970Scyvfs_stdstart (mp, flags, p) 553275970Scy struct mount *mp; 554275970Scy int flags; 555275970Scy struct proc *p; 556275970Scy{ 557275970Scy return (0); 558275970Scy} 559275970Scy 560275970Scyint 561275970Scyvfs_stdquotactl (mp, cmds, uid, arg, p) 562275970Scy struct mount *mp; 563275970Scy int cmds; 564275970Scy uid_t uid; 565275970Scy caddr_t arg; 566275970Scy struct proc *p; 567275970Scy{ 568275970Scy return (EOPNOTSUPP); 569275970Scy} 570275970Scy 571275970Scyint 572275970Scyvfs_stdsync (mp, waitfor, cred, p) 573275970Scy struct mount *mp; 574275970Scy int waitfor; 575275970Scy struct ucred *cred; 576275970Scy struct proc *p; 577275970Scy{ 578275970Scy return (0); 579275970Scy} 580275970Scy 581275970Scyint 582275970Scyvfs_stdvget (mp, ino, vpp) 583275970Scy struct mount *mp; 584275970Scy ino_t ino; 585275970Scy struct vnode **vpp; 586275970Scy{ 
587275970Scy return (EOPNOTSUPP); 588275970Scy} 589275970Scy 590275970Scyint 591275970Scyvfs_stdfhtovp (mp, fhp, vpp) 592275970Scy struct mount *mp; 593275970Scy struct fid *fhp; 594275970Scy struct vnode **vpp; 595275970Scy{ 596275970Scy return (EOPNOTSUPP); 597275970Scy} 598275970Scy 599275970Scyint 600275970Scyvfs_stdcheckexp (mp, nam, extflagsp, credanonp) 601275970Scy struct mount *mp; 602275970Scy struct sockaddr *nam; 603275970Scy int *extflagsp; 604275970Scy struct ucred **credanonp; 605275970Scy{ 606275970Scy return (EOPNOTSUPP); 607275970Scy} 608275970Scy 609275970Scyint 610275970Scyvfs_stdinit (vfsp) 611275970Scy struct vfsconf *vfsp; 612275970Scy{ 613275970Scy return (0); 614275970Scy} 615275970Scy 616275970Scyint 617275970Scyvfs_stduninit (vfsp) 618275970Scy struct vfsconf *vfsp; 619275970Scy{ 620275970Scy return(0); 621275970Scy} 622275970Scy 623275970Scyint 624275970Scyvfs_stdextattrctl(mp, cmd, attrname, arg, p) 625275970Scy struct mount *mp; 626275970Scy int cmd; 627275970Scy const char *attrname; 628275970Scy caddr_t arg; 629275970Scy struct proc *p; 630275970Scy{ 631275970Scy return(EOPNOTSUPP); 632275970Scy} 633275970Scy 634275970Scy/* end of vfs default ops */ 635275970Scy