/* vfs_default.c — revision 31727 */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
5630489Sphk * 5730489Sphk */ 5830489Sphk 5930489Sphkvop_t **default_vnodeop_p; 6030489Sphkstatic struct vnodeopv_entry_desc default_vnodeop_entries[] = { 6130492Sphk { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 6230743Sphk { &vop_abortop_desc, (vop_t *) vop_null }, 6330492Sphk { &vop_advlock_desc, (vop_t *) vop_einval }, 6430743Sphk { &vop_bwrite_desc, (vop_t *) vop_stdbwrite }, 6530492Sphk { &vop_close_desc, (vop_t *) vop_null }, 6630492Sphk { &vop_fsync_desc, (vop_t *) vop_null }, 6730492Sphk { &vop_ioctl_desc, (vop_t *) vop_enotty }, 6830496Sphk { &vop_islocked_desc, (vop_t *) vop_noislocked }, 6930739Sphk { &vop_lease_desc, (vop_t *) vop_null }, 7030496Sphk { &vop_lock_desc, (vop_t *) vop_nolock }, 7130492Sphk { &vop_mmap_desc, (vop_t *) vop_einval }, 7230492Sphk { &vop_open_desc, (vop_t *) vop_null }, 7330492Sphk { &vop_pathconf_desc, (vop_t *) vop_einval }, 7430489Sphk { &vop_poll_desc, (vop_t *) vop_nopoll }, 7530492Sphk { &vop_readlink_desc, (vop_t *) vop_einval }, 7630492Sphk { &vop_reallocblks_desc, (vop_t *) vop_eopnotsupp }, 7730489Sphk { &vop_revoke_desc, (vop_t *) vop_revoke }, 7830489Sphk { &vop_strategy_desc, (vop_t *) vop_nostrategy }, 7930496Sphk { &vop_unlock_desc, (vop_t *) vop_nounlock }, 8030489Sphk { NULL, NULL } 8130489Sphk}; 8230489Sphk 8330489Sphkstatic struct vnodeopv_desc default_vnodeop_opv_desc = 8430489Sphk { &default_vnodeop_p, default_vnodeop_entries }; 8530489Sphk 8630489SphkVNODEOP_SET(default_vnodeop_opv_desc); 8730489Sphk 8830489Sphkint 8930492Sphkvop_eopnotsupp(struct vop_generic_args *ap) 9030489Sphk{ 9130489Sphk /* 9230492Sphk printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 9330489Sphk */ 9430489Sphk 9530489Sphk return (EOPNOTSUPP); 9630489Sphk} 9730489Sphk 9830489Sphkint 9930492Sphkvop_ebadf(struct vop_generic_args *ap) 10030489Sphk{ 10130489Sphk 10230492Sphk return (EBADF); 10330492Sphk} 10430492Sphk 10530492Sphkint 10630492Sphkvop_enotty(struct vop_generic_args *ap) 10730492Sphk{ 10830492Sphk 10930492Sphk return 
(ENOTTY); 11030492Sphk} 11130492Sphk 11230492Sphkint 11330492Sphkvop_einval(struct vop_generic_args *ap) 11430492Sphk{ 11530492Sphk 11630492Sphk return (EINVAL); 11730492Sphk} 11830492Sphk 11930492Sphkint 12030492Sphkvop_null(struct vop_generic_args *ap) 12130492Sphk{ 12230492Sphk 12330492Sphk return (0); 12430492Sphk} 12530492Sphk 12630492Sphkint 12730492Sphkvop_defaultop(struct vop_generic_args *ap) 12830492Sphk{ 12930492Sphk 13030489Sphk return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap)); 13130489Sphk} 13230489Sphk 13330489Sphkstatic int 13430489Sphkvop_nostrategy (struct vop_strategy_args *ap) 13530489Sphk{ 13630489Sphk printf("No strategy for buffer at %p\n", ap->a_bp); 13730489Sphk vprint("", ap->a_bp->b_vp); 13830489Sphk ap->a_bp->b_flags |= B_ERROR; 13930489Sphk ap->a_bp->b_error = EOPNOTSUPP; 14030489Sphk biodone(ap->a_bp); 14130489Sphk return (EOPNOTSUPP); 14230489Sphk} 14330492Sphk 14430492Sphkint 14530492Sphkvop_stdpathconf(ap) 14630492Sphk struct vop_pathconf_args /* { 14730492Sphk struct vnode *a_vp; 14830492Sphk int a_name; 14930492Sphk int *a_retval; 15030492Sphk } */ *ap; 15130492Sphk{ 15230492Sphk 15330492Sphk switch (ap->a_name) { 15430492Sphk case _PC_LINK_MAX: 15530492Sphk *ap->a_retval = LINK_MAX; 15630492Sphk return (0); 15730492Sphk case _PC_MAX_CANON: 15830492Sphk *ap->a_retval = MAX_CANON; 15930492Sphk return (0); 16030492Sphk case _PC_MAX_INPUT: 16130492Sphk *ap->a_retval = MAX_INPUT; 16230492Sphk return (0); 16330492Sphk case _PC_PIPE_BUF: 16430492Sphk *ap->a_retval = PIPE_BUF; 16530492Sphk return (0); 16630492Sphk case _PC_CHOWN_RESTRICTED: 16730492Sphk *ap->a_retval = 1; 16830492Sphk return (0); 16930492Sphk case _PC_VDISABLE: 17030492Sphk *ap->a_retval = _POSIX_VDISABLE; 17130492Sphk return (0); 17230492Sphk default: 17330492Sphk return (EINVAL); 17430492Sphk } 17530492Sphk /* NOTREACHED */ 17630492Sphk} 17730513Sphk 17830513Sphk/* 17930513Sphk * Standard lock, unlock and islocked functions. 
/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e.: vp->v_data points to the lock!
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/* v_data is assumed to start with the struct lock (see above). */
	struct lock *l = (struct lock*)ap->a_vp->v_data;

	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l = (struct lock*)ap->a_vp->v_data;

	/* Force a release regardless of what other flags were passed in. */
	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct lock *l = (struct lock*)ap->a_vp->v_data;

	return (lockstatus(l));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

/* Default bwrite: hand the buffer straight to bwrite(). */
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	/* Allocate the auxiliary lock lazily on first use. */
	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/* FALLTHROUGH (when DEBUG_VFS_LOCKS is not defined):
		 * exclusive requests are downgraded to shared. */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		/* Nothing to do: the shared count is all we maintain. */
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
}
/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	/* Allocate the auxiliary lock lazily on first use. */
	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		/* Exclusive requests are deliberately downgraded to shared. */
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		/* Nothing to do: the shared count is all we maintain. */
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}
40730743Sphk */ 40830743Sphkint 40930743Sphkvop_nounlock(ap) 41030743Sphk struct vop_unlock_args /* { 41130743Sphk struct vnode *a_vp; 41230743Sphk int a_flags; 41330743Sphk struct proc *a_p; 41430743Sphk } */ *ap; 41530743Sphk{ 41630743Sphk struct vnode *vp = ap->a_vp; 41730743Sphk 41830743Sphk if (vp->v_vnlock == NULL) { 41930743Sphk if (ap->a_flags & LK_INTERLOCK) 42030743Sphk simple_unlock(&ap->a_vp->v_interlock); 42130743Sphk return (0); 42230743Sphk } 42330743Sphk return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags, 42430743Sphk &ap->a_vp->v_interlock, ap->a_p)); 42530743Sphk} 42630743Sphk 42730743Sphk/* 42830743Sphk * Return whether or not the node is in use. 42930743Sphk */ 43030743Sphkint 43130743Sphkvop_noislocked(ap) 43230743Sphk struct vop_islocked_args /* { 43330743Sphk struct vnode *a_vp; 43430743Sphk } */ *ap; 43530743Sphk{ 43630743Sphk struct vnode *vp = ap->a_vp; 43730743Sphk 43830743Sphk if (vp->v_vnlock == NULL) 43930743Sphk return (0); 44030743Sphk return (lockstatus(vp->v_vnlock)); 44130743Sphk} 44230743Sphk 443