/* vfs_default.c — FreeBSD revision 34206 */
130489Sphk/* 230489Sphk * Copyright (c) 1989, 1993 330489Sphk * The Regents of the University of California. All rights reserved. 430489Sphk * 530489Sphk * This code is derived from software contributed 630489Sphk * to Berkeley by John Heidemann of the UCLA Ficus project. 730489Sphk * 830489Sphk * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project 930489Sphk * 1030489Sphk * Redistribution and use in source and binary forms, with or without 1130489Sphk * modification, are permitted provided that the following conditions 1230489Sphk * are met: 1330489Sphk * 1. Redistributions of source code must retain the above copyright 1430489Sphk * notice, this list of conditions and the following disclaimer. 1530489Sphk * 2. Redistributions in binary form must reproduce the above copyright 1630489Sphk * notice, this list of conditions and the following disclaimer in the 1730489Sphk * documentation and/or other materials provided with the distribution. 1830489Sphk * 3. All advertising materials mentioning features or use of this software 1930489Sphk * must display the following acknowledgement: 2030489Sphk * This product includes software developed by the University of 2130489Sphk * California, Berkeley and its contributors. 2230489Sphk * 4. Neither the name of the University nor the names of its contributors 2330489Sphk * may be used to endorse or promote products derived from this software 2430489Sphk * without specific prior written permission. 2530489Sphk * 2630489Sphk * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 2730489Sphk * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2830489Sphk * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2930489Sphk * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 3030489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 3130489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 3230489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 3330489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3430489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3530489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3630489Sphk * SUCH DAMAGE. 3730489Sphk * 3830489Sphk */ 3930489Sphk 4030489Sphk#include <sys/param.h> 4130489Sphk#include <sys/systm.h> 4230489Sphk#include <sys/kernel.h> 4331561Sbde#include <sys/lock.h> 4430743Sphk#include <sys/malloc.h> 4530492Sphk#include <sys/unistd.h> 4630489Sphk#include <sys/vnode.h> 4733964Smsmith#include <sys/mount.h> 4830743Sphk#include <sys/poll.h> 4930489Sphk 5033964Smsmith/* 5133964Smsmith * VFS operations 5233964Smsmith */ 5333964Smsmith 5433964Smsmith/* 5533964Smsmith * Complement to all vpp returning ops. 5633964Smsmith * XXX - initially only to get rid of WILLRELE. 5733964Smsmith */ 5833964Smsmith/* ARGSUSED */ 5933964Smsmithint 6033964Smsmithvfs_vrele(mp, vp) 6133964Smsmith struct mount *mp; 6233964Smsmith struct vnode *vp; 6333964Smsmith{ 6433964Smsmith vrele(vp); 6533964Smsmith return (0); 6633964Smsmith} 6733964Smsmith 6833964Smsmith/* 6933964Smsmith * vnode operations 7033964Smsmith */ 7133964Smsmith 7230489Sphkstatic int vop_nostrategy __P((struct vop_strategy_args *)); 7330489Sphk 7430489Sphk/* 7530489Sphk * This vnode table stores what we want to do if the filesystem doesn't 7630489Sphk * implement a particular VOP. 7730489Sphk * 7830489Sphk * If there is no specific entry here, we will return EOPNOTSUPP. 
7930489Sphk * 8030489Sphk */ 8130489Sphk 8230489Sphkvop_t **default_vnodeop_p; 8330489Sphkstatic struct vnodeopv_entry_desc default_vnodeop_entries[] = { 8430492Sphk { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 8530743Sphk { &vop_abortop_desc, (vop_t *) vop_null }, 8630492Sphk { &vop_advlock_desc, (vop_t *) vop_einval }, 8730743Sphk { &vop_bwrite_desc, (vop_t *) vop_stdbwrite }, 8830492Sphk { &vop_close_desc, (vop_t *) vop_null }, 8930492Sphk { &vop_fsync_desc, (vop_t *) vop_null }, 9030492Sphk { &vop_ioctl_desc, (vop_t *) vop_enotty }, 9130496Sphk { &vop_islocked_desc, (vop_t *) vop_noislocked }, 9230739Sphk { &vop_lease_desc, (vop_t *) vop_null }, 9330496Sphk { &vop_lock_desc, (vop_t *) vop_nolock }, 9430492Sphk { &vop_mmap_desc, (vop_t *) vop_einval }, 9530492Sphk { &vop_open_desc, (vop_t *) vop_null }, 9630492Sphk { &vop_pathconf_desc, (vop_t *) vop_einval }, 9730489Sphk { &vop_poll_desc, (vop_t *) vop_nopoll }, 9830492Sphk { &vop_readlink_desc, (vop_t *) vop_einval }, 9930492Sphk { &vop_reallocblks_desc, (vop_t *) vop_eopnotsupp }, 10030489Sphk { &vop_revoke_desc, (vop_t *) vop_revoke }, 10130489Sphk { &vop_strategy_desc, (vop_t *) vop_nostrategy }, 10230496Sphk { &vop_unlock_desc, (vop_t *) vop_nounlock }, 10330489Sphk { NULL, NULL } 10430489Sphk}; 10530489Sphk 10630489Sphkstatic struct vnodeopv_desc default_vnodeop_opv_desc = 10730489Sphk { &default_vnodeop_p, default_vnodeop_entries }; 10830489Sphk 10930489SphkVNODEOP_SET(default_vnodeop_opv_desc); 11030489Sphk 11130489Sphkint 11230492Sphkvop_eopnotsupp(struct vop_generic_args *ap) 11330489Sphk{ 11430489Sphk /* 11530492Sphk printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 11630489Sphk */ 11730489Sphk 11830489Sphk return (EOPNOTSUPP); 11930489Sphk} 12030489Sphk 12130489Sphkint 12230492Sphkvop_ebadf(struct vop_generic_args *ap) 12330489Sphk{ 12430489Sphk 12530492Sphk return (EBADF); 12630492Sphk} 12730492Sphk 12830492Sphkint 12930492Sphkvop_enotty(struct vop_generic_args *ap) 13030492Sphk{ 
13130492Sphk 13230492Sphk return (ENOTTY); 13330492Sphk} 13430492Sphk 13530492Sphkint 13630492Sphkvop_einval(struct vop_generic_args *ap) 13730492Sphk{ 13830492Sphk 13930492Sphk return (EINVAL); 14030492Sphk} 14130492Sphk 14230492Sphkint 14330492Sphkvop_null(struct vop_generic_args *ap) 14430492Sphk{ 14530492Sphk 14630492Sphk return (0); 14730492Sphk} 14830492Sphk 14930492Sphkint 15030492Sphkvop_defaultop(struct vop_generic_args *ap) 15130492Sphk{ 15230492Sphk 15330489Sphk return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap)); 15430489Sphk} 15530489Sphk 15630489Sphkstatic int 15730489Sphkvop_nostrategy (struct vop_strategy_args *ap) 15830489Sphk{ 15930489Sphk printf("No strategy for buffer at %p\n", ap->a_bp); 16030489Sphk vprint("", ap->a_bp->b_vp); 16130489Sphk ap->a_bp->b_flags |= B_ERROR; 16230489Sphk ap->a_bp->b_error = EOPNOTSUPP; 16330489Sphk biodone(ap->a_bp); 16430489Sphk return (EOPNOTSUPP); 16530489Sphk} 16630492Sphk 16730492Sphkint 16830492Sphkvop_stdpathconf(ap) 16930492Sphk struct vop_pathconf_args /* { 17030492Sphk struct vnode *a_vp; 17130492Sphk int a_name; 17230492Sphk int *a_retval; 17330492Sphk } */ *ap; 17430492Sphk{ 17530492Sphk 17630492Sphk switch (ap->a_name) { 17730492Sphk case _PC_LINK_MAX: 17830492Sphk *ap->a_retval = LINK_MAX; 17930492Sphk return (0); 18030492Sphk case _PC_MAX_CANON: 18130492Sphk *ap->a_retval = MAX_CANON; 18230492Sphk return (0); 18330492Sphk case _PC_MAX_INPUT: 18430492Sphk *ap->a_retval = MAX_INPUT; 18530492Sphk return (0); 18630492Sphk case _PC_PIPE_BUF: 18730492Sphk *ap->a_retval = PIPE_BUF; 18830492Sphk return (0); 18930492Sphk case _PC_CHOWN_RESTRICTED: 19030492Sphk *ap->a_retval = 1; 19130492Sphk return (0); 19230492Sphk case _PC_VDISABLE: 19330492Sphk *ap->a_retval = _POSIX_VDISABLE; 19430492Sphk return (0); 19530492Sphk default: 19630492Sphk return (EINVAL); 19730492Sphk } 19830492Sphk /* NOTREACHED */ 19930492Sphk} 20030513Sphk 20130513Sphk/* 20230513Sphk * Standard lock, unlock and islocked 
functions. 20330513Sphk * 20430513Sphk * These depend on the lock structure being the first element in the 20530513Sphk * inode, ie: vp->v_data points to the the lock! 20630513Sphk */ 20730513Sphkint 20830513Sphkvop_stdlock(ap) 20930513Sphk struct vop_lock_args /* { 21030513Sphk struct vnode *a_vp; 21130513Sphk int a_flags; 21230513Sphk struct proc *a_p; 21330513Sphk } */ *ap; 21430513Sphk{ 21532286Sdyson struct lock *l; 21630513Sphk 21732286Sdyson if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 21832286Sdyson if (ap->a_flags & LK_INTERLOCK) 21932286Sdyson simple_unlock(&ap->a_vp->v_interlock); 22032286Sdyson return 0; 22132286Sdyson } 22232286Sdyson 22330513Sphk return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p)); 22430513Sphk} 22530513Sphk 22630513Sphkint 22730513Sphkvop_stdunlock(ap) 22830513Sphk struct vop_unlock_args /* { 22930513Sphk struct vnode *a_vp; 23030513Sphk int a_flags; 23130513Sphk struct proc *a_p; 23230513Sphk } */ *ap; 23330513Sphk{ 23432286Sdyson struct lock *l; 23530513Sphk 23632286Sdyson if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 23732286Sdyson if (ap->a_flags & LK_INTERLOCK) 23832286Sdyson simple_unlock(&ap->a_vp->v_interlock); 23932286Sdyson return 0; 24032286Sdyson } 24132286Sdyson 24230513Sphk return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock, 24330513Sphk ap->a_p)); 24430513Sphk} 24530513Sphk 24630513Sphkint 24730513Sphkvop_stdislocked(ap) 24830513Sphk struct vop_islocked_args /* { 24930513Sphk struct vnode *a_vp; 25030513Sphk } */ *ap; 25130513Sphk{ 25232286Sdyson struct lock *l; 25330513Sphk 25432286Sdyson if ((l = (struct lock *)ap->a_vp->v_data) == NULL) 25532286Sdyson return 0; 25632286Sdyson 25730513Sphk return (lockstatus(l)); 25830513Sphk} 25930513Sphk 26030743Sphk/* 26130743Sphk * Return true for select/poll. 
26230743Sphk */ 26330743Sphkint 26430743Sphkvop_nopoll(ap) 26530743Sphk struct vop_poll_args /* { 26630743Sphk struct vnode *a_vp; 26730743Sphk int a_events; 26830743Sphk struct ucred *a_cred; 26930743Sphk struct proc *a_p; 27030743Sphk } */ *ap; 27130743Sphk{ 27230743Sphk /* 27331727Swollman * Return true for read/write. If the user asked for something 27431727Swollman * special, return POLLNVAL, so that clients have a way of 27531727Swollman * determining reliably whether or not the extended 27631727Swollman * functionality is present without hard-coding knowledge 27731727Swollman * of specific filesystem implementations. 27830743Sphk */ 27931727Swollman if (ap->a_events & ~POLLSTANDARD) 28031727Swollman return (POLLNVAL); 28131727Swollman 28230743Sphk return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 28330743Sphk} 28430743Sphk 28531727Swollman/* 28631727Swollman * Implement poll for local filesystems that support it. 28731727Swollman */ 28830743Sphkint 28931727Swollmanvop_stdpoll(ap) 29031727Swollman struct vop_poll_args /* { 29131727Swollman struct vnode *a_vp; 29231727Swollman int a_events; 29331727Swollman struct ucred *a_cred; 29431727Swollman struct proc *a_p; 29531727Swollman } */ *ap; 29631727Swollman{ 29731811Swollman if ((ap->a_events & ~POLLSTANDARD) == 0) 29831811Swollman return (ap->a_events & (POLLRDNORM|POLLWRNORM)); 29931727Swollman return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events)); 30031727Swollman} 30131727Swollman 30231727Swollmanint 30330743Sphkvop_stdbwrite(ap) 30430743Sphk struct vop_bwrite_args *ap; 30530743Sphk{ 30630743Sphk return (bwrite(ap->a_bp)); 30730743Sphk} 30830743Sphk 30930743Sphk/* 31030743Sphk * Stubs to use when there is no locking to be done on the underlying object. 31130743Sphk * A minimal shared lock is necessary to ensure that the underlying object 31230743Sphk * is not revoked while an operation is in progress. 
So, an active shared 31330743Sphk * count is maintained in an auxillary vnode lock structure. 31430743Sphk */ 31530743Sphkint 31630743Sphkvop_sharedlock(ap) 31730743Sphk struct vop_lock_args /* { 31830743Sphk struct vnode *a_vp; 31930743Sphk int a_flags; 32030743Sphk struct proc *a_p; 32130743Sphk } */ *ap; 32230743Sphk{ 32330743Sphk /* 32430743Sphk * This code cannot be used until all the non-locking filesystems 32530743Sphk * (notably NFS) are converted to properly lock and release nodes. 32630743Sphk * Also, certain vnode operations change the locking state within 32730743Sphk * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 32830743Sphk * and symlink). Ideally these operations should not change the 32930743Sphk * lock state, but should be changed to let the caller of the 33030743Sphk * function unlock them. Otherwise all intermediate vnode layers 33130743Sphk * (such as union, umapfs, etc) must catch these functions to do 33230743Sphk * the necessary locking at their layer. Note that the inactive 33330743Sphk * and lookup operations also change their lock state, but this 33430743Sphk * cannot be avoided, so these two operations will always need 33530743Sphk * to be handled in intermediate layers. 33630743Sphk */ 33730743Sphk struct vnode *vp = ap->a_vp; 33830743Sphk int vnflags, flags = ap->a_flags; 33930743Sphk 34030743Sphk if (vp->v_vnlock == NULL) { 34130743Sphk if ((flags & LK_TYPE_MASK) == LK_DRAIN) 34230743Sphk return (0); 34330743Sphk MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 34430743Sphk M_VNODE, M_WAITOK); 34534206Sdyson lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE); 34630743Sphk } 34730743Sphk switch (flags & LK_TYPE_MASK) { 34830743Sphk case LK_DRAIN: 34930743Sphk vnflags = LK_DRAIN; 35030743Sphk break; 35130743Sphk case LK_EXCLUSIVE: 35230743Sphk#ifdef DEBUG_VFS_LOCKS 35330743Sphk /* 35430743Sphk * Normally, we use shared locks here, but that confuses 35530743Sphk * the locking assertions. 
35630743Sphk */ 35730743Sphk vnflags = LK_EXCLUSIVE; 35830743Sphk break; 35930743Sphk#endif 36030743Sphk case LK_SHARED: 36130743Sphk vnflags = LK_SHARED; 36230743Sphk break; 36330743Sphk case LK_UPGRADE: 36430743Sphk case LK_EXCLUPGRADE: 36530743Sphk case LK_DOWNGRADE: 36630743Sphk return (0); 36730743Sphk case LK_RELEASE: 36830743Sphk default: 36930743Sphk panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK); 37030743Sphk } 37130743Sphk if (flags & LK_INTERLOCK) 37230743Sphk vnflags |= LK_INTERLOCK; 37330743Sphk return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 37430743Sphk} 37530743Sphk 37630743Sphk/* 37730743Sphk * Stubs to use when there is no locking to be done on the underlying object. 37830743Sphk * A minimal shared lock is necessary to ensure that the underlying object 37930743Sphk * is not revoked while an operation is in progress. So, an active shared 38030743Sphk * count is maintained in an auxillary vnode lock structure. 38130743Sphk */ 38230743Sphkint 38330743Sphkvop_nolock(ap) 38430743Sphk struct vop_lock_args /* { 38530743Sphk struct vnode *a_vp; 38630743Sphk int a_flags; 38730743Sphk struct proc *a_p; 38830743Sphk } */ *ap; 38930743Sphk{ 39030743Sphk#ifdef notyet 39130743Sphk /* 39230743Sphk * This code cannot be used until all the non-locking filesystems 39330743Sphk * (notably NFS) are converted to properly lock and release nodes. 39430743Sphk * Also, certain vnode operations change the locking state within 39530743Sphk * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 39630743Sphk * and symlink). Ideally these operations should not change the 39730743Sphk * lock state, but should be changed to let the caller of the 39830743Sphk * function unlock them. Otherwise all intermediate vnode layers 39930743Sphk * (such as union, umapfs, etc) must catch these functions to do 40030743Sphk * the necessary locking at their layer. 
Note that the inactive 40130743Sphk * and lookup operations also change their lock state, but this 40230743Sphk * cannot be avoided, so these two operations will always need 40330743Sphk * to be handled in intermediate layers. 40430743Sphk */ 40530743Sphk struct vnode *vp = ap->a_vp; 40630743Sphk int vnflags, flags = ap->a_flags; 40730743Sphk 40830743Sphk if (vp->v_vnlock == NULL) { 40930743Sphk if ((flags & LK_TYPE_MASK) == LK_DRAIN) 41030743Sphk return (0); 41130743Sphk MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 41230743Sphk M_VNODE, M_WAITOK); 41334206Sdyson lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE); 41430743Sphk } 41530743Sphk switch (flags & LK_TYPE_MASK) { 41630743Sphk case LK_DRAIN: 41730743Sphk vnflags = LK_DRAIN; 41830743Sphk break; 41930743Sphk case LK_EXCLUSIVE: 42030743Sphk case LK_SHARED: 42130743Sphk vnflags = LK_SHARED; 42230743Sphk break; 42330743Sphk case LK_UPGRADE: 42430743Sphk case LK_EXCLUPGRADE: 42530743Sphk case LK_DOWNGRADE: 42630743Sphk return (0); 42730743Sphk case LK_RELEASE: 42830743Sphk default: 42930743Sphk panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK); 43030743Sphk } 43130743Sphk if (flags & LK_INTERLOCK) 43230743Sphk vnflags |= LK_INTERLOCK; 43330743Sphk return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 43430743Sphk#else /* for now */ 43530743Sphk /* 43630743Sphk * Since we are not using the lock manager, we must clear 43730743Sphk * the interlock here. 43830743Sphk */ 43931263Sbde if (ap->a_flags & LK_INTERLOCK) 44030743Sphk simple_unlock(&ap->a_vp->v_interlock); 44130743Sphk return (0); 44230743Sphk#endif 44330743Sphk} 44430743Sphk 44530743Sphk/* 44630743Sphk * Do the inverse of vop_nolock, handling the interlock in a compatible way. 
44730743Sphk */ 44830743Sphkint 44930743Sphkvop_nounlock(ap) 45030743Sphk struct vop_unlock_args /* { 45130743Sphk struct vnode *a_vp; 45230743Sphk int a_flags; 45330743Sphk struct proc *a_p; 45430743Sphk } */ *ap; 45530743Sphk{ 45630743Sphk struct vnode *vp = ap->a_vp; 45730743Sphk 45830743Sphk if (vp->v_vnlock == NULL) { 45930743Sphk if (ap->a_flags & LK_INTERLOCK) 46030743Sphk simple_unlock(&ap->a_vp->v_interlock); 46130743Sphk return (0); 46230743Sphk } 46330743Sphk return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags, 46430743Sphk &ap->a_vp->v_interlock, ap->a_p)); 46530743Sphk} 46630743Sphk 46730743Sphk/* 46830743Sphk * Return whether or not the node is in use. 46930743Sphk */ 47030743Sphkint 47130743Sphkvop_noislocked(ap) 47230743Sphk struct vop_islocked_args /* { 47330743Sphk struct vnode *a_vp; 47430743Sphk } */ *ap; 47530743Sphk{ 47630743Sphk struct vnode *vp = ap->a_vp; 47730743Sphk 47830743Sphk if (vp->v_vnlock == NULL) 47930743Sphk return (0); 48030743Sphk return (lockstatus(vp->v_vnlock)); 48130743Sphk} 48230743Sphk 483