/* vfs_default.c, revision 47989 */
130489Sphk/* 230489Sphk * Copyright (c) 1989, 1993 330489Sphk * The Regents of the University of California. All rights reserved. 430489Sphk * 530489Sphk * This code is derived from software contributed 630489Sphk * to Berkeley by John Heidemann of the UCLA Ficus project. 730489Sphk * 830489Sphk * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project 930489Sphk * 1030489Sphk * Redistribution and use in source and binary forms, with or without 1130489Sphk * modification, are permitted provided that the following conditions 1230489Sphk * are met: 1330489Sphk * 1. Redistributions of source code must retain the above copyright 1430489Sphk * notice, this list of conditions and the following disclaimer. 1530489Sphk * 2. Redistributions in binary form must reproduce the above copyright 1630489Sphk * notice, this list of conditions and the following disclaimer in the 1730489Sphk * documentation and/or other materials provided with the distribution. 1830489Sphk * 3. All advertising materials mentioning features or use of this software 1930489Sphk * must display the following acknowledgement: 2030489Sphk * This product includes software developed by the University of 2130489Sphk * California, Berkeley and its contributors. 2230489Sphk * 4. Neither the name of the University nor the names of its contributors 2330489Sphk * may be used to endorse or promote products derived from this software 2430489Sphk * without specific prior written permission. 2530489Sphk * 2630489Sphk * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 2730489Sphk * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2830489Sphk * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2930489Sphk * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 3030489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 3130489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 3230489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 3330489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3430489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3530489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3630489Sphk * SUCH DAMAGE. 3730489Sphk * 3847989Sgpalmer * 3947989Sgpalmer * $Id$ 4030489Sphk */ 4130489Sphk 4230489Sphk#include <sys/param.h> 4330489Sphk#include <sys/systm.h> 4444272Sbde#include <sys/buf.h> 4530489Sphk#include <sys/kernel.h> 4631561Sbde#include <sys/lock.h> 4730743Sphk#include <sys/malloc.h> 4830492Sphk#include <sys/unistd.h> 4930489Sphk#include <sys/vnode.h> 5030743Sphk#include <sys/poll.h> 5130489Sphk 5230489Sphkstatic int vop_nostrategy __P((struct vop_strategy_args *)); 5330489Sphk 5430489Sphk/* 5530489Sphk * This vnode table stores what we want to do if the filesystem doesn't 5630489Sphk * implement a particular VOP. 5730489Sphk * 5830489Sphk * If there is no specific entry here, we will return EOPNOTSUPP. 
5930489Sphk * 6030489Sphk */ 6130489Sphk 6230489Sphkvop_t **default_vnodeop_p; 6330489Sphkstatic struct vnodeopv_entry_desc default_vnodeop_entries[] = { 6430492Sphk { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 6530743Sphk { &vop_abortop_desc, (vop_t *) vop_null }, 6630492Sphk { &vop_advlock_desc, (vop_t *) vop_einval }, 6730743Sphk { &vop_bwrite_desc, (vop_t *) vop_stdbwrite }, 6830492Sphk { &vop_close_desc, (vop_t *) vop_null }, 6930492Sphk { &vop_fsync_desc, (vop_t *) vop_null }, 7030492Sphk { &vop_ioctl_desc, (vop_t *) vop_enotty }, 7130496Sphk { &vop_islocked_desc, (vop_t *) vop_noislocked }, 7230739Sphk { &vop_lease_desc, (vop_t *) vop_null }, 7330496Sphk { &vop_lock_desc, (vop_t *) vop_nolock }, 7430492Sphk { &vop_mmap_desc, (vop_t *) vop_einval }, 7530492Sphk { &vop_open_desc, (vop_t *) vop_null }, 7630492Sphk { &vop_pathconf_desc, (vop_t *) vop_einval }, 7730489Sphk { &vop_poll_desc, (vop_t *) vop_nopoll }, 7830492Sphk { &vop_readlink_desc, (vop_t *) vop_einval }, 7930492Sphk { &vop_reallocblks_desc, (vop_t *) vop_eopnotsupp }, 8030489Sphk { &vop_revoke_desc, (vop_t *) vop_revoke }, 8130489Sphk { &vop_strategy_desc, (vop_t *) vop_nostrategy }, 8230496Sphk { &vop_unlock_desc, (vop_t *) vop_nounlock }, 8330489Sphk { NULL, NULL } 8430489Sphk}; 8530489Sphk 8630489Sphkstatic struct vnodeopv_desc default_vnodeop_opv_desc = 8730489Sphk { &default_vnodeop_p, default_vnodeop_entries }; 8830489Sphk 8930489SphkVNODEOP_SET(default_vnodeop_opv_desc); 9030489Sphk 9130489Sphkint 9230492Sphkvop_eopnotsupp(struct vop_generic_args *ap) 9330489Sphk{ 9430489Sphk /* 9530492Sphk printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 9630489Sphk */ 9730489Sphk 9830489Sphk return (EOPNOTSUPP); 9930489Sphk} 10030489Sphk 10130489Sphkint 10230492Sphkvop_ebadf(struct vop_generic_args *ap) 10330489Sphk{ 10430489Sphk 10530492Sphk return (EBADF); 10630492Sphk} 10730492Sphk 10830492Sphkint 10930492Sphkvop_enotty(struct vop_generic_args *ap) 11030492Sphk{ 11130492Sphk 11230492Sphk 
return (ENOTTY); 11330492Sphk} 11430492Sphk 11530492Sphkint 11630492Sphkvop_einval(struct vop_generic_args *ap) 11730492Sphk{ 11830492Sphk 11930492Sphk return (EINVAL); 12030492Sphk} 12130492Sphk 12230492Sphkint 12330492Sphkvop_null(struct vop_generic_args *ap) 12430492Sphk{ 12530492Sphk 12630492Sphk return (0); 12730492Sphk} 12830492Sphk 12930492Sphkint 13030492Sphkvop_defaultop(struct vop_generic_args *ap) 13130492Sphk{ 13230492Sphk 13330489Sphk return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap)); 13430489Sphk} 13530489Sphk 13641056Speterint 13741056Spetervop_panic(struct vop_generic_args *ap) 13841056Speter{ 13941056Speter 14041056Speter panic("illegal vnode op called"); 14141056Speter} 14241056Speter 14346349Salc/* 14446349Salc * vop_nostrategy: 14546349Salc * 14646349Salc * Strategy routine for VFS devices that have none. 14746349Salc * 14846349Salc * B_ERROR and B_INVAL must be cleared prior to calling any strategy 14946349Salc * routine. Typically this is done for a B_READ strategy call. Typically 15046349Salc * B_INVAL is assumed to already be clear prior to a write and should not 15146349Salc * be cleared manually unless you just made the buffer invalid. B_ERROR 15246349Salc * should be cleared either way. 
15346349Salc */ 15446349Salc 15530489Sphkstatic int 15630489Sphkvop_nostrategy (struct vop_strategy_args *ap) 15730489Sphk{ 15830489Sphk printf("No strategy for buffer at %p\n", ap->a_bp); 15937384Sjulian vprint("", ap->a_vp); 16030489Sphk vprint("", ap->a_bp->b_vp); 16130489Sphk ap->a_bp->b_flags |= B_ERROR; 16230489Sphk ap->a_bp->b_error = EOPNOTSUPP; 16330489Sphk biodone(ap->a_bp); 16430489Sphk return (EOPNOTSUPP); 16530489Sphk} 16630492Sphk 16730492Sphkint 16830492Sphkvop_stdpathconf(ap) 16930492Sphk struct vop_pathconf_args /* { 17030492Sphk struct vnode *a_vp; 17130492Sphk int a_name; 17230492Sphk int *a_retval; 17330492Sphk } */ *ap; 17430492Sphk{ 17530492Sphk 17630492Sphk switch (ap->a_name) { 17730492Sphk case _PC_LINK_MAX: 17830492Sphk *ap->a_retval = LINK_MAX; 17930492Sphk return (0); 18030492Sphk case _PC_MAX_CANON: 18130492Sphk *ap->a_retval = MAX_CANON; 18230492Sphk return (0); 18330492Sphk case _PC_MAX_INPUT: 18430492Sphk *ap->a_retval = MAX_INPUT; 18530492Sphk return (0); 18630492Sphk case _PC_PIPE_BUF: 18730492Sphk *ap->a_retval = PIPE_BUF; 18830492Sphk return (0); 18930492Sphk case _PC_CHOWN_RESTRICTED: 19030492Sphk *ap->a_retval = 1; 19130492Sphk return (0); 19230492Sphk case _PC_VDISABLE: 19330492Sphk *ap->a_retval = _POSIX_VDISABLE; 19430492Sphk return (0); 19530492Sphk default: 19630492Sphk return (EINVAL); 19730492Sphk } 19830492Sphk /* NOTREACHED */ 19930492Sphk} 20030513Sphk 20130513Sphk/* 20230513Sphk * Standard lock, unlock and islocked functions. 20330513Sphk * 20430513Sphk * These depend on the lock structure being the first element in the 20530513Sphk * inode, ie: vp->v_data points to the the lock! 
20630513Sphk */ 20730513Sphkint 20830513Sphkvop_stdlock(ap) 20930513Sphk struct vop_lock_args /* { 21030513Sphk struct vnode *a_vp; 21130513Sphk int a_flags; 21230513Sphk struct proc *a_p; 21330513Sphk } */ *ap; 21430513Sphk{ 21532286Sdyson struct lock *l; 21630513Sphk 21732286Sdyson if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 21832286Sdyson if (ap->a_flags & LK_INTERLOCK) 21932286Sdyson simple_unlock(&ap->a_vp->v_interlock); 22032286Sdyson return 0; 22132286Sdyson } 22232286Sdyson 22342900Seivind#ifndef DEBUG_LOCKS 22430513Sphk return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p)); 22542900Seivind#else 22642900Seivind return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p, 22742900Seivind "vop_stdlock", ap->a_vp->filename, ap->a_vp->line)); 22842900Seivind#endif 22930513Sphk} 23030513Sphk 23130513Sphkint 23230513Sphkvop_stdunlock(ap) 23330513Sphk struct vop_unlock_args /* { 23430513Sphk struct vnode *a_vp; 23530513Sphk int a_flags; 23630513Sphk struct proc *a_p; 23730513Sphk } */ *ap; 23830513Sphk{ 23932286Sdyson struct lock *l; 24030513Sphk 24132286Sdyson if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 24232286Sdyson if (ap->a_flags & LK_INTERLOCK) 24332286Sdyson simple_unlock(&ap->a_vp->v_interlock); 24432286Sdyson return 0; 24532286Sdyson } 24632286Sdyson 24730513Sphk return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock, 24830513Sphk ap->a_p)); 24930513Sphk} 25030513Sphk 25130513Sphkint 25230513Sphkvop_stdislocked(ap) 25330513Sphk struct vop_islocked_args /* { 25430513Sphk struct vnode *a_vp; 25530513Sphk } */ *ap; 25630513Sphk{ 25732286Sdyson struct lock *l; 25830513Sphk 25932286Sdyson if ((l = (struct lock *)ap->a_vp->v_data) == NULL) 26032286Sdyson return 0; 26132286Sdyson 26230513Sphk return (lockstatus(l)); 26330513Sphk} 26430513Sphk 26530743Sphk/* 26630743Sphk * Return true for select/poll. 
26730743Sphk */ 26830743Sphkint 26930743Sphkvop_nopoll(ap) 27030743Sphk struct vop_poll_args /* { 27130743Sphk struct vnode *a_vp; 27230743Sphk int a_events; 27330743Sphk struct ucred *a_cred; 27430743Sphk struct proc *a_p; 27530743Sphk } */ *ap; 27630743Sphk{ 27730743Sphk /* 27831727Swollman * Return true for read/write. If the user asked for something 27931727Swollman * special, return POLLNVAL, so that clients have a way of 28031727Swollman * determining reliably whether or not the extended 28131727Swollman * functionality is present without hard-coding knowledge 28231727Swollman * of specific filesystem implementations. 28330743Sphk */ 28431727Swollman if (ap->a_events & ~POLLSTANDARD) 28531727Swollman return (POLLNVAL); 28631727Swollman 28730743Sphk return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 28830743Sphk} 28930743Sphk 29031727Swollman/* 29131727Swollman * Implement poll for local filesystems that support it. 29231727Swollman */ 29330743Sphkint 29431727Swollmanvop_stdpoll(ap) 29531727Swollman struct vop_poll_args /* { 29631727Swollman struct vnode *a_vp; 29731727Swollman int a_events; 29831727Swollman struct ucred *a_cred; 29931727Swollman struct proc *a_p; 30031727Swollman } */ *ap; 30131727Swollman{ 30231811Swollman if ((ap->a_events & ~POLLSTANDARD) == 0) 30331811Swollman return (ap->a_events & (POLLRDNORM|POLLWRNORM)); 30431727Swollman return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events)); 30531727Swollman} 30631727Swollman 30731727Swollmanint 30830743Sphkvop_stdbwrite(ap) 30930743Sphk struct vop_bwrite_args *ap; 31030743Sphk{ 31130743Sphk return (bwrite(ap->a_bp)); 31230743Sphk} 31330743Sphk 31430743Sphk/* 31530743Sphk * Stubs to use when there is no locking to be done on the underlying object. 31630743Sphk * A minimal shared lock is necessary to ensure that the underlying object 31730743Sphk * is not revoked while an operation is in progress. 
So, an active shared 31830743Sphk * count is maintained in an auxillary vnode lock structure. 31930743Sphk */ 32030743Sphkint 32130743Sphkvop_sharedlock(ap) 32230743Sphk struct vop_lock_args /* { 32330743Sphk struct vnode *a_vp; 32430743Sphk int a_flags; 32530743Sphk struct proc *a_p; 32630743Sphk } */ *ap; 32730743Sphk{ 32830743Sphk /* 32930743Sphk * This code cannot be used until all the non-locking filesystems 33030743Sphk * (notably NFS) are converted to properly lock and release nodes. 33130743Sphk * Also, certain vnode operations change the locking state within 33230743Sphk * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 33330743Sphk * and symlink). Ideally these operations should not change the 33430743Sphk * lock state, but should be changed to let the caller of the 33530743Sphk * function unlock them. Otherwise all intermediate vnode layers 33630743Sphk * (such as union, umapfs, etc) must catch these functions to do 33730743Sphk * the necessary locking at their layer. Note that the inactive 33830743Sphk * and lookup operations also change their lock state, but this 33930743Sphk * cannot be avoided, so these two operations will always need 34030743Sphk * to be handled in intermediate layers. 34130743Sphk */ 34230743Sphk struct vnode *vp = ap->a_vp; 34330743Sphk int vnflags, flags = ap->a_flags; 34430743Sphk 34530743Sphk if (vp->v_vnlock == NULL) { 34630743Sphk if ((flags & LK_TYPE_MASK) == LK_DRAIN) 34730743Sphk return (0); 34830743Sphk MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 34930743Sphk M_VNODE, M_WAITOK); 35034206Sdyson lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE); 35130743Sphk } 35230743Sphk switch (flags & LK_TYPE_MASK) { 35330743Sphk case LK_DRAIN: 35430743Sphk vnflags = LK_DRAIN; 35530743Sphk break; 35630743Sphk case LK_EXCLUSIVE: 35730743Sphk#ifdef DEBUG_VFS_LOCKS 35830743Sphk /* 35930743Sphk * Normally, we use shared locks here, but that confuses 36030743Sphk * the locking assertions. 
36130743Sphk */ 36230743Sphk vnflags = LK_EXCLUSIVE; 36330743Sphk break; 36430743Sphk#endif 36530743Sphk case LK_SHARED: 36630743Sphk vnflags = LK_SHARED; 36730743Sphk break; 36830743Sphk case LK_UPGRADE: 36930743Sphk case LK_EXCLUPGRADE: 37030743Sphk case LK_DOWNGRADE: 37130743Sphk return (0); 37230743Sphk case LK_RELEASE: 37330743Sphk default: 37430743Sphk panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK); 37530743Sphk } 37630743Sphk if (flags & LK_INTERLOCK) 37730743Sphk vnflags |= LK_INTERLOCK; 37842900Seivind#ifndef DEBUG_LOCKS 37942900Seivind return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 38042900Seivind#else 38142900Seivind return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p, 38242900Seivind "vop_sharedlock", vp->filename, vp->line)); 38342900Seivind#endif 38430743Sphk} 38530743Sphk 38630743Sphk/* 38730743Sphk * Stubs to use when there is no locking to be done on the underlying object. 38830743Sphk * A minimal shared lock is necessary to ensure that the underlying object 38930743Sphk * is not revoked while an operation is in progress. So, an active shared 39030743Sphk * count is maintained in an auxillary vnode lock structure. 39130743Sphk */ 39230743Sphkint 39330743Sphkvop_nolock(ap) 39430743Sphk struct vop_lock_args /* { 39530743Sphk struct vnode *a_vp; 39630743Sphk int a_flags; 39730743Sphk struct proc *a_p; 39830743Sphk } */ *ap; 39930743Sphk{ 40030743Sphk#ifdef notyet 40130743Sphk /* 40230743Sphk * This code cannot be used until all the non-locking filesystems 40330743Sphk * (notably NFS) are converted to properly lock and release nodes. 40430743Sphk * Also, certain vnode operations change the locking state within 40530743Sphk * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 40630743Sphk * and symlink). Ideally these operations should not change the 40730743Sphk * lock state, but should be changed to let the caller of the 40830743Sphk * function unlock them. 
Otherwise all intermediate vnode layers 40930743Sphk * (such as union, umapfs, etc) must catch these functions to do 41030743Sphk * the necessary locking at their layer. Note that the inactive 41130743Sphk * and lookup operations also change their lock state, but this 41230743Sphk * cannot be avoided, so these two operations will always need 41330743Sphk * to be handled in intermediate layers. 41430743Sphk */ 41530743Sphk struct vnode *vp = ap->a_vp; 41630743Sphk int vnflags, flags = ap->a_flags; 41730743Sphk 41830743Sphk if (vp->v_vnlock == NULL) { 41930743Sphk if ((flags & LK_TYPE_MASK) == LK_DRAIN) 42030743Sphk return (0); 42130743Sphk MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 42230743Sphk M_VNODE, M_WAITOK); 42334206Sdyson lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE); 42430743Sphk } 42530743Sphk switch (flags & LK_TYPE_MASK) { 42630743Sphk case LK_DRAIN: 42730743Sphk vnflags = LK_DRAIN; 42830743Sphk break; 42930743Sphk case LK_EXCLUSIVE: 43030743Sphk case LK_SHARED: 43130743Sphk vnflags = LK_SHARED; 43230743Sphk break; 43330743Sphk case LK_UPGRADE: 43430743Sphk case LK_EXCLUPGRADE: 43530743Sphk case LK_DOWNGRADE: 43630743Sphk return (0); 43730743Sphk case LK_RELEASE: 43830743Sphk default: 43930743Sphk panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK); 44030743Sphk } 44130743Sphk if (flags & LK_INTERLOCK) 44230743Sphk vnflags |= LK_INTERLOCK; 44330743Sphk return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 44430743Sphk#else /* for now */ 44530743Sphk /* 44630743Sphk * Since we are not using the lock manager, we must clear 44730743Sphk * the interlock here. 44830743Sphk */ 44931263Sbde if (ap->a_flags & LK_INTERLOCK) 45030743Sphk simple_unlock(&ap->a_vp->v_interlock); 45130743Sphk return (0); 45230743Sphk#endif 45330743Sphk} 45430743Sphk 45530743Sphk/* 45630743Sphk * Do the inverse of vop_nolock, handling the interlock in a compatible way. 
45730743Sphk */ 45830743Sphkint 45930743Sphkvop_nounlock(ap) 46030743Sphk struct vop_unlock_args /* { 46130743Sphk struct vnode *a_vp; 46230743Sphk int a_flags; 46330743Sphk struct proc *a_p; 46430743Sphk } */ *ap; 46530743Sphk{ 46630743Sphk struct vnode *vp = ap->a_vp; 46730743Sphk 46830743Sphk if (vp->v_vnlock == NULL) { 46930743Sphk if (ap->a_flags & LK_INTERLOCK) 47030743Sphk simple_unlock(&ap->a_vp->v_interlock); 47130743Sphk return (0); 47230743Sphk } 47330743Sphk return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags, 47430743Sphk &ap->a_vp->v_interlock, ap->a_p)); 47530743Sphk} 47630743Sphk 47730743Sphk/* 47830743Sphk * Return whether or not the node is in use. 47930743Sphk */ 48030743Sphkint 48130743Sphkvop_noislocked(ap) 48230743Sphk struct vop_islocked_args /* { 48330743Sphk struct vnode *a_vp; 48430743Sphk } */ *ap; 48530743Sphk{ 48630743Sphk struct vnode *vp = ap->a_vp; 48730743Sphk 48830743Sphk if (vp->v_vnlock == NULL) 48930743Sphk return (0); 49030743Sphk return (lockstatus(vp->v_vnlock)); 49130743Sphk} 49230743Sphk 493