/* vfs_default.c -- FreeBSD revision 32286 */
1/* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed 6 * to Berkeley by John Heidemann of the UCLA Ficus project. 7 * 8 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 */ 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/kernel.h> 43#include <sys/lock.h> 44#include <sys/malloc.h> 45#include <sys/unistd.h> 46#include <sys/vnode.h> 47#include <sys/poll.h> 48 49static int vop_nostrategy __P((struct vop_strategy_args *)); 50 51/* 52 * This vnode table stores what we want to do if the filesystem doesn't 53 * implement a particular VOP. 54 * 55 * If there is no specific entry here, we will return EOPNOTSUPP. 
56 * 57 */ 58 59vop_t **default_vnodeop_p; 60static struct vnodeopv_entry_desc default_vnodeop_entries[] = { 61 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 62 { &vop_abortop_desc, (vop_t *) vop_null }, 63 { &vop_advlock_desc, (vop_t *) vop_einval }, 64 { &vop_bwrite_desc, (vop_t *) vop_stdbwrite }, 65 { &vop_close_desc, (vop_t *) vop_null }, 66 { &vop_fsync_desc, (vop_t *) vop_null }, 67 { &vop_ioctl_desc, (vop_t *) vop_enotty }, 68 { &vop_islocked_desc, (vop_t *) vop_noislocked }, 69 { &vop_lease_desc, (vop_t *) vop_null }, 70 { &vop_lock_desc, (vop_t *) vop_nolock }, 71 { &vop_mmap_desc, (vop_t *) vop_einval }, 72 { &vop_open_desc, (vop_t *) vop_null }, 73 { &vop_pathconf_desc, (vop_t *) vop_einval }, 74 { &vop_poll_desc, (vop_t *) vop_nopoll }, 75 { &vop_readlink_desc, (vop_t *) vop_einval }, 76 { &vop_reallocblks_desc, (vop_t *) vop_eopnotsupp }, 77 { &vop_revoke_desc, (vop_t *) vop_revoke }, 78 { &vop_strategy_desc, (vop_t *) vop_nostrategy }, 79 { &vop_unlock_desc, (vop_t *) vop_nounlock }, 80 { NULL, NULL } 81}; 82 83static struct vnodeopv_desc default_vnodeop_opv_desc = 84 { &default_vnodeop_p, default_vnodeop_entries }; 85 86VNODEOP_SET(default_vnodeop_opv_desc); 87 88int 89vop_eopnotsupp(struct vop_generic_args *ap) 90{ 91 /* 92 printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 93 */ 94 95 return (EOPNOTSUPP); 96} 97 98int 99vop_ebadf(struct vop_generic_args *ap) 100{ 101 102 return (EBADF); 103} 104 105int 106vop_enotty(struct vop_generic_args *ap) 107{ 108 109 return (ENOTTY); 110} 111 112int 113vop_einval(struct vop_generic_args *ap) 114{ 115 116 return (EINVAL); 117} 118 119int 120vop_null(struct vop_generic_args *ap) 121{ 122 123 return (0); 124} 125 126int 127vop_defaultop(struct vop_generic_args *ap) 128{ 129 130 return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap)); 131} 132 133static int 134vop_nostrategy (struct vop_strategy_args *ap) 135{ 136 printf("No strategy for buffer at %p\n", ap->a_bp); 137 vprint("", ap->a_bp->b_vp); 
138 ap->a_bp->b_flags |= B_ERROR; 139 ap->a_bp->b_error = EOPNOTSUPP; 140 biodone(ap->a_bp); 141 return (EOPNOTSUPP); 142} 143 144int 145vop_stdpathconf(ap) 146 struct vop_pathconf_args /* { 147 struct vnode *a_vp; 148 int a_name; 149 int *a_retval; 150 } */ *ap; 151{ 152 153 switch (ap->a_name) { 154 case _PC_LINK_MAX: 155 *ap->a_retval = LINK_MAX; 156 return (0); 157 case _PC_MAX_CANON: 158 *ap->a_retval = MAX_CANON; 159 return (0); 160 case _PC_MAX_INPUT: 161 *ap->a_retval = MAX_INPUT; 162 return (0); 163 case _PC_PIPE_BUF: 164 *ap->a_retval = PIPE_BUF; 165 return (0); 166 case _PC_CHOWN_RESTRICTED: 167 *ap->a_retval = 1; 168 return (0); 169 case _PC_VDISABLE: 170 *ap->a_retval = _POSIX_VDISABLE; 171 return (0); 172 default: 173 return (EINVAL); 174 } 175 /* NOTREACHED */ 176} 177 178/* 179 * Standard lock, unlock and islocked functions. 180 * 181 * These depend on the lock structure being the first element in the 182 * inode, ie: vp->v_data points to the the lock! 183 */ 184int 185vop_stdlock(ap) 186 struct vop_lock_args /* { 187 struct vnode *a_vp; 188 int a_flags; 189 struct proc *a_p; 190 } */ *ap; 191{ 192 struct lock *l; 193 194 if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 195 if (ap->a_flags & LK_INTERLOCK) 196 simple_unlock(&ap->a_vp->v_interlock); 197 return 0; 198 } 199 200 return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p)); 201} 202 203int 204vop_stdunlock(ap) 205 struct vop_unlock_args /* { 206 struct vnode *a_vp; 207 int a_flags; 208 struct proc *a_p; 209 } */ *ap; 210{ 211 struct lock *l; 212 213 if ((l = (struct lock *)ap->a_vp->v_data) == NULL) { 214 if (ap->a_flags & LK_INTERLOCK) 215 simple_unlock(&ap->a_vp->v_interlock); 216 return 0; 217 } 218 219 return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock, 220 ap->a_p)); 221} 222 223int 224vop_stdislocked(ap) 225 struct vop_islocked_args /* { 226 struct vnode *a_vp; 227 } */ *ap; 228{ 229 struct lock *l; 230 231 if ((l = (struct lock *)ap->a_vp->v_data) == 
NULL) 232 return 0; 233 234 return (lockstatus(l)); 235} 236 237/* 238 * Return true for select/poll. 239 */ 240int 241vop_nopoll(ap) 242 struct vop_poll_args /* { 243 struct vnode *a_vp; 244 int a_events; 245 struct ucred *a_cred; 246 struct proc *a_p; 247 } */ *ap; 248{ 249 /* 250 * Return true for read/write. If the user asked for something 251 * special, return POLLNVAL, so that clients have a way of 252 * determining reliably whether or not the extended 253 * functionality is present without hard-coding knowledge 254 * of specific filesystem implementations. 255 */ 256 if (ap->a_events & ~POLLSTANDARD) 257 return (POLLNVAL); 258 259 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 260} 261 262/* 263 * Implement poll for local filesystems that support it. 264 */ 265int 266vop_stdpoll(ap) 267 struct vop_poll_args /* { 268 struct vnode *a_vp; 269 int a_events; 270 struct ucred *a_cred; 271 struct proc *a_p; 272 } */ *ap; 273{ 274 if ((ap->a_events & ~POLLSTANDARD) == 0) 275 return (ap->a_events & (POLLRDNORM|POLLWRNORM)); 276 return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events)); 277} 278 279int 280vop_stdbwrite(ap) 281 struct vop_bwrite_args *ap; 282{ 283 return (bwrite(ap->a_bp)); 284} 285 286/* 287 * Stubs to use when there is no locking to be done on the underlying object. 288 * A minimal shared lock is necessary to ensure that the underlying object 289 * is not revoked while an operation is in progress. So, an active shared 290 * count is maintained in an auxillary vnode lock structure. 291 */ 292int 293vop_sharedlock(ap) 294 struct vop_lock_args /* { 295 struct vnode *a_vp; 296 int a_flags; 297 struct proc *a_p; 298 } */ *ap; 299{ 300 /* 301 * This code cannot be used until all the non-locking filesystems 302 * (notably NFS) are converted to properly lock and release nodes. 303 * Also, certain vnode operations change the locking state within 304 * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 305 * and symlink). 
Ideally these operations should not change the 306 * lock state, but should be changed to let the caller of the 307 * function unlock them. Otherwise all intermediate vnode layers 308 * (such as union, umapfs, etc) must catch these functions to do 309 * the necessary locking at their layer. Note that the inactive 310 * and lookup operations also change their lock state, but this 311 * cannot be avoided, so these two operations will always need 312 * to be handled in intermediate layers. 313 */ 314 struct vnode *vp = ap->a_vp; 315 int vnflags, flags = ap->a_flags; 316 317 if (vp->v_vnlock == NULL) { 318 if ((flags & LK_TYPE_MASK) == LK_DRAIN) 319 return (0); 320 MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 321 M_VNODE, M_WAITOK); 322 lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0); 323 } 324 switch (flags & LK_TYPE_MASK) { 325 case LK_DRAIN: 326 vnflags = LK_DRAIN; 327 break; 328 case LK_EXCLUSIVE: 329#ifdef DEBUG_VFS_LOCKS 330 /* 331 * Normally, we use shared locks here, but that confuses 332 * the locking assertions. 333 */ 334 vnflags = LK_EXCLUSIVE; 335 break; 336#endif 337 case LK_SHARED: 338 vnflags = LK_SHARED; 339 break; 340 case LK_UPGRADE: 341 case LK_EXCLUPGRADE: 342 case LK_DOWNGRADE: 343 return (0); 344 case LK_RELEASE: 345 default: 346 panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK); 347 } 348 if (flags & LK_INTERLOCK) 349 vnflags |= LK_INTERLOCK; 350 return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 351} 352 353/* 354 * Stubs to use when there is no locking to be done on the underlying object. 355 * A minimal shared lock is necessary to ensure that the underlying object 356 * is not revoked while an operation is in progress. So, an active shared 357 * count is maintained in an auxillary vnode lock structure. 
358 */ 359int 360vop_nolock(ap) 361 struct vop_lock_args /* { 362 struct vnode *a_vp; 363 int a_flags; 364 struct proc *a_p; 365 } */ *ap; 366{ 367#ifdef notyet 368 /* 369 * This code cannot be used until all the non-locking filesystems 370 * (notably NFS) are converted to properly lock and release nodes. 371 * Also, certain vnode operations change the locking state within 372 * the operation (create, mknod, remove, link, rename, mkdir, rmdir, 373 * and symlink). Ideally these operations should not change the 374 * lock state, but should be changed to let the caller of the 375 * function unlock them. Otherwise all intermediate vnode layers 376 * (such as union, umapfs, etc) must catch these functions to do 377 * the necessary locking at their layer. Note that the inactive 378 * and lookup operations also change their lock state, but this 379 * cannot be avoided, so these two operations will always need 380 * to be handled in intermediate layers. 381 */ 382 struct vnode *vp = ap->a_vp; 383 int vnflags, flags = ap->a_flags; 384 385 if (vp->v_vnlock == NULL) { 386 if ((flags & LK_TYPE_MASK) == LK_DRAIN) 387 return (0); 388 MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock), 389 M_VNODE, M_WAITOK); 390 lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0); 391 } 392 switch (flags & LK_TYPE_MASK) { 393 case LK_DRAIN: 394 vnflags = LK_DRAIN; 395 break; 396 case LK_EXCLUSIVE: 397 case LK_SHARED: 398 vnflags = LK_SHARED; 399 break; 400 case LK_UPGRADE: 401 case LK_EXCLUPGRADE: 402 case LK_DOWNGRADE: 403 return (0); 404 case LK_RELEASE: 405 default: 406 panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK); 407 } 408 if (flags & LK_INTERLOCK) 409 vnflags |= LK_INTERLOCK; 410 return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p)); 411#else /* for now */ 412 /* 413 * Since we are not using the lock manager, we must clear 414 * the interlock here. 
415 */ 416 if (ap->a_flags & LK_INTERLOCK) 417 simple_unlock(&ap->a_vp->v_interlock); 418 return (0); 419#endif 420} 421 422/* 423 * Do the inverse of vop_nolock, handling the interlock in a compatible way. 424 */ 425int 426vop_nounlock(ap) 427 struct vop_unlock_args /* { 428 struct vnode *a_vp; 429 int a_flags; 430 struct proc *a_p; 431 } */ *ap; 432{ 433 struct vnode *vp = ap->a_vp; 434 435 if (vp->v_vnlock == NULL) { 436 if (ap->a_flags & LK_INTERLOCK) 437 simple_unlock(&ap->a_vp->v_interlock); 438 return (0); 439 } 440 return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags, 441 &ap->a_vp->v_interlock, ap->a_p)); 442} 443 444/* 445 * Return whether or not the node is in use. 446 */ 447int 448vop_noislocked(ap) 449 struct vop_islocked_args /* { 450 struct vnode *a_vp; 451 } */ *ap; 452{ 453 struct vnode *vp = ap->a_vp; 454 455 if (vp->v_vnlock == NULL) 456 return (0); 457 return (lockstatus(vp->v_vnlock)); 458} 459 460