/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/vfs_default.c 54444 1999-12-11 16:13:02Z eivind $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_abortop_desc,		(vop_t *) vop_null },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bwrite_desc,		(vop_t *) vop_stdbwrite },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_noislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_nolock },
	{ &vop_mmap_desc,		(vop_t *) vop_einval },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_reallocblks_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_nounlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

	printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
	panic("Filesystem goof");
	return (0);
}
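/*
 * Example (sketch, hypothetical "myfs"): a filesystem hooks into the
 * default table above by listing vop_defaultop as its own default
 * entry, so any VOP it does not override falls through to the
 * defaults registered here:
 *
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc,	(vop_t *) vop_defaultop },
 *		{ &vop_read_desc,	(vop_t *) myfs_read },
 *		{ &vop_write_desc,	(vop_t *) myfs_write },
 *		{ NULL, NULL }
 *	};
 */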
/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  They are typically cleared by the caller for a B_READ
 *	strategy call.  B_INVAL is assumed to already be clear prior to a
 *	write and should not be cleared manually unless you just made the
 *	buffer invalid.  B_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_flags |= B_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	biodone(ap->a_bp);
	return (EOPNOTSUPP);
}

int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, i.e.: vp->v_data points to the lock!
 */
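/*
 * Sketch of the layout the three functions below assume, for a
 * hypothetical filesystem "myfs":
 *
 *	struct myfs_inode {
 *		struct lock i_lock;	(must be the first member)
 *		...
 *	};
 *
 * With vp->v_data pointing at the inode, casting v_data to
 * struct lock * below finds i_lock.
 */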
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

#ifndef	DEBUG_LOCKS
	return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
	    "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return 0;
	}

	return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
	    ap->a_p));
}

int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct lock *l;

	if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
		return 0;

	return (lockstatus(l, ap->a_p));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	if ((ap->a_events & ~POLLSTANDARD) == 0)
		return (ap->a_events & (POLLRDNORM|POLLWRNORM));
	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}
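/*
 * Sketch (assumption, not taken from this file): a local filesystem
 * that supports polling routes vop_poll at vop_stdpoll in its
 * vnodeopv_entry_desc table instead of inheriting vop_nopoll:
 *
 *	{ &vop_poll_desc,	(vop_t *) vop_stdpoll },
 *
 * Callers may then poll for extended events; with vop_nopoll they
 * would get POLLNVAL back for anything beyond POLLSTANDARD.
 */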
int
vop_stdbwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink).  Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them.  Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer.  Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink).  Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them.  Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer.  Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
	    &ap->a_vp->v_interlock, ap->a_p));
}
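/*
 * Note: vop_nolock, vop_nounlock and vop_noislocked are the entries
 * wired into the default table at the top of this file, so a
 * filesystem that does no per-node locking simply omits lock entries
 * from its own table.  A filesystem that keeps a struct lock at the
 * front of its per-node data (see vop_stdlock above) would instead
 * list vop_stdlock, vop_stdunlock and vop_stdislocked.  (Sketch of
 * intended use, not a requirement stated elsewhere in this file.)
 */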
/*
 * Return whether or not the node is locked.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);

	return (lockstatus(vp->v_vnlock, ap->a_p));
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdmount (mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	return (0);
}

int
vfs_stdvget (mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
	struct mount *mp;
	struct sockaddr *nam;
	int *extflagsp;
	struct ucred **credanonp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

/* end of vfs default ops */
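/*
 * Example (sketch, hypothetical "myfs"): a filesystem implements only
 * the vfsops it cares about and fills the rest with the defaults
 * above.  Member order follows struct vfsops and is shown here for
 * illustration only:
 *
 *	static struct vfsops myfs_vfsops = {
 *		myfs_mount,
 *		vfs_stdstart,
 *		myfs_unmount,
 *		myfs_root,
 *		vfs_stdquotactl,
 *		myfs_statfs,
 *		vfs_stdsync,
 *		vfs_stdvget,
 *		vfs_stdfhtovp,
 *		vfs_stdcheckexp,
 *		vfs_stdvptofh,
 *		vfs_stdinit,
 *		vfs_stduninit,
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */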