/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 140728 2005-01-24 11:49:41Z phk $
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * stackable layer techniques, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimal filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer-specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink).  Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.  Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
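 *
 * As a sketch, each vnode stack is simply a null-node whose private
 * data records the lower vnode; NULLVPTOLOWERVP() maps from the
 * upper vnode to its lower-layer counterpart:
 *
 *	null-node ("sys")  --NULLVPTOLOWERVP()-->  UFS vnode ("sys")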
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping any vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * The second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");
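
/*
 * As a sketch of the two techniques for invoking lower-layer operations
 * described above: the first lets the bypass routine do the argument
 * mapping, as null_setattr() does below,
 *
 *	return (null_bypass((struct vop_generic_args *)ap));
 *
 * while the second maps the vnode arguments by hand and calls the lower
 * operation directly, e.g. for a getattr:
 *
 *	struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);
 *
 *	error = VOP_GETATTR(lvp, ap->a_vap, ap->a_cred, ap->a_td);
 */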

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode **, descp->vdesc_vp_offsets[i], ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), LK_THISLAYER, curthread);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct thread *td = cnp->cn_thread;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	int error;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we make
	 * a direct call here to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * Rely only on the PDIRUNLOCK flag, which should be carefully
	 * tracked by the underlying filesystem.
	 */
	if ((cnp->cn_flags & PDIRUNLOCK) && dvp->v_vnlock != ldvp->v_vnlock)
		VOP_UNLOCK(dvp, LK_THISLAYER, td);
	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(dvp->v_mount, lvp, &vp);
			if (error) {
				/* XXX Cleanup needed... */
				panic("null_nodeget failed");
			}
			*ap->a_vpp = vp;
		}
	}
	return (error);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handle to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle this to eliminate null FS to lower FS
 * file moving.  It is not clear why this is disallowed;
 * possibly it should be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct thread *td = ap->a_td;
	struct vnode *lvp;
	int error;
	struct null_node *nn;

	if (flags & LK_THISLAYER) {
		if (vp->v_vnlock != NULL) {
			/* lock is shared across layers */
			if (flags & LK_INTERLOCK)
				mtx_unlock(&vp->v_interlock);
			return (0);
		}
		error = lockmgr(&vp->v_lock, flags & ~LK_THISLAYER,
		    &vp->v_interlock, td);
		return (error);
	}

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us.  Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously.  Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		if ((flags & LK_INTERLOCK) == 0) {
			VI_LOCK(vp);
			flags |= LK_INTERLOCK;
		}
		nn = VTONULL(vp);
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
			/*
			 * Emulate lock draining by waiting for all other
			 * pending locks to complete.  Afterwards the
			 * lockmgr call might block, but no other threads
			 * will attempt to use this nullfs vnode due to the
			 * VI_XLOCK flag.
			 */
			while (nn->null_pending_locks > 0) {
				nn->null_drain_wakeup = 1;
				msleep(&nn->null_pending_locks,
				    VI_MTX(vp), PVFS, "nuldr", 0);
			}
			error = lockmgr(vp->v_vnlock,
			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
			    VI_MTX(vp), td);
			return (error);
		}
		nn->null_pending_locks++;
		error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td);
		VI_LOCK(vp);
		/*
		 * If we're called from vrele then v_usecount can have been 0
		 * and another process might have initiated a recycle
		 * operation.  When that happens, just back out.
		 */
		if (error == 0 && (vp->v_iflag & VI_XLOCK) != 0 &&
		    td != vp->v_vxthread) {
			lockmgr(vp->v_vnlock,
			    (flags & ~LK_TYPE_MASK) | LK_RELEASE,
			    VI_MTX(vp), td);
			VI_LOCK(vp);
			error = ENOENT;
		}
		nn->null_pending_locks--;
		/*
		 * Wake up the process draining the vnode after all
		 * pending lock attempts have failed.
		 */
		if (nn->null_pending_locks == 0 &&
		    nn->null_drain_wakeup != 0) {
			nn->null_drain_wakeup = 0;
			wakeup(&nn->null_pending_locks);
		}
		if (error == ENOENT && (vp->v_iflag & VI_XLOCK) != 0 &&
		    vp->v_vxthread != curthread) {
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD, "nulbo", 0);
		}
		VI_UNLOCK(vp);
		return (error);
	} else {
		/*
		 * To prevent race conditions involving doing a lookup
		 * on "..", we have to lock the lower node, then lock our
		 * node.  Most of the time it won't matter that we lock our
		 * node (as any locking would need the lower one locked
		 * first).  But we can LK_DRAIN the upper lock as a step
		 * towards decommissioning it.
		 */
		lvp = NULLVPTOLOWERVP(vp);
		if (lvp == NULL)
			return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, td));
		if (flags & LK_INTERLOCK) {
			mtx_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			error = VOP_LOCK(lvp,
			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, td);
		} else
			error = VOP_LOCK(lvp, flags, td);
		if (error)
			return (error);
		error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, td);
		if (error)
			VOP_UNLOCK(lvp, 0, td);
		return (error);
	}
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct thread *td = ap->a_td;
	struct vnode *lvp;

	if (vp->v_vnlock != NULL) {
		if (flags & LK_THISLAYER)
			return (0);	/* the lock is shared across layers */
		flags &= ~LK_THISLAYER;
		return (lockmgr(vp->v_vnlock, flags | LK_RELEASE,
		    &vp->v_interlock, td));
	}
	lvp = NULLVPTOLOWERVP(vp);
	if (lvp == NULL)
		return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
	if ((flags & LK_THISLAYER) == 0) {
		if (flags & LK_INTERLOCK) {
			mtx_unlock(&vp->v_interlock);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, td);
	} else
		flags &= ~LK_THISLAYER;
	return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td));
}

static int
null_islocked(struct vop_islocked_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	if (vp->v_vnlock != NULL)
		return (lockstatus(vp->v_vnlock, td));
	return (lockstatus(&vp->v_lock, td));
}

/*
 * There is no way to tell that someone issued a remove/rmdir operation
 * on the underlying filesystem.  For now we just have to release lowervp
 * as soon as possible.
 *
 * Note: we can't release any resources nor remove the vnode from the hash
 * before the appropriate VXLOCK stuff is done, because another process can
 * find this vnode in the hash during inactivation and may be sitting in
 * vget() waiting for null_inactive to unlock the vnode.  Thus we will do
 * all of that in VOP_RECLAIM.
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	VOP_UNLOCK(vp, 0, td);

	/*
	 * If this is the last reference, then free up the vnode
	 * so as not to tie up the lower vnodes.
	 */
	vrecycle(vp, NULL, td);

	return (0);
}

/*
 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	if (lowervp) {
		null_hashrem(xp);

		/* Two references are held on the lower vnode; drop both. */
		vrele(lowervp);
		vrele(lowervp);
	}

	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	FREE(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}

/*
 * Let the underlying filesystem do the work.
 */
static int
null_createvobject(struct vop_createvobject_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lowervp = VTONULL(vp) ? NULLVPTOLOWERVP(vp) : NULL;
	int error;

	if (vp->v_type == VNON || lowervp == NULL)
		return (0);
	error = VOP_CREATEVOBJECT(lowervp, ap->a_cred, ap->a_td);
	if (error)
		return (error);
	vp->v_vflag |= VV_OBJBUF;
	return (0);
}

/*
 * We have nothing to destroy and this operation shouldn't be bypassed.
 */
static int
null_destroyvobject(struct vop_destroyvobject_args *ap)
{
	struct vnode *vp = ap->a_vp;

	vp->v_vflag &= ~VV_OBJBUF;
	return (0);
}

static int
null_getvobject(struct vop_getvobject_args *ap)
{
	struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);

	if (lvp == NULL)
		return (EINVAL);
	return (VOP_GETVOBJECT(lvp, ap->a_objpp));
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,

	.vop_access =		null_access,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_createvobject =	null_createvobject,
	.vop_destroyvobject =	null_destroyvobject,
	.vop_getattr =		null_getattr,
	.vop_getvobject =	null_getvobject,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		null_inactive,
	.vop_islocked =		null_islocked,
	.vop_lock =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_print =		null_print,
	.vop_reclaim =		null_reclaim,
	.vop_rename =		null_rename,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
};
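
/*
 * A hypothetical vector for a layer cloned from nullfs (see "CREATING
 * OTHER FILE SYSTEM LAYERS" above) would take the same shape, overriding
 * only the operations that cannot be bypassed: getattr to fix up the
 * fsid, lock/unlock for per-layer locking, and reclaim to free the
 * layer's private data.  A sketch:
 *
 *	struct vop_vector example_vnodeops = {
 *		.vop_bypass =	example_bypass,
 *		.vop_getattr =	example_getattr,
 *		.vop_lock =	example_lock,
 *		.vop_unlock =	example_unlock,
 *		.vop_reclaim =	example_reclaim,
 *	};
 */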