--- null_vnops.c	(revision 111841)
+++ null_vnops.c	(revision 116469)
 /*
  * Copyright (c) 1992, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * John Heidemann of the UCLA Ficus project.
  *
  * Redistribution and use in source and binary forms, with or without

--- 26 unchanged lines hidden ---

  *
  * @(#)null_vnops.c	8.6 (Berkeley) 5/27/95
  *
  * Ancestors:
  *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
  *	...and...
  *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
  *
- * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 111841 2003-03-03 19:15:40Z njl $
+ * $FreeBSD: head/sys/fs/nullfs/null_vnops.c 116469 2003-06-17 08:52:45Z tjr $
  */

 /*
  * Null Layer
  *
  * (See mount_nullfs(8) for more information.)
  *
  * The null layer duplicates a portion of the filesystem

--- 535 unchanged lines hidden ---

 	struct thread *a_td;
 } */ *ap;
 {
 	struct vnode *vp = ap->a_vp;
 	int flags = ap->a_flags;
 	struct thread *td = ap->a_td;
 	struct vnode *lvp;
 	int error;
+	struct null_node *nn;

 	if (flags & LK_THISLAYER) {
 		if (vp->v_vnlock != NULL) {
 			/* lock is shared across layers */
 			if (flags & LK_INTERLOCK)
 				mtx_unlock(&vp->v_interlock);
 			return 0;
 		}

--- 6 unchanged lines hidden ---

 		/*
 		 * The lower level has exported a struct lock to us.  Use
 		 * it so that all vnodes in the stack lock and unlock
 		 * simultaneously.  Note: we don't DRAIN the lock as DRAIN
 		 * decommissions the lock - just because our vnode is
 		 * going away doesn't mean the struct lock below us is.
 		 * LK_EXCLUSIVE is fine.
 		 */
+		if ((flags & LK_INTERLOCK) == 0) {
+			VI_LOCK(vp);
+			flags |= LK_INTERLOCK;
+		}
+		nn = VTONULL(vp);
 		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
 			NULLFSDEBUG("null_lock: avoiding LK_DRAIN\n");
-			return(lockmgr(vp->v_vnlock,
-			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
-			    &vp->v_interlock, td));
+			/*
+			 * Emulate lock draining by waiting for all other
+			 * pending locks to complete.  Afterwards the
+			 * lockmgr call might block, but no other threads
+			 * will attempt to use this nullfs vnode due to the
+			 * VI_XLOCK flag.
+			 */
+			while (nn->null_pending_locks > 0) {
+				nn->null_drain_wakeup = 1;
+				msleep(&nn->null_pending_locks,
+				       VI_MTX(vp),
+				       PVFS,
+				       "nuldr", 0);
+			}
+			error = lockmgr(vp->v_vnlock,
+			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
+			    VI_MTX(vp), td);
+			return error;
 		}
-		return(lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td));
+		nn->null_pending_locks++;
+		error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td);
+		VI_LOCK(vp);
+		/*
+		 * If we're called from vrele then v_usecount can have been 0
+		 * and another process might have initiated a recycle
+		 * operation.  When that happens, just back out.
+		 */
+		if (error == 0 && (vp->v_iflag & VI_XLOCK) != 0 &&
+		    td != vp->v_vxproc) {
+			lockmgr(vp->v_vnlock,
+			    (flags & ~LK_TYPE_MASK) | LK_RELEASE,
+			    VI_MTX(vp), td);
+			VI_LOCK(vp);
+			error = ENOENT;
+		}
+		nn->null_pending_locks--;
+		/*
+		 * Wake up the process draining the vnode after all
+		 * pending lock attempts have failed.
+		 */
+		if (nn->null_pending_locks == 0 &&
+		    nn->null_drain_wakeup != 0) {
+			nn->null_drain_wakeup = 0;
+			wakeup(&nn->null_pending_locks);
+		}
+		if (error == ENOENT && (vp->v_iflag & VI_XLOCK) != 0 &&
+		    vp->v_vxproc != curthread) {
+			vp->v_iflag |= VI_XWANT;
+			msleep(vp, VI_MTX(vp), PINOD, "nulbo", 0);
+		}
+		VI_UNLOCK(vp);
+		return error;
 	} else {
 		/*
 		 * To prevent race conditions involving doing a lookup
 		 * on "..", we have to lock the lower node, then lock our
 		 * node.  Most of the time it won't matter that we lock our
 		 * node (as any locking would need the lower one locked
 		 * first).  But we can LK_DRAIN the upper lock as a step
 		 * towards decommissioning it.

--- 230 unchanged lines hidden ---
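
The substance of this revision is the drain-emulation protocol in null_lock(): because the lockmgr lock is shared with the lower vnode, nullfs cannot LK_DRAIN it outright, so the change counts in-flight lock attempts in the null node and lets a draining thread sleep until that count reaches zero. Below is a minimal userspace sketch of the same protocol using POSIX threads. All names in it (struct drain_lock, dl_pending, dl_drain_wanted, and the three functions) are hypothetical stand-ins: a pthread mutex plays the role of the vnode interlock, a condition variable replaces the msleep()/wakeup() pair, and the VI_XLOCK back-out handling is omitted. It illustrates the pattern only; it is not kernel code.

#include <pthread.h>

struct drain_lock {
	pthread_mutex_t	interlock;	/* plays the role of VI_MTX(vp) */
	pthread_cond_t	drained;	/* stands in for msleep()/wakeup() */
	pthread_mutex_t	lock;		/* the shared lower-layer lock */
	int		dl_pending;	/* cf. null_pending_locks */
	int		dl_drain_wanted;	/* cf. null_drain_wakeup */
};

struct drain_lock dl = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, 0, 0
};

/*
 * Ordinary lock attempt: advertise ourselves in dl_pending while we
 * may be sleeping on the lock, so a drainer knows we are in flight.
 */
void
drain_lock_acquire(struct drain_lock *dl)
{
	pthread_mutex_lock(&dl->interlock);
	dl->dl_pending++;
	pthread_mutex_unlock(&dl->interlock);

	pthread_mutex_lock(&dl->lock);

	pthread_mutex_lock(&dl->interlock);
	dl->dl_pending--;
	/* The last pending attempt wakes the drainer, like wakeup(). */
	if (dl->dl_pending == 0 && dl->dl_drain_wanted) {
		dl->dl_drain_wanted = 0;
		pthread_cond_broadcast(&dl->drained);
	}
	pthread_mutex_unlock(&dl->interlock);
}

void
drain_lock_release(struct drain_lock *dl)
{
	pthread_mutex_unlock(&dl->lock);
}

/*
 * Drain: wait until no other lock attempts are pending, then take the
 * lock exclusively -- without decommissioning the shared lock itself.
 */
void
drain_lock_drain(struct drain_lock *dl)
{
	pthread_mutex_lock(&dl->interlock);
	while (dl->dl_pending > 0) {
		dl->dl_drain_wanted = 1;
		pthread_cond_wait(&dl->drained, &dl->interlock);
	}
	pthread_mutex_unlock(&dl->interlock);

	pthread_mutex_lock(&dl->lock);	/* may still block on a holder */
}

The invariants mirror null_lock(): the counter changes only under the interlock, the last attempt to finish wakes the drainer, and the drainer may still block on the lock itself afterwards. In the kernel that is harmless because the VI_XLOCK flag keeps new users away from the vnode being recycled.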