Deleted Added
full compact
kern_lock.c (176116) kern_lock.c (176249)
1/*-
2 * Copyright (c) 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Copyright (C) 1997
6 * John S. Dyson. All rights reserved.
7 *
8 * This code contains ideas from software contributed to Berkeley by

--- 27 unchanged lines hidden (view full) ---

36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
41 */
42
43#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Copyright (C) 1997
6 * John S. Dyson. All rights reserved.
7 *
8 * This code contains ideas from software contributed to Berkeley by

--- 27 unchanged lines hidden (view full) ---

36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 176116 2008-02-08 21:45:47Z attilio $");
44__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 176249 2008-02-13 20:44:19Z attilio $");
45
46#include "opt_ddb.h"
47#include "opt_global.h"
48
49#include <sys/param.h>
50#include <sys/kdb.h>
51#include <sys/kernel.h>
52#include <sys/ktr.h>

--- 4 unchanged lines hidden (view full) ---

57#include <sys/systm.h>
58#include <sys/lock_profile.h>
59#ifdef DEBUG_LOCKS
60#include <sys/stack.h>
61#endif
62
63#define LOCKMGR_TRYOP(x) ((x) & LK_NOWAIT)
64#define LOCKMGR_TRYW(x) (LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
45
46#include "opt_ddb.h"
47#include "opt_global.h"
48
49#include <sys/param.h>
50#include <sys/kdb.h>
51#include <sys/kernel.h>
52#include <sys/ktr.h>

--- 4 unchanged lines hidden (view full) ---

57#include <sys/systm.h>
58#include <sys/lock_profile.h>
59#ifdef DEBUG_LOCKS
60#include <sys/stack.h>
61#endif
62
63#define LOCKMGR_TRYOP(x) ((x) & LK_NOWAIT)
64#define LOCKMGR_TRYW(x) (LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
/*
 * LOCKMGR_UNHELD(x): true when the flag word 'x' shows neither an
 * exclusive holder (LK_HAVE_EXCL) nor any shared holders
 * (LK_SHARE_NONZERO), i.e. the lock is not held at all.
 * LOCKMGR_NOTOWNER(td): true when 'td' is neither the current thread
 * nor the LK_KERNPROC marker installed by lockmgr_disown().
 */
65#define LOCKMGR_UNHELD(x) (((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
66#define LOCKMGR_NOTOWNER(td) ((td) != curthread && (td) != LK_KERNPROC)
65
66static void assert_lockmgr(struct lock_object *lock, int what);
67#ifdef DDB
68#include <ddb/ddb.h>
69static void db_show_lockmgr(struct lock_object *lock);
70#endif
71static void lock_lockmgr(struct lock_object *lock, int how);
72static int unlock_lockmgr(struct lock_object *lock);

--- 4 unchanged lines hidden (view full) ---

77 .lc_assert = assert_lockmgr,
78#ifdef DDB
79 .lc_ddb_show = db_show_lockmgr,
80#endif
81 .lc_lock = lock_lockmgr,
82 .lc_unlock = unlock_lockmgr,
83};
84
67
68static void assert_lockmgr(struct lock_object *lock, int what);
69#ifdef DDB
70#include <ddb/ddb.h>
71static void db_show_lockmgr(struct lock_object *lock);
72#endif
73static void lock_lockmgr(struct lock_object *lock, int how);
74static int unlock_lockmgr(struct lock_object *lock);

--- 4 unchanged lines hidden (view full) ---

79 .lc_assert = assert_lockmgr,
80#ifdef DDB
81 .lc_ddb_show = db_show_lockmgr,
82#endif
83 .lc_lock = lock_lockmgr,
84 .lc_unlock = unlock_lockmgr,
85};
86
87#ifndef INVARIANTS
88#define _lockmgr_assert(lkp, what, file, line)
89#endif
90
85/*
86 * Locking primitives implementation.
87 * Locks provide shared/exclusive synchronization.
88 */
89
90void
91assert_lockmgr(struct lock_object *lock, int what)
92{

--- 107 unchanged lines hidden (view full) ---

200 int error;
201 int extflags, lockflags;
202 int contested = 0;
203 uint64_t waitstart = 0;
204
205 error = 0;
206 td = curthread;
207
91/*
92 * Locking primitives implementation.
93 * Locks provide shared/exclusive synchronization.
94 */
95
96void
97assert_lockmgr(struct lock_object *lock, int what)
98{

--- 107 unchanged lines hidden (view full) ---

206 int error;
207 int extflags, lockflags;
208 int contested = 0;
209 uint64_t waitstart = 0;
210
211 error = 0;
212 td = curthread;
213
214#ifdef INVARIANTS
215 if (lkp->lk_flags & LK_DESTROYED) {
216 if (flags & LK_INTERLOCK)
217 mtx_unlock(interlkp);
218 if (panicstr != NULL)
219 return (0);
220 panic("%s: %p lockmgr is destroyed", __func__, lkp);
221 }
222#endif
208 if ((flags & LK_INTERNAL) == 0)
209 mtx_lock(lkp->lk_interlock);
210 CTR6(KTR_LOCK,
211 "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
212 "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
213 lkp->lk_exclusivecount, flags, td);
214#ifdef DEBUG_LOCKS
215 {

--- 59 unchanged lines hidden (view full) ---

275 }
276 /*
277 * We hold an exclusive lock, so downgrade it to shared.
278 * An alternative would be to fail with EDEADLK.
279 */
280 /* FALLTHROUGH downgrade */
281
282 case LK_DOWNGRADE:
223 if ((flags & LK_INTERNAL) == 0)
224 mtx_lock(lkp->lk_interlock);
225 CTR6(KTR_LOCK,
226 "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
227 "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
228 lkp->lk_exclusivecount, flags, td);
229#ifdef DEBUG_LOCKS
230 {

--- 59 unchanged lines hidden (view full) ---

290 }
291 /*
292 * We hold an exclusive lock, so downgrade it to shared.
293 * An alternative would be to fail with EDEADLK.
294 */
295 /* FALLTHROUGH downgrade */
296
297 case LK_DOWNGRADE:
283 KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
284 ("lockmgr: not holding exclusive lock "
285 "(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
286 lkp->lk_lockholder, td, lkp->lk_exclusivecount));
298 _lockmgr_assert(lkp, KA_XLOCKED, file, line);
287 sharelock(td, lkp, lkp->lk_exclusivecount);
288 WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
289 COUNT(td, -lkp->lk_exclusivecount);
290 lkp->lk_exclusivecount = 0;
291 lkp->lk_flags &= ~LK_HAVE_EXCL;
292 lkp->lk_lockholder = LK_NOPROC;
293 if (lkp->lk_waitcount)
294 wakeup((void *)lkp);
295 break;
296
297 case LK_UPGRADE:
298 /*
299 * Upgrade a shared lock to an exclusive one. If another
300 * shared lock has already requested an upgrade to an
301 * exclusive lock, our shared lock is released and an
302 * exclusive lock is requested (which will be granted
303 * after the upgrade). If we return an error, the file
304 * will always be unlocked.
305 */
299 sharelock(td, lkp, lkp->lk_exclusivecount);
300 WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
301 COUNT(td, -lkp->lk_exclusivecount);
302 lkp->lk_exclusivecount = 0;
303 lkp->lk_flags &= ~LK_HAVE_EXCL;
304 lkp->lk_lockholder = LK_NOPROC;
305 if (lkp->lk_waitcount)
306 wakeup((void *)lkp);
307 break;
308
309 case LK_UPGRADE:
310 /*
311 * Upgrade a shared lock to an exclusive one. If another
312 * shared lock has already requested an upgrade to an
313 * exclusive lock, our shared lock is released and an
314 * exclusive lock is requested (which will be granted
315 * after the upgrade). If we return an error, the file
316 * will always be unlocked.
317 */
306 if (lkp->lk_lockholder == td)
307 panic("lockmgr: upgrade exclusive lock");
308 if (lkp->lk_sharecount <= 0)
309 panic("lockmgr: upgrade without shared");
318 _lockmgr_assert(lkp, KA_SLOCKED, file, line);
310 shareunlock(td, lkp, 1);
311 if (lkp->lk_sharecount == 0)
312 lock_profile_release_lock(&lkp->lk_object);
313 /*
314 * If we are just polling, check to see if we will block.
315 */
316 if ((extflags & LK_NOWAIT) &&
317 ((lkp->lk_flags & LK_WANT_UPGRADE) ||

--- 96 unchanged lines hidden (view full) ---

414 COUNT(td, 1);
415 lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
416#if defined(DEBUG_LOCKS)
417 stack_save(&lkp->lk_stack);
418#endif
419 break;
420
421 case LK_RELEASE:
319 shareunlock(td, lkp, 1);
320 if (lkp->lk_sharecount == 0)
321 lock_profile_release_lock(&lkp->lk_object);
322 /*
323 * If we are just polling, check to see if we will block.
324 */
325 if ((extflags & LK_NOWAIT) &&
326 ((lkp->lk_flags & LK_WANT_UPGRADE) ||

--- 96 unchanged lines hidden (view full) ---

423 COUNT(td, 1);
424 lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
425#if defined(DEBUG_LOCKS)
426 stack_save(&lkp->lk_stack);
427#endif
428 break;
429
430 case LK_RELEASE:
431 _lockmgr_assert(lkp, KA_LOCKED, file, line);
422 if (lkp->lk_exclusivecount != 0) {
432 if (lkp->lk_exclusivecount != 0) {
423 if (lkp->lk_lockholder != td &&
424 lkp->lk_lockholder != LK_KERNPROC) {
425 panic("lockmgr: thread %p, not %s %p unlocking",
426 td, "exclusive lock holder",
427 lkp->lk_lockholder);
428 }
429 if (lkp->lk_lockholder != LK_KERNPROC) {
430 WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
431 file, line);
432 COUNT(td, -1);
433 }
433 if (lkp->lk_lockholder != LK_KERNPROC) {
434 WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
435 file, line);
436 COUNT(td, -1);
437 }
434 if (lkp->lk_exclusivecount == 1) {
438 if (lkp->lk_exclusivecount-- == 1) {
435 lkp->lk_flags &= ~LK_HAVE_EXCL;
436 lkp->lk_lockholder = LK_NOPROC;
439 lkp->lk_flags &= ~LK_HAVE_EXCL;
440 lkp->lk_lockholder = LK_NOPROC;
437 lkp->lk_exclusivecount = 0;
438 lock_profile_release_lock(&lkp->lk_object);
441 lock_profile_release_lock(&lkp->lk_object);
439 } else {
440 lkp->lk_exclusivecount--;
441 }
442 } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
443 WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
444 shareunlock(td, lkp, 1);
442 }
443 } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
444 WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
445 shareunlock(td, lkp, 1);
445 } else {
446 printf("lockmgr: thread %p unlocking unheld lock\n",
447 td);
448 kdb_backtrace();
449 }
450
451 if (lkp->lk_flags & LK_WAIT_NONZERO)
452 wakeup((void *)lkp);
453 break;
454
455 case LK_DRAIN:
456 /*

--- 100 unchanged lines hidden (view full) ---

557 */
558void
559lockdestroy(lkp)
560 struct lock *lkp;
561{
562
563 CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
564 lkp, lkp->lk_wmesg);
446 }
447
448 if (lkp->lk_flags & LK_WAIT_NONZERO)
449 wakeup((void *)lkp);
450 break;
451
452 case LK_DRAIN:
453 /*

--- 100 unchanged lines hidden (view full) ---

554 */
/*
 * lockdestroy: tear down a lockmgr lock before its memory is freed.
 *
 * The lock must be completely released: the KASSERTs catch destruction
 * while an exclusive or shared hold (LK_HAVE_EXCL / LK_SHARE_NONZERO)
 * is still outstanding, or while the exclusive count has not drained.
 * The flag word is then collapsed to LK_DESTROYED so that later
 * operations on the stale lock can be detected (checked under
 * INVARIANTS elsewhere in this file), and the embedded lock object is
 * released via lock_destroy().
 */
555void
556lockdestroy(lkp)
557 struct lock *lkp;
558{
559
560 CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
561 lkp, lkp->lk_wmesg);
	/* It is a bug to destroy a lock that is still held or recursed. */
562 KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
563 ("lockmgr still held"));
564 KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
	/* Poison the flags so use-after-destroy can panic under INVARIANTS. */
565 lkp->lk_flags = LK_DESTROYED;
565 lock_destroy(&lkp->lk_object);
566}
567
568/*
569 * Disown the lockmgr.
570 */
571void
572_lockmgr_disown(struct lock *lkp, const char *file, int line)
573{
574 struct thread *td;
575
576 td = curthread;
566 lock_destroy(&lkp->lk_object);
567}
568
569/*
570 * Disown the lockmgr.
571 */
572void
573_lockmgr_disown(struct lock *lkp, const char *file, int line)
574{
575 struct thread *td;
576
577 td = curthread;
577 KASSERT(panicstr != NULL || lkp->lk_exclusivecount,
578 ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
579 KASSERT(panicstr != NULL || lkp->lk_lockholder == td ||
580 lkp->lk_lockholder == LK_KERNPROC,
581 ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
582 td));
578 KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
579 ("%s: %p lockmgr is destroyed", __func__, lkp));
580 _lockmgr_assert(lkp, KA_XLOCKED | KA_NOTRECURSED, file, line);
583
584 /*
585 * Drop the lock reference and switch the owner. This will result
586 * in an atomic operation like td_lock is only accessed by curthread
587 * and lk_lockholder only needs one write. Note also that the lock
588 * owner can be already KERNPROC, so in that case just skip the
589 * decrement.
590 */

--- 12 unchanged lines hidden (view full) ---

603 struct lock *lkp;
604 struct thread *td;
605{
606 int lock_type = 0;
607 int interlocked;
608
609 KASSERT(td == curthread,
610 ("%s: thread passed argument (%p) is not valid", __func__, td));
581
582 /*
583 * Drop the lock reference and switch the owner. This will result
584 * in an atomic operation like td_lock is only accessed by curthread
585 * and lk_lockholder only needs one write. Note also that the lock
586 * owner can be already KERNPROC, so in that case just skip the
587 * decrement.
588 */

--- 12 unchanged lines hidden (view full) ---

601 struct lock *lkp;
602 struct thread *td;
603{
604 int lock_type = 0;
605 int interlocked;
606
607 KASSERT(td == curthread,
608 ("%s: thread passed argument (%p) is not valid", __func__, td));
609 KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
610 ("%s: %p lockmgr is destroyed", __func__, lkp));
611
612 if (!kdb_active) {
613 interlocked = 1;
614 mtx_lock(lkp->lk_interlock);
615 } else
616 interlocked = 0;
617 if (lkp->lk_exclusivecount != 0) {
618 if (lkp->lk_lockholder == td)

--- 11 unchanged lines hidden (view full) ---

630 * Determine the number of waiters on a lock.
631 */
632int
633lockwaiters(lkp)
634 struct lock *lkp;
635{
636 int count;
637
611
612 if (!kdb_active) {
613 interlocked = 1;
614 mtx_lock(lkp->lk_interlock);
615 } else
616 interlocked = 0;
617 if (lkp->lk_exclusivecount != 0) {
618 if (lkp->lk_lockholder == td)

--- 11 unchanged lines hidden (view full) ---

630 * Determine the number of waiters on a lock.
631 */
/*
 * lockwaiters: return the number of threads currently sleeping on the
 * lock (lk_waitcount), read under the interlock.
 *
 * Under INVARIANTS it is a bug to query a destroyed lock.  The value is
 * only a snapshot: it can change as soon as the interlock is dropped.
 */
632int
633lockwaiters(lkp)
634 struct lock *lkp;
635{
636 int count;
637
638 KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
639 ("%s: %p lockmgr is destroyed", __func__, lkp));
638 mtx_lock(lkp->lk_interlock);
639 count = lkp->lk_waitcount;
640 mtx_unlock(lkp->lk_interlock);
641 return (count);
642}
643
644/*
645 * Print out information about state of a lock. Used by VOP_PRINT

--- 13 unchanged lines hidden (view full) ---

659 lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
660 if (lkp->lk_waitcount > 0)
661 printf(" with %d pending", lkp->lk_waitcount);
662#ifdef DEBUG_LOCKS
663 stack_print_ddb(&lkp->lk_stack);
664#endif
665}
666
640 mtx_lock(lkp->lk_interlock);
641 count = lkp->lk_waitcount;
642 mtx_unlock(lkp->lk_interlock);
643 return (count);
644}
645
646/*
647 * Print out information about state of a lock. Used by VOP_PRINT

--- 13 unchanged lines hidden (view full) ---

661 lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
662 if (lkp->lk_waitcount > 0)
663 printf(" with %d pending", lkp->lk_waitcount);
664#ifdef DEBUG_LOCKS
665 stack_print_ddb(&lkp->lk_stack);
666#endif
667}
668
669#ifdef INVARIANT_SUPPORT
670#ifndef INVARIANTS
671#undef _lockmgr_assert
672#endif
673
/*
 * _lockmgr_assert: INVARIANT_SUPPORT backend for the lockmgr assertion
 * interface.
 *
 * Panic (reporting the caller's file/line) unless the lock state
 * matches the 'what' assertion: KA_SLOCKED / KA_LOCKED / KA_XLOCKED
 * (each optionally ORed with KA_RECURSED or KA_NOTRECURSED),
 * KA_UNLOCKED, KA_HELD or KA_UNHELD.
 *
 * NOTE(review): lk_flags and lk_lockholder are sampled without taking
 * the interlock, and all checking is skipped once the system has
 * started to panic (panicstr != NULL).
 */
674void
675_lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
676{
677 struct thread *td;
678 u_int x;
679 int slocked = 0;
680
681 x = lkp->lk_flags;
682 td = lkp->lk_lockholder;
683 if (panicstr != NULL)
684 return;
685 switch (what) {
686 case KA_SLOCKED:
687 case KA_SLOCKED | KA_NOTRECURSED:
688 case KA_SLOCKED | KA_RECURSED:
689 slocked = 1;
	/* FALLTHROUGH: the shared assertions reuse the KA_LOCKED checks. */
690 case KA_LOCKED:
691 case KA_LOCKED | KA_NOTRECURSED:
692 case KA_LOCKED | KA_RECURSED:
693#ifdef WITNESS
694 /*
695 * We cannot trust WITNESS if the lock is held in
696 * exclusive mode and a call to lockmgr_disown() happened.
697 * Workaround this skipping the check if the lock is
698 * held in exclusive mode even for the KA_LOCKED case.
699 */
700 if (slocked || (x & LK_HAVE_EXCL) == 0) {
701 witness_assert(&lkp->lk_object, what, file, line);
702 break;
703 }
704#endif
	/*
	 * Fail if the lock is not held at all, or if there are no shared
	 * holders and either a share hold was asserted or the exclusive
	 * holder is not this thread.
	 */
705 if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
706 (slocked || LOCKMGR_NOTOWNER(td))))
707 panic("Lock %s not %slocked @ %s:%d\n",
708 lkp->lk_object.lo_name, slocked ? "share " : "",
709 file, line);
	/* Recursion checks only apply to an exclusive hold. */
710 if ((x & LK_SHARE_NONZERO) == 0) {
711 if (lockmgr_recursed(lkp)) {
712 if (what & KA_NOTRECURSED)
713 panic("Lock %s recursed @ %s:%d\n",
714 lkp->lk_object.lo_name, file, line);
715 } else if (what & KA_RECURSED)
716 panic("Lock %s not recursed @ %s:%d\n",
717 lkp->lk_object.lo_name, file, line);
718 }
719 break;
720 case KA_XLOCKED:
721 case KA_XLOCKED | KA_NOTRECURSED:
722 case KA_XLOCKED | KA_RECURSED:
723 if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
724 panic("Lock %s not exclusively locked @ %s:%d\n",
725 lkp->lk_object.lo_name, file, line);
726 if (lockmgr_recursed(lkp)) {
727 if (what & KA_NOTRECURSED)
728 panic("Lock %s recursed @ %s:%d\n",
729 lkp->lk_object.lo_name, file, line);
730 } else if (what & KA_RECURSED)
731 panic("Lock %s not recursed @ %s:%d\n",
732 lkp->lk_object.lo_name, file, line);
733 break;
734 case KA_UNLOCKED:
	/*
	 * Only an exclusive hold by this thread (or a disowned
	 * LK_KERNPROC hold) can be detected here; shared holders are
	 * not tracked per-thread.
	 */
735 if (td == curthread || td == LK_KERNPROC)
736 panic("Lock %s exclusively locked @ %s:%d\n",
737 lkp->lk_object.lo_name, file, line);
738 break;
739 case KA_HELD:
740 case KA_UNHELD:
741 if (LOCKMGR_UNHELD(x)) {
742 if (what & KA_HELD)
743 panic("Lock %s not locked by anyone @ %s:%d\n",
744 lkp->lk_object.lo_name, file, line);
745 } else if (what & KA_UNHELD)
746 panic("Lock %s locked by someone @ %s:%d\n",
747 lkp->lk_object.lo_name, file, line);
748 break;
749 default:
750 panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
751 file, line);
752 }
753}
754#endif /* INVARIANT_SUPPORT */
755
667#ifdef DDB
668/*
669 * Check to see if a thread that is blocked on a sleep queue is actually
670 * blocked on a 'struct lock'. If so, output some details and return true.
671 * If the lock has an exclusive owner, return that in *ownerp.
672 */
673int
674lockmgr_chain(struct thread *td, struct thread **ownerp)

--- 59 unchanged lines hidden ---
756#ifdef DDB
757/*
758 * Check to see if a thread that is blocked on a sleep queue is actually
759 * blocked on a 'struct lock'. If so, output some details and return true.
760 * If the lock has an exclusive owner, return that in *ownerp.
761 */
762int
763lockmgr_chain(struct thread *td, struct thread **ownerp)

--- 59 unchanged lines hidden ---