30c30
< * $FreeBSD: head/sys/kern/subr_witness.c 71709 2001-01-27 07:51:34Z jhb $
---
> * $FreeBSD: head/sys/kern/subr_witness.c 72200 2001-02-09 06:11:45Z bmilekic $
33a34,38
> * Machine independent bits of mutex implementation and implementation of
> * `witness' structure & related debugging routines.
> */
>
> /*
56,61d60
< /*
< * Cause non-inlined mtx_*() to be compiled.
< * Must be defined early because other system headers may include mutex.h.
< */
< #define _KERN_MUTEX_C_
<
85c84
< * Machine independent bits of the mutex implementation
---
> * The WITNESS-enabled mutex debug structure.
87d85
<
103,104c101
< * Assembly macros
< *------------------------------------------------------------------------------
---
> * Internal utility macros.
105a103
> #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
107c105,106
< #define _V(x) __STRING(x)
---
> #define mtx_owner(m) (mtx_unowned((m)) ? NULL \
> : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
109,111c108,109
< /*
< * Default, unoptimized mutex micro-operations
< */
---
> #define RETIP(x) *(((uintptr_t *)(&x)) - 1)
> #define SET_PRIO(p, pri) (p)->p_priority = (pri)
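
A minimal user-space sketch of what the new mtx_unowned()/mtx_owner() macros above rely on: the mtx_lock word holds the owning process pointer with the low-order state bits OR'd in, so masking with MTX_FLAGMASK recovers the owner. The flag values and structure layouts below are placeholders for illustration, not the definitions from sys/mutex.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MTX_RECURSED    0x1             /* placeholder flag values */
#define MTX_CONTESTED   0x2
#define MTX_FLAGMASK    (~(MTX_RECURSED | MTX_CONTESTED))
#define MTX_UNOWNED     0x8             /* lock word value when free */

struct proc { int p_pid; };
struct mtx  { uintptr_t mtx_lock; };

#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)
#define mtx_owner(m)    (mtx_unowned(m) ? NULL : \
    (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))

int
main(void)
{
        struct proc p = { 42 };
        struct mtx m = { MTX_UNOWNED };

        assert(mtx_owner(&m) == NULL);
        /* Owned by p, with waiters: owner pointer | MTX_CONTESTED. */
        m.mtx_lock = (uintptr_t)&p | MTX_CONTESTED;
        printf("owner pid %d\n", mtx_owner(&m)->p_pid);
        return (0);
}

The packing works because struct proc is word-aligned, so the low bits of its address are always zero and are free to carry the recursed/contested state.
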
113,157d110
< #ifndef _obtain_lock
< /* Actually obtain mtx_lock */
< #define _obtain_lock(mp, tid) \
< atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
< #endif
<
< #ifndef _release_lock
< /* Actually release mtx_lock */
< #define _release_lock(mp, tid) \
< atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
< #endif
<
< #ifndef _release_lock_quick
< /* Actually release mtx_lock quickly assuming that we own it */
< #define _release_lock_quick(mp) \
< atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
< #endif
<
< #ifndef _getlock_sleep
< /* Get a sleep lock, deal with recursion inline. */
< #define _getlock_sleep(mp, tid, type) do { \
< if (!_obtain_lock(mp, tid)) { \
< if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
< mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
< else { \
< atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
< (mp)->mtx_recurse++; \
< } \
< } \
< } while (0)
< #endif
<
< #ifndef _getlock_spin_block
< /* Get a spin lock, handle recursion inline (as the less common case) */
< #define _getlock_spin_block(mp, tid, type) do { \
< u_int _mtx_intr = save_intr(); \
< disable_intr(); \
< if (!_obtain_lock(mp, tid)) \
< mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
< else \
< (mp)->mtx_saveintr = _mtx_intr; \
< } while (0)
< #endif
<
< #ifndef _getlock_norecurse
159,160c112
< * Get a lock without any recursion handling. Calls the hard enter function if
< * we can't get it inline.
---
> * Early WITNESS-enabled declarations.
162,166c114
< #define _getlock_norecurse(mp, tid, type) do { \
< if (!_obtain_lock(mp, tid)) \
< mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
< } while (0)
< #endif
---
> #ifdef WITNESS
168d115
< #ifndef _exitlock_norecurse
170,177c117,124
< * Release a sleep lock assuming we haven't recursed on it, recursion is handled
< * in the hard function.
< */
< #define _exitlock_norecurse(mp, tid, type) do { \
< if (!_release_lock(mp, tid)) \
< mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
< } while (0)
< #endif
---
> * Internal WITNESS routines which must be prototyped early.
> *
> * XXX: When/if witness code is cleaned up, it would be wise to place all
> * witness prototyping early in this file.
> */
> static void witness_init(struct mtx *, int flag);
> static void witness_destroy(struct mtx *);
> static void witness_display(void(*)(const char *fmt, ...));
179,195c126
< #ifndef _exitlock
< /*
< * Release a sleep lock when its likely we recursed (the code to
< * deal with simple recursion is inline).
< */
< #define _exitlock(mp, tid, type) do { \
< if (!_release_lock(mp, tid)) { \
< if ((mp)->mtx_lock & MTX_RECURSED) { \
< if (--((mp)->mtx_recurse) == 0) \
< atomic_clear_ptr(&(mp)->mtx_lock, \
< MTX_RECURSED); \
< } else { \
< mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
< } \
< } \
< } while (0)
< #endif
---
> MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure");
197,215d127
< #ifndef _exitlock_spin
< /* Release a spin lock (with possible recursion). */
< #define _exitlock_spin(mp) do { \
< if (!mtx_recursed((mp))) { \
< int _mtx_intr = (mp)->mtx_saveintr; \
< \
< _release_lock_quick(mp); \
< restore_intr(_mtx_intr); \
< } else { \
< (mp)->mtx_recurse--; \
< } \
< } while (0)
< #endif
<
< #ifdef WITNESS
< static void witness_init(struct mtx *, int flag);
< static void witness_destroy(struct mtx *);
< static void witness_display(void(*)(const char *fmt, ...));
<
217a130
>
219,220c132
< * Set to 0 once mutexes have been fully initialized so that witness code can be
< * safely executed.
---
> * This global is set to 0 once it becomes safe to use the witness code.
222a135
>
225,227c138,139
< /*
< * flag++ is slezoid way of shutting up unused parameter warning
< * in mtx_init()
---
> /* XXX XXX XXX
> * flag++ is sleazoid way of shutting up warning
234c146,148
< /* All mutexes in system (used for debug/panic) */
---
> /*
> * All mutex locks in system are kept on the all_mtx list.
> */
244a159,161
> /*
> * Global variables for bookkeeping.
> */
247a165,169
> /*
> * Prototypes for non-exported routines.
> *
> * NOTE: Prototypes for witness routines are placed at the bottom of the file.
> */
249,250d170
< static void mtx_enter_hard(struct mtx *, int type, int saveintr);
< static void mtx_exit_hard(struct mtx *, int type);
252,258d171
< #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
< #define mtx_owner(m) (mtx_unowned(m) ? NULL \
< : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
<
< #define RETIP(x) *(((uintptr_t *)(&x)) - 1)
< #define SET_PRIO(p, pri) (p)->p_priority = (pri)
<
279a193
>
317c231
< printf("XXX: moving process %d(%s) to a new run queue\n",
---
> printf("XXX: moving proc %d(%s) to a new run queue\n",
340a255
>
348a264
>
352c268
< "XXX: previous process %d(%s) has higher priority\n",
---
> "XXX: previous process %d(%s) has higher priority\n",
369a286
>
379,382c296,298
< * Get lock 'm', the macro handles the easy (and most common cases) and leaves
< * the slow stuff to the mtx_enter_hard() function.
< *
< * Note: since type is usually a constant much of this code is optimized out.
---
> * The important part of mtx_trylock{,_flags}()
> * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
> * if we're called, it's because we know we don't already own this lock.
384,385c300,301
< void
< _mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
---
> int
> _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
387c303
< struct mtx *mpp = mtxp;
---
> int rval;
389,391c305
< /* bits only valid on mtx_exit() */
< MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
< STR_mtx_bad_type, file, line);
---
> KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock"));
393c307,315
< if ((type) & MTX_SPIN) {
---
> /*
> * _mtx_trylock does not accept MTX_NOSWITCH option.
> */
> MPASS((opts & MTX_NOSWITCH) == 0);
>
> rval = _obtain_lock(m, CURTHD);
>
> #ifdef WITNESS
> if (rval && m->mtx_witness != NULL) {
395,400c317,318
< * Easy cases of spin locks:
< *
< * 1) We already own the lock and will simply recurse on it (if
< * RLIKELY)
< *
< * 2) The lock is free, we just get it
---
> * We do not handle recursion in _mtx_trylock; see the
> * note at the top of the routine.
402,434c320,321
< if ((type) & MTX_RLIKELY) {
< /*
< * Check for recursion, if we already have this
< * lock we just bump the recursion count.
< */
< if (mpp->mtx_lock == (uintptr_t)CURTHD) {
< mpp->mtx_recurse++;
< goto done;
< }
< }
<
< if (((type) & MTX_TOPHALF) == 0) {
< /*
< * If an interrupt thread uses this we must block
< * interrupts here.
< */
< if ((type) & MTX_FIRST) {
< ASS_IEN;
< disable_intr();
< _getlock_norecurse(mpp, CURTHD,
< (type) & MTX_HARDOPTS);
< } else {
< _getlock_spin_block(mpp, CURTHD,
< (type) & MTX_HARDOPTS);
< }
< } else
< _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
< } else {
< /* Sleep locks */
< if ((type) & MTX_RLIKELY)
< _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
< else
< _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
---
> MPASS(!mtx_recursed(m));
> witness_try_enter(m, (opts | m->mtx_flags), file, line);
436,440c323
< done:
< WITNESS_ENTER(mpp, type, file, line);
< if (((type) & MTX_QUIET) == 0)
< CTR5(KTR_LOCK, STR_mtx_enter_fmt,
< mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
---
> #endif /* WITNESS */
441a325,329
> if ((opts & MTX_QUIET) == 0)
> CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d",
> m->mtx_description, m, rval, file, line);
>
> return rval;
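
As the comments above note, _mtx_trylock() boils down to one acquire-semantics compare-and-set from MTX_UNOWNED to the current thread, with no recursion handling. A rough user-space model of that behaviour using C11 atomics (the constant and the tid stand-in are assumptions, not kernel definitions):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MTX_UNOWNED 0x8                 /* placeholder value */

struct mtx { _Atomic uintptr_t mtx_lock; };

/* Model of _obtain_lock(): succeeds only if the lock is currently free. */
static int
obtain_lock(struct mtx *m, uintptr_t tid)
{
        uintptr_t old = MTX_UNOWNED;

        return (atomic_compare_exchange_strong_explicit(&m->mtx_lock,
            &old, tid, memory_order_acquire, memory_order_relaxed));
}

int
main(void)
{
        struct mtx m = { MTX_UNOWNED };
        uintptr_t me = 0x1000;          /* stand-in for CURTHD */

        printf("first try:  %d\n", obtain_lock(&m, me));   /* 1: acquired */
        printf("second try: %d\n", obtain_lock(&m, me));   /* 0: not free */
        return (0);
}

The second call fails even though the same "thread" already owns the lock, which is exactly why callers of mtx_trylock must know they do not already hold it.
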
445c333
< * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
---
> * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
447c335,336
< * XXX DOES NOT HANDLE RECURSION
---
> * We call this if the lock is either contested (i.e. we need to go to
> * sleep waiting for it), or if we need to recurse on it.
449,450c338,339
< int
< _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
---
> void
> _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
452,453c341
< struct mtx *const mpp = mtxp;
< int rval;
---
> struct proc *p = CURPROC;
455,459c343,348
< rval = _obtain_lock(mpp, CURTHD);
< #ifdef WITNESS
< if (rval && mpp->mtx_witness != NULL) {
< MPASS(mpp->mtx_recurse == 0);
< witness_try_enter(mpp, type, file, line);
---
> if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
> m->mtx_recurse++;
> atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
> if ((opts & MTX_QUIET) == 0)
> CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m);
> return;
461,464d349
< #endif /* WITNESS */
< if (((type) & MTX_QUIET) == 0)
< CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
< mpp->mtx_description, mpp, rval, file, line);
466,467c351,353
< return rval;
< }
---
> if ((opts & MTX_QUIET) == 0)
> CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m,
> (void *)m->mtx_lock, (void *)RETIP(m));
469,475c355,364
< /*
< * Release lock m.
< */
< void
< _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
< {
< struct mtx *const mpp = mtxp;
---
> /*
> * Save our priority. Even though p_nativepri is protected by
> * sched_lock, we don't obtain it here as it can be expensive.
> * Since this is the only place p_nativepri is set, and since two
> * CPUs will not be executing the same process concurrently, we know
> * that no other CPU is going to be messing with this. Also,
> * p_nativepri is only read when we are blocked on a mutex, so that
> * can't be happening right now either.
> */
> p->p_nativepri = p->p_priority;

477,484c366,368
< MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line);
< WITNESS_EXIT(mpp, type, file, line);
< if (((type) & MTX_QUIET) == 0)
< CTR5(KTR_LOCK, STR_mtx_exit_fmt,
< mpp->mtx_description, mpp, mpp->mtx_recurse, file, line);
< if ((type) & MTX_SPIN) {
< if ((type) & MTX_NORECURSE) {
< int mtx_intr = mpp->mtx_saveintr;
---
> while (!_obtain_lock(m, p)) {
> uintptr_t v;
> struct proc *p1;
486,502c370,377
< MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse,
< file, line);
< _release_lock_quick(mpp);
< if (((type) & MTX_TOPHALF) == 0) {
< if ((type) & MTX_FIRST) {
< ASS_IDIS;
< enable_intr();
< } else
< restore_intr(mtx_intr);
< }
< } else {
< if (((type & MTX_TOPHALF) == 0) &&
< (type & MTX_FIRST)) {
< ASS_IDIS;
< ASS_SIEN(mpp);
< }
< _exitlock_spin(mpp);
---
> mtx_lock_spin(&sched_lock);
> /*
> * Check if the lock has been released while spinning for
> * the sched_lock.
> */
> if ((v = m->mtx_lock) == MTX_UNOWNED) {
> mtx_unlock_spin(&sched_lock);
> continue;
504,513d378
< } else {
< /* Handle sleep locks */
< if ((type) & MTX_RLIKELY)
< _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
< else {
< _exitlock_norecurse(mpp, CURTHD,
< (type) & MTX_HARDOPTS);
< }
< }
< }
515,518c380,388
< void
< mtx_enter_hard(struct mtx *m, int type, int saveintr)
< {
< struct proc *p = CURPROC;
---
> /*
> * The mutex was marked contested on release. This means that
> * there are processes blocked on it.
> */
> if (v == MTX_CONTESTED) {
> p1 = TAILQ_FIRST(&m->mtx_blocked);
> KASSERT(p1 != NULL,
> ("contested mutex has no contesters"));
> m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
520,528c390,392
< KASSERT(p != NULL, ("curproc is NULL in mutex"));
<
< switch (type) {
< case MTX_DEF:
< if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
< m->mtx_recurse++;
< atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
< if ((type & MTX_QUIET) == 0)
< CTR1(KTR_LOCK, "mtx_enter: %p recurse", m);
---
> if (p1->p_priority < p->p_priority)
> SET_PRIO(p, p1->p_priority);
> mtx_unlock_spin(&sched_lock);
531,534d394
< if ((type & MTX_QUIET) == 0)
< CTR3(KTR_LOCK,
< "mtx_enter: %p contested (lock=%p) [%p]",
< m, (void *)m->mtx_lock, (void *)RETIP(m));
537,544c397,399
< * Save our priority. Even though p_nativepri is protected
< * by sched_lock, we don't obtain it here as it can be
< * expensive. Since this is the only place p_nativepri is
< * set, and since two CPUs will not be executing the same
< * process concurrently, we know that no other CPU is going
< * to be messing with this. Also, p_nativepri is only read
< * when we are blocked on a mutex, so that can't be happening
< * right now either.
---
> * If the mutex isn't already contested and a failure occurs
> * setting the contested bit, the mutex was either released
> * or the state of the MTX_RECURSED bit changed.
546,549c401,406
< p->p_nativepri = p->p_priority;
< while (!_obtain_lock(m, p)) {
< uintptr_t v;
< struct proc *p1;
---
> if ((v & MTX_CONTESTED) == 0 &&
> !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
> (void *)(v | MTX_CONTESTED))) {
> mtx_unlock_spin(&sched_lock);
> continue;
> }
551,586c408,411
< mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
< /*
< * check if the lock has been released while
< * waiting for the schedlock.
< */
< if ((v = m->mtx_lock) == MTX_UNOWNED) {
< mtx_exit(&sched_lock, MTX_SPIN);
< continue;
< }
< /*
< * The mutex was marked contested on release. This
< * means that there are processes blocked on it.
< */
< if (v == MTX_CONTESTED) {
< p1 = TAILQ_FIRST(&m->mtx_blocked);
< KASSERT(p1 != NULL, ("contested mutex has no contesters"));
< KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
< m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
< if (p1->p_priority < p->p_priority) {
< SET_PRIO(p, p1->p_priority);
< }
< mtx_exit(&sched_lock, MTX_SPIN);
< return;
< }
< /*
< * If the mutex isn't already contested and
< * a failure occurs setting the contested bit the
< * mutex was either release or the
< * state of the RECURSION bit changed.
< */
< if ((v & MTX_CONTESTED) == 0 &&
< !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
< (void *)(v | MTX_CONTESTED))) {
< mtx_exit(&sched_lock, MTX_SPIN);
< continue;
< }
---
> /*
> * We definitely must sleep for this lock.
> */
> mtx_assert(m, MA_NOTOWNED);
588,590d412
< /* We definitely have to sleep for this lock */
< mtx_assert(m, MA_NOTOWNED);
<
592,597c414,419
< /*
< * If we're borrowing an interrupted thread's VM
< * context must clean up before going to sleep.
< */
< if (p->p_flag & (P_ITHD | P_SITHD)) {
< ithd_t *it = (ithd_t *)p;
---
> /*
> * If we're borrowing an interrupted thread's VM context, we
> * must clean up before going to sleep.
> */
> if (p->p_flag & (P_ITHD | P_SITHD)) {
> ithd_t *it = (ithd_t *)p;
599,605c421,426
< if (it->it_interrupted) {
< if ((type & MTX_QUIET) == 0)
< CTR2(KTR_LOCK,
< "mtx_enter: 0x%x interrupted 0x%x",
< it, it->it_interrupted);
< intr_thd_fixup(it);
< }
---
> if (it->it_interrupted) {
> if ((opts & MTX_QUIET) == 0)
> CTR2(KTR_LOCK,
> "mtx_lock: 0x%x interrupted 0x%x",
> it, it->it_interrupted);
> intr_thd_fixup(it);
606a428
> }
609,614c431,444
< /* Put us on the list of procs blocked on this mutex */
< if (TAILQ_EMPTY(&m->mtx_blocked)) {
< p1 = (struct proc *)(m->mtx_lock &
< MTX_FLAGMASK);
< LIST_INSERT_HEAD(&p1->p_contested, m,
< mtx_contested);
---
> /*
> * Put us on the list of threads blocked on this mutex.
> */
> if (TAILQ_EMPTY(&m->mtx_blocked)) {
> p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
> LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
> TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
> } else {
> TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
> if (p1->p_priority > p->p_priority)
> break;
> if (p1)
> TAILQ_INSERT_BEFORE(p1, p, p_procq);
> else
616,625c446
< } else {
< TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
< if (p1->p_priority > p->p_priority)
< break;
< if (p1)
< TAILQ_INSERT_BEFORE(p1, p, p_procq);
< else
< TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
< p_procq);
< }
---
> }
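
The insertion above keeps mtx_blocked sorted by priority, so the best waiter sits at the head of the queue. A small stand-alone sketch of that discipline using the same <sys/queue.h> macros (the p_contested bookkeeping done in the empty-queue case is omitted here, and the struct is illustrative):

#include <sys/queue.h>
#include <stdio.h>

struct proc {
        int p_pid;
        int p_priority;                 /* lower value = higher priority */
        TAILQ_ENTRY(proc) p_procq;
};
TAILQ_HEAD(prochead, proc);

static void
blocked_insert(struct prochead *q, struct proc *p)
{
        struct proc *p1;

        /* Stop at the first waiter with a worse (larger) priority. */
        TAILQ_FOREACH(p1, q, p_procq)
                if (p1->p_priority > p->p_priority)
                        break;
        if (p1 != NULL)
                TAILQ_INSERT_BEFORE(p1, p, p_procq);
        else
                TAILQ_INSERT_TAIL(q, p, p_procq);
}

int
main(void)
{
        struct prochead q = TAILQ_HEAD_INITIALIZER(q);
        struct proc a = { 1, 80 }, b = { 2, 40 }, c = { 3, 60 }, *p;

        blocked_insert(&q, &a);
        blocked_insert(&q, &b);
        blocked_insert(&q, &c);
        TAILQ_FOREACH(p, &q, p_procq)   /* prints pids 2, 3, 1 */
                printf("pid %d pri %d\n", p->p_pid, p->p_priority);
        return (0);
}
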
627,629c448,453
< p->p_blocked = m; /* Who we're blocked on */
< p->p_mtxname = m->mtx_description;
< p->p_stat = SMTX;
---
> /*
> * Save who we're blocked on.
> */
> p->p_blocked = m;
> p->p_mtxname = m->mtx_description;
> p->p_stat = SMTX;
631c455
< propagate_priority(p);
---
> propagate_priority(p);
633,649d456
< if ((type & MTX_QUIET) == 0)
< CTR3(KTR_LOCK,
< "mtx_enter: p %p blocked on [%p] %s",
< p, m, m->mtx_description);
< mi_switch();
< if ((type & MTX_QUIET) == 0)
< CTR3(KTR_LOCK,
< "mtx_enter: p %p free from blocked on [%p] %s",
< p, m, m->mtx_description);
< mtx_exit(&sched_lock, MTX_SPIN);
< }
< return;
< case MTX_SPIN:
< case MTX_SPIN | MTX_FIRST:
< case MTX_SPIN | MTX_TOPHALF:
< {
< int i = 0;
651,664c458,499
< if (m->mtx_lock == (uintptr_t)p) {
< m->mtx_recurse++;
< return;
< }
< if ((type & MTX_QUIET) == 0)
< CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
< for (;;) {
< if (_obtain_lock(m, p))
< break;
< while (m->mtx_lock != MTX_UNOWNED) {
< if (i++ < 1000000)
< continue;
< if (i++ < 6000000)
< DELAY (1);
---
> if ((opts & MTX_QUIET) == 0)
> CTR3(KTR_LOCK,
> "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
> m->mtx_description);
>
> mi_switch();
>
> if ((opts & MTX_QUIET) == 0)
> CTR3(KTR_LOCK,
> "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
> p, m, m->mtx_description);
>
> mtx_unlock_spin(&sched_lock);
> }
>
> return;
> }
>
> /*
> * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
> *
> * This is only called if we need to actually spin for the lock. Recursion
> * is handled inline.
> */
> void
> _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file,
> int line)
> {
> int i = 0;
>
> if ((opts & MTX_QUIET) == 0)
> CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m);
>
> for (;;) {
> if (_obtain_lock(m, CURPROC))
> break;
>
> while (m->mtx_lock != MTX_UNOWNED) {
> if (i++ < 1000000)
> continue;
> if (i++ < 6000000)
> DELAY(1);
666c501
< else if (!db_active)
---
> else if (!db_active)
668c503
< else
---
> else
670,674c505,506
< panic(
< "spin lock %s held by %p for > 5 seconds",
< m->mtx_description,
< (void *)m->mtx_lock);
< }
---
> panic("spin lock %s held by %p for > 5 seconds",
> m->mtx_description, (void *)m->mtx_lock);
676,686d507
<
< #ifdef MUTEX_DEBUG
< if (type != MTX_SPIN)
< m->mtx_saveintr = 0xbeefface;
< else
< #endif
< m->mtx_saveintr = saveintr;
< if ((type & MTX_QUIET) == 0)
< CTR1(KTR_LOCK, "mtx_enter: %p spin done", m);
< return;
< }
687a509,514
>
> m->mtx_saveintr = mtx_intr;
> if ((opts & MTX_QUIET) == 0)
> CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
>
> return;
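
A user-space sketch of the bounded spin that _mtx_lock_spin() performs above. The 1000000/6000000 iteration thresholds mirror the code; the atomics, usleep() and abort() stand in for the kernel's _obtain_lock(), DELAY() and panic()/debugger paths, and the interrupt-state (mtx_saveintr) handling is deliberately left out:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MTX_UNOWNED 0x8                 /* placeholder value */

struct mtx { _Atomic uintptr_t mtx_lock; };

static void
lock_spin(struct mtx *m, uintptr_t tid)
{
        uintptr_t old;
        int i = 0;

        for (;;) {
                old = MTX_UNOWNED;
                if (atomic_compare_exchange_strong(&m->mtx_lock, &old, tid))
                        break;                  /* got it */
                while (atomic_load(&m->mtx_lock) != MTX_UNOWNED) {
                        if (i++ < 1000000)
                                continue;       /* pure busy-wait first */
                        if (i++ < 6000000)
                                usleep(1);      /* then poll more gently */
                        else {
                                fprintf(stderr, "spin lock held too long\n");
                                abort();        /* kernel panics/enters ddb */
                        }
                }
        }
}

int
main(void)
{
        struct mtx m = { MTX_UNOWNED };

        lock_spin(&m, 0x1000);          /* uncontended: returns at once */
        puts("acquired");
        return (0);
}
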
689a517,522
> /*
> * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
> *
> * We are only called here if the lock is recursed or contested (i.e. we
> * need to wake up a blocked thread).
> */
691c524
< mtx_exit_hard(struct mtx *m, int type)
---
> _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
698,739c531,583
< switch (type) {
< case MTX_DEF:
< case MTX_DEF | MTX_NOSWITCH:
< if (mtx_recursed(m)) {
< if (--(m->mtx_recurse) == 0)
< atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
< if ((type & MTX_QUIET) == 0)
< CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m);
< return;
< }
< mtx_enter(&sched_lock, MTX_SPIN);
< if ((type & MTX_QUIET) == 0)
< CTR1(KTR_LOCK, "mtx_exit: %p contested", m);
< p1 = TAILQ_FIRST(&m->mtx_blocked);
< MPASS(p->p_magic == P_MAGIC);
< MPASS(p1->p_magic == P_MAGIC);
< TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
< if (TAILQ_EMPTY(&m->mtx_blocked)) {
< LIST_REMOVE(m, mtx_contested);
< _release_lock_quick(m);
< if ((type & MTX_QUIET) == 0)
< CTR1(KTR_LOCK, "mtx_exit: %p not held", m);
< } else
< atomic_store_rel_ptr(&m->mtx_lock,
< (void *)MTX_CONTESTED);
< pri = MAXPRI;
< LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
< int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
< if (cp < pri)
< pri = cp;
< }
< if (pri > p->p_nativepri)
< pri = p->p_nativepri;
< SET_PRIO(p, pri);
< if ((type & MTX_QUIET) == 0)
< CTR2(KTR_LOCK,
< "mtx_exit: %p contested setrunqueue %p", m, p1);
< p1->p_blocked = NULL;
< p1->p_mtxname = NULL;
< p1->p_stat = SRUN;
< setrunqueue(p1);
< if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
---
> MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line);
>
> if ((opts & MTX_QUIET) == 0)
> CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description,
> m, m->mtx_recurse, file, line);
>
> if (mtx_recursed(m)) {
> if (--(m->mtx_recurse) == 0)
> atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
> if ((opts & MTX_QUIET) == 0)
> CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
> return;
> }
>
> mtx_lock_spin(&sched_lock);
> if ((opts & MTX_QUIET) == 0)
> CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
>
> p1 = TAILQ_FIRST(&m->mtx_blocked);
> MPASS(p->p_magic == P_MAGIC);
> MPASS(p1->p_magic == P_MAGIC);
>
> TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
>
> if (TAILQ_EMPTY(&m->mtx_blocked)) {
> LIST_REMOVE(m, mtx_contested);
> _release_lock_quick(m);
> if ((opts & MTX_QUIET) == 0)
> CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
> } else
> atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
>
> pri = MAXPRI;
> LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
> int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
> if (cp < pri)
> pri = cp;
> }
>
> if (pri > p->p_nativepri)
> pri = p->p_nativepri;
> SET_PRIO(p, pri);
>
> if ((opts & MTX_QUIET) == 0)
> CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
> m, p1);
>
> p1->p_blocked = NULL;
> p1->p_mtxname = NULL;
> p1->p_stat = SRUN;
> setrunqueue(p1);
>
> if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
741,742c585,586
< if (p->p_flag & (P_ITHD | P_SITHD)) {
< ithd_t *it = (ithd_t *)p;
---
> if (p->p_flag & (P_ITHD | P_SITHD)) {
> ithd_t *it = (ithd_t *)p;
744,750c588,593
< if (it->it_interrupted) {
< if ((type & MTX_QUIET) == 0)
< CTR2(KTR_LOCK,
< "mtx_exit: 0x%x interruped 0x%x",
< it, it->it_interrupted);
< intr_thd_fixup(it);
< }
---
> if (it->it_interrupted) {
> if ((opts & MTX_QUIET) == 0)
> CTR2(KTR_LOCK,
> "_mtx_unlock_sleep: 0x%x interrupted 0x%x",
> it, it->it_interrupted);
> intr_thd_fixup(it);
752,762d594
< #endif
< setrunqueue(p);
< if ((type & MTX_QUIET) == 0)
< CTR2(KTR_LOCK,
< "mtx_exit: %p switching out lock=%p",
< m, (void *)m->mtx_lock);
< mi_switch();
< if ((type & MTX_QUIET) == 0)
< CTR2(KTR_LOCK,
< "mtx_exit: %p resuming lock=%p",
< m, (void *)m->mtx_lock);
764,790c596,606
< mtx_exit(&sched_lock, MTX_SPIN);
< break;
< case MTX_SPIN:
< case MTX_SPIN | MTX_FIRST:
< if (mtx_recursed(m)) {
< m->mtx_recurse--;
< return;
< }
< MPASS(mtx_owned(m));
< _release_lock_quick(m);
< if (type & MTX_FIRST)
< enable_intr(); /* XXX is this kosher? */
< else {
< MPASS(m->mtx_saveintr != 0xbeefface);
< restore_intr(m->mtx_saveintr);
< }
< break;
< case MTX_SPIN | MTX_TOPHALF:
< if (mtx_recursed(m)) {
< m->mtx_recurse--;
< return;
< }
< MPASS(mtx_owned(m));
< _release_lock_quick(m);
< break;
< default:
< panic("mtx_exit_hard: unsupported type 0x%x\n", type);
---
> #endif
> setrunqueue(p);
> if ((opts & MTX_QUIET) == 0)
> CTR2(KTR_LOCK,
> "_mtx_unlock_sleep: %p switching out lock=%p", m,
> (void *)m->mtx_lock);
>
> mi_switch();
> if ((opts & MTX_QUIET) == 0)
> CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
> m, (void *)m->mtx_lock);
791a608,611
>
> mtx_unlock_spin(&sched_lock);
>
> return;
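
The release path above recomputes the unlocking thread's priority: it drops back to the best (numerically lowest) priority among threads still blocked on mutexes it continues to hold, clamped so it never falls below its own native priority. A tiny user-space model of just that arithmetic (the MAXPRI value and the helper are illustrative assumptions):

#include <stdio.h>

#define MAXPRI 255                      /* illustrative; lower = better */

static int
recompute_priority(int nativepri, const int *blocked_best, int nheld)
{
        int pri = MAXPRI;

        /* Best priority among waiters on mutexes we still hold. */
        for (int i = 0; i < nheld; i++)
                if (blocked_best[i] < pri)
                        pri = blocked_best[i];
        if (pri > nativepri)            /* never drop below our own */
                pri = nativepri;
        return (pri);
}

int
main(void)
{
        int best[] = { 40, 80 };        /* best waiter on each held mutex */

        /* Prints 40: still boosted by the waiter at priority 40. */
        printf("new priority: %d\n", recompute_priority(60, best, 2));
        return (0);
}
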
793a614,621
> /*
> * All the unlocking of MTX_SPIN locks is done inline.
> * See the _rel_spin_lock() macro for the details.
> */
>
> /*
> * The INVARIANTS-enabled mtx_assert()
> */
824a653,655
> /*
> * The MUTEX_DEBUG-enabled mtx_validate()
> */
846c677
< mtx_enter(&all_mtx, MTX_DEF);
---
> mtx_lock(&all_mtx);
890c721
< mtx_exit(&all_mtx, MTX_DEF);
---
> mtx_unlock(&all_mtx);
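
For readers tracking the mechanical call-site changes here and elsewhere in this diff (the sched_lock, Giant, all_mtx and w_mtx sites), the old two-argument enter/exit interface maps onto the new names roughly as follows:

        mtx_enter(m, MTX_DEF)               ->  mtx_lock(m)
        mtx_exit(m, MTX_DEF)                ->  mtx_unlock(m)
        mtx_enter(m, MTX_SPIN)              ->  mtx_lock_spin(m)
        mtx_exit(m, MTX_SPIN)               ->  mtx_unlock_spin(m)
        mtx_enter(m, MTX_SPIN | MTX_QUIET)  ->  mtx_lock_spin_flags(m, MTX_QUIET)
        mtx_exit(m, MTX_SPIN | MTX_QUIET)   ->  mtx_unlock_spin_flags(m, MTX_QUIET)
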
894a726,730
> /*
> * Mutex initialization routine; initialize lock `m' of type contained in
> * `opts' with options contained in `opts' and description `description.'
> * Place on "all_mtx" queue.
> */
896c732
< mtx_init(struct mtx *m, const char *t, int flag)
---
> mtx_init(struct mtx *m, const char *description, int opts)
898,899c734,737
< if ((flag & MTX_QUIET) == 0)
< CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t);
---
>
> if ((opts & MTX_QUIET) == 0)
> CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description);
>
901c739,740
< if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
---
> /* Diagnostic and error correction */
> if (mtx_validate(m, MV_INIT))
906a746
>
909d748
< /* XXX - should not use DEVBUF */
911c750
< M_DEVBUF, M_NOWAIT | M_ZERO);
---
> M_WITNESS, M_NOWAIT | M_ZERO);
915d753
< m->mtx_description = t;
917c755,756
< m->mtx_flags = flag;
---
> m->mtx_description = description;
> m->mtx_flags = opts;
918a758
>
920c760
< mtx_enter(&all_mtx, MTX_DEF);
---
> mtx_lock(&all_mtx);
927c767,768
< mtx_exit(&all_mtx, MTX_DEF);
---
> mtx_unlock(&all_mtx);
>
930c771
< witness_init(m, flag);
---
> witness_init(m, opts);
933a775,777
> /*
> * Remove lock `m' from all_mtx queue.
> */
941a786
>
942a788
>
953c799,801
< mtx_validate(m, MV_DESTROY); /* diagnostic */
---
>
> /* diagnostic */
> mtx_validate(m, MV_DESTROY);
962c810
< mtx_enter(&all_mtx, MTX_DEF);
---
> mtx_lock(&all_mtx);
964a813
>
967a817
>
969c819
< free(m->mtx_debug, M_DEVBUF);
---
> free(m->mtx_debug, M_WITNESS);
971a822
>
973c824
< mtx_exit(&all_mtx, MTX_DEF);
---
> mtx_unlock(&all_mtx);
975a827
>
977,978c829
< * The non-inlined versions of the mtx_*() functions are always built (above),
< * but the witness code depends on the WITNESS kernel option being specified.
---
> * The WITNESS-enabled diagnostic code.
980d830
<
991c841
< mtx_exit(&Giant, MTX_DEF);
---
> mtx_unlock(&Giant);
993d842
< mtx_enter(&all_mtx, MTX_DEF);
994a844,845
> mtx_lock(&all_mtx);
>
998d848
< /* XXX - should not use DEVBUF */
1000c850
< M_DEVBUF, M_NOWAIT | M_ZERO);
---
> M_WITNESS, M_NOWAIT | M_ZERO);
1005c855
< mtx_exit(&all_mtx, MTX_DEF);
---
> mtx_unlock(&all_mtx);
1010c860
< mtx_enter(&Giant, MTX_DEF);
---
> mtx_lock(&Giant);
1063a914,916
> /*
> * Witness-enabled globals
> */
1072,1084c925,940
< static struct witness *enroll __P((const char *description, int flag));
< static int itismychild __P((struct witness *parent, struct witness *child));
< static void removechild __P((struct witness *parent, struct witness *child));
< static int isitmychild __P((struct witness *parent, struct witness *child));
< static int isitmydescendant __P((struct witness *parent, struct witness *child));
< static int dup_ok __P((struct witness *));
< static int blessed __P((struct witness *, struct witness *));
< static void witness_displaydescendants
< __P((void(*)(const char *fmt, ...), struct witness *));
< static void witness_leveldescendents __P((struct witness *parent, int level));
< static void witness_levelall __P((void));
< static struct witness * witness_get __P((void));
< static void witness_free __P((struct witness *m));
---
> /*
> * Internal witness routine prototypes
> */
> static struct witness *enroll(const char *description, int flag);
> static int itismychild(struct witness *parent, struct witness *child);
> static void removechild(struct witness *parent, struct witness *child);
> static int isitmychild(struct witness *parent, struct witness *child);
> static int isitmydescendant(struct witness *parent, struct witness *child);
> static int dup_ok(struct witness *);
> static int blessed(struct witness *, struct witness *);
> static void
> witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *);
> static void witness_leveldescendents(struct witness *parent, int level);
> static void witness_levelall(void);
> static struct witness * witness_get(void);
> static void witness_free(struct witness *m);
1086d941
<
1132c987,988
< static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed);
---
> static int blessed_count =
> sizeof(blessed_list) / sizeof(struct witness_blessed);
1214c1070
< mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
1217c1073
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1224c1080
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1248c1104
< panic("blockable mtx_enter() of %s when not legal @ %s:%d",
---
> panic("blockable mtx_lock() of %s when not legal @ %s:%d",
1270c1126
< mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
1275c1131
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1279c1135
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1287c1143
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1316c1172
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1359c1215
< mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
1362c1218
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1410c1266
< mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
1413c1269
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1429c1285
< panic("switchable mtx_exit() of %s when not legal @ %s:%d",
---
> panic("switchable mtx_unlock() of %s when not legal @ %s:%d",
1500c1356
< mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_lock_spin_flags(&w_mtx, MTX_QUIET);
1503c1359
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1512c1368
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);
1734c1590
< mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
---
> mtx_unlock_spin_flags(&w_mtx, MTX_QUIET);