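sys/kern/subr_turnstile.c, head, r169666 -> r170295. The diff retires the
global sched_lock here in favor of a per-turnstile spin lock (ts_lock) that
doubles as a blocked thread's td_lock, renames turnstile_lock() and
turnstile_release() to turnstile_chain_lock() and turnstile_chain_unlock(),
and replaces the lookup inside turnstile_wait() with an explicit
turnstile_trywait()/turnstile_cancel()/turnstile_wait() protocol.
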
60c60
< __FBSDID("$FreeBSD: head/sys/kern/subr_turnstile.c 169666 2007-05-18 06:32:24Z jeff $");
---
> __FBSDID("$FreeBSD: head/sys/kern/subr_turnstile.c 170295 2007-06-04 23:51:44Z jeff $");
118a119
> struct mtx ts_lock; /* Spin lock for self. */
164a166
> static void turnstile_fini(void *mem, int size);
174d175
< struct turnstile_chain *tc;
178c179
< mtx_assert(&sched_lock, MA_OWNED);
---
> THREAD_LOCK_ASSERT(td, MA_OWNED);
180a182,189
> MPASS(td->td_lock == &ts->ts_lock);
> /*
> * Grab a recursive lock on this turnstile chain so it stays locked
> * for the whole operation. The caller expects us to return with
> * the original lock held. We only ever lock down the chain so
> * the lock order is constant.
> */
> mtx_lock_spin(&ts->ts_lock);
188a198
> mtx_unlock_spin(&ts->ts_lock);
191a202,203
> thread_lock_flags(td, MTX_DUPOK);
> mtx_unlock_spin(&ts->ts_lock);
216c228,229
< if (td->td_priority <= pri)
---
> if (td->td_priority <= pri) {
> thread_unlock(td);
217a231
> }
229a244
> thread_unlock(td);
254,256c269
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_lock_spin(&tc->tc_lock);
<
---
> MPASS(td->td_lock == &ts->ts_lock);
259c272
< mtx_unlock_spin(&tc->tc_lock);
---
> mtx_unlock_spin(&ts->ts_lock);
262c275
< mtx_unlock_spin(&tc->tc_lock);
---
> /* The thread lock is released as ts lock above. */
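
propagate_priority() now walks the chain of blocked threads hand over hand:
a blocked thread's td_lock is its turnstile's ts_lock, so the loop takes the
next lock (with MTX_DUPOK, since it can be the recursive ts_lock already
held) before dropping the current one. A minimal user-space sketch of the
pattern, assuming pthreads in place of spin locks; struct node, node_lock,
and propagate() are illustrative names, not kernel API:

#include <pthread.h>
#include <stddef.h>

struct node {
        pthread_mutex_t  node_lock;
        int              priority;      /* lower value = higher priority */
        struct node     *next;          /* node this one is blocked on */
};

static void
propagate(struct node *n, int pri)
{
        struct node *next;

        pthread_mutex_lock(&n->node_lock);
        while (n->priority > pri) {
                n->priority = pri;      /* lend the better priority */
                next = n->next;
                if (next == NULL)
                        break;
                /* Hand over hand: take the next lock first. */
                pthread_mutex_lock(&next->node_lock);
                pthread_mutex_unlock(&n->node_lock);
                n = next;
        }
        pthread_mutex_unlock(&n->node_lock);
}
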
273d285
< struct turnstile_chain *tc;
277c289
< mtx_assert(&sched_lock, MA_OWNED);
---
> THREAD_LOCK_ASSERT(td, MA_OWNED);
283c295
< * that is waiting on sched_lock in turnstile_unpend() to
---
> * that is waiting on the thread lock in turnstile_unpend() to
298,299c310
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_assert(&tc->tc_lock, MA_OWNED);
---
> MPASS(td->td_lock == &ts->ts_lock);
388c399,400
< NULL, turnstile_dtor, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
---
> NULL, turnstile_dtor, turnstile_init, turnstile_fini,
> UMA_ALIGN_CACHE, 0);
390c402
< NULL, NULL, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
---
> NULL, NULL, turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, 0);
403d414
< struct turnstile_chain *tc;
406d416
< mtx_assert(&sched_lock, MA_OWNED);
414,415c424,425
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_lock_spin(&tc->tc_lock);
---
> MPASS(td->td_lock == &ts->ts_lock);
> mtx_assert(&ts->ts_lock, MA_OWNED);
418,419c428
< if (!turnstile_adjust_thread(ts, td)) {
< mtx_unlock_spin(&tc->tc_lock);
---
> if (!turnstile_adjust_thread(ts, td))
421,422d429
< }
<
433,434d439
< mtx_unlock_spin(&tc->tc_lock);
< critical_enter();
436,438c441
< critical_exit();
< } else
< mtx_unlock_spin(&tc->tc_lock);
---
> }
489a493
> mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN | MTX_RECURSE);
492a497,505
> static void
> turnstile_fini(void *mem, int size)
> {
> struct turnstile *ts;
>
> ts = mem;
> mtx_destroy(&ts->ts_lock);
> }
>
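
turnstile_init() and turnstile_fini() are UMA hooks that run only when an
item is imported from or released back to the VM; ctor/dtor run on every
uma_zalloc()/uma_zfree(). Pairing mtx_init() with mtx_destroy() there keeps
ts_lock valid while a free turnstile sits in the zone's caches. A sketch of
the pairing with hypothetical obj_* hooks:

        zone = uma_zcreate("EXAMPLE", sizeof(struct obj),
            obj_ctor, obj_dtor,         /* every alloc and free */
            obj_init, obj_fini,         /* import and release only */
            UMA_ALIGN_CACHE, 0);
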
517c530
< turnstile_lock(struct lock_object *lock)
---
> turnstile_chain_lock(struct lock_object *lock)
524a538,576
> struct turnstile *
> turnstile_trywait(struct lock_object *lock)
> {
> struct turnstile_chain *tc;
> struct turnstile *ts;
>
> tc = TC_LOOKUP(lock);
> mtx_lock_spin(&tc->tc_lock);
> LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
> if (ts->ts_lockobj == lock) {
> mtx_lock_spin(&ts->ts_lock);
> return (ts);
> }
>
> ts = curthread->td_turnstile;
> MPASS(ts != NULL);
> mtx_lock_spin(&ts->ts_lock);
> KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
> ts->ts_lockobj = lock;
>
> return (ts);
> }
>
> void
> turnstile_cancel(struct turnstile *ts)
> {
> struct turnstile_chain *tc;
> struct lock_object *lock;
>
> mtx_assert(&ts->ts_lock, MA_OWNED);
>
> mtx_unlock_spin(&ts->ts_lock);
> lock = ts->ts_lockobj;
> if (ts == curthread->td_turnstile)
> ts->ts_lockobj = NULL;
> tc = TC_LOOKUP(lock);
> mtx_unlock_spin(&tc->tc_lock);
> }
>
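
turnstile_trywait() pins the chain and hands back a locked turnstile (the
caller's spare one when nobody waits yet), after which the caller either
blocks with turnstile_wait() or backs out with turnstile_cancel(). A hedged
sketch of a contested lock path driving the new protocol; try_acquire(),
lock_became_free(), and lock_owner() are hypothetical helpers, not kernel
API:

static void
example_lock_sleep(struct lock_object *lo)
{
        struct turnstile *ts;

        for (;;) {
                if (try_acquire(lo))            /* hypothetical fast path */
                        return;
                ts = turnstile_trywait(lo);     /* tc_lock and ts_lock held */
                if (lock_became_free(lo)) {     /* raced with a release */
                        turnstile_cancel(ts);   /* drops both locks */
                        continue;
                }
                turnstile_wait(ts, lock_owner(lo), TS_EXCLUSIVE_QUEUE);
        }
}
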
539c591,592
< if (ts->ts_lockobj == lock)
---
> if (ts->ts_lockobj == lock) {
> mtx_lock_spin(&ts->ts_lock);
540a594
> }
548c602
< turnstile_release(struct lock_object *lock)
---
> turnstile_chain_unlock(struct lock_object *lock)
577c631
< turnstile_claim(struct lock_object *lock)
---
> turnstile_claim(struct turnstile *ts)
579,580d632
< struct turnstile_chain *tc;
< struct turnstile *ts;
581a634
> struct turnstile_chain *tc;
583,586c636,637
< tc = TC_LOOKUP(lock);
< mtx_assert(&tc->tc_lock, MA_OWNED);
< ts = turnstile_lookup(lock);
< MPASS(ts != NULL);
---
> mtx_assert(&ts->ts_lock, MA_OWNED);
> MPASS(ts != curthread->td_turnstile);
596c647
< mtx_unlock_spin(&tc->tc_lock);
---
> MPASS(td->td_lock == &ts->ts_lock);
601c652
< mtx_lock_spin(&sched_lock);
---
> thread_lock(owner);
604c655,658
< mtx_unlock_spin(&sched_lock);
---
> thread_unlock(owner);
> tc = TC_LOOKUP(ts->ts_lockobj);
> mtx_unlock_spin(&ts->ts_lock);
> mtx_unlock_spin(&tc->tc_lock);
614c668
< turnstile_wait(struct lock_object *lock, struct thread *owner, int queue)
---
> turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
617d670
< struct turnstile *ts;
618a672
> struct lock_object *lock;
621,623c675
< tc = TC_LOOKUP(lock);
< mtx_assert(&tc->tc_lock, MA_OWNED);
< MPASS(td->td_turnstile != NULL);
---
> mtx_assert(&ts->ts_lock, MA_OWNED);
630,632d681
< /* Look up the turnstile associated with the lock 'lock'. */
< ts = turnstile_lookup(lock);
<
638c687,689
< if (ts == NULL) {
---
> tc = TC_LOOKUP(ts->ts_lockobj);
> if (ts == td->td_turnstile) {
> mtx_assert(&tc->tc_lock, MA_OWNED);
647c698
< ts = td->td_turnstile;
---
> tc = TC_LOOKUP(ts->ts_lockobj);
657,658c708
< KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
< ts->ts_lockobj = lock;
---
> MPASS(ts->ts_lockobj != NULL);
676a727,728
> thread_lock(td);
> thread_lock_set(td, &ts->ts_lock);
678d729
< mtx_unlock_spin(&tc->tc_lock);
680,708d730
< mtx_lock_spin(&sched_lock);
< /*
< * Handle race condition where a thread on another CPU that owns
< * lock 'lock' could have woken us in between us dropping the
< * turnstile chain lock and acquiring the sched_lock.
< */
< if (td->td_flags & TDF_TSNOBLOCK) {
< td->td_flags &= ~TDF_TSNOBLOCK;
< mtx_unlock_spin(&sched_lock);
< return;
< }
<
< #ifdef notyet
< /*
< * If we're borrowing an interrupted thread's VM context, we
< * must clean up before going to sleep.
< */
< if (td->td_ithd != NULL) {
< struct ithd *it = td->td_ithd;
<
< if (it->it_interrupted) {
< if (LOCK_LOG_TEST(lock, 0))
< CTR3(KTR_LOCK, "%s: %p interrupted %p",
< __func__, it, it->it_interrupted);
< intr_thd_fixup(it);
< }
< }
< #endif
<
709a732
> lock = ts->ts_lockobj;
714c737
< critical_enter();
---
> mtx_unlock_spin(&tc->tc_lock);
716d738
< critical_exit();
721a744,745
> MPASS(td->td_lock == &ts->ts_lock);
> SCHED_STAT_INC(switch_turnstile);
727,728c751
<
< mtx_unlock_spin(&sched_lock);
---
> thread_unlock(td);
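
The old sequence of dropping tc_lock, reacquiring sched_lock, and patching
up the TDF_TSNOBLOCK race disappears because turnstile_wait() now retargets
the blocking thread's td_lock at ts_lock via thread_lock_set(); mi_switch()
then releases the turnstile lock as the thread lock, leaving no window where
a waker can see the thread half-blocked. A user-space sketch of the
lock-pointer indirection, assuming pthreads and a plain pointer where the
kernel uses volatile accesses:

#include <pthread.h>

struct thread {
        pthread_mutex_t *td_lock;       /* lock of the current container */
};

/* Lock whatever currently covers the thread; it may move under us. */
static void
thread_lock(struct thread *td)
{
        pthread_mutex_t *l;

        for (;;) {
                l = td->td_lock;
                pthread_mutex_lock(l);
                if (l == td->td_lock)           /* still the right lock? */
                        return;
                pthread_mutex_unlock(l);        /* retargeted; retry */
        }
}

/* Caller holds both the new lock and the thread's current lock. */
static void
thread_lock_set(struct thread *td, pthread_mutex_t *new)
{
        pthread_mutex_t *old;

        old = td->td_lock;
        td->td_lock = new;                      /* publish the new lock */
        pthread_mutex_unlock(old);              /* then drop the old one */
}
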
742a766
> mtx_assert(&ts->ts_lock, MA_OWNED);
746,747d769
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_assert(&tc->tc_lock, MA_OWNED);
768a791,792
> tc = TC_LOOKUP(ts->ts_lockobj);
> mtx_assert(&tc->tc_lock, MA_OWNED);
793a818
> mtx_assert(&ts->ts_lock, MA_OWNED);
796a822,825
> /*
> * We must have the chain locked so that we can remove the empty
> * turnstile from the hash queue.
> */
836c865
< struct turnstile_chain *tc;
---
> struct turnstile *nts;
840a870
> mtx_assert(&ts->ts_lock, MA_OWNED);
843,844d872
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_assert(&tc->tc_lock, MA_OWNED);
858d885
<
859a887,895
> * Adjust the priority of curthread based on other contested
> * locks it owns. Don't lower the priority below the base
> * priority however.
> */
> td = curthread;
> pri = PRI_MAX;
> thread_lock(td);
> mtx_lock_spin(&td_contested_lock);
> /*
867d902
< mtx_lock_spin(&td_contested_lock);
870d904
< mtx_unlock_spin(&td_contested_lock);
872,885c906,907
< critical_enter();
< mtx_unlock_spin(&tc->tc_lock);
<
< /*
< * Adjust the priority of curthread based on other contested
< * locks it owns. Don't lower the priority below the base
< * priority however.
< */
< td = curthread;
< pri = PRI_MAX;
< mtx_lock_spin(&sched_lock);
< mtx_lock_spin(&td_contested_lock);
< LIST_FOREACH(ts, &td->td_contested, ts_link) {
< cp = turnstile_first_waiter(ts)->td_priority;
---
> LIST_FOREACH(nts, &td->td_contested, ts_link) {
> cp = turnstile_first_waiter(nts)->td_priority;
891c913
<
---
> thread_unlock(td);
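
The unpend path recomputes curthread's lent priority under its own thread
lock: the candidate is the numerically lowest priority among the first
waiters on every turnstile the thread still owns, clamped so the thread
never falls below its base priority. A minimal sketch of that reduction;
the array form is illustrative, PRI_MAX is 255 as in sys/priority.h:

#define PRI_MAX 255

/* Lower numeric value means higher priority, as in the kernel. */
static int
recompute_lent_priority(const int *first_waiter_pri, int n, int base_pri)
{
        int i, pri;

        pri = PRI_MAX;                  /* no waiters: nothing lent */
        for (i = 0; i < n; i++)
                if (first_waiter_pri[i] < pri)
                        pri = first_waiter_pri[i];
        return (pri < base_pri ? pri : base_pri);
}
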
901a924,925
> thread_lock(td);
> MPASS(td->td_lock == &ts->ts_lock);
903,905c927,931
< if (TD_ON_LOCK(td)) {
< td->td_blocked = NULL;
< td->td_lockname = NULL;
---
> MPASS(TD_ON_LOCK(td));
> TD_CLR_LOCK(td);
> MPASS(TD_CAN_RUN(td));
> td->td_blocked = NULL;
> td->td_lockname = NULL;
907c933
< td->td_tsqueue = 0xff;
---
> td->td_tsqueue = 0xff;
909,915c935,936
< TD_CLR_LOCK(td);
< MPASS(TD_CAN_RUN(td));
< sched_add(td, SRQ_BORING);
< } else {
< td->td_flags |= TDF_TSNOBLOCK;
< MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
< }
---
> sched_add(td, SRQ_BORING);
> thread_unlock(td);
917,918c938
< critical_exit();
< mtx_unlock_spin(&sched_lock);
---
> mtx_unlock_spin(&ts->ts_lock);
928d947
< struct turnstile_chain *tc;
932a952
> mtx_assert(&ts->ts_lock, MA_OWNED);
934,935d953
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_assert(&tc->tc_lock, MA_OWNED);
950d967
< mtx_unlock_spin(&tc->tc_lock);
959c976,977
< mtx_lock_spin(&sched_lock);
---
> thread_lock(td);
> mtx_unlock_spin(&ts->ts_lock);
968c986
< mtx_unlock_spin(&sched_lock);
---
> thread_unlock(td);
978d995
< struct turnstile_chain *tc;
982,983c999
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_assert(&tc->tc_lock, MA_OWNED);
---
> mtx_assert(&ts->ts_lock, MA_OWNED);
995d1010
< struct turnstile_chain *tc;
999,1000c1014
< tc = TC_LOOKUP(ts->ts_lockobj);
< mtx_assert(&tc->tc_lock, MA_OWNED);
---
> mtx_assert(&ts->ts_lock, MA_OWNED);