/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold the queues of threads blocked
 * on non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then it gives its turnstile to the lock's turnstile's free
 * list.  When a thread is woken up, it takes a turnstile from the free list
 * if there are any other waiters.  If it is the only thread blocked on the
 * lock, then it reclaims the turnstile associated with the lock and removes
 * it from the hash table.
 */
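
/*
 * Illustrative scenario (editorial sketch, not part of the original
 * source): suppose threads A and B block on mutex M in that order.  A is
 * first, so A's turnstile becomes M's turnstile and is hashed onto a
 * chain; B then donates its own turnstile to that turnstile's free list.
 * If the owner later wakes A while B still waits, A leaves with a
 * turnstile taken from the free list (B's donation).  When B is finally
 * woken as the last waiter, it reclaims M's turnstile and removes it from
 * the hash table.  The net effect is that every thread holds exactly one
 * spare turnstile whenever it is not blocked, so blocking on a contested
 * lock never requires an allocation.
 */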

#include "opt_turnstile_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_turnstile.c 154482 2006-01-17 16:47:42Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
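
/*
 * Worked example (editorial sketch, not part of the original source):
 * with TC_SHIFT == 8 and TC_MASK == 0x7f, a lock at address 0xc41b3940
 * hashes to ((0xc41b3940 >> 8) & 0x7f) == (0xc41b39 & 0x7f) == 0x39,
 * i.e. chain 57.  All locks within the same 256-byte block therefore
 * share a chain (and that chain's spin mutex), which keeps the table
 * small at the cost of occasional false sharing between unrelated locks.
 */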

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains two lists of threads.  The ts_blocked list is
 * a linked list of threads blocked on the turnstile's lock.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_wait() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 * c - turnstile chain lock
 * q - td_contested lock
 */
struct turnstile {
	TAILQ_HEAD(, thread) ts_blocked;	/* (c + q) Blocked threads. */
	TAILQ_HEAD(, thread) ts_pending;	/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};

struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;		/* Length of tc_turnstiles. */
	u_int	tc_max_depth;		/* Max length of tc_turnstiles. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

static MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This really isn't quite right.  We really
			 * ought to bump the priority of the thread that
			 * next acquires the lock.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a slock of a sx lock.  In that case
		 * it is possible for us to be at SSLEEP or some other
		 * weird state.  We should probably just return if the state
		 * isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (tid %d) owns a non-sleepable lock",
		    td->td_tid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If lock holder is actually running or on the run queue
		 * then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/* Resort td on the list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}
		mtx_unlock_spin(&tc->tc_lock);
	}
}
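
/*
 * Example of the loop above (editorial sketch, not part of the original
 * source): let thread A (priority 4; a lower value is more important)
 * block on mutex M1 owned by B (priority 20), while B is itself blocked
 * on M2 owned by C (priority 30).  The first iteration lends priority 4
 * to B; since B is blocked rather than runnable, the loop follows
 * B->td_blocked to M2's turnstile, re-sorts B on M2's ts_blocked queue,
 * and then lends priority 4 to C as well.  The walk stops at the first
 * owner that is running, already at least as important, or missing
 * (ts_owner == NULL).
 */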

/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct turnstile_chain *tc;
	struct thread *td1, *td2;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on sched_lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
			    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
			    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}
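
/*
 * Re-sorting example (editorial sketch, not part of the original source):
 * if ts_blocked holds threads with priorities 8, 12, 16 and the middle
 * thread is lent priority 4, the code above removes it and scans for the
 * first entry with a larger priority value, reinserting it at the head
 * to yield 4, 8, 16.  Keeping ts_blocked sorted this way is what lets
 * turnstile_signal() and propagate_priority() treat TAILQ_FIRST() as the
 * highest priority waiter.
 */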

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_lock_spin(&tc->tc_lock);

	/* Resort the turnstile on the list. */
	if (!turnstile_adjust_thread(ts, td)) {
		mtx_unlock_spin(&tc->tc_lock);
		return;
	}

	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	if (td == TAILQ_FIRST(&ts->ts_blocked) && td->td_priority < oldpri) {
		mtx_unlock_spin(&tc->tc_lock);
		propagate_priority(td);
	} else
		mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == NULL);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
 */
struct turnstile *
turnstile_alloc(void)
{
	struct turnstile *ts;

	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&ts->ts_blocked);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	return (ts);
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	MPASS(ts != NULL);
	MPASS(TAILQ_EMPTY(&ts->ts_blocked));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	free(ts, M_TURNSTILE);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table.  The caller must
 * already hold the associated turnstile chain lock.  If no turnstile
 * is found in the hash table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			return (ts);
	return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_release(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *owner;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	ts = turnstile_lookup(lock);
	MPASS(ts != NULL);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	mtx_unlock_spin(&sched_lock);
}
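
/*
 * Usage sketch (editorial, not part of the original source): a lock
 * implementation's contested-acquire path drives the interfaces above
 * roughly as follows, where 'lk' is a hypothetical lock.  Details such
 * as re-testing the lock state are the consumer's responsibility; see
 * kern_mutex.c for a real example:
 *
 *	turnstile_lock(&lk->lock_object);
 *	if (the lock was released in the meantime) {
 *		turnstile_release(&lk->lock_object);
 *		retry the fast path;
 *	}
 *	owner = thread currently holding the lock;
 *	turnstile_wait(&lk->lock_object, owner);  (drops the chain lock)
 */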

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct lock_object *lock, struct thread *owner)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	MPASS(owner != NULL);
	MPASS(owner->td_proc->p_magic == P_MAGIC);

	/* Look up the turnstile associated with the lock 'lock'. */
	ts = turnstile_lookup(lock);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked),
		    ("thread's turnstile has a non-empty queue"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
		MPASS(owner == ts->ts_owner);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle race condition where a thread on another CPU that owns
	 * lock 'lock' could have woken us in between us dropping the
	 * turnstile chain lock and acquiring the sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	mi_switch(SW_VOL, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}
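
/*
 * Timeline of the TDF_TSNOBLOCK race handled above (editorial sketch,
 * not part of the original source):
 *
 *	CPU 0 (waiter)                  CPU 1 (owner, releasing)
 *	enqueue self on ts_blocked
 *	drop the chain lock
 *	                                turnstile_signal()/broadcast()
 *	                                turnstile_unpend(): the waiter is
 *	                                not yet TD_ON_LOCK, so it sets
 *	                                TDF_TSNOBLOCK instead of waking it
 *	acquire sched_lock
 *	see TDF_TSNOBLOCK, clear it,
 *	return and retry the lock
 *
 * Without the flag, the waiter would block with no owner left to wake it.
 */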

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked);
	if (empty) {
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked, td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}
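
/*
 * Turnstile accounting in the two wakeup paths above (editorial note,
 * not part of the original source): both paths preserve the invariant
 * that every woken thread leaves with exactly one turnstile.
 * turnstile_signal() hands the woken thread either a spare from ts_free
 * or, if it was the last waiter, the lock's own turnstile (removing it
 * from the chain); turnstile_broadcast() drains ts_free across all the
 * waiters and hands the lock's turnstile itself to the final one.
 */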

/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts)
{
	TAILQ_HEAD(, thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	u_char cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);
	critical_enter();
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = TAILQ_FIRST(&ts->ts_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			td->td_blocked = NULL;
			td->td_lockname = NULL;
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td, SRQ_BORING);
		} else {
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	critical_exit();
	mtx_unlock_spin(&sched_lock);
}

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked));
}
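
/*
 * Unlock-side usage sketch (editorial, not part of the original source):
 * a lock implementation releasing a contested lock, again for a
 * hypothetical lock 'lk', typically does
 *
 *	turnstile_lock(&lk->lock_object);
 *	ts = turnstile_lookup(&lk->lock_object);
 *	turnstile_signal(ts);             (or turnstile_broadcast(ts))
 *	turnstile_unpend(ts);             (drops the chain lock)
 *
 * with turnstile_release() used instead on the path where no turnstile
 * is found.  See kern_mutex.c for a real consumer.
 */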