/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold queues of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then it gives its turnstile to the lock's turnstile's free
 * list.  When a thread is woken up, it takes a turnstile from the free list
 * if there are any other waiters.  If it is the only thread blocked on the
 * lock, then it reclaims the turnstile associated with the lock and removes
 * it from the hash table.
 */

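/*
 * Example of the donation scheme above (illustrative, not a separate
 * mechanism): if threads T1, T2, and T3 block on the same mutex in that
 * order, T1's turnstile is lent to the lock and T2's and T3's turnstiles
 * go on its free list.  Each woken thread leaves with some turnstile from
 * the free list; the last waiter to be woken reclaims the turnstile
 * attached to the lock and removes it from the hash table.
 */
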
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_turnstile.c 157275 2006-03-29 23:24:55Z jhb $");

#include "opt_ddb.h"
#include "opt_turnstile_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]

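/*
 * Worked example of the hash (hypothetical address): a lock object at
 * 0xc0de1234 hashes to ((0xc0de1234 >> 8) & 127) == 18, so its turnstile
 * is kept on turnstile_chains[18].  Locks within the same 256-byte
 * aligned region always map to the same chain and thus contend on the
 * same chain spin lock.
 */
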
/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked lists
 * are linked lists of threads blocked on the turnstile's lock.  One list is
 * for exclusive waiters, and the other is for shared waiters.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_wait() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
	struct threadqueue ts_pending;		/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};

struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;			/* Length of tc_queues. */
	u_int	tc_max_depth;			/* Max length of tc_queues. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

static MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This might be a read lock with no owner.  There's
			 * not much we can do, so just bail.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * If the thread is asleep, then we are probably about
		 * to deadlock.  To make debugging this easier, just
		 * panic and tell the user which thread misbehaved so
		 * they can hopefully get a stack trace from the truly
		 * misbehaving thread.
		 */
		if (TD_IS_SLEEPING(td)) {
			printf(
		"Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
			    td->td_tid, td->td_proc->p_pid);
#ifdef DDB
			db_trace_thread(td, -1);
#endif
			panic("sleeping thread");
		}

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If lock holder is actually running or on the run queue
		 * then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/* Resort td on the list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}
		mtx_unlock_spin(&tc->tc_lock);
	}
}

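/*
 * Illustrative walk of propagate_priority() (priority values are
 * hypothetical; smaller values are more important): thread A (pri 80)
 * blocks on a mutex owned by B (pri 120), and B is already blocked on a
 * mutex owned by C (pri 160).  The loop above visits B and then C,
 * lending each priority 80, so the entire chain runs well enough to
 * eventually release the locks A is waiting behind.
 */
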
/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct turnstile_chain *tc;
	struct thread *td1, *td2;
	int queue;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on sched_lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		queue = td->td_tsqueue;
		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
		    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
		    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_lock_spin(&tc->tc_lock);

	/* Resort the turnstile on the list. */
	if (!turnstile_adjust_thread(ts, td)) {
		mtx_unlock_spin(&tc->tc_lock);
		return;
	}

	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
	    td->td_tsqueue == TS_SHARED_QUEUE);
	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
	    td->td_priority < oldpri) {
		mtx_unlock_spin(&tc->tc_lock);
		propagate_priority(td);
	} else
		mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(ts->ts_owner == NULL);

	/* A shared lock might not have an owner. */
	if (owner == NULL)
		return;

	MPASS(owner->td_proc->p_magic == P_MAGIC);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
 */
struct turnstile *
turnstile_alloc(void)
{
	struct turnstile *ts;

	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	return (ts);
}

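/*
 * Expected pairing of the allocator with turnstile_free() below (the
 * callers are assumed to be the thread creation and destruction paths,
 * which live outside this file):
 *
 *	td->td_turnstile = turnstile_alloc();	(at thread creation)
 *	...
 *	turnstile_free(td->td_turnstile);	(at thread destruction)
 */
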
/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	MPASS(ts != NULL);
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	free(ts, M_TURNSTILE);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table.  The associated
 * turnstile chain must already be locked by the caller.  If no turnstile
 * is found in the hash table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			return (ts);
	return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_release(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Return a pointer to the thread waiting on this turnstile with the
 * most important (numerically lowest) priority or NULL if the turnstile
 * has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
	struct thread *std, *xtd;

	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
		return (std);
	return (xtd);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *owner;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	ts = turnstile_lookup(lock);
	MPASS(ts != NULL);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = turnstile_first_waiter(ts);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct lock_object *lock, struct thread *owner, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	if (queue == TS_SHARED_QUEUE)
		MPASS(owner != NULL);
	if (owner)
		MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/* Look up the turnstile associated with the lock 'lock'. */
	ts = turnstile_lookup(lock);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
		    ("thread's turnstile has exclusive waiters"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
		    ("thread's turnstile has shared waiters"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		MPASS(owner == ts->ts_owner);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle race condition where a thread on another CPU that owns
	 * lock 'lock' could have woken us in between us dropping the
	 * turnstile chain lock and acquiring the sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_tsqueue = queue;
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	mi_switch(SW_VOL, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}

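/*
 * Sketch of the blocking-side protocol implied above (hypothetical
 * caller; 'lo' stands for the contested lock's lock_object and the
 * retry logic of a real lock implementation is elided):
 *
 *	turnstile_lock(lo);
 *	if (the lock was released while acquiring the chain lock) {
 *		turnstile_release(lo);	(unlock the chain and retry)
 *	} else {
 *		turnstile_wait(lo, owner, queue);  (returns with the
 *		    chain unlocked, after we have been woken up)
 *	}
 */
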
/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread ||
	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
	if (empty) {
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}

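/*
 * Sketch of the wakeup-side protocol (hypothetical caller; a real lock
 * implementation is assumed to pair turnstile_signal() with
 * turnstile_unpend() below, roughly as):
 *
 *	turnstile_lock(lo);
 *	ts = turnstile_lookup(lo);
 *	if (ts != NULL) {			(someone is blocked)
 *		empty = turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
 *		(if 'empty', the caller can clear its contested flag)
 *		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);  (unlocks chain)
 *	} else
 *		turnstile_release(lo);
 */
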
/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread ||
	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile if the turnstile is empty.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}

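/*
 * Worked example of the loop above (illustrative): with three pending
 * threads and two turnstiles on the free list, the first two threads
 * each take a turnstile from the free list and the third, last thread
 * takes this turnstile itself, which is exactly when the free list has
 * run dry and the turnstile leaves the hash chain.
 */
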
/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts, int owner_type)
{
	TAILQ_HEAD(, thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	u_char cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread ||
	    (owner_type == TS_SHARED_LOCK && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.  There might not be a current owner if this is a shared
	 * lock.
	 */
	if (ts->ts_owner != NULL) {
		mtx_lock_spin(&td_contested_lock);
		ts->ts_owner = NULL;
		LIST_REMOVE(ts, ts_link);
		mtx_unlock_spin(&td_contested_lock);
	}
	critical_enter();
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(ts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			td->td_blocked = NULL;
			td->td_lockname = NULL;
#ifdef INVARIANTS
			td->td_tsqueue = 0xff;
#endif
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td, SRQ_BORING);
		} else {
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	critical_exit();
	mtx_unlock_spin(&sched_lock);
}

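/*
 * Worked example of the unlend pass above (hypothetical priorities;
 * smaller is more important): suppose curthread's base priority is 120
 * and it still owns two other contested locks whose best waiters have
 * priorities 90 and 110.  The loop computes pri = 90 and
 * sched_unlend_prio() keeps the lent priority 90; if the remaining
 * waiters were instead at 140 and 160, curthread would drop back to its
 * base priority of 120.
 */
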
/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}

#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
	    td->td_proc->p_pid, td->td_proc->p_comm);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
	struct thread *td;

	db_printf("%s:\n", header);
	if (TAILQ_EMPTY(queue)) {
		db_printf("%sempty\n", prefix);
		return;
	}
	TAILQ_FOREACH(td, queue, td_lockq) {
		print_thread(td, prefix);
	}
}

DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct lock_object *lock;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active turnstile for the lock indicated
	 * by the address.
	 */
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			goto found;

	/*
	 * Second, see if there is an active turnstile at the address
	 * indicated.
	 */
	for (i = 0; i < TC_TABLESIZE; i++)
		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
			if (ts == (struct turnstile *)addr)
				goto found;
		}

	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
	return;
found:
	lock = ts->ts_lockobj;
	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
	    lock->lo_name);
	if (ts->ts_owner)
		print_thread(ts->ts_owner, "Lock Owner: ");
	else
		db_printf("Lock Owner: none\n");
	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
	    "\t");
	print_queue(&ts->ts_pending, "Pending Threads", "\t");
}
#endif