subr_sleepqueue.c revision 182875
/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
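
/*
 * Illustrative sketch (not compiled here): a typical consumer such as the
 * sleep/wakeup code drives these primitives roughly as follows, where
 * 'wchan' is the wait channel, 'm' is a hypothetical mutex interlock that
 * the caller drops before blocking, and SLEEPQ_SLEEP is the queue type
 * flag assumed to come from <sys/sleepqueue.h>:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &m->lock_object, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, 0);
 *
 * The waking side locks the same chain and resumes one waiter; the
 * return value is explained with sleepq_signal() below:
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 */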

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 182875 2008-09-08 19:44:57Z jhb $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define	NR_SLEEPQS	2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
#ifdef INVARIANTS
	int	sq_type;			/* (c) Queue type. */
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};
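
/*
 * Worked example of SC_HASH() above (illustrative only): with SC_SHIFT = 8
 * and SC_MASK = 0x7f, a wait channel pointer of 0x12345678 hashes to
 * ((0x12345678 >> 8) & 0x7f) = 0x56, placing it on sleepq_chains[86].
 */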

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table, locking the associated sleep queue chain.  If no queue is found in
 * the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
	    ("Trying sleep, but thread marked as sleeping prohibited"));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++)
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
		sq->sq_type = flags & SLEEPQ_TYPE;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}
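
/*
 * Illustrative sketch (not compiled here): combining the routines above,
 * an interruptible sleep with a timeout, roughly the shape of msleep()
 * with PCATCH and a non-zero timo, looks like:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &m->lock_object, "example",
 *	    SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);
 *	sleepq_set_timeout(wchan, timo);
 *	error = sleepq_timedwait_sig(wchan, 0);
 *
 * Here error comes back as 0, EINTR, ERESTART, or EWOULDBLOCK, and 'm' is
 * again a hypothetical interlock dropped by the caller before blocking.
 */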

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	/*
	 * See if there are any pending signals for this thread.  If not,
	 * we can switch immediately.  Otherwise, do the signal processing
	 * directly.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}
	/*
	 * Lock the per-process spinlock prior to dropping the PROC_LOCK
	 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
	 * thread_lock() are currently held in tdsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri)
		sched_prio(td, pri);
	return (setrunnable(td));
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++)
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++)
		TAILQ_INIT(&sq->sq_blocked[i]);
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *tdn;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (sleepq_resume_thread(sq, td, pri))
			wakeup_swapper = 1;
		thread_unlock(td);
	}
	return (wakeup_swapper);
}
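
/*
 * Illustrative sketch (not compiled here): sleepq_signal() and
 * sleepq_broadcast() return non-zero when proc0 (the swapper) must be
 * nudged to swap a resumed thread back in.  As with sleepq_remove() and
 * sleepq_timeout() below, callers defer the kick until the locks are
 * dropped:
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */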

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	thread_lock(td);
	if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
		thread_unlock(td);
		if (wakeup_swapper)
			kick_proc0();
		return;
	}

	/*
	 * If the thread is on the SLEEPQ but isn't sleeping yet, it
	 * can either be on another CPU in between sleepq_add() and
	 * one of the sleepq_*wait*() routines or it can be in
	 * sleepq_catch_signals().
	 */
	if (TD_ON_SLEEPQ(td)) {
		td->td_flags |= TDF_TIMEOUT;
		thread_unlock(td);
		return;
	}

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		wakeup_swapper = setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We cannot lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		(40 * 512)
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	static int multiplier = 1;
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

retry_sbufops:
	sb = sbuf_new(NULL, NULL, SLEEPQ_SBUFSIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
			if (sbuf_overflowed(sb)) {
				sbuf_delete(sb);
				multiplier++;
				goto retry_sbufops;
			}
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
#ifdef INVARIANTS
	db_printf("Queue type: %d\n", sq->sq_type);
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SET(sleepqueue, db_show_sleepqueue, db_show_cmd_set, 0, NULL);
#endif