/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
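
/*
 * Illustrative sketch of typical use, drawn from the sleepqueue(9) API
 * implemented below; "wchan", "lock", and the wait message are
 * caller-supplied placeholders, not names from this file:
 *
 *	Blocking side:
 *		sleepq_lock(wchan);
 *		sleepq_add(wchan, lock, "wmesg", SLEEPQ_SLEEP, 0);
 *		sleepq_wait(wchan, 0);
 *
 *	Wakeup side:
 *		sleepq_lock(wchan);
 *		wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
 *		sleepq_release(wchan);
 *		if (wakeup_swapper)
 *			kick_proc0();
 *
 * Note that the blocking side does not call sleepq_release(); the chain
 * lock is consumed by the sleepq_wait*() routines.
 */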

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/subr_sleepqueue.c 350357 2019-07-26 10:43:07Z kib $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	256			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define	NR_SLEEPQS	2
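
/*
 * Worked example of SC_HASH (illustrative; 0x1234 is a hypothetical wait
 * channel address):
 *
 *	(0x1234 >> SC_SHIFT) ^ 0x1234 = 0x12 ^ 0x1234 = 0x1226
 *	0x1226 & SC_MASK = 0x26
 *
 * so that wait channel maps to sleepq_chains[0x26].  XORing the shifted
 * address back into itself mixes higher-order address bits into the
 * low-order bits used to index the table.
 */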

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}
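
/*
 * Illustrative lookup pattern: since sleepq_lookup() requires the chain
 * lock, a read-only query typically brackets it with sleepq_lock() and
 * sleepq_release(), as sleepq_type() below does:
 *
 *	sleepq_lock(wchan);
 *	sq = sleepq_lookup(wchan);
 *	...
 *	sleepq_release(wchan);
 */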

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}
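
/*
 * The queue argument above selects one of the NR_SLEEPQS sub-queues of a
 * single wait channel so that different classes of waiters can be awakened
 * independently; sx(9), for example, keeps exclusive and shared waiters on
 * separate sub-queues of the same wait channel.
 */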

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue once the timeout expires, if the thread has not already
 * been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}
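
/*
 * Illustrative timed-sleep sequence (placeholders as in the sketch at the
 * top of the file; SBT_1S and C_HARDCLOCK stand in for a caller-chosen
 * timeout and callout flags):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "wmesg", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(wchan, SBT_1S, 0, C_HARDCLOCK);
 *	error = sleepq_timedwait(wchan, 0);	(EWOULDBLOCK on timeout)
 */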

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		    (void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first.  Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig == -1) {
				mtx_unlock(&ps->ps_mtx);
				KASSERT((td->td_flags & TDF_SBDRY) != 0,
				    ("lost TDF_SBDRY"));
				KASSERT(TD_SBDRY_INTR(td),
				    ("lost TDF_SERESTART or TDF_SEINTR"));
				KASSERT((td->td_flags &
				    (TDF_SEINTR | TDF_SERESTART)) !=
				    (TDF_SEINTR | TDF_SERESTART),
				    ("both TDF_SEINTR and TDF_SERESTART"));
				ret = TD_SBDRY_ERRNO(td);
			} else if (sig != 0) {
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
				mtx_unlock(&ps->ps_mtx);
			} else {
				mtx_unlock(&ps->ps_mtx);
			}

			/*
			 * Do not go into sleep if this thread was the
			 * ptrace(2) attach leader.  cursig() consumed
			 * SIGSTOP from PT_ATTACH, but we usually act
			 * on the signal by interrupting sleep, and
			 * should do that here as well.
			 */
			if ((td->td_dbgflags & TDB_FSTP) != 0) {
				if (ret == 0)
					ret = EINTR;
				td->td_dbgflags &= ~TDB_FSTP;
			}
		}
		/*
		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
		 * thread_lock() are currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.  But recheck
	 * td_sleeptimo anyway.
	 */
	res = 0;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	if (td->td_flags & TDF_TIMEOUT)
		td->td_flags &= ~TDF_TIMEOUT;
	else
		/*
		 * We ignore the situation where timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or thread reused.
		 */
		callout_stop(&td->td_slpcallout);
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}
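
/*
 * Summary of the wait variants above and below: sleepq_wait() simply
 * returns once awakened; sleepq_timedwait() additionally returns
 * EWOULDBLOCK on timeout; the *_sig() variants additionally return EINTR
 * or ERESTART when interrupted by a signal, and in sleepq_timedwait_sig()
 * the signal result takes precedence over the timeout result.
 */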

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of sleepqueue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}

static bool
match_any(struct thread *td __unused)
{

	return (true);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}

/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
		thread_unlock(td);
	}

	return (wakeup_swapper);
}
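
/*
 * Example predicate for the matching functions (illustrative sketch;
 * "some_proc" is a hypothetical placeholder): resume only the threads
 * belonging to a given process, e.g. via
 * sleepq_chains_remove_matching(match_proc) below.
 *
 *	static bool
 *	match_proc(struct thread *td)
 *	{
 *
 *		return (td->td_proc == some_proc);
 *	}
 */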

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);

	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}

	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq, *sq1;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}

/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished, partial_print;

	error = 0;
	finished = false;
	partial_print = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished ; i++) {
		/*
		 * We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create();

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info. */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks. */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}
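
/*
 * Usage note (illustrative): with SLEEPQUEUE_PROFILING compiled in, the
 * handlers below are driven from userland via sysctl(8), using the node
 * names declared near the top of this file:
 *
 *	sysctl debug.sleepq.enable=1	# start counting sleeps per wmesg
 *	sysctl debug.sleepq.stats	# dump wmesg/count pairs
 *	sysctl debug.sleepq.reset=1	# clear the counters
 */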

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif