/*
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queues of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.
 * The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */

#include "opt_sleepqueue_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 136445 2004-10-12 18:36:20Z jhb $");
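
/*
 * A rough sketch of how a consumer is expected to drive this API.  The
 * shape below is illustrative only (loosely modeled on the condition
 * variable code; see the real callers for the details); "wchan" is the
 * wait channel and "lock" is the lock the caller uses to synchronize
 * with it:
 *
 *	sleepq_lock(wchan);		// lock the chain
 *	mtx_unlock(lock);		// chain lock closes the wakeup race
 *	sleepq_add(wchan, lock, "wmesg", SLEEPQ_CONDVAR);
 *	sleepq_wait(wchan);		// block until awakened
 *
 * A timed and/or interruptible sleep inserts sleepq_set_timeout() and/or
 * sleepq_catch_signals() before blocking and uses the sleepq_timedwait()
 * and sleepq_wait_sig() variants in place of sleepq_wait().
 */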

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define SC_TABLESIZE    128                     /* Must be power of 2. */
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_SHIFT        8
#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]
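
/*
 * For example (hypothetical address, just to illustrate the arithmetic):
 * a wait channel at 0xc2f5a731 hashes to chain
 * ((0xc2f5a731 >> 8) & 127) == 0x27, i.e. bucket 39 of the 128.
 */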

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
        TAILQ_HEAD(, thread) sq_blocked;        /* (c) Blocked threads. */
        LIST_ENTRY(sleepqueue) sq_hash;         /* (c) Chain and free list. */
        LIST_HEAD(, sleepqueue) sq_free;        /* (c) Free queues. */
        void    *sq_wchan;                      /* (c) Wait channel. */
#ifdef INVARIANTS
        int     sq_type;                        /* (c) Queue type. */
        struct mtx *sq_lock;                    /* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
        LIST_HEAD(, sleepqueue) sc_queues;      /* List of sleep queues. */
        struct mtx sc_lock;                     /* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
        u_int   sc_depth;                       /* Length of sc_queues. */
        u_int   sc_max_depth;                   /* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int sleepq_check_timeout(void);
static void sleepq_switch(void *wchan);
static void sleepq_timeout(void *arg);
static void sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void sleepq_resume_thread(struct thread *td, int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
        struct sysctl_oid *chain_oid;
        char chain_name[10];
#endif
        int i;

        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_INIT(&sleepq_chains[i].sc_queues);
                mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
                    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
                    NULL);
#endif
        }
        thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
        struct sleepqueue *sq;

        sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
        TAILQ_INIT(&sq->sq_blocked);
        LIST_INIT(&sq->sq_free);
        return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

        MPASS(sq != NULL);
        MPASS(TAILQ_EMPTY(&sq->sq_blocked));
        free(sq, M_SLEEPQUEUE);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The caller must hold the associated sleep queue chain lock.  If
 * no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
        struct sleepqueue_chain *sc;

        sc = SC_LOOKUP(wchan);
        mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td, *td1;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(td->td_sleepqueue != NULL);
        MPASS(wchan != NULL);

        /* Look up the sleep queue associated with the wait channel 'wchan'. */
        sq = sleepq_lookup(wchan);

        /*
         * If the wait channel does not already have a sleep queue, use
         * this thread's sleep queue.  Otherwise, insert the current thread
         * into the sleep queue already in use by this wait channel.
         */
        if (sq == NULL) {
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth++;
                if (sc->sc_depth > sc->sc_max_depth) {
                        sc->sc_max_depth = sc->sc_depth;
                        if (sc->sc_max_depth > sleepq_max_depth)
                                sleepq_max_depth = sc->sc_max_depth;
                }
#endif
                sq = td->td_sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
                    ("thread's sleep queue has a non-empty queue"));
                KASSERT(LIST_EMPTY(&sq->sq_free),
                    ("thread's sleep queue has a non-empty free list"));
                KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
                sq->sq_wchan = wchan;
#ifdef INVARIANTS
                sq->sq_lock = lock;
                sq->sq_type = flags & SLEEPQ_TYPE;
#endif
                TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
        } else {
                MPASS(wchan == sq->sq_wchan);
                MPASS(lock == sq->sq_lock);
                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
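                /*
                 * Keep sq_blocked sorted by priority (lowest td_priority,
                 * i.e. most important, at the head) so that sleepq_signal()
                 * can simply wake the thread at the front of the queue.
                 */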
                TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
                        if (td1->td_priority > td->td_priority)
                                break;
                if (td1 != NULL)
                        TAILQ_INSERT_BEFORE(td1, td, td_slpq);
                else
                        TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        td->td_sleepqueue = NULL;
        mtx_lock_spin(&sched_lock);
        td->td_wchan = wchan;
        td->td_wmesg = wmesg;
        if (flags & SLEEPQ_INTERRUPTIBLE)
                td->td_flags |= TDF_SINTR;
        mtx_unlock_spin(&sched_lock);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
        struct sleepqueue_chain *sc;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.
 */
int
sleepq_catch_signals(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
        struct proc *p;
        int do_upcall;
        int sig;

        do_upcall = 0;
        td = curthread;
        p = td->td_proc;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
            (void *)td, (long)p->p_pid, p->p_comm);

        /* Mark thread as being in an interruptible sleep. */
        MPASS(td->td_flags & TDF_SINTR);
        MPASS(TD_ON_SLEEPQ(td));
        sleepq_release(wchan);

        /* See if there are any pending signals for this thread. */
        PROC_LOCK(p);
        mtx_lock(&p->p_sigacts->ps_mtx);
        sig = cursig(td);
        mtx_unlock(&p->p_sigacts->ps_mtx);
        if (sig == 0 && thread_suspend_check(1))
                sig = SIGSTOP;
        else
                do_upcall = thread_upcall_check(td);
        PROC_UNLOCK(p);

        /*
         * If there were pending signals and this thread is still on
         * the sleep queue, remove it from the sleep queue.  If the
         * thread was removed from the sleep queue while we were blocked
         * above, then clear TDF_SINTR before returning.
         */
        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        mtx_lock_spin(&sched_lock);
        if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
                mtx_unlock_spin(&sched_lock);
                sleepq_remove_thread(sq, td);
        } else {
                if (!TD_ON_SLEEPQ(td) && sig == 0)
                        td->td_flags &= ~TDF_SINTR;
                mtx_unlock_spin(&sched_lock);
        }
        return (sig);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct thread *td;

        td = curthread;
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);

        /*
         * If we have a sleep queue, then we've already been woken up, so
         * just return.
         */
        if (td->td_sleepqueue != NULL) {
                MPASS(!TD_ON_SLEEPQ(td));
                mtx_unlock_spin(&sc->sc_lock);
                mtx_lock_spin(&sched_lock);
                return;
        }

        /*
         * Otherwise, actually go to sleep.
         */
        mtx_lock_spin(&sched_lock);
        mtx_unlock_spin(&sc->sc_lock);

        sched_sleep(td);
        TD_SET_SLEEPING(td);
        mi_switch(SW_VOL, NULL);
        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        td = curthread;

        /*
         * If TDF_TIMEOUT is set, we timed out.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                return (EWOULDBLOCK);
        }

        /*
         * If TDF_TIMOFAIL is set, the timeout ran after we had
         * already been woken up.
         */
        if (td->td_flags & TDF_TIMOFAIL)
                td->td_flags &= ~TDF_TIMOFAIL;

        /*
         * If callout_stop() fails, then the timeout is running on
         * another CPU, so synchronize with it to avoid having it
         * accidentally wake up a subsequent sleep.
         */
        else if (callout_stop(&td->td_slpcallout) == 0) {
                td->td_flags |= TDF_TIMEOUT;
                TD_SET_SLEEPING(td);
                mi_switch(SW_INVOL, NULL);
        }
        return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        td = curthread;

        /*
         * If TDF_SINTR is clear, then we were awakened while executing
         * sleepq_catch_signals().
         */
        if (!(td->td_flags & TDF_SINTR))
                return (0);

        /* We are no longer in an interruptible sleep. */
        td->td_flags &= ~TDF_SINTR;

        if (td->td_flags & TDF_INTERRUPT)
                return (td->td_intrval);
        return (0);
}

/*
 * If we were in an interruptible sleep and we weren't interrupted and
 * didn't time out, check to see if there are any pending signals and
 * which return value we should use if so.  The return value from an
 * earlier call to sleepq_catch_signals() should be passed in as the
 * argument.
 */
int
sleepq_calc_signal_retval(int sig)
{
        struct thread *td;
        struct proc *p;
        int rval;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK(p);
        mtx_lock(&p->p_sigacts->ps_mtx);
        /* XXX: Should we always be calling cursig()? */
        if (sig == 0)
                sig = cursig(td);
        if (sig != 0) {
                if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                        rval = EINTR;
                else
                        rval = ERESTART;
        } else
                rval = 0;
        mtx_unlock(&p->p_sigacts->ps_mtx);
        PROC_UNLOCK(p);
        return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

        MPASS(!(curthread->td_flags & TDF_SINTR));
        sleepq_switch(wchan);
        mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
        int rval;

        sleepq_switch(wchan);
        rval = sleepq_check_signals();
        mtx_unlock_spin(&sched_lock);
        return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
        int rval;

        MPASS(!(curthread->td_flags & TDF_SINTR));
        sleepq_switch(wchan);
        rval = sleepq_check_timeout();
        mtx_unlock_spin(&sched_lock);
        return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int signal_caught)
{
        int rvalt, rvals;

        sleepq_switch(wchan);
        rvalt = sleepq_check_timeout();
        rvals = sleepq_check_signals();
        mtx_unlock_spin(&sched_lock);
        if (signal_caught || rvalt == 0)
                return (rvals);
        else
                return (rvalt);
}

/*
 * Removes a thread from a sleep queue.
 */
static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
        struct sleepqueue_chain *sc;

        MPASS(td != NULL);
        MPASS(sq->sq_wchan != NULL);
        MPASS(td->td_wchan == sq->sq_wchan);
        sc = SC_LOOKUP(sq->sq_wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);

        /* Remove the thread from the queue. */
        TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

        /*
         * Get a sleep queue for this thread.  If this is the last waiter,
         * use the queue itself and take it out of the chain, otherwise,
         * remove a queue from the free list.
         */
        if (LIST_EMPTY(&sq->sq_free)) {
                td->td_sleepqueue = sq;
#ifdef INVARIANTS
                sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
                sc->sc_depth--;
#endif
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);

        mtx_lock_spin(&sched_lock);
        td->td_wmesg = NULL;
        td->td_wchan = NULL;
        mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a thread that was asleep on a queue.
 */
static void
sleepq_resume_thread(struct thread *td, int pri)
{

        /*
         * Note that thread td might not be sleeping if it is running
         * sleepq_catch_signals() on another CPU or is blocked on
         * its proc lock to check signals.  It doesn't hurt to clear
         * the sleeping flag if it isn't set though, so we just always
         * do it.  However, we can't assert that it is set.
         */
        mtx_lock_spin(&sched_lock);
        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
        TD_CLR_SLEEPING(td);

        /* Adjust priority if requested (a pri of -1 leaves it alone). */
        MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
        if (pri != -1 && td->td_priority > pri)
                sched_prio(td, pri);
        setrunnable(td);
        mtx_unlock_spin(&sched_lock);
}

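/*
 * A rough sketch of the wakeup side of the API, again illustrative only
 * (loosely modeled on the condition variable code): the caller locks the
 * chain itself, and sleepq_signal()/sleepq_broadcast() drop that lock via
 * sleepq_release() before resuming any threads:
 *
 *	sleepq_lock(wchan);
 *	sleepq_broadcast(wchan, SLEEPQ_CONDVAR, -1);	// -1: no pri boost
 */
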
/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
        struct sleepqueue *sq;
        struct thread *td;

        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sq = sleepq_lookup(wchan);
        if (sq == NULL) {
                sleepq_release(wchan);
                return;
        }
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Remove first thread from queue and awaken it. */
        td = TAILQ_FIRST(&sq->sq_blocked);
        sleepq_remove_thread(sq, td);
        sleepq_release(wchan);
        sleepq_resume_thread(td, pri);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
        TAILQ_HEAD(, thread) list;
        struct sleepqueue *sq;
        struct thread *td;

        CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sq = sleepq_lookup(wchan);
        if (sq == NULL) {
                sleepq_release(wchan);
                return;
        }
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));

        /* Move blocked threads from the sleep queue to a temporary list. */
        TAILQ_INIT(&list);
        while (!TAILQ_EMPTY(&sq->sq_blocked)) {
                td = TAILQ_FIRST(&sq->sq_blocked);
                sleepq_remove_thread(sq, td);
                TAILQ_INSERT_TAIL(&list, td, td_slpq);
        }
        sleepq_release(wchan);

        /* Resume all the threads on the temporary list. */
        while (!TAILQ_EMPTY(&list)) {
                td = TAILQ_FIRST(&list);
                TAILQ_REMOVE(&list, td, td_slpq);
                sleepq_resume_thread(td, pri);
        }
}

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
        struct sleepqueue *sq;
        struct thread *td;
        void *wchan;

        td = arg;
        CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

        /*
         * First, see if the thread is asleep and get the wait channel if
         * it is.
         */
        mtx_lock_spin(&sched_lock);
        if (TD_ON_SLEEPQ(td)) {
                wchan = td->td_wchan;
                mtx_unlock_spin(&sched_lock);
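                /*
                 * sched_lock was dropped above because the chain lock must
                 * be acquired first: sleepq_switch() takes sched_lock while
                 * already holding a chain lock, so taking them in the other
                 * order here could deadlock.  sched_lock is retaken below
                 * once the queue has been looked up.
                 */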
                sleepq_lock(wchan);
                sq = sleepq_lookup(wchan);
                mtx_lock_spin(&sched_lock);
        } else {
                wchan = NULL;
                sq = NULL;
        }

        /*
         * At this point, if the thread is still on the sleep queue,
         * we have that sleep queue locked as it cannot migrate sleep
         * queues while we dropped sched_lock.  If it had resumed and
         * was on another CPU while the lock was dropped, it would have
         * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear and the
         * call to callout_stop() to stop this routine would have failed,
         * meaning that it would have already set TDF_TIMEOUT to
         * synchronize with this function.
         */
        if (TD_ON_SLEEPQ(td)) {
                MPASS(td->td_wchan == wchan);
                MPASS(sq != NULL);
                td->td_flags |= TDF_TIMEOUT;
                mtx_unlock_spin(&sched_lock);
                sleepq_remove_thread(sq, td);
                sleepq_release(wchan);
                sleepq_resume_thread(td, -1);
                return;
        } else if (wchan != NULL)
                sleepq_release(wchan);

        /*
         * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
         * then the other thread has already yielded to us, so clear
         * the flag and resume it.  If TDF_TIMEOUT is not set, then we
         * know that the other thread is not on a sleep queue, but it
         * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
         * to let it know that the timeout has already run and doesn't
         * need to be canceled.
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_IS_SLEEPING(td));
                td->td_flags &= ~TDF_TIMEOUT;
                TD_CLR_SLEEPING(td);
                setrunnable(td);
        } else
                td->td_flags |= TDF_TIMOFAIL;
        mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
        struct sleepqueue *sq;

        /*
         * Look up the sleep queue for this wait channel, then re-check
         * that the thread is asleep on that channel; if it is not, then
         * bail.
         */
        MPASS(wchan != NULL);
        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        mtx_lock_spin(&sched_lock);
        if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
                mtx_unlock_spin(&sched_lock);
                sleepq_release(wchan);
                return;
        }
        mtx_unlock_spin(&sched_lock);
        MPASS(sq != NULL);

        /* Thread is asleep on sleep queue sq, so wake it up. */
        sleepq_remove_thread(sq, td);
        sleepq_release(wchan);
        sleepq_resume_thread(td, -1);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td)
{
        void *wchan;

        mtx_assert(&sched_lock, MA_OWNED);
        MPASS(TD_ON_SLEEPQ(td));
        MPASS(td->td_flags & TDF_SINTR);

        /*
         * If the TDF_TIMEOUT flag is set, just leave.  A
         * timeout is scheduled anyhow.
         */
        if (td->td_flags & TDF_TIMEOUT)
                return;

        CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
        wchan = td->td_wchan;
        mtx_unlock_spin(&sched_lock);
        sleepq_remove(td, wchan);
        mtx_lock_spin(&sched_lock);
}