subr_sleepqueue.c revision 126885
/*
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
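/*
 * As a rough illustration of the blocking side of the API (a minimal
 * sketch, not code from this file; "chan", "lock", "wmesg", and "flags"
 * are placeholders), a consumer sleeping on wait channel chan protected
 * by mutex lock would do roughly:
 *
 *	sleepq_lookup(chan);			  (locks the chain)
 *	sleepq_add(NULL, chan, lock, "wmesg", flags);
 *	mtx_unlock(lock);			  (drop the consumer's lock)
 *	sleepq_wait(chan);			  (block; drops the chain lock)
 *
 * The exact ordering and the flags value are illustrative assumptions;
 * see msleep() and the cv(9) code for the real consumers.
 */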
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 126885 2004-03-12 19:06:18Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
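/*
 * For example (address chosen purely for illustration), a wait channel
 * pointer of 0xc1234567 hashes to chain index
 * (0xc1234567 >> 8) & 127 == 0x45, so all wait channels sharing those
 * 7 address bits contend for the same chain lock.
 */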
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked;	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_flags;			/* (c) Flags. */
#ifdef INVARIANTS
	struct mtx *sq_lock;			/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
};

static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_wakeup_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sq->sq_blocked);
	LIST_INIT(&sq->sq_free);
	return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	MPASS(TAILQ_EMPTY(&sq->sq_blocked));
	free(sq, M_SLEEPQUEUE);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table, locking the associated sleep queue chain.  Return holding the sleep
 * queue chain lock.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
    const char *wmesg, int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td, *td1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);

	/* If the passed in sleep queue is NULL, use this thread's queue. */
	if (sq == NULL) {
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
		    ("thread's sleep queue has a non-empty queue"));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_wchan = wchan;
#ifdef INVARIANTS
		sq->sq_lock = lock;
#endif
		sq->sq_flags = flags;
		TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
			if (td1->td_priority > td->td_priority)
				break;
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_slpq);
		else
			TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	mtx_unlock_spin(&sched_lock);
}
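/*
 * The insertion loop in sleepq_add() above keeps sq_blocked sorted by
 * increasing td_priority value, i.e. from highest to lowest priority
 * since lower numeric values mean higher priority.  For example, with
 * waiters at priorities 80, 120, and 160, a new waiter at priority 100
 * is inserted before the priority 120 thread.  This is why
 * sleepq_signal() below can wake the highest priority waiter by simply
 * taking TAILQ_FIRST().
 */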
/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}
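/*
 * A timed sleep composes the pieces seen so far (again a minimal sketch
 * under the same assumptions as the earlier example; msleep() with a
 * non-zero timeout is the real consumer):
 *
 *	sleepq_lookup(chan);
 *	sleepq_add(NULL, chan, lock, "wmesg", flags);
 *	sleepq_set_timeout(chan, hz);		  (fire after ~1 second)
 *	mtx_unlock(lock);
 *	error = sleepq_timedwait(chan, 0);	  (0 or EWOULDBLOCK)
 */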
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.
 */
int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	int sig;

	td = curthread;
	p = td->td_proc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %d, %s)", td,
	    p->p_pid, p->p_comm);

	/* Mark thread as being in an interruptible sleep. */
	mtx_lock_spin(&sched_lock);
	MPASS(TD_ON_SLEEPQ(td));
	td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (sig == 0 && thread_suspend_check(1))
		sig = SIGSTOP;
	PROC_UNLOCK(p);

	/*
	 * If there were pending signals and this thread is still on
	 * the sleep queue, remove it from the sleep queue.
	 */
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td) && sig != 0) {
		mtx_unlock_spin(&sched_lock);
		sleepq_wakeup_thread(sq, td, -1);
	} else
		mtx_unlock_spin(&sched_lock);
	return (sig);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		mtx_lock_spin(&sched_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_lock_spin(&sched_lock);
	mtx_unlock_spin(&sc->sc_lock);

	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/* We are no longer in an interruptible sleep. */
	td->td_flags &= ~TDF_SINTR;

	/* If we were interrupted, return td_intrval. */
	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);
	return (0);
}

/*
 * If we were in an interruptible sleep and we weren't interrupted and
 * didn't time out, check to see if there are any pending signals and
 * which return value we should use if so.  The return value from an
 * earlier call to sleepq_catch_signals() should be passed in as the
 * argument.
 */
int
sleepq_calc_signal_retval(int sig)
{
	struct thread *td;
	struct proc *p;
	int rval;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	/* XXX: Should we always be calling cursig()? */
	if (sig == 0)
		sig = cursig(td);
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	} else
		rval = 0;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int signal_caught)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught)
		return (0);
	else
		return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int signal_caught)
{
	int rvalt, rvals;

	sleepq_switch(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught || rvalt == 0)
		return (rvals);
	else
		return (rvalt);
}
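/*
 * An interruptible (PCATCH-style) sleep combines the signal hooks above
 * (a minimal sketch that glosses over races the real callers handle;
 * see msleep() and cv_wait_sig() for the actual logic):
 *
 *	sleepq_lookup(chan);
 *	sleepq_add(NULL, chan, lock, "wmesg", flags);
 *	sig = sleepq_catch_signals(chan);
 *	mtx_unlock(lock);
 *	rval = sleepq_wait_sig(chan);
 *	if (rval == 0)
 *		rval = sleepq_calc_signal_retval(sig);
 *	(rval is now 0, EINTR, or ERESTART)
 */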
/*
 * Removes a thread from a sleep queue and resumes it.
 */
static void
sleepq_wakeup_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	/*
	 * Finish resuming the thread.
	 */
	mtx_lock_spin(&sched_lock);
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		td->td_priority = pri;
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_flags == flags,
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);
	sleepq_wakeup_thread(sq, TAILQ_FIRST(&sq->sq_blocked), pri);
	sleepq_release(wchan);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_flags == flags,
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);
	while (!TAILQ_EMPTY(&sq->sq_blocked))
		sleepq_wakeup_thread(sq, TAILQ_FIRST(&sq->sq_blocked), pri);
	sleepq_release(wchan);
}
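/*
 * The wakeup side is symmetric (a minimal sketch; wakeup(), wakeup_one(),
 * and cv_signal()/cv_broadcast() are the real consumers):
 *
 *	sleepq_signal(chan, flags, -1);		  (wake highest priority waiter)
 *	sleepq_broadcast(chan, flags, -1);	  (wake all waiters)
 *
 * Both lock the chain themselves; a pri of -1 leaves the woken threads'
 * priorities alone, and flags must match the value passed to
 * sleepq_add() or the assertions above will fire.
 */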
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = (struct thread *)arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue,
	 * we have that sleep queue locked as it cannot migrate sleep
	 * queues while we dropped sched_lock.  If it had resumed and
	 * was on another CPU while the lock was dropped, it would have
	 * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear and the
	 * call to callout_stop() to stop this routine would have failed,
	 * meaning that it would have already set TDF_TIMEOUT to
	 * synchronize with this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(TD_IS_SLEEPING(td));
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		mtx_unlock_spin(&sched_lock);
		sleepq_wakeup_thread(sq, td, -1);
		sleepq_release(wchan);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	mtx_unlock_spin(&sched_lock);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_wakeup_thread(sq, td, -1);
	sleepq_release(wchan);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	wchan = td->td_wchan;
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}
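/*
 * Summary of the timeout races handled by sleepq_timeout() and
 * sleepq_check_timeout() above:
 *
 *  - Thread still asleep when the callout fires: sleepq_timeout() sets
 *    TDF_TIMEOUT and wakes it; sleepq_check_timeout() sees the flag and
 *    returns EWOULDBLOCK.
 *  - Thread woken first, callout runs before the thread checks: the
 *    callout sets TDF_TIMOFAIL, which sleepq_check_timeout() simply
 *    clears without calling callout_stop().
 *  - Thread woken first, callout running concurrently on another CPU:
 *    callout_stop() fails, so sleepq_check_timeout() sets TDF_TIMEOUT
 *    and yields; sleepq_timeout() then clears the flag and makes the
 *    thread runnable again.
 */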