/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
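
/*
 * A rough sketch of how a consumer typically drives this API (purely
 * illustrative; msleep() and the condition variable code are the real
 * consumers, and their exact sequences live elsewhere).  SLEEPQ_MSLEEP
 * is the queue-type flag from sys/sleepqueue.h of this vintage.  Note
 * that sleepq_signal() and sleepq_broadcast() drop the chain lock
 * themselves:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "wmesg", SLEEPQ_MSLEEP);
 *	if (timo)
 *		sleepq_set_timeout(wchan, timo);
 *	if (timo)
 *		error = sleepq_timedwait(wchan);
 *	else
 *		sleepq_wait(wchan);
 *
 * and on the wakeup side:
 *
 *	sleepq_lock(wchan);
 *	sleepq_broadcast(wchan, SLEEPQ_MSLEEP, -1);
 */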

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 155936 2006-02-23 03:42:17Z davidxu $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
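
/*
 * Worked example (illustrative arithmetic only): a wait channel at
 * address 0x12345678 hashes to (0x12345678 >> 8) & 0x7f = 0x123456 &
 * 0x7f = 0x56, so it lands on chain 86 of the 128-entry table.
 */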

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked;	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
#ifdef INVARIANTS
	int	sq_type;			/* (c) Queue type. */
	struct mtx *sq_lock;			/* (c) Associated lock. */
#endif
};
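
/*
 * A schematic of the queue-lending scheme (not literal data): if threads
 * A, B, and C block on the same wait channel in that order, the chain
 * holds A's sleep queue, and the queues that B and C lent to the channel
 * are parked on its free list:
 *
 *	sc_queues -> sq(A): sq_blocked holds A, B, and C
 *	                    sq_free holds sq(B) and sq(C)
 *
 * Each thread takes one queue (not necessarily its own) back with it
 * when it is resumed; see sleepq_resume_thread() below.
 */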

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

static MALLOC_DEFINE(M_SLEEPQUEUE, "sleepqueue", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sq->sq_blocked);
	LIST_INIT(&sq->sq_free);
	return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	MPASS(TAILQ_EMPTY(&sq->sq_blocked));
	free(sq, M_SLEEPQUEUE);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked by the
 * caller.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
	    ("Trying to sleep, but thread marked as sleeping prohibited"));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
		    ("thread's sleep queue has a non-empty queue"));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_wchan = wchan;
#ifdef INVARIANTS
		sq->sq_lock = lock;
		sq->sq_type = flags & SLEEPQ_TYPE;
#endif
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}
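
/*
 * As with callout_reset(), timo is in ticks; e.g. passing hz arms a
 * roughly one second timeout.  When it fires, sleepq_timeout() below
 * runs with the sleeping thread as its argument.
 */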

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Returns with both the sleep queue chain lock and
 * sched_lock held.
 */
static int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		(void *)td, (long)p->p_pid, p->p_comm);

	MPASS(td->td_flags & TDF_SINTR);
	mtx_unlock_spin(&sc->sc_lock);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}

	if (ret == 0) {
		mtx_lock_spin(&sc->sc_lock);
		/*
		 * Lock sched_lock before unlocking the proc lock;
		 * otherwise, we could lose a race.
		 */
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (!(td->td_flags & TDF_INTERRUPT))
			return (0);
		/* KSE threads tried unblocking us. */
		ret = td->td_intrval;
		mtx_unlock_spin(&sched_lock);
		MPASS(ret == EINTR || ret == ERESTART);
	} else {
		PROC_UNLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
	}
	/*
	 * There were pending signals, so if this thread is still on the
	 * sleep queue, remove it from the sleep queue.
	 */
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td))
		sleepq_resume_thread(sq, td, -1);
	td->td_flags &= ~TDF_SINTR;
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue,
 * and drops the lock on the sleep queue chain.  Returns with sched_lock
 * held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_unlock_spin(&sc->sc_lock);
	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL, NULL);
	}
	return (0);
}
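
/*
 * One possible interleaving of the handshake above, when a normal
 * wakeup races the callout (a schematic only; sched_lock serializes
 * each step):
 *
 *	sleeper (check above)           callout (sleepq_timeout(), below)
 *	woken up normally               starts running
 *	callout_stop() returns 0
 *	sets TDF_TIMEOUT, re-sleeps
 *	                                sees TDF_TIMEOUT, clears it,
 *	                                and setrunnable()s the sleeper
 *
 * If the callout instead finishes first, it sets TDF_TIMOFAIL, and the
 * check above merely clears that flag.
 */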

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	MPASS(!(curthread->td_flags & TDF_SINTR));
	mtx_lock_spin(&sched_lock);
	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan);
	if (rcatch == 0)
		sleepq_switch(wchan);
	else
		sleepq_release(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (rcatch)
		return (rcatch);
	return (rval);
}
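
/*
 * An interruptible sleep in a consumer then looks roughly like this
 * (a sketch; msleep() with PCATCH is the canonical caller, and
 * SLEEPQ_MSLEEP comes from sys/sleepqueue.h of this vintage):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "wmesg",
 *	    SLEEPQ_MSLEEP | SLEEPQ_INTERRUPTIBLE);
 *	error = sleepq_wait_sig(wchan);
 *
 * where error is 0, EINTR, or ERESTART.
 */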

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
	int rval;

	MPASS(!(curthread->td_flags & TDF_SINTR));
	mtx_lock_spin(&sched_lock);
	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan);
	if (rcatch == 0)
		sleepq_switch(wchan);
	else
		sleepq_release(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static void
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain; otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		sched_prio(td, pri);
	setrunnable(td);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked, td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	mtx_lock_spin(&sched_lock);
	sleepq_resume_thread(sq, besttd, pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	mtx_lock_spin(&sched_lock);
	while (!TAILQ_EMPTY(&sq->sq_blocked))
		sleepq_resume_thread(sq, TAILQ_FIRST(&sq->sq_blocked), pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}
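
/*
 * This is roughly what wakeup() amounts to (a sketch, not the verbatim
 * kern_synch.c code).  Note that the chain lock taken by sleepq_lock()
 * is dropped by sleepq_broadcast() itself, so no explicit
 * sleepq_release() is needed on this path:
 *
 *	sleepq_lock(ident);
 *	sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1);
 */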

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue,
	 * we have that sleep queue locked as it cannot migrate sleep
	 * queues while we dropped sched_lock.  If it had resumed and
	 * was on another CPU while the lock was dropped, it would have
	 * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear, and its
	 * call to callout_stop() to stop this routine would have
	 * failed, meaning that it would have already set TDF_TIMEOUT
	 * to synchronize with this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		sleepq_resume_thread(sq, td, -1);
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_resume_thread(sq, td, -1);
	sleepq_release(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td, int intrval)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
	wchan = td->td_wchan;
	if (wchan != NULL) {
		td->td_intrval = intrval;
		td->td_flags |= TDF_SLEEPABORT;
	}
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
#ifdef INVARIANTS
	db_printf("Queue type: %d\n", sq->sq_type);
	if (sq->sq_lock) {
		lock = &sq->sq_lock->mtx_object;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	if (TAILQ_EMPTY(&sq->sq_blocked))
		db_printf("\tempty\n");
	else
		TAILQ_FOREACH(td, &sq->sq_blocked, td_slpq) {
			db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		}
}
#endif