subr_sleepqueue.c revision 131473
/*
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked on
 * a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head, just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
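
/*
 * Illustrative sketch (editorial addition, not compiled): a consumer such
 * as the sleep/wakeup or condition variable code is expected to drive this
 * API roughly as follows.  The names "wchan", "lock", and "pri" are
 * placeholders and SLEEPQ_CONDVAR is only an example flags value; see the
 * actual callers for the definitive usage.
 *
 * Blocking side (queue lookup locks the chain; the chain stays locked
 * across sleepq_add() and into sleepq_wait()):
 *
 *	sq = sleepq_lookup(wchan);
 *	sleepq_add(sq, wchan, lock, "example", SLEEPQ_CONDVAR);
 *	sleepq_wait(wchan);
 *
 * Wakeup side (typically with the associated lock held):
 *
 *	sleepq_signal(wchan, SLEEPQ_CONDVAR, pri);	(wake one waiter)
 *	sleepq_broadcast(wchan, SLEEPQ_CONDVAR, pri);	(wake all waiters)
 */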

#include "opt_sleepqueue_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 131473 2004-07-02 19:09:50Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
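
/*
 * Worked example (editorial addition, hypothetical address): for a wait
 * channel at 0xc1234982, SC_HASH() discards the low SC_SHIFT (8) bits,
 * leaving 0xc12349, and masks with SC_MASK (0x7f), giving 0x49 (73).
 * SC_LOOKUP() therefore resolves that wait channel to &sleepq_chains[73].
 */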

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked;	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_flags;			/* (c) Flags. */
#ifdef INVARIANTS
	struct mtx *sq_lock;			/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_resume_thread(struct thread *td, int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sq->sq_blocked);
	LIST_INIT(&sq->sq_free);
	return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	MPASS(TAILQ_EMPTY(&sq->sq_blocked));
	free(sq, M_SLEEPQUEUE);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table, locking the associated sleep queue chain.  Return holding the sleep
 * queue chain lock.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleepqueue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
    const char *wmesg, int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td, *td1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);

	/* If the passed in sleep queue is NULL, use this thread's queue. */
	if (sq == NULL) {
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
		    ("thread's sleep queue has a non-empty queue"));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_wchan = wchan;
#ifdef INVARIANTS
		sq->sq_lock = lock;
#endif
		sq->sq_flags = flags;
		TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
			if (td1->td_priority > td->td_priority)
				break;
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_slpq);
		else
			TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.
 */
int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	int do_upcall;
	int sig;

	do_upcall = 0;
	td = curthread;
	p = td->td_proc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, p->p_comm);

	/* Mark thread as being in an interruptible sleep. */
	mtx_lock_spin(&sched_lock);
	MPASS(TD_ON_SLEEPQ(td));
	td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (sig == 0 && thread_suspend_check(1))
		sig = SIGSTOP;
	else
		do_upcall = thread_upcall_check(td);
	PROC_UNLOCK(p);

	/*
	 * If there were pending signals and this thread is still on
	 * the sleep queue, remove it from the sleep queue.
	 */
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
		mtx_unlock_spin(&sched_lock);
		sleepq_remove_thread(sq, td);
	} else
		mtx_unlock_spin(&sched_lock);
	return (sig);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue,
 * dropping the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		mtx_lock_spin(&sched_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_lock_spin(&sched_lock);
	mtx_unlock_spin(&sc->sc_lock);

	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/* We are no longer in an interruptible sleep. */
	td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);
	return (0);
}

/*
 * If we were in an interruptible sleep and we weren't interrupted and
 * didn't time out, check to see if there are any pending signals and
 * which return value we should use if so.  The return value from an
 * earlier call to sleepq_catch_signals() should be passed in as the
 * argument.
 */
int
sleepq_calc_signal_retval(int sig)
{
	struct thread *td;
	struct proc *p;
	int rval;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	/* XXX: Should we always be calling cursig()? */
	if (sig == 0)
		sig = cursig(td);
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	} else
		rval = 0;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	return (rval);
}
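
/*
 * Illustrative sketch (editorial addition, not compiled): an interruptible,
 * timed sleep is expected to combine the pieces above roughly as follows.
 * "wchan", "lock", "timo", and "flags" are placeholders and error handling
 * is omitted; see the sleep/wakeup and condition variable callers for the
 * definitive sequence.
 *
 *	sq = sleepq_lookup(wchan);
 *	sleepq_add(sq, wchan, lock, "example", flags);
 *	sleepq_set_timeout(wchan, timo);
 *	sig = sleepq_catch_signals(wchan);
 *	rval = sleepq_timedwait_sig(wchan, sig != 0);
 *	if (rval == 0)
 *		rval = sleepq_calc_signal_retval(sig);
 */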

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int signal_caught)
{
	int rvalt, rvals;

	sleepq_switch(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught || rvalt == 0)
		return (rvals);
	else
		return (rvalt);
}

/*
 * Removes a thread from a sleep queue.
 */
static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	mtx_lock_spin(&sched_lock);
	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a thread that was asleep on a queue.
 */
static void
sleepq_resume_thread(struct thread *td, int pri)
{

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	mtx_lock_spin(&sched_lock);
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		td->td_priority = pri;
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;
	struct thread *td;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_flags == flags,
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);

	/* Remove first thread from queue and awaken it. */
	td = TAILQ_FIRST(&sq->sq_blocked);
	sleepq_remove_thread(sq, td);
	sleepq_release(wchan);
	sleepq_resume_thread(td, pri);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
	TAILQ_HEAD(, thread) list;
	struct sleepqueue *sq;
	struct thread *td;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_flags == flags,
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);

	/* Move blocked threads from the sleep queue to a temporary list. */
	TAILQ_INIT(&list);
	while (!TAILQ_EMPTY(&sq->sq_blocked)) {
		td = TAILQ_FIRST(&sq->sq_blocked);
		sleepq_remove_thread(sq, td);
		TAILQ_INSERT_TAIL(&list, td, td_slpq);
	}
	sleepq_release(wchan);

	/* Resume all the threads on the temporary list. */
	while (!TAILQ_EMPTY(&list)) {
		td = TAILQ_FIRST(&list);
		TAILQ_REMOVE(&list, td, td_slpq);
		sleepq_resume_thread(td, pri);
	}
}

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue,
	 * we have that sleep queue locked as it cannot migrate sleep
	 * queues while we dropped sched_lock.  If it had resumed and
	 * was on another CPU while the lock was dropped, it would have
	 * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear and the
	 * call to callout_stop() to stop this routine would have failed,
	 * meaning that it would have already set TDF_TIMEOUT to
	 * synchronize with this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		mtx_unlock_spin(&sched_lock);
		sleepq_remove_thread(sq, td);
		sleepq_release(wchan);
		sleepq_resume_thread(td, -1);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	mtx_unlock_spin(&sched_lock);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_remove_thread(sq, td);
	sleepq_release(wchan);
	sleepq_resume_thread(td, -1);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
	wchan = td->td_wchan;
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}
837