subr_sleepqueue.c revision 296973
/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
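
/*
 * Illustrative sketch (not code from this file): the sleep and wakeup
 * halves of this API, as used by consumers such as sleep(9) and
 * wakeup(9), follow roughly this pattern, where "chan" and "lock" are
 * hypothetical names standing in for the caller's wait channel and
 * interlock (real consumers also drop the interlock before blocking):
 *
 *	Sleep side (sleepq_wait() consumes the chain lock):
 *		sleepq_lock(chan);
 *		sleepq_add(chan, &lock->lock_object, "examp",
 *		    SLEEPQ_SLEEP, 0);
 *		sleepq_wait(chan, 0);
 *
 *	Wakeup side (modeled on wakeup_one(9)):
 *		sleepq_lock(chan);
 *		wakeup_swapper = sleepq_signal(chan, SLEEPQ_SLEEP, 0, 0);
 *		sleepq_release(chan);
 *		if (wakeup_swapper)
 *			kick_proc0();
 */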

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 296973 2016-03-17 01:05:53Z cem $");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	256			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define NR_SLEEPQS      2
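
/*
 * Worked example (illustrative only): for a hypothetical wait channel
 * pointer 0xdeadc0de, SC_HASH computes
 * ((0xdeadc0de >> 8) ^ 0xdeadc0de) & 0xff; the low byte is
 * 0xc0 ^ 0xde = 0x1e, so the channel maps to chain 30.  XORing in the
 * pointer shifted right by SC_SHIFT mixes higher-order address bits
 * into the index, since the low bits of kernel addresses tend to be
 * poorly distributed due to alignment.
 */
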
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
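
/*
 * Invariant sketch (informal, derived from the code below): every
 * thread owns exactly one sleepqueue via td_sleepqueue, except while
 * blocked, when it has lent that queue to its wait channel.  The first
 * blocker's queue becomes the channel's queue on the chain; each later
 * blocker's queue is parked on sq_free.  On wakeup, every resumed
 * thread takes a queue back (the channel's own queue if it is the last
 * waiter), so no allocation happens on the sleep/wakeup path.
 */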

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue once the given sbintime_t timeout expires, if the thread
 * has not already been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
}
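
/*
 * Sketch of a timed sleep using this interface (illustrative only;
 * "chan" and "lock" are hypothetical caller names, and real consumers
 * normally go through sleep(9)):
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, &lock->lock_object, "timo", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(chan, SBT_1S, 0, C_HARDCLOCK);
 *	if (sleepq_timedwait(chan, 0) == EWOULDBLOCK)
 *		printf("timed out after one second\n");
 */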

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters with the sleep queue chain lock held and exits with
 * the thread lock held; the thread lock may have transitioned from the
 * sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals for this thread.  If not
	 * we can switch immediately.  Otherwise do the signal processing
	 * directly.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		(void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}
	/*
	 * Lock the per-process spinlock prior to dropping the PROC_LOCK
	 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
	 * thread_lock() are currently held in tdsendsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (_callout_stop_safe(&td->td_slpcallout, CS_MIGRBLOCK, NULL)
	    == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}
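
/*
 * Sketch of an interruptible sleep using this interface (illustrative
 * only; "chan" and "lock" are hypothetical caller names):
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, &lock->lock_object, "islp",
 *	    SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);
 *	error = sleepq_wait_sig(chan, 0);
 *	if (error == EINTR || error == ERESTART)
 *		... the sleep was aborted by a signal ...
 */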

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of the sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain; otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *tdn;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (sleepq_resume_thread(sq, td, pri))
			wakeup_swapper = 1;
		thread_unlock(td);
	}
	return (wakeup_swapper);
}
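
/*
 * Sketch of the wakeup-side pattern (modeled on wakeup(9); "chan" is a
 * hypothetical wait channel).  A nonzero return value indicates that a
 * resumed thread belongs to a swapped-out process, so the caller must
 * wake the swapper via kick_proc0() to bring it back in:
 *
 *	sleepq_lock(chan);
 *	wakeup_swapper = sleepq_broadcast(chan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(chan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */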

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	thread_lock(td);
	if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
		thread_unlock(td);
		if (wakeup_swapper)
			kick_proc0();
		return;
	}

	/*
	 * If the thread is on the SLEEPQ but isn't sleeping yet, it
	 * can either be on another CPU in between sleepq_add() and
	 * one of the sleepq_*wait*() routines or it can be in
	 * sleepq_catch_signals().
	 */
	if (TD_ON_SLEEPQ(td)) {
		td->td_flags |= TDF_TIMEOUT;
		thread_unlock(td);
		return;
	}

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		wakeup_swapper = setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not,
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished, partial_print;

	error = 0;
	finished = false;
	partial_print = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished; i++) {
		/* We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM. We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create();

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
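
/*
 * Hypothetical usage sketch (not code from this file): dumping the
 * stacks of every thread asleep on a wait channel "chan" into an
 * auto-extending sbuf:
 *
 *	struct sbuf *sb = sbuf_new_auto();
 *	int printed;
 *
 *	error = sleepq_sbuf_print_stacks(sb, chan, 0, &printed);
 *	if (error == 0) {
 *		sbuf_finish(sb);
 *		printf("%d stacks:\n%s", printed, sbuf_data(sb));
 *	}
 *	sbuf_delete(sb);
 */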
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
				      td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
					  td->td_tid, td->td_proc->p_pid,
					  td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif
