/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE, i.e. whether it belongs
'earlier' on the list than that thread or later. If it is earlier, the
KSE is removed from the last assigned thread (which is then no longer
assigned a KSE) and reassigned to the new thread, which is placed
earlier in the list. The pointer is then backed up to the previous
thread (which may or may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
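
/*
 * Illustrative sketch only (not part of the kernel): a tiny userland
 * model of the invariant described above, using hypothetical stand-in
 * types.  Given a priority-ordered array of runnable threads and M
 * spare KSEs, the top min(M, n) threads are preassigned KSEs and
 * "last assigned" is the last of those, or NULL (modelled here as
 * index -1) when M is 0 or no threads are queued.
 */
#if 0
struct model_thread {
	int pri;			/* lower value = higher priority */
	int has_kse;			/* 1 if preassigned a KSE */
};

/* Preassign m spare KSEs; return index of last assigned thread or -1. */
static int
model_assign(struct model_thread *tq, int n, int m)
{
	int i;

	for (i = 0; i < n; i++)
		tq[i].has_kse = (i < m);	/* top m threads get KSEs */
	return (n < m ? n - 1 : m - 1);		/* -1 models a NULL pointer */
}
#endif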

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 134067 2004-08-20 05:18:50Z scottl $");

#include "opt_full_preemption.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_SA) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes that the original thread is not runnable.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original)
		original->td_kse = NULL;

	/*
	 * Find the first unassigned thread.
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		sched_add(td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* Bring its kse with it, leave the thread attached */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td);
		}
		return;
	}

	/* It is a threaded process. */
	kg = td->td_ksegrp;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	td->td_priority = newpri;
	setrunqueue(td);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;
	int count;

	CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d",
	    td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			CTR2(KTR_RUNQ, "setrunqueue: kg:%p: Use free ke:%p",
			    kg, ke);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			CTR3(KTR_RUNQ,
			    "setrunqueue: kg:%p: take ke:%p from td: %p",
			    kg, ke, tda);
			sched_rem(tda);
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	count = 0;
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
		/* XXX Debugging hack */
		if (++count > 10000) {
			printf("setrunqueue(): corrupt kg_runq, td= %p\n", td);
			panic("deadlock in setrunqueue");
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke->ke_thread);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
			td, td->td_ksegrp, td->td_proc->p_pid);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}
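
/*
 * Illustrative sketch only: the intended calling pattern for the API
 * above.  Critical sections nest via td_critnest; only the outermost
 * critical_exit() re-enables preemption and (under PREEMPTION) performs
 * any context switch that was deferred with TDP_OWEPREEMPT.  The
 * function below is a hypothetical example, not kernel code.
 */
#if 0
static void
example_percpu_update(void)
{
	critical_enter();	/* outermost: disables preemption */
	/* ... safely touch per-CPU data here ... */
	critical_enter();	/* nested: just bumps td_critnest */
	critical_exit();	/* inner: still inside the section */
	critical_exit();	/* outermost: deferred switch may occur */
}
#endif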

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately preempt to the new thread.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;

	if ((ctd->td_kse == NULL) || (ctd->td_kse->ke_thread != ctd))
		return (0);

	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Our thread state says that we are already on a run queue, so
	 * update our state as if we had been dequeued by choosethread().
	 */
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL, td);
	return (1);
#else
	return (0);
#endif
}
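
/*
 * Illustrative sketch only: how a scheduler's add routine might use
 * maybe_preempt(), following the contract above (returns 1 if it
 * switched, 0 if the caller should queue the thread).  The function
 * and run queue below are hypothetical stand-ins, not the real
 * sched_add() implementation.
 */
#if 0
static struct runq example_runq;

static void
example_sched_add(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* td was already marked TD_ON_RUNQ by setrunqueue(). */
	if (maybe_preempt(td))
		return;			/* we already switched to td */
	runq_add(&example_runq, td->td_kse); /* otherwise queue it */
}
#endif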

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
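
/*
 * Worked example for the scan above (illustrative and userland-style,
 * assuming the conventional definitions RQB_FFS(mask) == ffs(mask) - 1
 * and RQB_L2BPW == 5, i.e. 32-bit status words): with word 0 empty and
 * only bit 4 set in word 1, the first non-empty queue is 4 + (1 << 5)
 * == 36.
 */
#if 0
#include <assert.h>
#include <strings.h>		/* ffs() */

int
main(void)
{
	unsigned int bits[2] = { 0x0, 0x10 };	/* only queue 36 non-empty */
	int i, pri = -1;

	for (i = 0; i < 2; i++)
		if (bits[i]) {
			pri = (ffs(bits[i]) - 1) + (i << 5);
			break;
		}
	assert(pri == 36);	/* bit 4 of word 1 -> 4 + 32 */
	return (0);
}
#endif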

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}
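
/*
 * Worked example for the mapping above, assuming the stock constants
 * RQ_NQS == 64 and RQ_PPQ == 4 (256 priority levels folded into 64
 * queues): a thread at priority 130 is inserted on queue 130 / 4 == 32,
 * and runq_setbit(rq, 32) then sets bit 32 % 32 == 0 of status word
 * 32 / 32 == 1 via RQB_BIT() and RQB_WORD().
 */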

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}
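
/*
 * Illustrative sketch only: the typical pairing of the primitives
 * above, as a scheduler might drive them (hypothetical code; note
 * that runq_remove()'s caller is responsible for setting ke_state,
 * per the comment above).  Assumes runq_init() was called once at
 * startup.
 */
#if 0
static struct runq example_rq;

static struct kse *
example_pick_next(void)
{
	struct kse *ke;

	if (!runq_check(&example_rq))	/* anything runnable at all? */
		return (NULL);
	ke = runq_choose(&example_rq);	/* highest-priority KSE */
	runq_remove(&example_rq, ke);	/* dequeue it in order to run it */
	ke->ke_state = KES_THREAD;	/* caller sets the new state */
	return (ke);
}
#endif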

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	kdb_enter(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_SA) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif