/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, the pointer must no
longer refer to it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it, which shows that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE, i.e. whether it falls
earlier on the list than that thread or later. If it is earlier, the KSE
is taken from the last assigned thread (which is then left without a
KSE) and reassigned to the new thread, which is placed earlier in the
list. The pointer is then backed up to the previous thread (which may or
may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available, and if
there are queued threads that are not assigned KSEs, the highest
priority one of them is assigned the KSE, which is then placed back on
the run queue at the appropriate place, and the kg->kg_last_assigned
pointer is adjusted down to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
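
/*
 * A minimal sketch (disabled; it exists only to illustrate the comment
 * above and uses only fields that appear in this file) of the 'steal'
 * test that kg_last_assigned enables: a newly runnable thread takes the
 * KSE away from the last assigned thread only if it would sort earlier
 * (numerically lower priority) in the ksegrp run queue.  setrunqueue()
 * below is the real implementation.
 */
#if 0
static int
would_steal_kse(struct ksegrp *kg, struct thread *td)
{
	struct thread *tda;

	tda = kg->kg_last_assigned;
	/* No queued thread holds a KSE, so there is nothing to steal. */
	if (tda == NULL)
		return (0);
	/* Earlier in the queue means a numerically lower priority. */
	return (td->td_priority < tda->td_priority);
}
#endif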

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 134070 2004-08-20 05:58:38Z scottl $");

#include "opt_full_preemption.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>

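/* The run queue status bitmap must provide exactly one bit per queue. */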
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_SA) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes that the original thread is not runnable.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original)
		original->td_kse = NULL;

	/*
	 * Find the first unassigned thread
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		sched_add(td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* Bring its kse with it, leave the thread attached */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	td->td_priority = newpri;
	setrunqueue(td);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;
	int count;

	CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d",
	    td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking..
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			CTR2(KTR_RUNQ, "setrunqueue: kg:%p: Use free ke:%p",
			    kg, ke);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			CTR3(KTR_RUNQ,
			    "setrunqueue: kg:%p: take ke:%p from td: %p",
			    kg, ke, tda);
			sched_rem(tda);
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	count = 0;
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
		/* XXX Debugging hack */
		if (++count > 10000) {
			printf("setrunqueue(): corrupt kg_runq, td=%p\n", td);
			panic("deadlock in setrunqueue");
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the KSE we brought in.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke->ke_thread);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}
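
/*
 * Hypothetical usage sketch (not part of the kernel; the per-CPU 'cnt'
 * field is invented for illustration): code that must not be preempted,
 * e.g. while updating per-CPU state, brackets itself with the pair
 * above.  The final critical_exit() may context switch if a preemption
 * was deferred while the section was held.
 */
#if 0
static void
example_percpu_update(void)
{
	critical_enter();			/* preemption now deferred */
	PCPU_SET(cnt, PCPU_GET(cnt) + 1);	/* 'cnt' is a made-up field */
	critical_exit();			/* may switch if one was owed */
}
#endif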

/*
 * This function is called when a thread is about to be put on run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the new thread should be immediately preempted to.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Our thread state says that we are already on a run queue, so
	 * update our state as if we had been dequeued by choosethread().
	 */
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL, td);
	return (1);
#else
	return (0);
#endif
}
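
/*
 * Assumed calling pattern (a simplified sketch, not the scheduler's
 * actual code): a caller about to queue a newly runnable thread first
 * offers it to maybe_preempt(), and only enqueues it if no preemption
 * took place.
 */
#if 0
	if (maybe_preempt(td))
		return;			/* we switched; td is now running */
	runq_add(rq, td->td_kse);	/* otherwise queue its KSE as usual */
#endif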

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
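
/*
 * Worked example (the numbers assume the common RQ_NQS = 64, RQ_PPQ = 4,
 * RQB_BPW = 32 configuration): a thread at priority 139 hashes to queue
 * 139 / RQ_PPQ = 34, which lives in status word RQB_WORD(34) = 34 >> 5 = 1
 * as bit 34 & 31 = 2.  Because runq_findbit() scans words in order and
 * takes the first set bit, a non-empty queue in word 0 (queues 0-31)
 * always beats one in word 1.
 */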

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	kdb_enter(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_SA) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif