kern_switch.c revision 99942
/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_switch.c 99942 2002-07-14 03:43:33Z julian $
 */

/***

Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (M = N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that has a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, the pointer must no longer
refer to it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it, and therefore that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not; i.e., is it 'earlier'
on the list than that thread or later? If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.

*/

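/*
 * A minimal userland sketch of the assignment rule above (illustrative
 * only, never compiled into the kernel; the toy_* names are invented for
 * this example).  It shows how, for M available KSEs, only the first M
 * queued threads hold KSEs, and where 'last_assigned' ends up.
 */
#if 0
#include <stdio.h>

/*
 * Return the index of the last queued thread that would hold a KSE,
 * or -1 for the kg_last_assigned == NULL case.
 */
static int
toy_last_assigned(int nthreads, int m)
{

	return ((nthreads < m ? nthreads : m) - 1);
}

int
main(void)
{
	int prio[] = { 120, 130, 140 };	/* queued threads, best first */
	int nthreads = 3;
	int n = 2;		/* KSEs in the group (at most one per CPU) */
	int x = 1;		/* KSEs actually running right now */
	int m = n - x;		/* KSEs free to sit on the run queue */
	int last, i;

	last = toy_last_assigned(nthreads, m);
	for (i = 0; i < nthreads; i++)
		printf("thread pri %d: %s\n", prio[i], i <= last ?
		    "holds a KSE, queued at this priority" : "waits, no KSE");
	printf("last_assigned %s\n", last < 0 ? "is NULL" :
	    "points at the last thread holding a KSE");
	return (0);
}
#endif
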
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static void runq_readjust(struct runq *rq, struct kse *ke);
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/

/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

	if ((ke = runq_choose(&runq))) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_flags & TDF_UNBOUND) {
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			if (kg->kg_last_assigned == td) {
				if (TAILQ_PREV(td, threadqueue, td_runq)
				    != NULL)
					printf("Yo MAMA!\n");
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			/*
			 * If we have started running an upcall,
			 * then TDF_UNBOUND was set because the thread was
			 * created without a KSE. Now that we have one,
			 * and it is our time to run, we make sure
			 * that BOUND semantics apply for the rest of
			 * the journey to userland, and into the UTS.
			 */
#ifdef	NOTYET
			if (td->td_flags & TDF_UPCALLING)
				td->td_flags &= ~TDF_UNBOUND;
#endif
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	td->td_state = TDS_RUNNING;
	return (td);
}

/*
 * Given a KSE (now surplus), either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes the KSE is not linked to any threads any more (it has been cleaned).
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;

	kg = ke->ke_ksegrp;

	/*
	 * Find the first unassigned thread:
	 * if there is a 'last assigned' then see what's next;
	 * otherwise look at what is first.
	 */
	if ((td = kg->kg_last_assigned)) {
		td = TAILQ_NEXT(td, td_runq);
	} else {
		td = TAILQ_FIRST(&kg->kg_runq);
	}

	/*
	 * If we found one, assign it the KSE; otherwise idle the KSE.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		runq_add(&runq, ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
	} else {
		ke->ke_state = KES_IDLE;
		ke->ke_thread = NULL;
		TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses++;
		CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
	}
}
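/*
 * For example: if kg_last_assigned points at a thread and one more
 * unassigned thread is queued after it, that next thread takes the surplus
 * KSE, kg_last_assigned advances to it and the KSE goes back on the system
 * run queue at that thread's priority; if nothing is queued beyond
 * kg_last_assigned (or the queue is empty), the KSE is idled on kg_iq.
 */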

int
kserunnable(void)
{
	return runq_check(&runq);
}

/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
 */
void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_state == TDS_RUNQ),
	    ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	/*
	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
	 * threads are BOUND.
	 */
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	td->td_state = TDS_UNQUEUED;
	kg->kg_runnable--;
	if ((td->td_flags & TDF_UNBOUND) == 0) {
		/* Bring its kse with it, leave the thread attached */
		runq_remove(&runq, ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		td->td_kse = NULL;
		if ((td3 = TAILQ_NEXT(td2, td_runq))) {
			KASSERT(td3 != td, ("td3 somehow matched td"));
			/*
			 * Give the next unassigned thread to the KSE
			 * so the number of runnable KSEs remains
			 * constant.
			 */
			td3->td_kse = ke;
			ke->ke_thread = td3;
			kg->kg_last_assigned = td3;
			runq_readjust(&runq, ke);
		} else {
			/*
			 * There is no unassigned thread.
			 * If we were the last assigned one,
			 * adjust the last assigned pointer back
			 * one, which may result in NULL.
			 */
			if (td == td2) {
				kg->kg_last_assigned =
				    TAILQ_PREV(td, threadqueue, td_runq);
			}
			runq_remove(&runq, ke);
			KASSERT((ke->ke_state != KES_IDLE),
			    ("kse already idle"));
			ke->ke_state = KES_IDLE;
			ke->ke_thread = NULL;
			TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses++;
		}
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_state != TDS_RUNQ), ("setrunqueue: bad thread state"));
	td->td_state = TDS_RUNQ;
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_flags & TDF_UNBOUND) == 0) {
		KASSERT((td->td_kse != NULL),
		    ("queueing BAD thread to run queue"));
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		runq_add(&runq, td->td_kse);
		return;
	}
	/*
	 * Ok, so we are threading with this thread.
	 * We don't have a KSE; see if we can get one.
	 */
	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		/*
		 * We will need a KSE: see if there is a free one
		 * before getting desperate.
		 * If we can't get one, our priority is not high enough;
		 * that's ok.
		 */
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
			runq_remove(&runq, ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a KSE to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		runq_add(&runq, ke);
	}
}
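/*
 * Worked example for the unbound path above: suppose the group has queued
 * threads at priorities 130 and 140, each holding a KSE, with
 * kg_last_assigned pointing at the 140 thread, and a new unbound thread at
 * priority 120 arrives while no idle KSE is available.  The 140 thread's
 * KSE is commandeered and pulled off the system run queue,
 * kg_last_assigned backs up to the 130 thread, the new thread is queued
 * ahead of both, is handed the commandeered KSE, and that KSE is re-added
 * to the system run queue at priority 120.
 */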

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/* Critical sections that prevent preemption. */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}
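/*
 * Usage example: critical sections nest, and only the outermost
 * enter/exit pair touches the machine-dependent state:
 *
 *	critical_enter();	td_critnest 0 -> 1, cpu_critical_enter()
 *	critical_enter();	td_critnest 1 -> 2
 *	critical_exit();	td_critnest 2 -> 1
 *	critical_exit();	td_critnest 1 -> 0, cpu_critical_exit()
 */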


/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
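/*
 * A small standalone sketch of the same status-bitmap trick (illustrative
 * only; the real RQB_WORD/RQB_BIT/RQB_FFS macros come from <sys/runq.h>).
 * The TOY_* values assume 64 queues split across 32-bit status words,
 * which is the usual layout but is only an assumption here.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <strings.h>		/* ffs() */

#define	TOY_NQS		64			/* number of queues */
#define	TOY_BPW		32			/* bits per status word */
#define	TOY_LEN		(TOY_NQS / TOY_BPW)	/* words in the bitmap */
#define	TOY_WORD(pri)	((pri) / TOY_BPW)
#define	TOY_BIT(pri)	(1u << ((pri) % TOY_BPW))

static uint32_t toy_status[TOY_LEN];

static void
toy_setbit(int pri)
{

	toy_status[TOY_WORD(pri)] |= TOY_BIT(pri);
}

/* Lowest-numbered (best priority) non-empty queue, or -1 if all empty. */
static int
toy_findbit(void)
{
	int i;

	for (i = 0; i < TOY_LEN; i++)
		if (toy_status[i])
			return (ffs(toy_status[i]) - 1 + i * TOY_BPW);
	return (-1);
}

int
main(void)
{

	toy_setbit(40);
	toy_setbit(33);
	printf("first non-empty queue: %d\n", toy_findbit());	/* 33 */
	return (0);
}
#endif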

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("runq_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("runq_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;
}
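/*
 * Example of the mapping above, assuming the stock RQ_PPQ of 4 from
 * <sys/runq.h>: a thread at priority 130 is queued at index 130 / 4 = 32
 * and shares that queue with priorities 128-131, while a priority 4 thread
 * lands in queue 1, ahead of anything in higher-numbered queues.
 */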

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find and remove the highest priority KSE from the run queue.
 * If nothing is runnable, return NULL; choosethread() then substitutes
 * the per-cpu idle thread.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		TAILQ_REMOVE(rqh, ke, ke_procq);
		ke->ke_ksegrp->kg_runq_kses--;
		if (TAILQ_EMPTY(rqh)) {
			CTR0(KTR_RUNQ, "runq_choose: empty");
			runq_clrbit(rq, pri);
		}

		ke->ke_state = KES_THREAD;
		KASSERT((ke->ke_thread != NULL),
		    ("runq_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("runq_choose: No KSE on thread"));
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}

static void
runq_readjust(struct runq *rq, struct kse *ke)
{

	if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) {
		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

#if 0
void
thread_sanity_check(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2;
	unsigned int prevpri;
	int saw_lastassigned;
	int unassigned;
	int assigned;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (kg != &p->p_ksegrp) {
		panic("wrong ksegrp");
	}

	if (ke) {
		if (ke != &p->p_kse) {
			panic("wrong kse");
		}
		if (ke->ke_thread != td) {
			panic("wrong thread");
		}
	}

	if ((p->p_flag & P_KSES) == 0) {
		if (ke == NULL) {
			panic("non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panic("thread runqueue unsorted");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panic("unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panic("lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panic("last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panic("mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panic("last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panic("where on earth does lastassigned point?");
		}
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (td2->td_state == TDS_RUNQ)) {
				assigned++;
				if (td2->td_kse == NULL) {
					panic("BOUND thread with no KSE");
				}
			}
		}
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panic("wrong number in runnable");
		}
#endif
	}
}
#endif