/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_switch.c 104964 2002-10-12 05:32:24Z jeff $
 */

/***

Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (where M = N - X) of these
KSEs on the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, the pointer must be
cleared. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so the
next thread in the ksegrp list will not have a KSE to assign to it,
and the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE, i.e. whether the new thread
lands earlier on the list than that thread or later. If it lands earlier,
the KSE is removed from the last assigned thread (which is now not
assigned a KSE) and reassigned to the new thread, which is placed
earlier in the list. The pointer is then backed up to the previous
thread (which may or may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.

*/
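
/*
 * A minimal sketch of the invariant described above (illustrative only,
 * so it is left disabled, like the other debug code in this file): the
 * threads holding KSEs must form a prefix of the ksegrp run queue, and
 * kg_last_assigned must point at the last of them, or be NULL when the
 * prefix is empty.  The helper name is hypothetical.
 */
#if 0
static void
kg_check_last_assigned(struct ksegrp *kg)
{
	struct thread *td;
	struct thread *last;
	int in_prefix;

	mtx_assert(&sched_lock, MA_OWNED);
	last = NULL;
	in_prefix = 1;
	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		if (td->td_kse != NULL) {
			/* An assigned thread may not follow an unassigned one. */
			KASSERT(in_prefix,
			    ("assigned thread after unassigned one"));
			last = td;
		} else
			in_prefix = 0;
	}
	KASSERT(kg->kg_last_assigned == last,
	    ("kg_last_assigned does not match the run queue"));
}
#endif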

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/

/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

retry:
	if ((ke = sched_choose())) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_flags & TDF_UNBOUND) {
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			if (kg->kg_last_assigned == td) {
				if (TAILQ_PREV(td, threadqueue, td_runq)
				    != NULL)
					printf("choosethread: chosen thread "
					    "not at head of run queue\n");
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			/*
			 * If we have started running an upcall,
			 * then TDF_UNBOUND was set because the thread was
			 * created without a KSE. Now that we have one,
			 * and it is our time to run, we make sure
			 * that BOUND semantics apply for the rest of
			 * the journey to userland, and into the UTS.
			 */
#ifdef	NOTYET
			if (td->td_flags & TDF_UPCALLING)
				td->td_flags &= ~TDF_UNBOUND;
#endif
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0))
		goto retry;
	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a KSE (now surplus or at least loanable), either assign a new
 * runnable thread to it (and put it in the run queue), or put it in the
 * ksegrp's idle KSE list, or maybe give it back to its owner if it's
 * been loaned out.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *owner;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	owner = ke->ke_bound;
	original = ke->ke_thread;
	KASSERT(!(owner && ((owner->td_kse != ke) ||
		    (owner->td_flags & TDF_UNBOUND))),
		("kse_reassign: bad thread bound state"));

	/*
	 * Find the first unassigned thread.
	 * If there is a 'last assigned' then see what's next,
	 * otherwise look at what is first.
	 */
	if ((td = kg->kg_last_assigned)) {
		td = TAILQ_NEXT(td, td_runq);
	} else {
		td = TAILQ_FIRST(&kg->kg_runq);
	}

	/*
	 * If we found one, assign it the KSE, otherwise idle the KSE.
	 */
	if (td) {
		/*
		 * If the original thread is bound to us, we can only be
		 * lent out, so make a loan; otherwise we just drop the
		 * original thread.
		 */
		if (original) {
			if (((original->td_flags & TDF_UNBOUND) == 0)) {
				/*
				 * Put the owner on the side.
				 */
				ke->ke_bound = original;
				TD_SET_LOAN(original);
			} else {
				original->td_kse = NULL;
			}
		}
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		sched_add(ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		return;
	}
	if (owner) { /* already loaned out */
		/* Effectively unloan it. */
		TD_CLR_LOAN(owner);
		ke->ke_thread = owner;
		ke->ke_bound = NULL;
		if (original)
			original->td_kse = NULL;
		original = owner;

		if (TD_CAN_RUN(owner)) {
			/*
			 * If the owner thread is now runnable, run it.
			 * Let it have its KSE back.
			 */
			setrunqueue(owner);
			CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
			    ke, owner);
			return;
		}
	}
	/*
	 * Presently NOT loaned out.
	 * If we are bound, we go on the loanable queue,
	 * otherwise onto the free queue.
	 */
	if (original) {
		if (((original->td_flags & TDF_UNBOUND) == 0)) {
			ke->ke_state = KES_THREAD;
			ke->ke_flags |= KEF_ONLOANQ;
			ke->ke_bound = NULL;
			TAILQ_INSERT_HEAD(&kg->kg_lq, ke, ke_kgrlist);
			kg->kg_loan_kses++;
			CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
			return;
		} else {
			original->td_kse = NULL;
		}
	}
	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
}
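
/*
 * To summarise kse_reassign(): the KSE ends up in one of four places,
 * all visible in the code above: attached to the next unassigned thread
 * and added to the system run queue; handed back to a runnable bound
 * owner via setrunqueue(); parked on the loanable queue (kg_lq, with
 * KEF_ONLOANQ set) when its original thread was bound; or parked on the
 * idle queue (kg_iq, as KES_IDLE) otherwise.
 */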

/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
 */
void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	/*
	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
	 * threads are BOUND.
	 */
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if ((td->td_flags & TDF_UNBOUND) == 0) {
		/* Bring its kse with it, leave the thread attached. */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("remrunqueue: last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		td->td_kse = NULL;
		ke->ke_thread = NULL;
		kse_reassign(ke);
	}
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td->td_kse);
		return;
	}
	if ((td->td_flags & TDF_UNBOUND) == 0) {
		KASSERT((td->td_kse != NULL),
		    ("queueing BAD thread to run queue"));
		ke = td->td_kse;
		ke->ke_bound = NULL;
		if (ke->ke_flags & KEF_ONLOANQ) {
			ke->ke_flags &= ~KEF_ONLOANQ;
			TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
			kg->kg_loan_kses--;
		}
		sched_add(td->td_kse);
		return;
	}

	/*
	 * Ok, so we are threading with this thread.
	 * We don't have a KSE, see if we can get one.
	 */
	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		/*
		 * We will need a KSE: see if there is one.
		 * First look for a free one, before getting desperate.
		 * If we can't get one, our priority is not high enough,
		 * and that's ok.
		 */
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (kg->kg_loan_kses) {
			/*
			 * Failing that, see if we can borrow one.
			 */
			ke = TAILQ_FIRST(&kg->kg_lq);
			TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
			ke->ke_flags &= ~KEF_ONLOANQ;
			ke->ke_state = KES_THREAD;
			TD_SET_LOAN(ke->ke_thread);
			ke->ke_bound = ke->ke_thread;
			ke->ke_thread = NULL;
			kg->kg_loan_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
			sched_rem(ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a KSE to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new KSE on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke);
	}
}
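
/*
 * Worked scenario for setrunqueue() (illustrative): suppose an idle KSE
 * was taken from kg_iq.  If the new thread has better (numerically
 * lower) priority than kg_last_assigned, it is inserted inside the
 * assigned prefix of the queue and keeps the KSE itself, leaving
 * last_assigned alone; otherwise the KSE goes to the thread just after
 * last_assigned (which may still be the new thread), and last_assigned
 * advances to point at that thread.
 */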

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/* Critical sections that prevent preemption. */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}
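
/*
 * Usage sketch (illustrative): critical sections nest, and only the
 * outermost enter/exit pair reaches the machine-dependent layer.
 *
 *	critical_enter();	td_critnest 0 -> 1, cpu_critical_enter()
 *	critical_enter();	td_critnest 1 -> 2, counter only
 *	critical_exit();	td_critnest 2 -> 1, counter only
 *	critical_exit();	td_critnest 1 -> 0, cpu_critical_exit()
 */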


/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
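
/*
 * Worked example (assuming RQB_BPW == 32, so RQB_L2BPW == 5, and that
 * RQB_FFS() returns a zero-based bit index): priority level 37 lives in
 * word 37 >> 5 == 1, bit 37 & 31 == 5.  With only that bit set,
 * runq_findbit() recovers the level as
 * RQB_FFS(rqb_bits[1]) + (1 << 5) == 5 + 32 == 37.
 */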

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}
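
/*
 * Example (assuming RQ_PPQ == 4): a thread at td_priority 100 maps to
 * run queue index 100 / 4 == 25, sharing that queue, and hence that
 * status bit, with priorities 100 through 103.
 */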

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

#if 0
static void
runq_readjust(struct runq *rq, struct kse *ke)
{

	if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) {
		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}
#endif

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	Debugger(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_KSES) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif