/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_switch.c 111032 2003-02-17 09:55:10Z julian $
 */

/***

Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is taken off the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned, we know M was 1 and must
now be 0.  Since the thread is no longer queued, that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it; hence the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE, i.e. whether it sorts
'earlier' on the list than that thread or later.  If it sorts earlier,
the KSE is removed from the last assigned thread (which is then no
longer assigned a KSE) and reassigned to the new thread, which is placed
earlier in the list.  The pointer is then backed up to the previous
thread (which may or may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available, and if
there are queued threads that are not assigned KSEs, the highest
priority one of them is assigned the KSE, which is then placed back on
the run queue at the appropriate place, and the kg->kg_last_assigned
pointer is adjusted down to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.

*/
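
/*
 * Illustrative sketch (not compiled): the invariant described above,
 * expressed as a check over a ksegrp's run queue.  Every thread up to
 * and including kg_last_assigned has a KSE; every thread after it has
 * none.  The function name is ours; the fields and macros it uses all
 * appear elsewhere in this file.
 */
#if 0
static void
kg_assign_invariant(struct ksegrp *kg)
{
	struct thread *td;
	int assigned = (kg->kg_last_assigned != NULL);

	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		if (assigned)
			KASSERT(td->td_kse != NULL,
			    ("thread in assigned prefix lacks a KSE"));
		else
			KASSERT(td->td_kse == NULL,
			    ("thread past last_assigned has a KSE"));
		if (td == kg->kg_last_assigned)
			assigned = 0;	/* later threads are unassigned */
	}
}
#endif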

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
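/*
 * The assertion above pins the geometry of the run queue status bitmap:
 * RQB_LEN words of RQB_BPW bits each must cover exactly the RQ_NQS run
 * queues (for example, 64 queues held in two 32-bit words).
 */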

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

retry:
	if ((ke = sched_choose())) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_KSES) {
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * During a panic, only allow a non-system thread to run if it
	 * is the one we are tracing.  (I think.. [JRE])
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0))
		goto retry;

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Or maybe give it back to its owner if it's been loaned.
 * Assumes that the original thread is either not runnable or
 * already on the run queue.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;
	struct kse_upcall *ku;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original) {
		/*
		 * If the outgoing thread is in a threaded group and has
		 * never scheduled an upcall, decide whether this is a
		 * short or long term event and thus whether or not to
		 * schedule an upcall.
		 * If it is a short term event, just suspend it in
		 * a way that takes its KSE with it.
		 * Select the events for which we want to schedule upcalls.
		 * For now it's just sleep.
		 * XXXKSE eventually almost any inhibition could do.
		 */
		if (TD_CAN_UNBIND(original) && (original->td_standin) &&
		    TD_ON_SLEEPQ(original)) {
			/*
			 * Release ownership of the upcall and schedule an
			 * upcall thread; the new upcall thread becomes the
			 * owner of the upcall structure.
			 */
			ku = original->td_upcall;
			ku->ku_owner = NULL;
			original->td_upcall = NULL;
			original->td_flags &= ~TDF_CAN_UNBIND;
			thread_schedule_upcall(original, ku);
		}
		original->td_kse = NULL;
	}

	/*
	 * Find the first unassigned thread.
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
			td->td_flags |= TDF_ASTPENDING;
		sched_add(ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/* Bring its kse with it, leave the thread attached */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(ke);
			sched_add(ke);
		}
		return;
	}

	/* It is a threaded process. */
	kg = td->td_ksegrp;
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(ke);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	td->td_priority = newpri;
	setrunqueue(td);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td->td_kse);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking.
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
			sched_rem(ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is
			 * first gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke);
	}
}

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/* Critical sections that prevent preemption. */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}
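
/*
 * Usage sketch (illustrative, not compiled): critical sections nest via
 * td_critnest, so only the outermost enter/exit pair actually toggles
 * the hardware state through cpu_critical_enter()/cpu_critical_exit().
 */
#if 0
	critical_enter();	/* outermost: calls cpu_critical_enter() */
	critical_enter();	/* nested: only increments td_critnest */
	critical_exit();	/* nested: only decrements td_critnest */
	critical_exit();	/* outermost: calls cpu_critical_exit() */
#endif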

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
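
/*
 * Worked example (assuming 32-bit status words): for queue index 37,
 * RQB_WORD(37) is 1 and RQB_BIT(37) is 1 << 5, so runq_findbit() above
 * recovers the index as RQB_FFS(bits) + (1 << RQB_L2BPW) = 5 + 32 = 37.
 */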

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}
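
/*
 * Note: dividing td_priority by RQ_PPQ collapses RQ_PPQ adjacent
 * priorities onto one queue (e.g. with RQ_PPQ == 4, priorities 0-3 all
 * map to queue 0), keeping the queue count and the status bitmap small.
 */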

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	/* Check the KSE before dereferencing it. */
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	Debugger(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_KSES) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		/* Never true; just keeps the variables "used". */
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif