kern_switch.c revision 103216
1/* 2 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: head/sys/kern/kern_switch.c 103216 2002-09-11 08:13:56Z julian $ 27 */ 28 29/*** 30 31Here is the logic.. 32 33If there are N processors, then there are at most N KSEs (kernel 34schedulable entities) working to process threads that belong to a 35KSEGROUP (kg). If there are X of these KSEs actually running at the 36moment in question, then there are at most M (N-X) of these KSEs on 37the run queue, as running KSEs are not on the queue. 38 39Runnable threads are queued off the KSEGROUP in priority order. 
40If there are M or more threads runnable, the top M threads 41(by priority) are 'preassigned' to the M KSEs not running. The KSEs take 42their priority from those threads and are put on the run queue. 43 44The last thread that had a priority high enough to have a KSE associated 45with it, AND IS ON THE RUN QUEUE is pointed to by 46kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs 47assigned as all the available KSEs are actively running, or because there 48are no threads queued, that pointer is NULL. 49 50When a KSE is removed from the run queue to become runnable, we know 51it was associated with the highest priority thread in the queue (at the head 52of the queue). If it is also the last assigned we know M was 1 and must 53now be 0. Since the thread is no longer queued that pointer must be 54removed from it. Since we know there were no more KSEs available, 55(M was 1 and is now 0) and since we are not FREEING our KSE 56but using it, we know there are STILL no more KSEs available, we can prove 57that the next thread in the ksegrp list will not have a KSE to assign to 58it, so we can show that the pointer must be made 'invalid' (NULL). 59 60The pointer exists so that when a new thread is made runnable, it can 61have its priority compared with the last assigned thread to see if 62it should 'steal' its KSE or not.. i.e. is it 'earlier' 63on the list than that thread or later.. If it's earlier, then the KSE is 64removed from the last assigned (which is now not assigned a KSE) 65and reassigned to the new thread, which is placed earlier in the list. 66The pointer is then backed up to the previous thread (which may or may not 67be the new thread). 
68 69When a thread sleeps or is removed, the KSE becomes available and if there 70are queued threads that are not assigned KSEs, the highest priority one of 71them is assigned the KSE, which is then placed back on the run queue at 72the appropriate place, and the kg->kg_last_assigned pointer is adjusted down 73to point to it. 74 75The following diagram shows 2 KSEs and 3 threads from a single process. 76 77 RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads) 78 \ \____ 79 \ \ 80 KSEGROUP---thread--thread--thread (queued in priority order) 81 \ / 82 \_______________/ 83 (last_assigned) 84 85The result of this scheme is that the M available KSEs are always 86queued at the priorities they have inherited from the M highest priority 87threads for that KSEGROUP. If this situation changes, the KSEs are 88reassigned to keep this true. 89 90*/ 91 92#include <sys/param.h> 93#include <sys/systm.h> 94#include <sys/kernel.h> 95#include <sys/ktr.h> 96#include <sys/lock.h> 97#include <sys/mutex.h> 98#include <sys/proc.h> 99#include <sys/queue.h> 100#include <machine/critical.h> 101 102CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS); 103 104/* 105 * Global run queue. 106 */ 107static struct runq runq; 108SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq) 109 110static void runq_readjust(struct runq *rq, struct kse *ke); 111/************************************************************************ 112 * Functions that manipulate runnability from a thread perspective. * 113 ************************************************************************/ 114 115/* 116 * Select the KSE that will be run next. From that find the thread, and 117 * remove it from the KSEGRP's run queue. If there is thread clustering, 118 * this will be what does it. 
119 */ 120struct thread * 121choosethread(void) 122{ 123 struct kse *ke; 124 struct thread *td; 125 struct ksegrp *kg; 126 127retry: 128 if ((ke = runq_choose(&runq))) { 129 td = ke->ke_thread; 130 KASSERT((td->td_kse == ke), ("kse/thread mismatch")); 131 kg = ke->ke_ksegrp; 132 if (td->td_flags & TDF_UNBOUND) { 133 TAILQ_REMOVE(&kg->kg_runq, td, td_runq); 134 if (kg->kg_last_assigned == td) 135 if (TAILQ_PREV(td, threadqueue, td_runq) 136 != NULL) 137 printf("Yo MAMA!\n"); 138 kg->kg_last_assigned = TAILQ_PREV(td, 139 threadqueue, td_runq); 140 /* 141 * If we have started running an upcall, 142 * Then TDF_UNBOUND WAS set because the thread was 143 * created without a KSE. Now that we have one, 144 * and it is our time to run, we make sure 145 * that BOUND semantics apply for the rest of 146 * the journey to userland, and into the UTS. 147 */ 148#ifdef NOTYET 149 if (td->td_flags & TDF_UPCALLING) 150 tdf->td_flags &= ~TDF_UNBOUND; 151#endif 152 } 153 kg->kg_runnable--; 154 CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d", 155 td, td->td_priority); 156 } else { 157 /* Simulate runq_choose() having returned the idle thread */ 158 td = PCPU_GET(idlethread); 159 ke = td->td_kse; 160 CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td); 161 } 162 ke->ke_flags |= KEF_DIDRUN; 163 if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 && 164 (td->td_flags & TDF_INPANIC) == 0)) 165 goto retry; 166 TD_SET_RUNNING(td); 167 return (td); 168} 169 170/* 171 * Given a KSE (now surplus), either assign a new runable thread to it 172 * (and put it in the run queue) or put it in the ksegrp's idle KSE list. 173 * Assumes the kse is not linked to any threads any more. (has been cleaned). 174 */ 175void 176kse_reassign(struct kse *ke) 177{ 178 struct ksegrp *kg; 179 struct thread *td; 180 181 kg = ke->ke_ksegrp; 182 183 /* 184 * Find the first unassigned thread 185 * If there is a 'last assigned' then see what's next. 186 * otherwise look at what is first. 
187 */ 188 if ((td = kg->kg_last_assigned)) { 189 td = TAILQ_NEXT(td, td_runq); 190 } else { 191 td = TAILQ_FIRST(&kg->kg_runq); 192 } 193 194 /* 195 * If we found one assign it the kse, otherwise idle the kse. 196 */ 197 if (td) { 198 kg->kg_last_assigned = td; 199 td->td_kse = ke; 200 ke->ke_thread = td; 201 runq_add(&runq, ke); 202 CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td); 203 } else { 204 ke->ke_state = KES_IDLE; 205 ke->ke_thread = NULL; 206 TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist); 207 kg->kg_idle_kses++; 208 CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke); 209 } 210} 211 212int 213kserunnable(void) 214{ 215 return runq_check(&runq); 216} 217 218/* 219 * Remove a thread from its KSEGRP's run queue. 220 * This in turn may remove it from a KSE if it was already assigned 221 * to one, possibly causing a new thread to be assigned to the KSE 222 * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair). 223 */ 224void 225remrunqueue(struct thread *td) 226{ 227 struct thread *td2, *td3; 228 struct ksegrp *kg; 229 struct kse *ke; 230 231 mtx_assert(&sched_lock, MA_OWNED); 232 KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue")); 233 kg = td->td_ksegrp; 234 ke = td->td_kse; 235 /* 236 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE 237 * threads are BOUND. 238 */ 239 CTR1(KTR_RUNQ, "remrunqueue: td%p", td); 240 kg->kg_runnable--; 241 TD_SET_CAN_RUN(td); 242 if ((td->td_flags & TDF_UNBOUND) == 0) { 243 /* Bring its kse with it, leave the thread attached */ 244 runq_remove(&runq, ke); 245 ke->ke_state = KES_THREAD; 246 return; 247 } 248 if (ke) { 249 /* 250 * This thread has been assigned to a KSE. 251 * We need to dissociate it and try assign the 252 * KSE to the next available thread. Then, we should 253 * see if we need to move the KSE in the run queues. 
254 */ 255 td2 = kg->kg_last_assigned; 256 KASSERT((td2 != NULL), ("last assigned has wrong value ")); 257 td->td_kse = NULL; 258 if ((td3 = TAILQ_NEXT(td2, td_runq))) { 259 KASSERT(td3 != td, ("td3 somehow matched td")); 260 /* 261 * Give the next unassigned thread to the KSE 262 * so the number of runnable KSEs remains 263 * constant. 264 */ 265 td3->td_kse = ke; 266 ke->ke_thread = td3; 267 kg->kg_last_assigned = td3; 268 runq_readjust(&runq, ke); 269 } else { 270 /* 271 * There is no unassigned thread. 272 * If we were the last assigned one, 273 * adjust the last assigned pointer back 274 * one, which may result in NULL. 275 */ 276 if (td == td2) { 277 kg->kg_last_assigned = 278 TAILQ_PREV(td, threadqueue, td_runq); 279 } 280 runq_remove(&runq, ke); 281 KASSERT((ke->ke_state != KES_IDLE), 282 ("kse already idle")); 283 ke->ke_state = KES_IDLE; 284 ke->ke_thread = NULL; 285 TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist); 286 kg->kg_idle_kses++; 287 } 288 } 289 TAILQ_REMOVE(&kg->kg_runq, td, td_runq); 290} 291 292void 293setrunqueue(struct thread *td) 294{ 295 struct kse *ke; 296 struct ksegrp *kg; 297 struct thread *td2; 298 struct thread *tda; 299 300 CTR1(KTR_RUNQ, "setrunqueue: td%p", td); 301 mtx_assert(&sched_lock, MA_OWNED); 302 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 303 ("setrunqueue: bad thread state")); 304 TD_SET_RUNQ(td); 305 kg = td->td_ksegrp; 306 kg->kg_runnable++; 307 if ((td->td_flags & TDF_UNBOUND) == 0) { 308 KASSERT((td->td_kse != NULL), 309 ("queueing BAD thread to run queue")); 310 /* 311 * Common path optimisation: Only one of everything 312 * and the KSE is always already attached. 313 * Totally ignore the ksegrp run queue. 314 */ 315 runq_add(&runq, td->td_kse); 316 return; 317 } 318 /* 319 * Ok, so we are threading with this thread. 320 * We don't have a KSE, see if we can get one.. 321 */ 322 tda = kg->kg_last_assigned; 323 if ((ke = td->td_kse) == NULL) { 324 /* 325 * We will need a KSE, see if there is one.. 
326 * First look for a free one, before getting desperate. 327 * If we can't get one, our priority is not high enough.. 328 * that's ok.. 329 */ 330 if (kg->kg_idle_kses) { 331 /* 332 * There is a free one so it's ours for the asking.. 333 */ 334 ke = TAILQ_FIRST(&kg->kg_iq); 335 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); 336 ke->ke_state = KES_THREAD; 337 kg->kg_idle_kses--; 338 } else if (tda && (tda->td_priority > td->td_priority)) { 339 /* 340 * None free, but there is one we can commandeer. 341 */ 342 ke = tda->td_kse; 343 tda->td_kse = NULL; 344 ke->ke_thread = NULL; 345 tda = kg->kg_last_assigned = 346 TAILQ_PREV(tda, threadqueue, td_runq); 347 runq_remove(&runq, ke); 348 } 349 } else { 350 /* 351 * Temporarily disassociate so it looks like the other cases. 352 */ 353 ke->ke_thread = NULL; 354 td->td_kse = NULL; 355 } 356 357 /* 358 * Add the thread to the ksegrp's run queue at 359 * the appropriate place. 360 */ 361 TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) { 362 if (td2->td_priority > td->td_priority) { 363 TAILQ_INSERT_BEFORE(td2, td, td_runq); 364 break; 365 } 366 } 367 if (td2 == NULL) { 368 /* We ran off the end of the TAILQ or it was empty. */ 369 TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq); 370 } 371 372 /* 373 * If we have a ke to use, then put it on the run queue and 374 * If needed, readjust the last_assigned pointer. 375 */ 376 if (ke) { 377 if (tda == NULL) { 378 /* 379 * No pre-existing last assigned so whoever is first 380 * gets the KSE we brought in.. (maybe us) 381 */ 382 td2 = TAILQ_FIRST(&kg->kg_runq); 383 KASSERT((td2->td_kse == NULL), 384 ("unexpected ke present")); 385 td2->td_kse = ke; 386 ke->ke_thread = td2; 387 kg->kg_last_assigned = td2; 388 } else if (tda->td_priority > td->td_priority) { 389 /* 390 * It's ours, grab it, but last_assigned is past us 391 * so don't change it. 
392 */ 393 td->td_kse = ke; 394 ke->ke_thread = td; 395 } else { 396 /* 397 * We are past last_assigned, so 398 * put the new kse on whatever is next, 399 * which may or may not be us. 400 */ 401 td2 = TAILQ_NEXT(tda, td_runq); 402 kg->kg_last_assigned = td2; 403 td2->td_kse = ke; 404 ke->ke_thread = td2; 405 } 406 runq_add(&runq, ke); 407 } 408} 409 410/************************************************************************ 411 * Critical section marker functions * 412 ************************************************************************/ 413/* Critical sections that prevent preemption. */ 414void 415critical_enter(void) 416{ 417 struct thread *td; 418 419 td = curthread; 420 if (td->td_critnest == 0) 421 cpu_critical_enter(); 422 td->td_critnest++; 423} 424 425void 426critical_exit(void) 427{ 428 struct thread *td; 429 430 td = curthread; 431 if (td->td_critnest == 1) { 432 td->td_critnest = 0; 433 cpu_critical_exit(); 434 } else { 435 td->td_critnest--; 436 } 437} 438 439 440/************************************************************************ 441 * SYSTEM RUN QUEUE manipulations and tests * 442 ************************************************************************/ 443/* 444 * Initialize a run structure. 445 */ 446void 447runq_init(struct runq *rq) 448{ 449 int i; 450 451 bzero(rq, sizeof *rq); 452 for (i = 0; i < RQ_NQS; i++) 453 TAILQ_INIT(&rq->rq_queues[i]); 454} 455 456/* 457 * Clear the status bit of the queue corresponding to priority level pri, 458 * indicating that it is empty. 459 */ 460static __inline void 461runq_clrbit(struct runq *rq, int pri) 462{ 463 struct rqbits *rqb; 464 465 rqb = &rq->rq_status; 466 CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d", 467 rqb->rqb_bits[RQB_WORD(pri)], 468 rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri), 469 RQB_BIT(pri), RQB_WORD(pri)); 470 rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri); 471} 472 473/* 474 * Find the index of the first non-empty run queue. 
This is done by 475 * scanning the status bits, a set bit indicates a non-empty queue. 476 */ 477static __inline int 478runq_findbit(struct runq *rq) 479{ 480 struct rqbits *rqb; 481 int pri; 482 int i; 483 484 rqb = &rq->rq_status; 485 for (i = 0; i < RQB_LEN; i++) 486 if (rqb->rqb_bits[i]) { 487 pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW); 488 CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d", 489 rqb->rqb_bits[i], i, pri); 490 return (pri); 491 } 492 493 return (-1); 494} 495 496/* 497 * Set the status bit of the queue corresponding to priority level pri, 498 * indicating that it is non-empty. 499 */ 500static __inline void 501runq_setbit(struct runq *rq, int pri) 502{ 503 struct rqbits *rqb; 504 505 rqb = &rq->rq_status; 506 CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d", 507 rqb->rqb_bits[RQB_WORD(pri)], 508 rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri), 509 RQB_BIT(pri), RQB_WORD(pri)); 510 rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri); 511} 512 513/* 514 * Add the KSE to the queue specified by its priority, and set the 515 * corresponding status bit. 
516 */ 517void 518runq_add(struct runq *rq, struct kse *ke) 519{ 520 struct rqhead *rqh; 521 int pri; 522 523 mtx_assert(&sched_lock, MA_OWNED); 524 KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE")); 525 KASSERT((ke->ke_thread->td_kse != NULL), 526 ("runq_add: No KSE on thread")); 527 KASSERT(ke->ke_state != KES_ONRUNQ, 528 ("runq_add: kse %p (%s) already in run queue", ke, 529 ke->ke_proc->p_comm)); 530 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 531 ("runq_add: process swapped out")); 532 pri = ke->ke_thread->td_priority / RQ_PPQ; 533 ke->ke_rqindex = pri; 534 runq_setbit(rq, pri); 535 rqh = &rq->rq_queues[pri]; 536 CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p", 537 ke->ke_proc, ke->ke_thread->td_priority, pri, rqh); 538 TAILQ_INSERT_TAIL(rqh, ke, ke_procq); 539 ke->ke_ksegrp->kg_runq_kses++; 540 ke->ke_state = KES_ONRUNQ; 541} 542 543/* 544 * Return true if there are runnable processes of any priority on the run 545 * queue, false otherwise. Has no side effects, does not modify the run 546 * queue structure. 547 */ 548int 549runq_check(struct runq *rq) 550{ 551 struct rqbits *rqb; 552 int i; 553 554 rqb = &rq->rq_status; 555 for (i = 0; i < RQB_LEN; i++) 556 if (rqb->rqb_bits[i]) { 557 CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d", 558 rqb->rqb_bits[i], i); 559 return (1); 560 } 561 CTR0(KTR_RUNQ, "runq_check: empty"); 562 563 return (0); 564} 565 566/* 567 * Find and remove the highest priority process from the run queue. 568 * If there are no runnable processes, the per-cpu idle process is 569 * returned. Will not return NULL under any circumstances. 
570 */ 571struct kse * 572runq_choose(struct runq *rq) 573{ 574 struct rqhead *rqh; 575 struct kse *ke; 576 int pri; 577 578 mtx_assert(&sched_lock, MA_OWNED); 579 while ((pri = runq_findbit(rq)) != -1) { 580 rqh = &rq->rq_queues[pri]; 581 ke = TAILQ_FIRST(rqh); 582 KASSERT(ke != NULL, ("runq_choose: no proc on busy queue")); 583 CTR3(KTR_RUNQ, 584 "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh); 585 TAILQ_REMOVE(rqh, ke, ke_procq); 586 ke->ke_ksegrp->kg_runq_kses--; 587 if (TAILQ_EMPTY(rqh)) { 588 CTR0(KTR_RUNQ, "runq_choose: empty"); 589 runq_clrbit(rq, pri); 590 } 591 592 ke->ke_state = KES_THREAD; 593 KASSERT((ke->ke_thread != NULL), 594 ("runq_choose: No thread on KSE")); 595 KASSERT((ke->ke_thread->td_kse != NULL), 596 ("runq_choose: No KSE on thread")); 597 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 598 ("runq_choose: process swapped out")); 599 return (ke); 600 } 601 CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri); 602 603 return (NULL); 604} 605 606/* 607 * Remove the KSE from the queue specified by its priority, and clear the 608 * corresponding status bit if the queue becomes empty. 609 * Caller must set ke->ke_state afterwards. 
610 */ 611void 612runq_remove(struct runq *rq, struct kse *ke) 613{ 614 struct rqhead *rqh; 615 int pri; 616 617 KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); 618 mtx_assert(&sched_lock, MA_OWNED); 619 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 620 ("runq_remove: process swapped out")); 621 pri = ke->ke_rqindex; 622 rqh = &rq->rq_queues[pri]; 623 CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p", 624 ke, ke->ke_thread->td_priority, pri, rqh); 625 KASSERT(ke != NULL, ("runq_remove: no proc on busy queue")); 626 TAILQ_REMOVE(rqh, ke, ke_procq); 627 if (TAILQ_EMPTY(rqh)) { 628 CTR0(KTR_RUNQ, "runq_remove: empty"); 629 runq_clrbit(rq, pri); 630 } 631 ke->ke_state = KES_THREAD; 632 ke->ke_ksegrp->kg_runq_kses--; 633} 634 635static void 636runq_readjust(struct runq *rq, struct kse *ke) 637{ 638 639 if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) { 640 runq_remove(rq, ke); 641 runq_add(rq, ke); 642 } 643} 644 645#if 0 646void 647thread_sanity_check(struct thread *td) 648{ 649 struct proc *p; 650 struct ksegrp *kg; 651 struct kse *ke; 652 struct thread *td2; 653 unsigned int prevpri; 654 int saw_lastassigned; 655 int unassigned; 656 int assigned; 657 658 p = td->td_proc; 659 kg = td->td_ksegrp; 660 ke = td->td_kse; 661 662 if (kg != &p->p_ksegrp) { 663 panic ("wrong ksegrp"); 664 } 665 666 if (ke) { 667 if (ke != &p->p_kse) { 668 panic("wrong kse"); 669 } 670 if (ke->ke_thread != td) { 671 panic("wrong thread"); 672 } 673 } 674 675 if ((p->p_flag & P_KSES) == 0) { 676 if (ke == NULL) { 677 panic("non KSE thread lost kse"); 678 } 679 } else { 680 prevpri = 0; 681 saw_lastassigned = 0; 682 unassigned = 0; 683 assigned = 0; 684 TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) { 685 if (td2->td_priority < prevpri) { 686 panic("thread runqueue unosorted"); 687 } 688 prevpri = td2->td_priority; 689 if (td2->td_kse) { 690 assigned++; 691 if (unassigned) { 692 panic("unassigned before assigned"); 693 } 694 if (kg->kg_last_assigned == NULL) { 695 
panic("lastassigned corrupt"); 696 } 697 if (saw_lastassigned) { 698 panic("last assigned not last"); 699 } 700 if (td2->td_kse->ke_thread != td2) { 701 panic("mismatched kse/thread"); 702 } 703 } else { 704 unassigned++; 705 } 706 if (td2 == kg->kg_last_assigned) { 707 saw_lastassigned = 1; 708 if (td2->td_kse == NULL) { 709 panic("last assigned not assigned"); 710 } 711 } 712 } 713 if (kg->kg_last_assigned && (saw_lastassigned == 0)) { 714 panic("where on earth does lastassigned point?"); 715 } 716 FOREACH_THREAD_IN_GROUP(kg, td2) { 717 if (((td2->td_flags & TDF_UNBOUND) == 0) && 718 (TD_ON_RUNQ(td2))) { 719 assigned++; 720 if (td2->td_kse == NULL) { 721 panic ("BOUND thread with no KSE"); 722 } 723 } 724 } 725#if 0 726 if ((unassigned + assigned) != kg->kg_runnable) { 727 panic("wrong number in runnable"); 728 } 729#endif 730 } 731} 732#endif 733 734