kern_switch.c revision 134586

/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not.. i.e. is it 'earlier'
on the list than that thread or later.. If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
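
/*
 * Illustrative walk-through of the scheme above (the priorities are
 * invented for the example; lower numbers are better).  Assume N = 2
 * CPUs, no KSE currently running (X = 0, so M = 2), and three runnable
 * threads queued off the ksegrp:
 *
 *	t10(ke)---t20(ke)---t30			last_assigned = t20
 *
 * The two KSEs sit on the system run queue at priorities 10 and 20.
 * If a new thread t15 becomes runnable, setrunqueue() sees that it
 * sorts earlier than last_assigned (15 < 20), so t20's KSE is
 * commandeered and the pointer is backed up:
 *
 *	t10(ke)---t15(ke)---t20---t30		last_assigned = t15
 *
 * If t10 is then chosen to run and later sleeps, its KSE becomes
 * surplus; kse_reassign() hands it to the first thread past
 * last_assigned and the pointer advances:
 *
 *	t15(ke)---t20(ke)---t30			last_assigned = t20
 */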

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 134586 2004-09-01 02:11:28Z julian $");

#include "opt_full_preemption.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_SA) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}
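
/*
 * Example of the P_SA branch above, in the header comment's terms and
 * with invented priorities: if the queue is t10(ke)---t20(ke)---t30
 * with last_assigned = t20, dequeueing t10 leaves last_assigned alone;
 * once t20 is dequeued as well, last_assigned backs up via TAILQ_PREV()
 * to NULL (t20 was the queue head by then), which is exactly the
 * M == 1 -> M == 0 case argued in the header comment.
 */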

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes that the original thread is not runnable.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original)
		original->td_kse = NULL;

	/*
	 * Find the first unassigned thread
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		sched_add(td, SRQ_BORING);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* Bring its kse with it, leave the thread attached */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif
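
/*
 * Example of kse_reassign() in the scheme above (invented priorities):
 * with t15(ke)---t20---t30 and last_assigned = t15, a surplus KSE
 * arriving here (e.g. from a thread that just blocked) goes to
 * TAILQ_NEXT(last_assigned) = t20, last_assigned advances to t20, and
 * the KSE re-enters the system run queue at priority 20 via sched_add().
 * With no unassigned threads queued, the KSE would instead be parked
 * on kg_iq and kg_idle_kses bumped.
 */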

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

void
setrunqueue(struct thread *td, int flags)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;
	int count;

	CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d",
	    td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_SA) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td, flags);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking..
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			CTR2(KTR_RUNQ, "setrunqueue: kg:%p: Use free ke:%p",
			    kg, ke);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			CTR3(KTR_RUNQ,
			    "setrunqueue: kg:%p: take ke:%p from td: %p",
			    kg, ke, tda);
			sched_rem(tda);
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	count = 0;
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
		/* XXX Debugging hack */
		if (++count > 10000) {
			printf("setrunqueue(): corrupt kg_runq, td= %p\n", td);
			panic("deadlock in setrunqueue");
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the KSE we brought in.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke->ke_thread, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
	}
}
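
/*
 * The three placements in the if (ke) block above, illustrated with
 * invented priorities.  Queue before the call: t10(ke)---t30,
 * last_assigned = t10, one idle KSE available.
 *
 *  - new t20: tda != NULL and tda (10) is not lower priority, so the
 *    free KSE goes to TAILQ_NEXT(tda) = t20 and last_assigned = t20.
 *  - new t5 instead: tda (10) > 5, so the new thread keeps the KSE it
 *    just obtained and last_assigned stays at t10 (now second in line).
 *  - if last_assigned had been NULL (no KSE assigned at all), the
 *    first thread on the queue gets the incoming KSE, whoever it is.
 */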

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should be preempted immediately in
 * favor of the new thread.  If so, it switches to it and eventually returns
 * true.  If not, it returns false so that the caller may place the thread
 * on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	if (ctd->td_kse == NULL || ctd->td_kse->ke_thread != ctd)
		return (0);
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Our thread state says that we are already on a run queue, so
	 * update our state as if we had been dequeued by choosethread().
	 */
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL, td);
	return (1);
#else
	return (0);
#endif
}
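
/*
 * Sketch of how the pieces above interact (illustrative only, not a
 * real code path in this file): when a higher priority thread is made
 * runnable while the current thread is inside critical_enter(),
 * maybe_preempt() does not switch; it records the debt in
 * TDP_OWEPREEMPT and critical_exit() pays it.
 *
 *	critical_enter();
 *	...			// interrupt makes a higher priority
 *				// thread runnable; maybe_preempt()
 *				// returns 0, sets TDP_OWEPREEMPT
 *	critical_exit();	// td_critnest drops to 0, so
 *				// mi_switch(SW_INVOL, NULL) runs here
 */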

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}
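
/*
 * Worked example of the status-bit arithmetic above, assuming the
 * usual runq.h parameters of this era (RQ_NQS = 64 queues, RQ_PPQ = 4
 * priorities per queue, 32-bit status words, so RQB_LEN = 2): a thread
 * at td_priority 130 maps to queue index 130 / 4 = 32; RQB_WORD(32) is
 * 32 >> 5 = 1 and RQB_BIT(32) is 1 << (32 & 31) = 1 << 0, i.e. the
 * lowest bit of the second status word.  runq_findbit() then recovers
 * the index as RQB_FFS(word) + (i << RQB_L2BPW) = 0 + 32.
 */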

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke != NULL, ("runq_remove: null kse"));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}
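
#if 0
/*
 * Minimal usage sketch of the runq API above (illustrative only; the
 * real caller is the scheduler, with sched_lock held and a fully
 * initialized kse).  runq_choose() only peeks at the head; the caller
 * removes the KSE and sets its state itself.
 */
static void
runq_example(struct runq *rq, struct kse *ke)
{
	struct kse *ke2;

	runq_init(rq);			/* all queues empty, bits clear */
	runq_add(rq, ke);		/* queue by priority, set bit */
	if (runq_check(rq)) {		/* at least one bit set */
		ke2 = runq_choose(rq);	/* highest priority KSE (ke) */
		runq_remove(rq, ke2);	/* unlink, maybe clear bit */
		ke2->ke_state = KES_THREAD;	/* caller's duty */
	}
}
#endif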

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	kdb_enter(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_SA) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif