kern_switch.c revision 135182
/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If X of these KSEs are actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned we know M was 1 and must
now be 0.  Since the thread is no longer queued, that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, and therefore that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its priority
can be compared with that of the last assigned thread to see whether it
should 'steal' that thread's KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later.  If it is earlier, the KSE is
removed from the last assigned thread (which is then no longer assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).
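
For example (priorities made up for illustration): with N = 2 processors,
neither KSE currently running (so M = 2), and three runnable threads of
priorities 10, 20 and 30, the two best threads (10 and 20) are preassigned
to the two KSEs, those KSEs sit on the run queue at priorities 10 and 20,
and kg->kg_last_assigned points at the priority-20 thread.  If a new thread
of priority 15 now becomes runnable, it is 'earlier' on the list than the
last assigned thread, so it takes over that thread's KSE; afterwards the
KSEs are queued at priorities 10 and 15, kg_last_assigned points at the
priority-15 thread, and the priority-20 and priority-30 threads wait on the
ksegrp queue without KSEs.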

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.
***/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 135182 2004-09-13 23:06:39Z julian $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			kg->kg_avail_opennings--;
			sched_add(td, SRQ_BORING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}

/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		kg->kg_avail_opennings++;
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		kg->kg_avail_opennings++;
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
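		/*
		 * Illustrative note: priorities map onto run queues in
		 * buckets of RQ_PPQ (e.g. with RQ_PPQ == 4, a change from
		 * priority 100 to 102 keeps the same ke_rqindex), so only
		 * a change that crosses a bucket boundary requires the
		 * sched_rem()/sched_add() pair below.
		 */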
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	TD_SET_CAN_RUN(td);
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
		kg->kg_avail_opennings++;
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;
	int count;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		kg->kg_avail_opennings--;
		sched_add(td, flags);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
		kg->kg_avail_opennings++;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	count = 0;
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
		/* XXX Debugging hack */
		if (++count > 10000) {
			printf("setrunqueue(): corrupt kg_runq, td= %p\n", td);
			panic("deadlock in setrunqueue");
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
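			 * (Illustrative: if another unassigned thread was
			 * already queued between last_assigned and the point
			 * where we were inserted, that thread is next in line
			 * and receives this slot instead of us.)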
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		kg->kg_avail_opennings--;
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately preempt to the new thread.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
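	 *
	 * Illustrative example of the last rule: with FULL_PREEMPTION
	 * disabled (the default), a thread in the interrupt thread priority
	 * range (PRI_MIN_ITHD..PRI_MAX_ITHD) may preempt a timesharing
	 * thread, but one timesharing thread never preempts another; it is
	 * only queued and runs at the next reschedule.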
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Our thread state says that we are already on a run queue, so
	 * update our state as if we had been dequeued by choosethread().
	 * However we must not actually be on the system run queue yet.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 */
		remrunqueue(td);	/* maybe use a simpler version */
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
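 *
 * Illustrative example of the word/bit mapping used here and in
 * runq_clrbit()/runq_findbit(): assuming RQ_NQS == 64 and 32-bit status
 * words (RQB_BPW == 32), queue index 37 lives in word RQB_WORD(37) == 1
 * as bit RQB_BIT(37) == (1 << 5).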
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * Called by the uma process fini routine.
 * Undo anything we may have done in the uma_init method.
 * Panic if it's not all 1:1:1:1.
 * Called from:
 *  proc_fini() (UMA method)
 */
void
sched_destroyproc(struct proc *p)
{

	/* this function slated for destruction */
	KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread "));
	KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp "));
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor() (*may go away)
 *  thread_init() (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *)(td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched = ke;
	ke->ke_thread = td;
	ke->ke_oncpu = NOCPU;
	ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline", before the ksegrp is attached to the world,
 * and thus would not need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	/* Handle the case of a declining concurrency */
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which thread_exit() calls
 * only for threads exiting without the rest of the process, because
 * sched_exit_thread() is also called from sched_exit() and we would not
 * want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	td->td_ksegrp->kg_avail_opennings++;
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */