/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M (N-X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned we know M was 1 and must
now be 0.  Since the thread is no longer queued that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we
can prove that the next thread in the ksegrp list will not have a KSE
to assign to it, and we can show that the pointer must be made
'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not.. i.e. is it 'earlier'
on the list than that thread or later..  If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.
***/
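
/*
 * To make the invariant above concrete, here is a minimal sketch of a
 * checker (illustrative only, never compiled; the helper name
 * kg_check_assigned is hypothetical and not part of this file).  It
 * assumes the 4BSD convention used below, where a KSE that has been
 * handed to the system run queue is marked KES_ONRUNQ: every thread
 * from the head of kg_runq up to and including kg_last_assigned holds
 * a KSE, and nothing after it does.
 *
 *	static void
 *	kg_check_assigned(struct ksegrp *kg)
 *	{
 *		struct thread *td;
 *		int assigned = (kg->kg_last_assigned != NULL);
 *
 *		TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
 *			KASSERT((td->td_kse->ke_state == KES_ONRUNQ) ==
 *			    assigned,
 *			    ("assigned threads are not a queue prefix"));
 *			if (td == kg->kg_last_assigned)
 *				assigned = 0;
 *		}
 *	}
 */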

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 147182 2005-06-09 18:26:31Z ups $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
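
/*
 * For example, whether the running kernel was built with "options
 * PREEMPTION" can be checked from userland with:
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 */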

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on AP's */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}
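
/*
 * Worked example (assuming sched_add() consumes a slot, i.e. decrements
 * kg_avail_opennings, as the schedulers that include this file do):
 * with threads A-B-C-D on kg_runq in priority order,
 * kg_last_assigned == B and kg_avail_opennings == 2, slot_fill() hands
 * C and then D to sched_add() and leaves kg_last_assigned == D.
 */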

#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}
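
/*
 * Example of the shortcut above, assuming the usual RQ_PPQ of 4
 * priorities per queue from <sys/runq.h>: a thread moving from
 * priority 130 to 131 stays in queue 130 / 4 == 131 / 4 == 32 and is
 * not requeued, while a move from 130 to 135 changes the index from 32
 * to 33 and forces a sched_rem()/sched_add() pair.
 */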

/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it tries to switch threads directly
 * if the thread to be preempted is on the current cpu, or notifies
 * another cpu that it should switch threads.
 */

static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
	struct thread *running_thread;

	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION
#ifndef FULL_PREEMPTION
	if (td->td_priority > PRI_MAX_ITHD) {
		running_thread->td_flags |= TDF_NEEDRESCHED;
		return;
	}
#endif /* FULL_PREEMPTION */

	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else /* PREEMPTION */
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
	return;
}

#else /* SMP */
{
	struct thread *running_thread;
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;

	mtx_assert(&sched_lock, MA_OWNED);

	running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
		kg = td->td_ksegrp;

		/* if someone is ahead of this thread, wait our turn */
		if (td != TAILQ_FIRST(&kg->kg_runq))
			return;

		worst_pri = td->td_priority;
		best_pcpu = NULL;
		dontuse = stopped_cpus | idle_cpus_mask;

		/*
		 * Find the cpu with the worst priority that runs a thread
		 * from the same ksegrp - if multiple exist, prefer first
		 * the thread's last-run cpu and then the current cpu.
		 */
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			cpumask = pc->pc_cpumask;
			cputhread = pc->pc_curthread;

			if ((cpumask & dontuse) ||
			    cputhread->td_ksegrp != kg)
				continue;

			if (cputhread->td_priority > worst_pri) {
				worst_pri = cputhread->td_priority;
				best_pcpu = pc;
				continue;
			}

			if (cputhread->td_priority == worst_pri &&
			    best_pcpu != NULL &&
			    (td->td_lastcpu == pc->pc_cpuid ||
			    (PCPU_GET(cpumask) == cpumask &&
			    td->td_lastcpu != best_pcpu->pc_cpuid)))
				best_pcpu = pc;
		}

		/* Check if we need to preempt someone */
		if (best_pcpu == NULL)
			return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
		if (td->td_priority <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
		{
			ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
			return;
		}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
			return;
		}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION
#if !defined(FULL_PREEMPTION)
	if (td->td_priority > PRI_MAX_ITHD) {
		running_thread->td_flags |= TDF_NEEDRESCHED;
		return;
	}
#endif /* ! FULL_PREEMPTION */

	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else /* PREEMPTION */
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
	return;
}
#endif /* !SMP */
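
/*
 * Worked example for the SMP case above: if CPU 0 and CPU 2 are both
 * running threads from td's ksegrp at priorities 140 and 160
 * respectively, and td (at the head of kg_runq) has priority 120, the
 * scan settles on CPU 2, since 160 is the numerically worst priority
 * found.  That CPU is then either sent IPI_PREEMPT, or flagged with
 * TDF_NEEDRESCHED and sent IPI_AST, depending on the kernel options.
 */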

int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue until we are not in that section,
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where there are threads of less
	 * priority than the new one scheduled while it can not be
	 * scheduled itself.  That would lead to a non-contiguous set
	 * of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow
	 * even if the available slots are negative, so that
	 * all the items < last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}
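
/*
 * Worked example of the slot handling above (assuming sched_rem() and
 * sched_add() release and consume a slot, respectively): with threads
 * A(pri 100), B(110) and C(120) all assigned, kg_last_assigned == C
 * and kg_avail_opennings == 0, adding a new thread at priority 105
 * commandeers C's slot.  C is sched_rem()'d, last_assigned backs up to
 * B, the new thread is queued between A and B and handed to
 * sched_add(), leaving A, the new thread and B as the assigned prefix
 * and C still queued but no longer assigned.
 */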

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			mtx_lock_spin(&sched_lock);
			td->td_critnest--;
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
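
/*
 * Typical usage; sections nest, and a preemption that arrives while
 * td_critnest > 0 is recorded in td_owepreempt and performed by the
 * outermost critical_exit():
 *
 *	critical_enter();
 *	... code that must not be preempted ...
 *	critical_exit();
 */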

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should preempt to the new thread.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if ((pri > PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed.
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}
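
/*
 * Example of the status-bit arithmetic used by runq_clrbit() above and
 * runq_setbit()/runq_findbit() below, assuming 32-bit status words
 * (RQB_BPW == 32, RQB_L2BPW == 5): priority level 51 maps to word
 * RQB_WORD(51) == 51 >> 5 == 1 and bit RQB_BIT(51) == 1 << (51 & 31)
 * == 1 << 19, and runq_findbit() recovers it as RQB_FFS(1 << 19) +
 * (1 << RQB_L2BPW) == 19 + 32 == 51.
 */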

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * Thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *)(td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched = ke;
	ke->ke_thread = td;
	ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline", before the ksegrp is attached to the world,
 * and thus wouldn't need sched_lock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}
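
/*
 * Example of the arithmetic above: a ksegrp with kg_concurrency == 2
 * and kg_avail_opennings == 1 (one slot in use) that is changed to a
 * concurrency of 4 ends up with kg_avail_opennings == 1 + (4 - 2) == 3
 * and kg_concurrency == 4.
 */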

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called
 * from thread_exit() for threads exiting without the rest of the
 * process exiting, because it is also called from sched_exit() and we
 * wouldn't want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */