/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (N-X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned, we know M was 1 and must
now be 0. Since the thread is no longer queued, that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not.. i.e. is it 'earlier'
on the list than that thread or later.. If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).
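
For example, suppose one KSE is available (M is 1) and threads A (priority 10)
and B (priority 20) are queued, so A holds the KSE and kg_last_assigned points
to A. A new thread arriving at priority 15 is simply queued between A and B,
but a new thread arriving at priority 5 is queued ahead of A, steals A's KSE,
and kg_last_assigned is backed up to point at the new thread. (The priority
values here are purely illustrative.)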

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 146362 2005-05-19 01:08:30Z ups $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
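 * For a threaded process, if the thread chosen was kg->kg_last_assigned,
 * that pointer is backed up to the previously queued thread, since the
 * chosen thread is leaving the ksegrp run queue.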
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on AP's */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}

#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it tries to switch threads
 * if the thread is on the same cpu, or notifies another cpu that
 * it should switch threads.
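 *
 * Unless FULL_PREEMPTION is compiled in, only threads being made runnable
 * at interrupt-thread priorities (PRI_MIN_ITHD..PRI_MAX_ITHD) are considered
 * for this kind of preemption.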
 */

static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
	struct thread *running_thread;

#ifndef FULL_PREEMPTION
	int pri;
	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif
	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}

#else /* SMP */
{
	struct thread *running_thread;
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;

#ifndef FULL_PREEMPTION
	int pri;
	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif

	mtx_assert(&sched_lock, MA_OWNED);

	running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
		kg = td->td_ksegrp;

		/* if someone is ahead of this thread, wait our turn */
		if (td != TAILQ_FIRST(&kg->kg_runq))
			return;

		worst_pri = td->td_priority;
		best_pcpu = NULL;
		dontuse	  = stopped_cpus | idle_cpus_mask;

		/*
		 * Find the cpu with the worst priority that runs a thread
		 * from the same ksegrp - if multiple exist, prefer first the
		 * thread's last-run cpu and then the current cpu.
		 */

		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			cpumask	  = pc->pc_cpumask;
			cputhread = pc->pc_curthread;

			if ((cpumask & dontuse) ||
			    cputhread->td_ksegrp != kg)
				continue;

			if (cputhread->td_priority > worst_pri) {
				worst_pri = cputhread->td_priority;
				best_pcpu = pc;
				continue;
			}

			if (cputhread->td_priority == worst_pri &&
			    best_pcpu != NULL &&
			    (td->td_lastcpu == pc->pc_cpuid ||
				(PCPU_GET(cpumask) == cpumask &&
				    td->td_lastcpu != best_pcpu->pc_cpuid)))
				best_pcpu = pc;
		}

		/* Check if we need to preempt someone */
		if (best_pcpu == NULL)
			return;

		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
			return;
		}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}
#endif /* !SMP */


int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);

			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue, until we are not in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where there are threads of less
	 * priority than the new one scheduled, but it can not
	 * be scheduled itself.  That would lead to a non contiguous set
	 * of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow
	 * even if the available slots are negative, so that
	 * all the items < last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
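 *
 * Entering a critical section bumps td_critnest, so sections may nest.
 * If a preemption becomes due while the thread is inside a critical
 * section, td_owepreempt is set and the context switch is deferred until
 * the outermost critical_exit().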
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
		if (td->td_pflags & TDP_WAKEPROC0) {
			td->td_pflags &= ~TDP_WAKEPROC0;
			wakeup(&proc0);
		}

		td->td_critnest = 0;

#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			mtx_lock_spin(&sched_lock);
			td->td_critnest--;
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}

#endif

	} else {
		td->td_critnest--;
	}
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

/*
 * This function is called when a thread is about to be put on the run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should be preempted in favor of the
 * new thread.  If so, it switches to the new thread and eventually returns
 * true.  If not, it returns false so that the caller may place the thread
 * on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
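 * The status bits form an array of RQB_LEN words of RQB_BPW bits each;
 * queue index pri maps to word RQB_WORD(pri) and bit RQB_BIT(pri) within it.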
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched = ke;
	ke->ke_thread = td;
	ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline"..before the ksegrp is attached to the world
 * and thus wouldn't need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}

/*
 * Called from thread_exit() for every exiting thread.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process exiting,
 * because it is also called from sched_exit() and we wouldn't want to call
 * it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */