sched_ule.c revision 166229
/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 166229 2007-01-25 19:14:11Z jeff $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#ifndef PREEMPTION
#error "SCHED_ULE requires options PREEMPTION"
#endif

/*
 * TODO:
 *	Pick idle from affinity group or self group first.
 *	Implement pick_score.
 */

#define KTR_ULE		0x0		/* Enable for pickpri debugging. */

/*
 * Thread scheduler specific section.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
	int		ts_flags;	/* (j) TSF_* flags. */
	struct thread	*ts_thread;	/* (*) Active associated thread. */
	u_char		ts_rqindex;	/* (j) Run queue index. */
	int		ts_slptime;
	int		ts_slice;
	struct runq	*ts_runq;
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
#ifdef SMP
	int		ts_rltick;	/* Real last tick, for affinity. */
#endif

	/* originally from kg_sched */
	u_int		skg_slptime;	/* Number of ticks we vol. slept */
	u_int		skg_runtime;	/* Number of ticks we were running */
};
/* flags kept in ts_flags */
#define TSF_BOUND	0x0001		/* Thread can not migrate. */
#define TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define SCHED_TICK_SECS		10
#define SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define SCHED_TICK_SHIFT	10
#define SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
#define SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define SCHED_PRI_NICE(nice)	(nice)

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define SCHED_INTERACT_MAX	(100)
#define SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice;
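
/*
 * Worked example (exposition only; not part of the committed revision):
 * up to the roundup and integer division, SCHED_PRI_TICKS() above is
 *
 *	SCHED_PRI_RANGE * SCHED_TICK_HZ(ts) / SCHED_TICK_TOTAL(ts)
 *
 * i.e. the fraction of the tracked window (ts_ftick..ts_ltick, at most
 * roughly SCHED_TICK_SECS seconds) that the thread spent running, scaled
 * into the utilization dependent priority range.  Assuming hz = 1000, a
 * thread that ran for about 5 of the last 10 seconds has
 * SCHED_TICK_HZ(ts) ~= 5000 and SCHED_TICK_TOTAL(ts) ~= 10000, so it lands
 * near the middle of SCHED_PRI_RANGE.  sched_priority() adds this offset
 * to SCHED_PRI_MIN and then adds SCHED_PRI_NICE(p_nice) on top.  Threads
 * whose sched_interact_score() falls below the sched_interact threshold
 * (SCHED_INTERACT_THRESH by default) bypass this path entirely and are
 * given a priority in the realtime range instead.
 */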

/*
 * tdq - per processor runqs and statistics.
 */
struct tdq {
	struct runq	tdq_idle;	/* Queue of IDLE threads. */
	struct runq	tdq_timeshare;	/* timeshare run queue. */
	struct runq	tdq_realtime;	/* real-time run queue. */
	int		tdq_idx;	/* Current insert index. */
	int		tdq_ridx;	/* Current removal index. */
	int		tdq_load;	/* Aggregate load. */
	int		tdq_flags;	/* Thread queue flags */
#ifdef SMP
	int		tdq_transferable;
	LIST_ENTRY(tdq)	tdq_siblings;	/* Next in tdq group. */
	struct tdq_group *tdq_group;	/* Our processor group. */
#else
	int		tdq_sysload;	/* For loadavg, !ITHD load. */
#endif
};

#define TDQF_BUSY	0x0001		/* Queue is marked as busy */

#ifdef SMP
/*
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a numa environment we'd want an idle bitmap per group and a two tiered
 * load balancer.
 */
struct tdq_group {
	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
	cpumask_t	tdg_cpumask;	/* Mask of cpus in this group. */
	cpumask_t	tdg_idlemask;	/* Idle cpus in this group. */
	cpumask_t	tdg_mask;	/* Bit mask for first cpu. */
	int		tdg_load;	/* Total load of this group. */
	int		tdg_transferable; /* Transferable load of this group. */
	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
};

#define SCHED_AFFINITY_DEFAULT	(hz / 100)
#define SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)

/*
 * Run-time tunables.
 */
static int rebalance = 0;
static int pick_pri = 1;
static int affinity;
static int tryself = 1;
static int tryselfidle = 1;
static int ipi_ast = 0;
static int ipi_preempt = 1;
static int ipi_thresh = PRI_MIN_KERN;
static int steal_htt = 1;
static int steal_busy = 1;
static int busy_thresh = 4;

/*
 * One thread queue per processor.
235 */ 236static volatile cpumask_t tdq_idle; 237static volatile cpumask_t tdq_busy; 238static int tdg_maxid; 239static struct tdq tdq_cpu[MAXCPU]; 240static struct tdq_group tdq_groups[MAXCPU]; 241static int bal_tick; 242static int gbal_tick; 243static int balance_groups; 244 245#define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) 246#define TDQ_CPU(x) (&tdq_cpu[(x)]) 247#define TDQ_ID(x) ((x) - tdq_cpu) 248#define TDQ_GROUP(x) (&tdq_groups[(x)]) 249#else /* !SMP */ 250static struct tdq tdq_cpu; 251 252#define TDQ_SELF() (&tdq_cpu) 253#define TDQ_CPU(x) (&tdq_cpu) 254#endif 255 256static void sched_priority(struct thread *); 257static void sched_thread_priority(struct thread *, u_char); 258static int sched_interact_score(struct thread *); 259static void sched_interact_update(struct thread *); 260static void sched_interact_fork(struct thread *); 261static void sched_pctcpu_update(struct td_sched *); 262static inline void sched_pin_td(struct thread *td); 263static inline void sched_unpin_td(struct thread *td); 264 265/* Operations on per processor queues */ 266static struct td_sched * tdq_choose(struct tdq *); 267static void tdq_setup(struct tdq *); 268static void tdq_load_add(struct tdq *, struct td_sched *); 269static void tdq_load_rem(struct tdq *, struct td_sched *); 270static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int); 271static __inline void tdq_runq_rem(struct tdq *, struct td_sched *); 272void tdq_print(int cpu); 273static void runq_print(struct runq *rq); 274#ifdef SMP 275static int tdq_pickidle(struct tdq *, struct td_sched *); 276static int tdq_pickpri(struct tdq *, struct td_sched *, int); 277static struct td_sched *runq_steal(struct runq *); 278static void sched_balance(void); 279static void sched_balance_groups(void); 280static void sched_balance_group(struct tdq_group *); 281static void sched_balance_pair(struct tdq *, struct tdq *); 282static void sched_smp_tick(struct thread *); 283static void tdq_move(struct tdq *, int); 284static int tdq_idled(struct tdq *); 285static void tdq_notify(struct td_sched *); 286static struct td_sched *tdq_steal(struct tdq *, int); 287 288#define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0) 289#endif 290 291static void sched_setup(void *dummy); 292SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 293 294static void sched_initticks(void *dummy); 295SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL) 296 297static inline void 298sched_pin_td(struct thread *td) 299{ 300 td->td_pinned++; 301} 302 303static inline void 304sched_unpin_td(struct thread *td) 305{ 306 td->td_pinned--; 307} 308 309static void 310runq_print(struct runq *rq) 311{ 312 struct rqhead *rqh; 313 struct td_sched *ts; 314 int pri; 315 int j; 316 int i; 317 318 for (i = 0; i < RQB_LEN; i++) { 319 printf("\t\trunq bits %d 0x%zx\n", 320 i, rq->rq_status.rqb_bits[i]); 321 for (j = 0; j < RQB_BPW; j++) 322 if (rq->rq_status.rqb_bits[i] & (1ul << j)) { 323 pri = j + (i << RQB_L2BPW); 324 rqh = &rq->rq_queues[pri]; 325 TAILQ_FOREACH(ts, rqh, ts_procq) { 326 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", 327 ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri); 328 } 329 } 330 } 331} 332 333void 334tdq_print(int cpu) 335{ 336 struct tdq *tdq; 337 338 tdq = TDQ_CPU(cpu); 339 340 printf("tdq:\n"); 341 printf("\tload: %d\n", tdq->tdq_load); 342 printf("\ttimeshare idx: %d\n", tdq->tdq_idx); 343 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); 344 printf("\trealtime 
runq:\n"); 345 runq_print(&tdq->tdq_realtime); 346 printf("\ttimeshare runq:\n"); 347 runq_print(&tdq->tdq_timeshare); 348 printf("\tidle runq:\n"); 349 runq_print(&tdq->tdq_idle); 350#ifdef SMP 351 printf("\tload transferable: %d\n", tdq->tdq_transferable); 352#endif 353} 354 355static __inline void 356tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags) 357{ 358#ifdef SMP 359 if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 360 tdq->tdq_transferable++; 361 tdq->tdq_group->tdg_transferable++; 362 ts->ts_flags |= TSF_XFERABLE; 363 if (tdq->tdq_transferable >= busy_thresh && 364 (tdq->tdq_flags & TDQF_BUSY) == 0) { 365 tdq->tdq_flags |= TDQF_BUSY; 366 atomic_set_int(&tdq_busy, 1 << TDQ_ID(tdq)); 367 } 368 } 369#endif 370 if (ts->ts_runq == &tdq->tdq_timeshare) { 371 int pri; 372 373 pri = ts->ts_thread->td_priority; 374 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE, 375 ("Invalid priority %d on timeshare runq", pri)); 376 /* 377 * This queue contains only priorities between MIN and MAX 378 * realtime. Use the whole queue to represent these values. 379 */ 380#define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS) 381 if ((flags & SRQ_BORROWING) == 0) { 382 pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ; 383 pri = (pri + tdq->tdq_idx) % RQ_NQS; 384 /* 385 * This effectively shortens the queue by one so we 386 * can have a one slot difference between idx and 387 * ridx while we wait for threads to drain. 388 */ 389 if (tdq->tdq_ridx != tdq->tdq_idx && 390 pri == tdq->tdq_ridx) 391 pri = (pri - 1) % RQ_NQS; 392 } else 393 pri = tdq->tdq_ridx; 394 runq_add_pri(ts->ts_runq, ts, pri, flags); 395 } else 396 runq_add(ts->ts_runq, ts, flags); 397} 398 399static __inline void 400tdq_runq_rem(struct tdq *tdq, struct td_sched *ts) 401{ 402#ifdef SMP 403 if (ts->ts_flags & TSF_XFERABLE) { 404 tdq->tdq_transferable--; 405 tdq->tdq_group->tdg_transferable--; 406 ts->ts_flags &= ~TSF_XFERABLE; 407 if (tdq->tdq_transferable < busy_thresh && 408 (tdq->tdq_flags & TDQF_BUSY)) { 409 atomic_clear_int(&tdq_busy, 1 << TDQ_ID(tdq)); 410 tdq->tdq_flags &= ~TDQF_BUSY; 411 } 412 } 413#endif 414 if (ts->ts_runq == &tdq->tdq_timeshare) { 415 if (tdq->tdq_idx != tdq->tdq_ridx) 416 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx); 417 else 418 runq_remove_idx(ts->ts_runq, ts, NULL); 419 /* 420 * For timeshare threads we update the priority here so 421 * the priority reflects the time we've been sleeping. 
422 */ 423 ts->ts_ltick = ticks; 424 sched_pctcpu_update(ts); 425 sched_priority(ts->ts_thread); 426 } else 427 runq_remove(ts->ts_runq, ts); 428} 429 430static void 431tdq_load_add(struct tdq *tdq, struct td_sched *ts) 432{ 433 int class; 434 mtx_assert(&sched_lock, MA_OWNED); 435 class = PRI_BASE(ts->ts_thread->td_pri_class); 436 tdq->tdq_load++; 437 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 438 if (class != PRI_ITHD && 439 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 440#ifdef SMP 441 tdq->tdq_group->tdg_load++; 442#else 443 tdq->tdq_sysload++; 444#endif 445} 446 447static void 448tdq_load_rem(struct tdq *tdq, struct td_sched *ts) 449{ 450 int class; 451 mtx_assert(&sched_lock, MA_OWNED); 452 class = PRI_BASE(ts->ts_thread->td_pri_class); 453 if (class != PRI_ITHD && 454 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 455#ifdef SMP 456 tdq->tdq_group->tdg_load--; 457#else 458 tdq->tdq_sysload--; 459#endif 460 tdq->tdq_load--; 461 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 462 ts->ts_runq = NULL; 463} 464 465#ifdef SMP 466static void 467sched_smp_tick(struct thread *td) 468{ 469 struct tdq *tdq; 470 471 tdq = TDQ_SELF(); 472 if (rebalance) { 473 if (ticks >= bal_tick) 474 sched_balance(); 475 if (ticks >= gbal_tick && balance_groups) 476 sched_balance_groups(); 477 } 478 td->td_sched->ts_rltick = ticks; 479} 480 481/* 482 * sched_balance is a simple CPU load balancing algorithm. It operates by 483 * finding the least loaded and most loaded cpu and equalizing their load 484 * by migrating some processes. 485 * 486 * Dealing only with two CPUs at a time has two advantages. Firstly, most 487 * installations will only have 2 cpus. Secondly, load balancing too much at 488 * once can have an unpleasant effect on the system. The scheduler rarely has 489 * enough information to make perfect decisions. So this algorithm chooses 490 * algorithm simplicity and more gradual effects on load in larger systems. 491 * 492 * It could be improved by considering the priorities and slices assigned to 493 * each task prior to balancing them. There are many pathological cases with 494 * any approach and so the semi random algorithm below may work as well as any. 495 * 496 */ 497static void 498sched_balance(void) 499{ 500 struct tdq_group *high; 501 struct tdq_group *low; 502 struct tdq_group *tdg; 503 int cnt; 504 int i; 505 506 bal_tick = ticks + (random() % (hz * 2)); 507 if (smp_started == 0) 508 return; 509 low = high = NULL; 510 i = random() % (tdg_maxid + 1); 511 for (cnt = 0; cnt <= tdg_maxid; cnt++) { 512 tdg = TDQ_GROUP(i); 513 /* 514 * Find the CPU with the highest load that has some 515 * threads to transfer. 
516 */ 517 if ((high == NULL || tdg->tdg_load > high->tdg_load) 518 && tdg->tdg_transferable) 519 high = tdg; 520 if (low == NULL || tdg->tdg_load < low->tdg_load) 521 low = tdg; 522 if (++i > tdg_maxid) 523 i = 0; 524 } 525 if (low != NULL && high != NULL && high != low) 526 sched_balance_pair(LIST_FIRST(&high->tdg_members), 527 LIST_FIRST(&low->tdg_members)); 528} 529 530static void 531sched_balance_groups(void) 532{ 533 int i; 534 535 gbal_tick = ticks + (random() % (hz * 2)); 536 mtx_assert(&sched_lock, MA_OWNED); 537 if (smp_started) 538 for (i = 0; i <= tdg_maxid; i++) 539 sched_balance_group(TDQ_GROUP(i)); 540} 541 542static void 543sched_balance_group(struct tdq_group *tdg) 544{ 545 struct tdq *tdq; 546 struct tdq *high; 547 struct tdq *low; 548 int load; 549 550 if (tdg->tdg_transferable == 0) 551 return; 552 low = NULL; 553 high = NULL; 554 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) { 555 load = tdq->tdq_load; 556 if (high == NULL || load > high->tdq_load) 557 high = tdq; 558 if (low == NULL || load < low->tdq_load) 559 low = tdq; 560 } 561 if (high != NULL && low != NULL && high != low) 562 sched_balance_pair(high, low); 563} 564 565static void 566sched_balance_pair(struct tdq *high, struct tdq *low) 567{ 568 int transferable; 569 int high_load; 570 int low_load; 571 int move; 572 int diff; 573 int i; 574 575 /* 576 * If we're transfering within a group we have to use this specific 577 * tdq's transferable count, otherwise we can steal from other members 578 * of the group. 579 */ 580 if (high->tdq_group == low->tdq_group) { 581 transferable = high->tdq_transferable; 582 high_load = high->tdq_load; 583 low_load = low->tdq_load; 584 } else { 585 transferable = high->tdq_group->tdg_transferable; 586 high_load = high->tdq_group->tdg_load; 587 low_load = low->tdq_group->tdg_load; 588 } 589 if (transferable == 0) 590 return; 591 /* 592 * Determine what the imbalance is and then adjust that to how many 593 * threads we actually have to give up (transferable). 594 */ 595 diff = high_load - low_load; 596 move = diff / 2; 597 if (diff & 0x1) 598 move++; 599 move = min(move, transferable); 600 for (i = 0; i < move; i++) 601 tdq_move(high, TDQ_ID(low)); 602 return; 603} 604 605static void 606tdq_move(struct tdq *from, int cpu) 607{ 608 struct tdq *tdq; 609 struct tdq *to; 610 struct td_sched *ts; 611 612 tdq = from; 613 to = TDQ_CPU(cpu); 614 ts = tdq_steal(tdq, 1); 615 if (ts == NULL) { 616 struct tdq_group *tdg; 617 618 tdg = tdq->tdq_group; 619 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) { 620 if (tdq == from || tdq->tdq_transferable == 0) 621 continue; 622 ts = tdq_steal(tdq, 1); 623 break; 624 } 625 if (ts == NULL) 626 panic("tdq_move: No threads available with a " 627 "transferable count of %d\n", 628 tdg->tdg_transferable); 629 } 630 if (tdq == to) 631 return; 632 sched_rem(ts->ts_thread); 633 ts->ts_cpu = cpu; 634 sched_pin_td(ts->ts_thread); 635 sched_add(ts->ts_thread, SRQ_YIELDING); 636 sched_unpin_td(ts->ts_thread); 637} 638 639static int 640tdq_idled(struct tdq *tdq) 641{ 642 struct tdq_group *tdg; 643 struct tdq *steal; 644 struct td_sched *ts; 645 646 tdg = tdq->tdq_group; 647 /* 648 * If we're in a cpu group, try and steal threads from another cpu in 649 * the group before idling. 
650 */ 651 if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) { 652 LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) { 653 if (steal == tdq || steal->tdq_transferable == 0) 654 continue; 655 ts = tdq_steal(steal, 0); 656 if (ts) 657 goto steal; 658 } 659 } 660 if (steal_busy) { 661 while (tdq_busy) { 662 int cpu; 663 664 cpu = ffs(tdq_busy); 665 if (cpu == 0) 666 break; 667 cpu--; 668 steal = TDQ_CPU(cpu); 669 if (steal->tdq_transferable == 0) 670 continue; 671 ts = tdq_steal(steal, 1); 672 if (ts == NULL) 673 continue; 674 CTR5(KTR_ULE, 675 "tdq_idled: stealing td %p(%s) pri %d from %d busy 0x%X", 676 ts->ts_thread, ts->ts_thread->td_proc->p_comm, 677 ts->ts_thread->td_priority, cpu, tdq_busy); 678 goto steal; 679 } 680 } 681 /* 682 * We only set the idled bit when all of the cpus in the group are 683 * idle. Otherwise we could get into a situation where a thread bounces 684 * back and forth between two idle cores on seperate physical CPUs. 685 */ 686 tdg->tdg_idlemask |= PCPU_GET(cpumask); 687 if (tdg->tdg_idlemask == tdg->tdg_cpumask) 688 atomic_set_int(&tdq_idle, tdg->tdg_mask); 689 return (1); 690steal: 691 sched_rem(ts->ts_thread); 692 ts->ts_cpu = PCPU_GET(cpuid); 693 sched_pin_td(ts->ts_thread); 694 sched_add(ts->ts_thread, SRQ_YIELDING); 695 sched_unpin_td(ts->ts_thread); 696 697 return (0); 698} 699 700static void 701tdq_notify(struct td_sched *ts) 702{ 703 struct thread *td; 704 struct pcpu *pcpu; 705 int prio; 706 int cpu; 707 708 prio = ts->ts_thread->td_priority; 709 cpu = ts->ts_cpu; 710 pcpu = pcpu_find(cpu); 711 td = pcpu->pc_curthread; 712 713 /* 714 * If our priority is not better than the current priority there is 715 * nothing to do. 716 */ 717 if (prio > td->td_priority) 718 return; 719 /* Always set NEEDRESCHED. */ 720 td->td_flags |= TDF_NEEDRESCHED; 721 /* 722 * IPI if we exceed the threshold or if the target cpu is running an 723 * idle thread. 724 */ 725 if (prio > ipi_thresh && td->td_priority < PRI_MIN_IDLE) 726 return; 727 if (td->td_priority < PRI_MIN_IDLE) { 728 if (ipi_ast) 729 ipi_selected(1 << cpu, IPI_AST); 730 else if (ipi_preempt) 731 ipi_selected(1 << cpu, IPI_PREEMPT); 732 } else 733 ipi_selected(1 << cpu, IPI_PREEMPT); 734} 735 736static struct td_sched * 737runq_steal(struct runq *rq) 738{ 739 struct rqhead *rqh; 740 struct rqbits *rqb; 741 struct td_sched *ts; 742 int word; 743 int bit; 744 745 mtx_assert(&sched_lock, MA_OWNED); 746 rqb = &rq->rq_status; 747 for (word = 0; word < RQB_LEN; word++) { 748 if (rqb->rqb_bits[word] == 0) 749 continue; 750 for (bit = 0; bit < RQB_BPW; bit++) { 751 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 752 continue; 753 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 754 TAILQ_FOREACH(ts, rqh, ts_procq) { 755 if (THREAD_CAN_MIGRATE(ts->ts_thread)) 756 return (ts); 757 } 758 } 759 } 760 return (NULL); 761} 762 763static struct td_sched * 764tdq_steal(struct tdq *tdq, int stealidle) 765{ 766 struct td_sched *ts; 767 768 /* 769 * Steal from next first to try to get a non-interactive task that 770 * may not have run for a while. 771 * XXX Need to effect steal order for timeshare threads. 
772 */ 773 if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL) 774 return (ts); 775 if ((ts = runq_steal(&tdq->tdq_timeshare)) != NULL) 776 return (ts); 777 if (stealidle) 778 return (runq_steal(&tdq->tdq_idle)); 779 return (NULL); 780} 781 782int 783tdq_pickidle(struct tdq *tdq, struct td_sched *ts) 784{ 785 struct tdq_group *tdg; 786 int self; 787 int cpu; 788 789 self = PCPU_GET(cpuid); 790 if (smp_started == 0) 791 return (self); 792 /* 793 * If the current CPU has idled, just run it here. 794 */ 795 if ((tdq->tdq_group->tdg_idlemask & PCPU_GET(cpumask)) != 0) 796 return (self); 797 /* 798 * Try the last group we ran on. 799 */ 800 tdg = TDQ_CPU(ts->ts_cpu)->tdq_group; 801 cpu = ffs(tdg->tdg_idlemask); 802 if (cpu) 803 return (cpu - 1); 804 /* 805 * Search for an idle group. 806 */ 807 cpu = ffs(tdq_idle); 808 if (cpu) 809 return (cpu - 1); 810 /* 811 * XXX If there are no idle groups, check for an idle core. 812 */ 813 /* 814 * No idle CPUs? 815 */ 816 return (self); 817} 818 819static int 820tdq_pickpri(struct tdq *tdq, struct td_sched *ts, int flags) 821{ 822 struct pcpu *pcpu; 823 int lowpri; 824 int lowcpu; 825 int lowload; 826 int load; 827 int self; 828 int pri; 829 int cpu; 830 831 self = PCPU_GET(cpuid); 832 if (smp_started == 0) 833 return (self); 834 835 pri = ts->ts_thread->td_priority; 836 /* 837 * Regardless of affinity, if the last cpu is idle send it there. 838 */ 839 pcpu = pcpu_find(ts->ts_cpu); 840 if (pcpu->pc_curthread->td_priority > PRI_MIN_IDLE) { 841 CTR5(KTR_ULE, 842 "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d", 843 ts->ts_cpu, ts->ts_rltick, ticks, pri, 844 pcpu->pc_curthread->td_priority); 845 return (ts->ts_cpu); 846 } 847 /* 848 * If we have affinity, try to place it on the cpu we last ran on. 849 */ 850 if (SCHED_AFFINITY(ts) && pcpu->pc_curthread->td_priority > pri) { 851 CTR5(KTR_ULE, 852 "affinity for %d, ltick %d ticks %d pri %d curthread %d", 853 ts->ts_cpu, ts->ts_rltick, ticks, pri, 854 pcpu->pc_curthread->td_priority); 855 return (ts->ts_cpu); 856 } 857 /* 858 * Try ourself first; If we're running something lower priority this 859 * may have some locality with the waking thread and execute faster 860 * here. 861 */ 862 if (tryself) { 863 /* 864 * If we're being awoken by an interrupt thread or the waker 865 * is going right to sleep run here as well. 866 */ 867 if ((TDQ_SELF()->tdq_load == 1) && (flags & SRQ_YIELDING || 868 curthread->td_pri_class == PRI_ITHD)) { 869 CTR2(KTR_ULE, "tryself load %d flags %d", 870 TDQ_SELF()->tdq_load, flags); 871 return (self); 872 } 873 } 874 /* 875 * Look for an idle group. 876 */ 877 CTR1(KTR_ULE, "tdq_idle %X", tdq_idle); 878 cpu = ffs(tdq_idle); 879 if (cpu) 880 return (cpu - 1); 881 if (tryselfidle && pri < curthread->td_priority) { 882 CTR1(KTR_ULE, "tryself %d", 883 curthread->td_priority); 884 return (self); 885 } 886 /* 887 * Now search for the cpu running the lowest priority thread with 888 * the least load. 
889 */ 890 lowload = 0; 891 lowpri = lowcpu = 0; 892 for (cpu = 0; cpu <= mp_maxid; cpu++) { 893 if (CPU_ABSENT(cpu)) 894 continue; 895 pcpu = pcpu_find(cpu); 896 pri = pcpu->pc_curthread->td_priority; 897 CTR4(KTR_ULE, 898 "cpu %d pri %d lowcpu %d lowpri %d", 899 cpu, pri, lowcpu, lowpri); 900 if (pri < lowpri) 901 continue; 902 load = TDQ_CPU(cpu)->tdq_load; 903 if (lowpri && lowpri == pri && load > lowload) 904 continue; 905 lowpri = pri; 906 lowcpu = cpu; 907 lowload = load; 908 } 909 910 return (lowcpu); 911} 912 913#endif /* SMP */ 914 915/* 916 * Pick the highest priority task we have and return it. 917 */ 918 919static struct td_sched * 920tdq_choose(struct tdq *tdq) 921{ 922 struct td_sched *ts; 923 924 mtx_assert(&sched_lock, MA_OWNED); 925 926 ts = runq_choose(&tdq->tdq_realtime); 927 if (ts != NULL) { 928 KASSERT(ts->ts_thread->td_priority <= PRI_MAX_REALTIME, 929 ("tdq_choose: Invalid priority on realtime queue %d", 930 ts->ts_thread->td_priority)); 931 return (ts); 932 } 933 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 934 if (ts != NULL) { 935 KASSERT(ts->ts_thread->td_priority <= PRI_MAX_TIMESHARE && 936 ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 937 ("tdq_choose: Invalid priority on timeshare queue %d", 938 ts->ts_thread->td_priority)); 939 return (ts); 940 } 941 942 ts = runq_choose(&tdq->tdq_idle); 943 if (ts != NULL) { 944 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 945 ("tdq_choose: Invalid priority on idle queue %d", 946 ts->ts_thread->td_priority)); 947 return (ts); 948 } 949 950 return (NULL); 951} 952 953static void 954tdq_setup(struct tdq *tdq) 955{ 956 runq_init(&tdq->tdq_realtime); 957 runq_init(&tdq->tdq_timeshare); 958 runq_init(&tdq->tdq_idle); 959 tdq->tdq_load = 0; 960} 961 962static void 963sched_setup(void *dummy) 964{ 965#ifdef SMP 966 int i; 967#endif 968 969 /* 970 * To avoid divide-by-zero, we set realstathz a dummy value 971 * in case which sched_clock() called before sched_initticks(). 972 */ 973 realstathz = hz; 974 sched_slice = (realstathz/10); /* ~100ms */ 975 tickincr = 1 << SCHED_TICK_SHIFT; 976 977#ifdef SMP 978 balance_groups = 0; 979 /* 980 * Initialize the tdqs. 981 */ 982 for (i = 0; i < MAXCPU; i++) { 983 struct tdq *tdq; 984 985 tdq = &tdq_cpu[i]; 986 tdq_setup(&tdq_cpu[i]); 987 } 988 if (smp_topology == NULL) { 989 struct tdq_group *tdg; 990 struct tdq *tdq; 991 int cpus; 992 993 for (cpus = 0, i = 0; i < MAXCPU; i++) { 994 if (CPU_ABSENT(i)) 995 continue; 996 tdq = &tdq_cpu[i]; 997 tdg = &tdq_groups[cpus]; 998 /* 999 * Setup a tdq group with one member. 1000 */ 1001 tdq->tdq_transferable = 0; 1002 tdq->tdq_group = tdg; 1003 tdg->tdg_cpus = 1; 1004 tdg->tdg_idlemask = 0; 1005 tdg->tdg_cpumask = tdg->tdg_mask = 1 << i; 1006 tdg->tdg_load = 0; 1007 tdg->tdg_transferable = 0; 1008 LIST_INIT(&tdg->tdg_members); 1009 LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings); 1010 cpus++; 1011 } 1012 tdg_maxid = cpus - 1; 1013 } else { 1014 struct tdq_group *tdg; 1015 struct cpu_group *cg; 1016 int j; 1017 1018 for (i = 0; i < smp_topology->ct_count; i++) { 1019 cg = &smp_topology->ct_group[i]; 1020 tdg = &tdq_groups[i]; 1021 /* 1022 * Initialize the group. 1023 */ 1024 tdg->tdg_idlemask = 0; 1025 tdg->tdg_load = 0; 1026 tdg->tdg_transferable = 0; 1027 tdg->tdg_cpus = cg->cg_count; 1028 tdg->tdg_cpumask = cg->cg_mask; 1029 LIST_INIT(&tdg->tdg_members); 1030 /* 1031 * Find all of the group members and add them. 
1032 */ 1033 for (j = 0; j < MAXCPU; j++) { 1034 if ((cg->cg_mask & (1 << j)) != 0) { 1035 if (tdg->tdg_mask == 0) 1036 tdg->tdg_mask = 1 << j; 1037 tdq_cpu[j].tdq_transferable = 0; 1038 tdq_cpu[j].tdq_group = tdg; 1039 LIST_INSERT_HEAD(&tdg->tdg_members, 1040 &tdq_cpu[j], tdq_siblings); 1041 } 1042 } 1043 if (tdg->tdg_cpus > 1) 1044 balance_groups = 1; 1045 } 1046 tdg_maxid = smp_topology->ct_count - 1; 1047 } 1048 /* 1049 * Stagger the group and global load balancer so they do not 1050 * interfere with each other. 1051 */ 1052 bal_tick = ticks + hz; 1053 if (balance_groups) 1054 gbal_tick = ticks + (hz / 2); 1055#else 1056 tdq_setup(TDQ_SELF()); 1057#endif 1058 mtx_lock_spin(&sched_lock); 1059 tdq_load_add(TDQ_SELF(), &td_sched0); 1060 mtx_unlock_spin(&sched_lock); 1061} 1062 1063/* ARGSUSED */ 1064static void 1065sched_initticks(void *dummy) 1066{ 1067 mtx_lock_spin(&sched_lock); 1068 realstathz = stathz ? stathz : hz; 1069 sched_slice = (realstathz/10); /* ~100ms */ 1070 1071 /* 1072 * tickincr is shifted out by 10 to avoid rounding errors due to 1073 * hz not being evenly divisible by stathz on all platforms. 1074 */ 1075 tickincr = (hz << SCHED_TICK_SHIFT) / realstathz; 1076 /* 1077 * This does not work for values of stathz that are more than 1078 * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1079 */ 1080 if (tickincr == 0) 1081 tickincr = 1; 1082#ifdef SMP 1083 affinity = SCHED_AFFINITY_DEFAULT; 1084#endif 1085 mtx_unlock_spin(&sched_lock); 1086} 1087 1088 1089/* 1090 * Scale the scheduling priority according to the "interactivity" of this 1091 * process. 1092 */ 1093static void 1094sched_priority(struct thread *td) 1095{ 1096 int score; 1097 int pri; 1098 1099 if (td->td_pri_class != PRI_TIMESHARE) 1100 return; 1101 /* 1102 * If the score is interactive we place the thread in the realtime 1103 * queue with a priority that is less than kernel and interrupt 1104 * priorities. These threads are not subject to nice restrictions. 1105 * 1106 * Scores greater than this are placed on the normal realtime queue 1107 * where the priority is partially decided by the most recent cpu 1108 * utilization and the rest is decided by nice value. 1109 */ 1110 score = sched_interact_score(td); 1111 if (score < sched_interact) { 1112 pri = PRI_MIN_REALTIME; 1113 pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1114 * score; 1115 KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 1116 ("sched_priority: invalid interactive priority %d score %d", 1117 pri, score)); 1118 } else { 1119 pri = SCHED_PRI_MIN; 1120 if (td->td_sched->ts_ticks) 1121 pri += SCHED_PRI_TICKS(td->td_sched); 1122 pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1123 if (!(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE)) { 1124 static int once = 1; 1125 if (once) { 1126 printf("sched_priority: invalid priority %d", 1127 pri); 1128 printf("nice %d, ticks %d ftick %d ltick %d tick pri %d\n", 1129 td->td_proc->p_nice, 1130 td->td_sched->ts_ticks, 1131 td->td_sched->ts_ftick, 1132 td->td_sched->ts_ltick, 1133 SCHED_PRI_TICKS(td->td_sched)); 1134 once = 0; 1135 } 1136 pri = min(max(pri, PRI_MIN_TIMESHARE), 1137 PRI_MAX_TIMESHARE); 1138 } 1139 } 1140 sched_user_prio(td, pri); 1141 1142 return; 1143} 1144 1145/* 1146 * This routine enforces a maximum limit on the amount of scheduling history 1147 * kept. It is called after either the slptime or runtime is adjusted. 
1148 */ 1149static void 1150sched_interact_update(struct thread *td) 1151{ 1152 struct td_sched *ts; 1153 u_int sum; 1154 1155 ts = td->td_sched; 1156 sum = ts->skg_runtime + ts->skg_slptime; 1157 if (sum < SCHED_SLP_RUN_MAX) 1158 return; 1159 /* 1160 * This only happens from two places: 1161 * 1) We have added an unusual amount of run time from fork_exit. 1162 * 2) We have added an unusual amount of sleep time from sched_sleep(). 1163 */ 1164 if (sum > SCHED_SLP_RUN_MAX * 2) { 1165 if (ts->skg_runtime > ts->skg_slptime) { 1166 ts->skg_runtime = SCHED_SLP_RUN_MAX; 1167 ts->skg_slptime = 1; 1168 } else { 1169 ts->skg_slptime = SCHED_SLP_RUN_MAX; 1170 ts->skg_runtime = 1; 1171 } 1172 return; 1173 } 1174 /* 1175 * If we have exceeded by more than 1/5th then the algorithm below 1176 * will not bring us back into range. Dividing by two here forces 1177 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1178 */ 1179 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1180 ts->skg_runtime /= 2; 1181 ts->skg_slptime /= 2; 1182 return; 1183 } 1184 ts->skg_runtime = (ts->skg_runtime / 5) * 4; 1185 ts->skg_slptime = (ts->skg_slptime / 5) * 4; 1186} 1187 1188static void 1189sched_interact_fork(struct thread *td) 1190{ 1191 int ratio; 1192 int sum; 1193 1194 sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime; 1195 if (sum > SCHED_SLP_RUN_FORK) { 1196 ratio = sum / SCHED_SLP_RUN_FORK; 1197 td->td_sched->skg_runtime /= ratio; 1198 td->td_sched->skg_slptime /= ratio; 1199 } 1200} 1201 1202static int 1203sched_interact_score(struct thread *td) 1204{ 1205 int div; 1206 1207 if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) { 1208 div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF); 1209 return (SCHED_INTERACT_HALF + 1210 (SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div))); 1211 } if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) { 1212 div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF); 1213 return (td->td_sched->skg_runtime / div); 1214 } 1215 1216 /* 1217 * This can happen if slptime and runtime are 0. 1218 */ 1219 return (0); 1220 1221} 1222 1223/* 1224 * Called from proc0_init() to bootstrap the scheduler. 1225 */ 1226void 1227schedinit(void) 1228{ 1229 1230 /* 1231 * Set up the scheduler specific parts of proc0. 1232 */ 1233 proc0.p_sched = NULL; /* XXX */ 1234 thread0.td_sched = &td_sched0; 1235 td_sched0.ts_ltick = ticks; 1236 td_sched0.ts_ftick = ticks; 1237 td_sched0.ts_thread = &thread0; 1238} 1239 1240/* 1241 * This is only somewhat accurate since given many processes of the same 1242 * priority they will switch when their slices run out, which will be 1243 * at most sched_slice stathz ticks. 1244 */ 1245int 1246sched_rr_interval(void) 1247{ 1248 1249 /* Convert sched_slice to hz */ 1250 return (hz/(realstathz/sched_slice)); 1251} 1252 1253static void 1254sched_pctcpu_update(struct td_sched *ts) 1255{ 1256 1257 if (ts->ts_ticks == 0) 1258 return; 1259 if (ticks - (hz / 10) < ts->ts_ltick && 1260 SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 1261 return; 1262 /* 1263 * Adjust counters and watermark for pctcpu calc. 
1264 */ 1265 if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1266 ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1267 SCHED_TICK_TARG; 1268 else 1269 ts->ts_ticks = 0; 1270 ts->ts_ltick = ticks; 1271 ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 1272} 1273 1274static void 1275sched_thread_priority(struct thread *td, u_char prio) 1276{ 1277 struct td_sched *ts; 1278 1279 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1280 td, td->td_proc->p_comm, td->td_priority, prio, curthread, 1281 curthread->td_proc->p_comm); 1282 ts = td->td_sched; 1283 mtx_assert(&sched_lock, MA_OWNED); 1284 if (td->td_priority == prio) 1285 return; 1286 1287 if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1288 /* 1289 * If the priority has been elevated due to priority 1290 * propagation, we may have to move ourselves to a new 1291 * queue. This could be optimized to not re-add in some 1292 * cases. 1293 */ 1294 sched_rem(td); 1295 td->td_priority = prio; 1296 sched_add(td, SRQ_BORROWING); 1297 } else 1298 td->td_priority = prio; 1299} 1300 1301/* 1302 * Update a thread's priority when it is lent another thread's 1303 * priority. 1304 */ 1305void 1306sched_lend_prio(struct thread *td, u_char prio) 1307{ 1308 1309 td->td_flags |= TDF_BORROWING; 1310 sched_thread_priority(td, prio); 1311} 1312 1313/* 1314 * Restore a thread's priority when priority propagation is 1315 * over. The prio argument is the minimum priority the thread 1316 * needs to have to satisfy other possible priority lending 1317 * requests. If the thread's regular priority is less 1318 * important than prio, the thread will keep a priority boost 1319 * of prio. 1320 */ 1321void 1322sched_unlend_prio(struct thread *td, u_char prio) 1323{ 1324 u_char base_pri; 1325 1326 if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1327 td->td_base_pri <= PRI_MAX_TIMESHARE) 1328 base_pri = td->td_user_pri; 1329 else 1330 base_pri = td->td_base_pri; 1331 if (prio >= base_pri) { 1332 td->td_flags &= ~TDF_BORROWING; 1333 sched_thread_priority(td, base_pri); 1334 } else 1335 sched_lend_prio(td, prio); 1336} 1337 1338void 1339sched_prio(struct thread *td, u_char prio) 1340{ 1341 u_char oldprio; 1342 1343 /* First, update the base priority. */ 1344 td->td_base_pri = prio; 1345 1346 /* 1347 * If the thread is borrowing another thread's priority, don't 1348 * ever lower the priority. 1349 */ 1350 if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1351 return; 1352 1353 /* Change the real priority. */ 1354 oldprio = td->td_priority; 1355 sched_thread_priority(td, prio); 1356 1357 /* 1358 * If the thread is on a turnstile, then let the turnstile update 1359 * its state. 
1360 */ 1361 if (TD_ON_LOCK(td) && oldprio != prio) 1362 turnstile_adjust(td, oldprio); 1363} 1364 1365void 1366sched_user_prio(struct thread *td, u_char prio) 1367{ 1368 u_char oldprio; 1369 1370 td->td_base_user_pri = prio; 1371 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1372 return; 1373 oldprio = td->td_user_pri; 1374 td->td_user_pri = prio; 1375 1376 if (TD_ON_UPILOCK(td) && oldprio != prio) 1377 umtx_pi_adjust(td, oldprio); 1378} 1379 1380void 1381sched_lend_user_prio(struct thread *td, u_char prio) 1382{ 1383 u_char oldprio; 1384 1385 td->td_flags |= TDF_UBORROWING; 1386 1387 oldprio = td->td_user_pri; 1388 td->td_user_pri = prio; 1389 1390 if (TD_ON_UPILOCK(td) && oldprio != prio) 1391 umtx_pi_adjust(td, oldprio); 1392} 1393 1394void 1395sched_unlend_user_prio(struct thread *td, u_char prio) 1396{ 1397 u_char base_pri; 1398 1399 base_pri = td->td_base_user_pri; 1400 if (prio >= base_pri) { 1401 td->td_flags &= ~TDF_UBORROWING; 1402 sched_user_prio(td, base_pri); 1403 } else 1404 sched_lend_user_prio(td, prio); 1405} 1406 1407void 1408sched_switch(struct thread *td, struct thread *newtd, int flags) 1409{ 1410 struct tdq *tdq; 1411 struct td_sched *ts; 1412 int preempt; 1413 1414 mtx_assert(&sched_lock, MA_OWNED); 1415 1416 preempt = flags & SW_PREEMPT; 1417 tdq = TDQ_SELF(); 1418 ts = td->td_sched; 1419 td->td_lastcpu = td->td_oncpu; 1420 td->td_oncpu = NOCPU; 1421 td->td_flags &= ~TDF_NEEDRESCHED; 1422 td->td_owepreempt = 0; 1423 /* 1424 * If the thread has been assigned it may be in the process of switching 1425 * to the new cpu. This is the case in sched_bind(). 1426 */ 1427 if (td == PCPU_GET(idlethread)) { 1428 TD_SET_CAN_RUN(td); 1429 } else { 1430 tdq_load_rem(tdq, ts); 1431 if (TD_IS_RUNNING(td)) { 1432 /* 1433 * Don't allow the thread to migrate 1434 * from a preemption. 1435 */ 1436 if (preempt) 1437 sched_pin_td(td); 1438 sched_add(td, preempt ? 1439 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1440 SRQ_OURSELF|SRQ_YIELDING); 1441 if (preempt) 1442 sched_unpin_td(td); 1443 } 1444 } 1445 if (newtd != NULL) { 1446 /* 1447 * If we bring in a thread account for it as if it had been 1448 * added to the run queue and then chosen. 1449 */ 1450 TD_SET_RUNNING(newtd); 1451 tdq_load_add(TDQ_SELF(), newtd->td_sched); 1452 } else 1453 newtd = choosethread(); 1454 if (td != newtd) { 1455#ifdef HWPMC_HOOKS 1456 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1457 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1458#endif 1459 1460 cpu_switch(td, newtd); 1461#ifdef HWPMC_HOOKS 1462 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1463 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1464#endif 1465 } 1466 sched_lock.mtx_lock = (uintptr_t)td; 1467 td->td_oncpu = PCPU_GET(cpuid); 1468} 1469 1470void 1471sched_nice(struct proc *p, int nice) 1472{ 1473 struct thread *td; 1474 1475 PROC_LOCK_ASSERT(p, MA_OWNED); 1476 mtx_assert(&sched_lock, MA_OWNED); 1477 1478 p->p_nice = nice; 1479 FOREACH_THREAD_IN_PROC(p, td) { 1480 sched_priority(td); 1481 sched_prio(td, td->td_base_user_pri); 1482 } 1483} 1484 1485void 1486sched_sleep(struct thread *td) 1487{ 1488 1489 mtx_assert(&sched_lock, MA_OWNED); 1490 1491 td->td_sched->ts_slptime = ticks; 1492} 1493 1494void 1495sched_wakeup(struct thread *td) 1496{ 1497 struct td_sched *ts; 1498 int slptime; 1499 1500 mtx_assert(&sched_lock, MA_OWNED); 1501 ts = td->td_sched; 1502 /* 1503 * If we slept for more than a tick update our interactivity and 1504 * priority. 
1505 */ 1506 slptime = ts->ts_slptime; 1507 ts->ts_slptime = 0; 1508 if (slptime && slptime != ticks) { 1509 u_int hzticks; 1510 1511 hzticks = (ticks - slptime) << SCHED_TICK_SHIFT; 1512 ts->skg_slptime += hzticks; 1513 sched_interact_update(td); 1514 sched_pctcpu_update(ts); 1515 sched_priority(td); 1516 } 1517 /* Reset the slice value after we sleep. */ 1518 ts->ts_slice = sched_slice; 1519 sched_add(td, SRQ_BORING); 1520} 1521 1522/* 1523 * Penalize the parent for creating a new child and initialize the child's 1524 * priority. 1525 */ 1526void 1527sched_fork(struct thread *td, struct thread *child) 1528{ 1529 mtx_assert(&sched_lock, MA_OWNED); 1530 sched_fork_thread(td, child); 1531 /* 1532 * Penalize the parent and child for forking. 1533 */ 1534 sched_interact_fork(child); 1535 sched_priority(child); 1536 td->td_sched->skg_runtime += tickincr; 1537 sched_interact_update(td); 1538 sched_priority(td); 1539} 1540 1541void 1542sched_fork_thread(struct thread *td, struct thread *child) 1543{ 1544 struct td_sched *ts; 1545 struct td_sched *ts2; 1546 1547 /* 1548 * Initialize child. 1549 */ 1550 sched_newthread(child); 1551 ts = td->td_sched; 1552 ts2 = child->td_sched; 1553 ts2->ts_cpu = ts->ts_cpu; 1554 ts2->ts_runq = NULL; 1555 /* 1556 * Grab our parents cpu estimation information and priority. 1557 */ 1558 ts2->ts_ticks = ts->ts_ticks; 1559 ts2->ts_ltick = ts->ts_ltick; 1560 ts2->ts_ftick = ts->ts_ftick; 1561 child->td_user_pri = td->td_user_pri; 1562 child->td_base_user_pri = td->td_base_user_pri; 1563 /* 1564 * And update interactivity score. 1565 */ 1566 ts2->skg_slptime = ts->skg_slptime; 1567 ts2->skg_runtime = ts->skg_runtime; 1568 ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 1569} 1570 1571void 1572sched_class(struct thread *td, int class) 1573{ 1574 1575 mtx_assert(&sched_lock, MA_OWNED); 1576 if (td->td_pri_class == class) 1577 return; 1578 1579#ifdef SMP 1580 /* 1581 * On SMP if we're on the RUNQ we must adjust the transferable 1582 * count because could be changing to or from an interrupt 1583 * class. 1584 */ 1585 if (TD_ON_RUNQ(td)) { 1586 struct tdq *tdq; 1587 1588 tdq = TDQ_CPU(td->td_sched->ts_cpu); 1589 if (THREAD_CAN_MIGRATE(td)) { 1590 tdq->tdq_transferable--; 1591 tdq->tdq_group->tdg_transferable--; 1592 } 1593 td->td_pri_class = class; 1594 if (THREAD_CAN_MIGRATE(td)) { 1595 tdq->tdq_transferable++; 1596 tdq->tdq_group->tdg_transferable++; 1597 } 1598 } 1599#endif 1600 td->td_pri_class = class; 1601} 1602 1603/* 1604 * Return some of the child's priority and interactivity to the parent. 1605 */ 1606void 1607sched_exit(struct proc *p, struct thread *child) 1608{ 1609 struct thread *td; 1610 1611 CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 1612 child, child->td_proc->p_comm, child->td_priority); 1613 1614 td = FIRST_THREAD_IN_PROC(p); 1615 sched_exit_thread(td, child); 1616} 1617 1618void 1619sched_exit_thread(struct thread *td, struct thread *child) 1620{ 1621 1622 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 1623 child, child->td_proc->p_comm, child->td_priority); 1624 1625 tdq_load_rem(TDQ_CPU(child->td_sched->ts_cpu), child->td_sched); 1626#ifdef KSE 1627 /* 1628 * KSE forks and exits so often that this penalty causes short-lived 1629 * threads to always be non-interactive. This causes mozilla to 1630 * crawl under load. 
1631 */ 1632 if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc) 1633 return; 1634#endif 1635 /* 1636 * Give the child's runtime to the parent without returning the 1637 * sleep time as a penalty to the parent. This causes shells that 1638 * launch expensive things to mark their children as expensive. 1639 */ 1640 td->td_sched->skg_runtime += child->td_sched->skg_runtime; 1641 sched_interact_update(td); 1642 sched_priority(td); 1643} 1644 1645void 1646sched_userret(struct thread *td) 1647{ 1648 /* 1649 * XXX we cheat slightly on the locking here to avoid locking in 1650 * the usual case. Setting td_priority here is essentially an 1651 * incomplete workaround for not setting it properly elsewhere. 1652 * Now that some interrupt handlers are threads, not setting it 1653 * properly elsewhere can clobber it in the window between setting 1654 * it here and returning to user mode, so don't waste time setting 1655 * it perfectly here. 1656 */ 1657 KASSERT((td->td_flags & TDF_BORROWING) == 0, 1658 ("thread with borrowed priority returning to userland")); 1659 if (td->td_priority != td->td_user_pri) { 1660 mtx_lock_spin(&sched_lock); 1661 td->td_priority = td->td_user_pri; 1662 td->td_base_pri = td->td_user_pri; 1663 mtx_unlock_spin(&sched_lock); 1664 } 1665} 1666 1667void 1668sched_clock(struct thread *td) 1669{ 1670 struct tdq *tdq; 1671 struct td_sched *ts; 1672 1673 mtx_assert(&sched_lock, MA_OWNED); 1674#ifdef SMP 1675 sched_smp_tick(td); 1676#endif 1677 tdq = TDQ_SELF(); 1678 /* 1679 * Advance the insert index once for each tick to ensure that all 1680 * threads get a chance to run. 1681 */ 1682 if (tdq->tdq_idx == tdq->tdq_ridx) { 1683 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 1684 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 1685 tdq->tdq_ridx = tdq->tdq_idx; 1686 } 1687 ts = td->td_sched; 1688 /* 1689 * We only do slicing code for TIMESHARE threads. 1690 */ 1691 if (td->td_pri_class != PRI_TIMESHARE) 1692 return; 1693 /* 1694 * We used a tick; charge it to the thread so that we can compute our 1695 * interactivity. 1696 */ 1697 td->td_sched->skg_runtime += tickincr; 1698 sched_interact_update(td); 1699 /* 1700 * We used up one time slice. 1701 */ 1702 if (--ts->ts_slice > 0) 1703 return; 1704 /* 1705 * We're out of time, recompute priorities and requeue. 
1706 */ 1707 sched_priority(td); 1708 td->td_flags |= TDF_NEEDRESCHED; 1709} 1710 1711int 1712sched_runnable(void) 1713{ 1714 struct tdq *tdq; 1715 int load; 1716 1717 load = 1; 1718 1719 tdq = TDQ_SELF(); 1720#ifdef SMP 1721 if (tdq_busy) 1722 goto out; 1723#endif 1724 if ((curthread->td_flags & TDF_IDLETD) != 0) { 1725 if (tdq->tdq_load > 0) 1726 goto out; 1727 } else 1728 if (tdq->tdq_load - 1 > 0) 1729 goto out; 1730 load = 0; 1731out: 1732 return (load); 1733} 1734 1735struct thread * 1736sched_choose(void) 1737{ 1738 struct tdq *tdq; 1739 struct td_sched *ts; 1740 1741 mtx_assert(&sched_lock, MA_OWNED); 1742 tdq = TDQ_SELF(); 1743#ifdef SMP 1744restart: 1745#endif 1746 ts = tdq_choose(tdq); 1747 if (ts) { 1748#ifdef SMP 1749 if (ts->ts_thread->td_priority > PRI_MIN_IDLE) 1750 if (tdq_idled(tdq) == 0) 1751 goto restart; 1752#endif 1753 tdq_runq_rem(tdq, ts); 1754 return (ts->ts_thread); 1755 } 1756#ifdef SMP 1757 if (tdq_idled(tdq) == 0) 1758 goto restart; 1759#endif 1760 return (PCPU_GET(idlethread)); 1761} 1762 1763static int 1764sched_preempt(struct thread *td) 1765{ 1766 struct thread *ctd; 1767 int cpri; 1768 int pri; 1769 1770 ctd = curthread; 1771 pri = td->td_priority; 1772 cpri = ctd->td_priority; 1773 if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 1774 return (0); 1775 /* 1776 * Always preempt IDLE threads. Otherwise only if the preempting 1777 * thread is an ithread. 1778 */ 1779 if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE) 1780 return (0); 1781 if (ctd->td_critnest > 1) { 1782 CTR1(KTR_PROC, "sched_preempt: in critical section %d", 1783 ctd->td_critnest); 1784 ctd->td_owepreempt = 1; 1785 return (0); 1786 } 1787 /* 1788 * Thread is runnable but not yet put on system run queue. 1789 */ 1790 MPASS(TD_ON_RUNQ(td)); 1791 TD_SET_RUNNING(td); 1792 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td, 1793 td->td_proc->p_pid, td->td_proc->p_comm); 1794 mi_switch(SW_INVOL|SW_PREEMPT, td); 1795 return (1); 1796} 1797 1798void 1799sched_add(struct thread *td, int flags) 1800{ 1801 struct tdq *tdq; 1802 struct td_sched *ts; 1803 int preemptive; 1804 int class; 1805#ifdef SMP 1806 int cpuid; 1807 int cpumask; 1808#endif 1809 ts = td->td_sched; 1810 1811 mtx_assert(&sched_lock, MA_OWNED); 1812 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 1813 td, td->td_proc->p_comm, td->td_priority, curthread, 1814 curthread->td_proc->p_comm); 1815 KASSERT((td->td_inhibitors == 0), 1816 ("sched_add: trying to run inhibited thread")); 1817 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 1818 ("sched_add: bad thread state")); 1819 KASSERT(td->td_proc->p_sflag & PS_INMEM, 1820 ("sched_add: process swapped out")); 1821 KASSERT(ts->ts_runq == NULL, 1822 ("sched_add: thread %p is still assigned to a run queue", td)); 1823 TD_SET_RUNQ(td); 1824 tdq = TDQ_SELF(); 1825 class = PRI_BASE(td->td_pri_class); 1826 preemptive = !(flags & SRQ_YIELDING); 1827 /* 1828 * Recalculate the priority before we select the target cpu or 1829 * run-queue. 1830 */ 1831 if (class == PRI_TIMESHARE) 1832 sched_priority(td); 1833 if (ts->ts_slice == 0) 1834 ts->ts_slice = sched_slice; 1835#ifdef SMP 1836 cpuid = PCPU_GET(cpuid); 1837 /* 1838 * Pick the destination cpu and if it isn't ours transfer to the 1839 * target cpu. 
1840 */ 1841 if (THREAD_CAN_MIGRATE(td)) { 1842 if (td->td_priority <= PRI_MAX_ITHD) { 1843 CTR2(KTR_ULE, "ithd %d < %d", 1844 td->td_priority, PRI_MAX_ITHD); 1845 ts->ts_cpu = cpuid; 1846 } 1847 if (pick_pri) 1848 ts->ts_cpu = tdq_pickpri(tdq, ts, flags); 1849 else 1850 ts->ts_cpu = tdq_pickidle(tdq, ts); 1851 } else 1852 CTR1(KTR_ULE, "pinned %d", td->td_pinned); 1853 if (ts->ts_cpu != cpuid) 1854 preemptive = 0; 1855 tdq = TDQ_CPU(ts->ts_cpu); 1856 cpumask = 1 << ts->ts_cpu; 1857 /* 1858 * If we had been idle, clear our bit in the group and potentially 1859 * the global bitmap. 1860 */ 1861 if ((class != PRI_IDLE && class != PRI_ITHD) && 1862 (tdq->tdq_group->tdg_idlemask & cpumask) != 0) { 1863 /* 1864 * Check to see if our group is unidling, and if so, remove it 1865 * from the global idle mask. 1866 */ 1867 if (tdq->tdq_group->tdg_idlemask == 1868 tdq->tdq_group->tdg_cpumask) 1869 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask); 1870 /* 1871 * Now remove ourselves from the group specific idle mask. 1872 */ 1873 tdq->tdq_group->tdg_idlemask &= ~cpumask; 1874 } 1875#endif 1876 /* 1877 * Pick the run queue based on priority. 1878 */ 1879 if (td->td_priority <= PRI_MAX_REALTIME) 1880 ts->ts_runq = &tdq->tdq_realtime; 1881 else if (td->td_priority <= PRI_MAX_TIMESHARE) 1882 ts->ts_runq = &tdq->tdq_timeshare; 1883 else 1884 ts->ts_runq = &tdq->tdq_idle; 1885 if (preemptive && sched_preempt(td)) 1886 return; 1887 tdq_runq_add(tdq, ts, flags); 1888 tdq_load_add(tdq, ts); 1889#ifdef SMP 1890 if (ts->ts_cpu != cpuid) { 1891 tdq_notify(ts); 1892 return; 1893 } 1894#endif 1895 if (td->td_priority < curthread->td_priority) 1896 curthread->td_flags |= TDF_NEEDRESCHED; 1897} 1898 1899void 1900sched_rem(struct thread *td) 1901{ 1902 struct tdq *tdq; 1903 struct td_sched *ts; 1904 1905 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 1906 td, td->td_proc->p_comm, td->td_priority, curthread, 1907 curthread->td_proc->p_comm); 1908 mtx_assert(&sched_lock, MA_OWNED); 1909 ts = td->td_sched; 1910 KASSERT(TD_ON_RUNQ(td), 1911 ("sched_rem: thread not on run queue")); 1912 1913 tdq = TDQ_CPU(ts->ts_cpu); 1914 tdq_runq_rem(tdq, ts); 1915 tdq_load_rem(tdq, ts); 1916 TD_SET_CAN_RUN(td); 1917} 1918 1919fixpt_t 1920sched_pctcpu(struct thread *td) 1921{ 1922 fixpt_t pctcpu; 1923 struct td_sched *ts; 1924 1925 pctcpu = 0; 1926 ts = td->td_sched; 1927 if (ts == NULL) 1928 return (0); 1929 1930 mtx_lock_spin(&sched_lock); 1931 if (ts->ts_ticks) { 1932 int rtick; 1933 1934 sched_pctcpu_update(ts); 1935 /* How many rtick per second ? */ 1936 rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 1937 pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 1938 } 1939 td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick; 1940 mtx_unlock_spin(&sched_lock); 1941 1942 return (pctcpu); 1943} 1944 1945void 1946sched_bind(struct thread *td, int cpu) 1947{ 1948 struct td_sched *ts; 1949 1950 mtx_assert(&sched_lock, MA_OWNED); 1951 ts = td->td_sched; 1952 if (ts->ts_flags & TSF_BOUND) 1953 sched_unbind(td); 1954 ts->ts_flags |= TSF_BOUND; 1955#ifdef SMP 1956 sched_pin(); 1957 if (PCPU_GET(cpuid) == cpu) 1958 return; 1959 ts->ts_cpu = cpu; 1960 /* When we return from mi_switch we'll be on the correct cpu. 
*/ 1961 mi_switch(SW_VOL, NULL); 1962#endif 1963} 1964 1965void 1966sched_unbind(struct thread *td) 1967{ 1968 struct td_sched *ts; 1969 1970 mtx_assert(&sched_lock, MA_OWNED); 1971 ts = td->td_sched; 1972 if ((ts->ts_flags & TSF_BOUND) == 0) 1973 return; 1974 ts->ts_flags &= ~TSF_BOUND; 1975#ifdef SMP 1976 sched_unpin(); 1977#endif 1978} 1979 1980int 1981sched_is_bound(struct thread *td) 1982{ 1983 mtx_assert(&sched_lock, MA_OWNED); 1984 return (td->td_sched->ts_flags & TSF_BOUND); 1985} 1986 1987void 1988sched_relinquish(struct thread *td) 1989{ 1990 mtx_lock_spin(&sched_lock); 1991 if (td->td_pri_class == PRI_TIMESHARE) 1992 sched_prio(td, PRI_MAX_TIMESHARE); 1993 mi_switch(SW_VOL, NULL); 1994 mtx_unlock_spin(&sched_lock); 1995} 1996 1997int 1998sched_load(void) 1999{ 2000#ifdef SMP 2001 int total; 2002 int i; 2003 2004 total = 0; 2005 for (i = 0; i <= tdg_maxid; i++) 2006 total += TDQ_GROUP(i)->tdg_load; 2007 return (total); 2008#else 2009 return (TDQ_SELF()->tdq_sysload); 2010#endif 2011} 2012 2013int 2014sched_sizeof_proc(void) 2015{ 2016 return (sizeof(struct proc)); 2017} 2018 2019int 2020sched_sizeof_thread(void) 2021{ 2022 return (sizeof(struct thread) + sizeof(struct td_sched)); 2023} 2024 2025void 2026sched_tick(void) 2027{ 2028 struct td_sched *ts; 2029 2030 ts = curthread->td_sched; 2031 /* Adjust ticks for pctcpu */ 2032 ts->ts_ticks += 1 << SCHED_TICK_SHIFT; 2033 ts->ts_ltick = ticks; 2034 /* 2035 * Update if we've exceeded our desired tick threshhold by over one 2036 * second. 2037 */ 2038 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) 2039 sched_pctcpu_update(ts); 2040} 2041 2042/* 2043 * The actual idle process. 2044 */ 2045void 2046sched_idletd(void *dummy) 2047{ 2048 struct proc *p; 2049 struct thread *td; 2050 2051 td = curthread; 2052 p = td->td_proc; 2053 mtx_assert(&Giant, MA_NOTOWNED); 2054 /* ULE Relies on preemption for idle interruption. 
*/ 2055 for (;;) 2056 cpu_idle(); 2057} 2058 2059static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler"); 2060SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0, 2061 "Scheduler name"); 2062SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, ""); 2063SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, ""); 2064SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, ""); 2065SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, ""); 2066#ifdef SMP 2067SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, ""); 2068SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_affinity, CTLFLAG_RW, 2069 &affinity, 0, ""); 2070SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryself, CTLFLAG_RW, 2071 &tryself, 0, ""); 2072SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryselfidle, CTLFLAG_RW, 2073 &tryselfidle, 0, ""); 2074SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, ""); 2075SYSCTL_INT(_kern_sched, OID_AUTO, ipi_preempt, CTLFLAG_RW, &ipi_preempt, 0, ""); 2076SYSCTL_INT(_kern_sched, OID_AUTO, ipi_ast, CTLFLAG_RW, &ipi_ast, 0, ""); 2077SYSCTL_INT(_kern_sched, OID_AUTO, ipi_thresh, CTLFLAG_RW, &ipi_thresh, 0, ""); 2078SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, ""); 2079SYSCTL_INT(_kern_sched, OID_AUTO, steal_busy, CTLFLAG_RW, &steal_busy, 0, ""); 2080SYSCTL_INT(_kern_sched, OID_AUTO, busy_thresh, CTLFLAG_RW, &busy_thresh, 0, ""); 2081#endif 2082 2083/* ps compat */ 2084static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 2085SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 2086 2087 2088#define KERN_SWITCH_INCLUDE 1 2089#include "kern/kern_switch.c" 2090
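
/*
 * Usage note (exposition only; not part of the kernel source): the run-time
 * tunables declared above are exported under the kern.sched sysctl tree by
 * the SYSCTL_* declarations near the end of this file, so they can be
 * inspected and adjusted from userland, e.g.:
 *
 *	sysctl kern.sched.name		(reports "ule")
 *	sysctl kern.sched.slice		(slice length in stathz ticks)
 *	sysctl kern.sched.interact=30	(interactivity threshold)
 *	sysctl kern.sched.pick_pri=1	(priority-based cpu selection)
 *
 * The pick_pri, steal_htt, steal_busy, busy_thresh and related knobs are
 * only present on kernels built with SMP.
 */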