sched_ule.c revision 165796
1/*- 2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 165796 2007-01-05 08:50:38Z jeff $"); 29 30#include "opt_hwpmc_hooks.h" 31#include "opt_sched.h" 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/kdb.h> 36#include <sys/kernel.h> 37#include <sys/ktr.h> 38#include <sys/lock.h> 39#include <sys/mutex.h> 40#include <sys/proc.h> 41#include <sys/resource.h> 42#include <sys/resourcevar.h> 43#include <sys/sched.h> 44#include <sys/smp.h> 45#include <sys/sx.h> 46#include <sys/sysctl.h> 47#include <sys/sysproto.h> 48#include <sys/turnstile.h> 49#include <sys/umtx.h> 50#include <sys/vmmeter.h> 51#ifdef KTRACE 52#include <sys/uio.h> 53#include <sys/ktrace.h> 54#endif 55 56#ifdef HWPMC_HOOKS 57#include <sys/pmckern.h> 58#endif 59 60#include <machine/cpu.h> 61#include <machine/smp.h> 62 63/* 64 * Thread scheduler specific section. 65 */ 66struct td_sched { 67 TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */ 68 int ts_flags; /* (j) TSF_* flags. */ 69 struct thread *ts_thread; /* (*) Active associated thread. */ 70 fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */ 71 u_char ts_rqindex; /* (j) Run queue index. */ 72 enum { 73 TSS_THREAD, 74 TSS_ONRUNQ 75 } ts_state; /* (j) thread sched specific status. */ 76 int ts_slptime; 77 int ts_slice; 78 struct runq *ts_runq; 79 u_char ts_cpu; /* CPU that we have affinity for. */ 80 /* The following variables are only used for pctcpu calculation */ 81 int ts_ltick; /* Last tick that we were running on */ 82 int ts_ftick; /* First tick that we were running on */ 83 int ts_ticks; /* Tick count */ 84 85 /* originally from kg_sched */ 86 int skg_slptime; /* Number of ticks we vol. slept */ 87 int skg_runtime; /* Number of ticks we were running */ 88}; 89#define ts_assign ts_procq.tqe_next 90/* flags kept in ts_flags */ 91#define TSF_ASSIGNED 0x0001 /* Thread is being migrated. */ 92#define TSF_BOUND 0x0002 /* Thread can not migrate. */ 93#define TSF_XFERABLE 0x0004 /* Thread was added as transferable. */ 94#define TSF_HOLD 0x0008 /* Thread is temporarily bound. 
 */
#define	TSF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
#define	TSF_INTERNAL	0x0020		/* Thread added due to migration. */
#define	TSF_DIDRUN	0x2000		/* Thread actually ran. */
#define	TSF_EXIT	0x4000		/* Thread is being killed. */

static struct td_sched td_sched0;

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	((ts)->ts_ltick - (ts)->ts_ftick)

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (max(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:		Maximum amount of sleep time + run time we'll accumulate
 *			before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
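 *
 * (Illustrative example only, assuming hz = 1000 and stathz = 128:
 * sched_initticks() computes tickincr = (1000 << 10) / 128 = 8000, so each
 * stathz tick adds 8000 to ts_ticks and SCHED_TICK_HZ() shifts the total
 * back down by SCHED_TICK_SHIFT bits to recover hz-domain ticks.)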
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice;

/*
 * tdq - per processor runqs and statistics.
 */
struct tdq {
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	int		tdq_idx;		/* Current insert index. */
	int		tdq_ridx;		/* Current removal index. */
	int		tdq_load_timeshare;	/* Load for timeshare. */
	int		tdq_load;		/* Aggregate load. */
#ifdef SMP
	int		tdq_transferable;
	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
	struct tdq_group *tdq_group;		/* Our processor group. */
	volatile struct td_sched *tdq_assigned;	/* assigned by another CPU. */
#else
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
 * load balancer.
 */
struct tdq_group {
	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
	cpumask_t	tdg_cpumask;	/* Mask of cpus in this group. */
	cpumask_t	tdg_idlemask;	/* Idle cpus in this group. */
	cpumask_t	tdg_mask;	/* Bit mask for first cpu. */
	int		tdg_load;	/* Total load of this group. */
	int		tdg_transferable; /* Transferable load of this group. */
	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
};
#endif

/*
 * One thread queue per processor.
214 */ 215#ifdef SMP 216static cpumask_t tdq_idle; 217static int tdg_maxid; 218static struct tdq tdq_cpu[MAXCPU]; 219static struct tdq_group tdq_groups[MAXCPU]; 220static int bal_tick; 221static int gbal_tick; 222static int balance_groups; 223 224#define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) 225#define TDQ_CPU(x) (&tdq_cpu[(x)]) 226#define TDQ_ID(x) ((x) - tdq_cpu) 227#define TDQ_GROUP(x) (&tdq_groups[(x)]) 228#else /* !SMP */ 229static struct tdq tdq_cpu; 230 231#define TDQ_SELF() (&tdq_cpu) 232#define TDQ_CPU(x) (&tdq_cpu) 233#endif 234 235static struct td_sched *sched_choose(void); /* XXX Should be thread * */ 236static void sched_priority(struct thread *); 237static void sched_thread_priority(struct thread *, u_char); 238static int sched_interact_score(struct thread *); 239static void sched_interact_update(struct thread *); 240static void sched_interact_fork(struct thread *); 241static void sched_pctcpu_update(struct td_sched *); 242 243/* Operations on per processor queues */ 244static struct td_sched * tdq_choose(struct tdq *); 245static void tdq_setup(struct tdq *); 246static void tdq_load_add(struct tdq *, struct td_sched *); 247static void tdq_load_rem(struct tdq *, struct td_sched *); 248static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int); 249static __inline void tdq_runq_rem(struct tdq *, struct td_sched *); 250void tdq_print(int cpu); 251static void runq_print(struct runq *rq); 252#ifdef SMP 253static int tdq_transfer(struct tdq *, struct td_sched *, int); 254static struct td_sched *runq_steal(struct runq *); 255static void sched_balance(void); 256static void sched_balance_groups(void); 257static void sched_balance_group(struct tdq_group *); 258static void sched_balance_pair(struct tdq *, struct tdq *); 259static void sched_smp_tick(void); 260static void tdq_move(struct tdq *, int); 261static int tdq_idled(struct tdq *); 262static void tdq_notify(struct td_sched *, int); 263static void tdq_assign(struct tdq *); 264static struct td_sched *tdq_steal(struct tdq *, int); 265#define THREAD_CAN_MIGRATE(td) \ 266 ((td)->td_pinned == 0 && (td)->td_pri_class != PRI_ITHD) 267#endif 268 269static void sched_setup(void *dummy); 270SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 271 272static void sched_initticks(void *dummy); 273SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL) 274 275static void 276runq_print(struct runq *rq) 277{ 278 struct rqhead *rqh; 279 struct td_sched *ts; 280 int pri; 281 int j; 282 int i; 283 284 for (i = 0; i < RQB_LEN; i++) { 285 printf("\t\trunq bits %d 0x%zx\n", 286 i, rq->rq_status.rqb_bits[i]); 287 for (j = 0; j < RQB_BPW; j++) 288 if (rq->rq_status.rqb_bits[i] & (1ul << j)) { 289 pri = j + (i << RQB_L2BPW); 290 rqh = &rq->rq_queues[pri]; 291 TAILQ_FOREACH(ts, rqh, ts_procq) { 292 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", 293 ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri); 294 } 295 } 296 } 297} 298 299void 300tdq_print(int cpu) 301{ 302 struct tdq *tdq; 303 304 tdq = TDQ_CPU(cpu); 305 306 printf("tdq:\n"); 307 printf("\tload: %d\n", tdq->tdq_load); 308 printf("\tload TIMESHARE: %d\n", tdq->tdq_load_timeshare); 309 printf("\ttimeshare idx: %d\n", tdq->tdq_idx); 310 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); 311 printf("\trealtime runq:\n"); 312 runq_print(&tdq->tdq_realtime); 313 printf("\ttimeshare runq:\n"); 314 runq_print(&tdq->tdq_timeshare); 315 printf("\tidle runq:\n"); 316 runq_print(&tdq->tdq_idle); 
317#ifdef SMP 318 printf("\tload transferable: %d\n", tdq->tdq_transferable); 319#endif 320} 321 322static __inline void 323tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags) 324{ 325#ifdef SMP 326 if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 327 tdq->tdq_transferable++; 328 tdq->tdq_group->tdg_transferable++; 329 ts->ts_flags |= TSF_XFERABLE; 330 } 331#endif 332 if (ts->ts_runq == &tdq->tdq_timeshare) { 333 int pri; 334 335 pri = ts->ts_thread->td_priority; 336 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE, 337 ("Invalid priority %d on timeshare runq", pri)); 338 /* 339 * This queue contains only priorities between MIN and MAX 340 * realtime. Use the whole queue to represent these values. 341 */ 342#define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS) 343 if ((flags & SRQ_BORROWING) == 0) { 344 pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ; 345 pri = (pri + tdq->tdq_idx) % RQ_NQS; 346 /* 347 * This effectively shortens the queue by one so we 348 * can have a one slot difference between idx and 349 * ridx while we wait for threads to drain. 350 */ 351 if (tdq->tdq_ridx != tdq->tdq_idx && 352 pri == tdq->tdq_ridx) 353 pri = (pri - 1) % RQ_NQS; 354 } else 355 pri = tdq->tdq_ridx; 356 runq_add_pri(ts->ts_runq, ts, pri, flags); 357 } else 358 runq_add(ts->ts_runq, ts, flags); 359} 360 361static __inline void 362tdq_runq_rem(struct tdq *tdq, struct td_sched *ts) 363{ 364#ifdef SMP 365 if (ts->ts_flags & TSF_XFERABLE) { 366 tdq->tdq_transferable--; 367 tdq->tdq_group->tdg_transferable--; 368 ts->ts_flags &= ~TSF_XFERABLE; 369 } 370#endif 371 if (ts->ts_runq == &tdq->tdq_timeshare) { 372 if (tdq->tdq_idx != tdq->tdq_ridx) 373 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx); 374 else 375 runq_remove_idx(ts->ts_runq, ts, NULL); 376 /* 377 * For timeshare threads we update the priority here so 378 * the priority reflects the time we've been sleeping. 379 */ 380 ts->ts_ltick = ticks; 381 sched_pctcpu_update(ts); 382 sched_priority(ts->ts_thread); 383 } else 384 runq_remove(ts->ts_runq, ts); 385} 386 387static void 388tdq_load_add(struct tdq *tdq, struct td_sched *ts) 389{ 390 int class; 391 mtx_assert(&sched_lock, MA_OWNED); 392 class = PRI_BASE(ts->ts_thread->td_pri_class); 393 if (class == PRI_TIMESHARE) 394 tdq->tdq_load_timeshare++; 395 tdq->tdq_load++; 396 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 397 if (class != PRI_ITHD && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 398#ifdef SMP 399 tdq->tdq_group->tdg_load++; 400#else 401 tdq->tdq_sysload++; 402#endif 403} 404 405static void 406tdq_load_rem(struct tdq *tdq, struct td_sched *ts) 407{ 408 int class; 409 mtx_assert(&sched_lock, MA_OWNED); 410 class = PRI_BASE(ts->ts_thread->td_pri_class); 411 if (class == PRI_TIMESHARE) 412 tdq->tdq_load_timeshare--; 413 if (class != PRI_ITHD && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 414#ifdef SMP 415 tdq->tdq_group->tdg_load--; 416#else 417 tdq->tdq_sysload--; 418#endif 419 tdq->tdq_load--; 420 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 421 ts->ts_runq = NULL; 422} 423 424#ifdef SMP 425static void 426sched_smp_tick(void) 427{ 428 struct tdq *tdq; 429 430 tdq = TDQ_SELF(); 431 if (ticks >= bal_tick) 432 sched_balance(); 433 if (ticks >= gbal_tick && balance_groups) 434 sched_balance_groups(); 435 /* 436 * We could have been assigned a non real-time thread without an 437 * IPI. 438 */ 439 if (tdq->tdq_assigned) 440 tdq_assign(tdq); /* Potentially sets NEEDRESCHED */ 441} 442 443/* 444 * sched_balance is a simple CPU load balancing algorithm. 
It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void)
{
	struct tdq_group *high;
	struct tdq_group *low;
	struct tdq_group *tdg;
	int cnt;
	int i;

	bal_tick = ticks + (random() % (hz * 2));
	if (smp_started == 0)
		return;
	low = high = NULL;
	i = random() % (tdg_maxid + 1);
	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
		tdg = TDQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || tdg->tdg_load > high->tdg_load)
		    && tdg->tdg_transferable)
			high = tdg;
		if (low == NULL || tdg->tdg_load < low->tdg_load)
			low = tdg;
		if (++i > tdg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->tdg_members),
		    LIST_FIRST(&low->tdg_members));
}

static void
sched_balance_groups(void)
{
	int i;

	gbal_tick = ticks + (random() % (hz * 2));
	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= tdg_maxid; i++)
			sched_balance_group(TDQ_GROUP(i));
}

static void
sched_balance_group(struct tdq_group *tdg)
{
	struct tdq *tdq;
	struct tdq *high;
	struct tdq *low;
	int load;

	if (tdg->tdg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
		load = tdq->tdq_load;
		if (high == NULL || load > high->tdq_load)
			high = tdq;
		if (low == NULL || load < low->tdq_load)
			low = tdq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * tdq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->tdq_group == low->tdq_group) {
		transferable = high->tdq_transferable;
		high_load = high->tdq_load;
		low_load = low->tdq_load;
	} else {
		transferable = high->tdq_group->tdg_transferable;
		high_load = high->tdq_group->tdg_load;
		low_load = low->tdq_group->tdg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
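	 * (Illustrative example: with high_load = 7 and low_load = 2 the
	 * diff of 5 rounds up to a move of 3 threads, which is then capped
	 * at the transferable count.)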
556 */ 557 diff = high_load - low_load; 558 move = diff / 2; 559 if (diff & 0x1) 560 move++; 561 move = min(move, transferable); 562 for (i = 0; i < move; i++) 563 tdq_move(high, TDQ_ID(low)); 564 return; 565} 566 567static void 568tdq_move(struct tdq *from, int cpu) 569{ 570 struct tdq *tdq; 571 struct tdq *to; 572 struct td_sched *ts; 573 574 tdq = from; 575 to = TDQ_CPU(cpu); 576 ts = tdq_steal(tdq, 1); 577 if (ts == NULL) { 578 struct tdq_group *tdg; 579 580 tdg = tdq->tdq_group; 581 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) { 582 if (tdq == from || tdq->tdq_transferable == 0) 583 continue; 584 ts = tdq_steal(tdq, 1); 585 break; 586 } 587 if (ts == NULL) 588 panic("tdq_move: No threads available with a " 589 "transferable count of %d\n", 590 tdg->tdg_transferable); 591 } 592 if (tdq == to) 593 return; 594 ts->ts_state = TSS_THREAD; 595 tdq_runq_rem(tdq, ts); 596 tdq_load_rem(tdq, ts); 597 tdq_notify(ts, cpu); 598} 599 600static int 601tdq_idled(struct tdq *tdq) 602{ 603 struct tdq_group *tdg; 604 struct tdq *steal; 605 struct td_sched *ts; 606 607 tdg = tdq->tdq_group; 608 /* 609 * If we're in a cpu group, try and steal threads from another cpu in 610 * the group before idling. 611 */ 612 if (tdg->tdg_cpus > 1 && tdg->tdg_transferable) { 613 LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) { 614 if (steal == tdq || steal->tdq_transferable == 0) 615 continue; 616 ts = tdq_steal(steal, 0); 617 if (ts == NULL) 618 continue; 619 ts->ts_state = TSS_THREAD; 620 tdq_runq_rem(steal, ts); 621 tdq_load_rem(steal, ts); 622 ts->ts_cpu = PCPU_GET(cpuid); 623 ts->ts_flags |= TSF_INTERNAL | TSF_HOLD; 624 sched_add(ts->ts_thread, SRQ_YIELDING); 625 return (0); 626 } 627 } 628 /* 629 * We only set the idled bit when all of the cpus in the group are 630 * idle. Otherwise we could get into a situation where a thread bounces 631 * back and forth between two idle cores on seperate physical CPUs. 632 */ 633 tdg->tdg_idlemask |= PCPU_GET(cpumask); 634 if (tdg->tdg_idlemask != tdg->tdg_cpumask) 635 return (1); 636 atomic_set_int(&tdq_idle, tdg->tdg_mask); 637 return (1); 638} 639 640static void 641tdq_assign(struct tdq *tdq) 642{ 643 struct td_sched *nts; 644 struct td_sched *ts; 645 646 do { 647 *(volatile struct td_sched **)&ts = tdq->tdq_assigned; 648 } while(!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->tdq_assigned, 649 (uintptr_t)ts, (uintptr_t)NULL)); 650 for (; ts != NULL; ts = nts) { 651 nts = ts->ts_assign; 652 tdq->tdq_group->tdg_load--; 653 tdq->tdq_load--; 654 ts->ts_flags &= ~TSF_ASSIGNED; 655 if (ts->ts_flags & TSF_REMOVED) { 656 ts->ts_flags &= ~TSF_REMOVED; 657 continue; 658 } 659 ts->ts_flags |= TSF_INTERNAL | TSF_HOLD; 660 sched_add(ts->ts_thread, SRQ_YIELDING); 661 } 662} 663 664static void 665tdq_notify(struct td_sched *ts, int cpu) 666{ 667 struct tdq *tdq; 668 struct thread *td; 669 struct pcpu *pcpu; 670 int class; 671 int prio; 672 673 tdq = TDQ_CPU(cpu); 674 class = PRI_BASE(ts->ts_thread->td_pri_class); 675 if ((class != PRI_IDLE && class != PRI_ITHD) 676 && (tdq_idle & tdq->tdq_group->tdg_mask)) 677 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask); 678 tdq->tdq_group->tdg_load++; 679 tdq->tdq_load++; 680 ts->ts_cpu = cpu; 681 ts->ts_flags |= TSF_ASSIGNED; 682 prio = ts->ts_thread->td_priority; 683 684 /* 685 * Place a thread on another cpu's queue and force a resched. 
686 */ 687 do { 688 *(volatile struct td_sched **)&ts->ts_assign = tdq->tdq_assigned; 689 } while(!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->tdq_assigned, 690 (uintptr_t)ts->ts_assign, (uintptr_t)ts)); 691 /* 692 * Without sched_lock we could lose a race where we set NEEDRESCHED 693 * on a thread that is switched out before the IPI is delivered. This 694 * would lead us to miss the resched. This will be a problem once 695 * sched_lock is pushed down. 696 */ 697 pcpu = pcpu_find(cpu); 698 td = pcpu->pc_curthread; 699 if (ts->ts_thread->td_priority < td->td_priority || 700 td == pcpu->pc_idlethread) { 701 td->td_flags |= TDF_NEEDRESCHED; 702 ipi_selected(1 << cpu, IPI_AST); 703 } 704} 705 706static struct td_sched * 707runq_steal(struct runq *rq) 708{ 709 struct rqhead *rqh; 710 struct rqbits *rqb; 711 struct td_sched *ts; 712 int word; 713 int bit; 714 715 mtx_assert(&sched_lock, MA_OWNED); 716 rqb = &rq->rq_status; 717 for (word = 0; word < RQB_LEN; word++) { 718 if (rqb->rqb_bits[word] == 0) 719 continue; 720 for (bit = 0; bit < RQB_BPW; bit++) { 721 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 722 continue; 723 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 724 TAILQ_FOREACH(ts, rqh, ts_procq) { 725 if (THREAD_CAN_MIGRATE(ts->ts_thread)) 726 return (ts); 727 } 728 } 729 } 730 return (NULL); 731} 732 733static struct td_sched * 734tdq_steal(struct tdq *tdq, int stealidle) 735{ 736 struct td_sched *ts; 737 738 /* 739 * Steal from next first to try to get a non-interactive task that 740 * may not have run for a while. 741 * XXX Need to effect steal order for timeshare threads. 742 */ 743 if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL) 744 return (ts); 745 if ((ts = runq_steal(&tdq->tdq_timeshare)) != NULL) 746 return (ts); 747 if (stealidle) 748 return (runq_steal(&tdq->tdq_idle)); 749 return (NULL); 750} 751 752int 753tdq_transfer(struct tdq *tdq, struct td_sched *ts, int class) 754{ 755 struct tdq_group *ntdg; 756 struct tdq_group *tdg; 757 struct tdq *old; 758 int cpu; 759 int idx; 760 761 if (smp_started == 0) 762 return (0); 763 cpu = 0; 764 /* 765 * If our load exceeds a certain threshold we should attempt to 766 * reassign this thread. The first candidate is the cpu that 767 * originally ran the thread. If it is idle, assign it there, 768 * otherwise, pick an idle cpu. 769 * 770 * The threshold at which we start to reassign has a large impact 771 * on the overall performance of the system. Tuned too high and 772 * some CPUs may idle. Too low and there will be excess migration 773 * and context switches. 774 */ 775 old = TDQ_CPU(ts->ts_cpu); 776 ntdg = old->tdq_group; 777 tdg = tdq->tdq_group; 778 if (tdq_idle) { 779 if (tdq_idle & ntdg->tdg_mask) { 780 cpu = ffs(ntdg->tdg_idlemask); 781 if (cpu) { 782 CTR2(KTR_SCHED, 783 "tdq_transfer: %p found old cpu %X " 784 "in idlemask.", ts, cpu); 785 goto migrate; 786 } 787 } 788 /* 789 * Multiple cpus could find this bit simultaneously 790 * but the race shouldn't be terrible. 791 */ 792 cpu = ffs(tdq_idle); 793 if (cpu) { 794 CTR2(KTR_SCHED, "tdq_transfer: %p found %X " 795 "in idlemask.", ts, cpu); 796 goto migrate; 797 } 798 } 799 idx = 0; 800#if 0 801 if (old->tdq_load < tdq->tdq_load) { 802 cpu = ts->ts_cpu + 1; 803 CTR2(KTR_SCHED, "tdq_transfer: %p old cpu %X " 804 "load less than ours.", ts, cpu); 805 goto migrate; 806 } 807 /* 808 * No new CPU was found, look for one with less load. 
809 */ 810 for (idx = 0; idx <= tdg_maxid; idx++) { 811 ntdg = TDQ_GROUP(idx); 812 if (ntdg->tdg_load /*+ (ntdg->tdg_cpus * 2)*/ < tdg->tdg_load) { 813 cpu = ffs(ntdg->tdg_cpumask); 814 CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X load less " 815 "than ours.", ts, cpu); 816 goto migrate; 817 } 818 } 819#endif 820 /* 821 * If another cpu in this group has idled, assign a thread over 822 * to them after checking to see if there are idled groups. 823 */ 824 if (tdg->tdg_idlemask) { 825 cpu = ffs(tdg->tdg_idlemask); 826 if (cpu) { 827 CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X idle in " 828 "group.", ts, cpu); 829 goto migrate; 830 } 831 } 832 return (0); 833migrate: 834 /* 835 * Now that we've found an idle CPU, migrate the thread. 836 */ 837 cpu--; 838 ts->ts_runq = NULL; 839 tdq_notify(ts, cpu); 840 841 return (1); 842} 843 844#endif /* SMP */ 845 846/* 847 * Pick the highest priority task we have and return it. 848 */ 849 850static struct td_sched * 851tdq_choose(struct tdq *tdq) 852{ 853 struct td_sched *ts; 854 855 mtx_assert(&sched_lock, MA_OWNED); 856 857 ts = runq_choose(&tdq->tdq_realtime); 858 if (ts != NULL) { 859 KASSERT(ts->ts_thread->td_priority <= PRI_MAX_REALTIME, 860 ("tdq_choose: Invalid priority on realtime queue %d", 861 ts->ts_thread->td_priority)); 862 return (ts); 863 } 864 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 865 if (ts != NULL) { 866 KASSERT(ts->ts_thread->td_priority <= PRI_MAX_TIMESHARE && 867 ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 868 ("tdq_choose: Invalid priority on timeshare queue %d", 869 ts->ts_thread->td_priority)); 870 return (ts); 871 } 872 873 ts = runq_choose(&tdq->tdq_idle); 874 if (ts != NULL) { 875 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 876 ("tdq_choose: Invalid priority on idle queue %d", 877 ts->ts_thread->td_priority)); 878 return (ts); 879 } 880 881 return (NULL); 882} 883 884static void 885tdq_setup(struct tdq *tdq) 886{ 887 runq_init(&tdq->tdq_realtime); 888 runq_init(&tdq->tdq_timeshare); 889 runq_init(&tdq->tdq_idle); 890 tdq->tdq_load = 0; 891 tdq->tdq_load_timeshare = 0; 892} 893 894static void 895sched_setup(void *dummy) 896{ 897#ifdef SMP 898 int i; 899#endif 900 901 /* 902 * To avoid divide-by-zero, we set realstathz a dummy value 903 * in case which sched_clock() called before sched_initticks(). 904 */ 905 realstathz = hz; 906 sched_slice = (realstathz/7); /* 140ms */ 907 tickincr = 1 << SCHED_TICK_SHIFT; 908 909#ifdef SMP 910 balance_groups = 0; 911 /* 912 * Initialize the tdqs. 913 */ 914 for (i = 0; i < MAXCPU; i++) { 915 struct tdq *tdq; 916 917 tdq = &tdq_cpu[i]; 918 tdq->tdq_assigned = NULL; 919 tdq_setup(&tdq_cpu[i]); 920 } 921 if (smp_topology == NULL) { 922 struct tdq_group *tdg; 923 struct tdq *tdq; 924 int cpus; 925 926 for (cpus = 0, i = 0; i < MAXCPU; i++) { 927 if (CPU_ABSENT(i)) 928 continue; 929 tdq = &tdq_cpu[i]; 930 tdg = &tdq_groups[cpus]; 931 /* 932 * Setup a tdq group with one member. 933 */ 934 tdq->tdq_transferable = 0; 935 tdq->tdq_group = tdg; 936 tdg->tdg_cpus = 1; 937 tdg->tdg_idlemask = 0; 938 tdg->tdg_cpumask = tdg->tdg_mask = 1 << i; 939 tdg->tdg_load = 0; 940 tdg->tdg_transferable = 0; 941 LIST_INIT(&tdg->tdg_members); 942 LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings); 943 cpus++; 944 } 945 tdg_maxid = cpus - 1; 946 } else { 947 struct tdq_group *tdg; 948 struct cpu_group *cg; 949 int j; 950 951 for (i = 0; i < smp_topology->ct_count; i++) { 952 cg = &smp_topology->ct_group[i]; 953 tdg = &tdq_groups[i]; 954 /* 955 * Initialize the group. 
956 */ 957 tdg->tdg_idlemask = 0; 958 tdg->tdg_load = 0; 959 tdg->tdg_transferable = 0; 960 tdg->tdg_cpus = cg->cg_count; 961 tdg->tdg_cpumask = cg->cg_mask; 962 LIST_INIT(&tdg->tdg_members); 963 /* 964 * Find all of the group members and add them. 965 */ 966 for (j = 0; j < MAXCPU; j++) { 967 if ((cg->cg_mask & (1 << j)) != 0) { 968 if (tdg->tdg_mask == 0) 969 tdg->tdg_mask = 1 << j; 970 tdq_cpu[j].tdq_transferable = 0; 971 tdq_cpu[j].tdq_group = tdg; 972 LIST_INSERT_HEAD(&tdg->tdg_members, 973 &tdq_cpu[j], tdq_siblings); 974 } 975 } 976 if (tdg->tdg_cpus > 1) 977 balance_groups = 1; 978 } 979 tdg_maxid = smp_topology->ct_count - 1; 980 } 981 /* 982 * Stagger the group and global load balancer so they do not 983 * interfere with each other. 984 */ 985 bal_tick = ticks + hz; 986 if (balance_groups) 987 gbal_tick = ticks + (hz / 2); 988#else 989 tdq_setup(TDQ_SELF()); 990#endif 991 mtx_lock_spin(&sched_lock); 992 tdq_load_add(TDQ_SELF(), &td_sched0); 993 mtx_unlock_spin(&sched_lock); 994} 995 996/* ARGSUSED */ 997static void 998sched_initticks(void *dummy) 999{ 1000 mtx_lock_spin(&sched_lock); 1001 realstathz = stathz ? stathz : hz; 1002 sched_slice = (realstathz/7); /* ~140ms */ 1003 1004 /* 1005 * tickincr is shifted out by 10 to avoid rounding errors due to 1006 * hz not being evenly divisible by stathz on all platforms. 1007 */ 1008 tickincr = (hz << SCHED_TICK_SHIFT) / realstathz; 1009 /* 1010 * This does not work for values of stathz that are more than 1011 * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1012 */ 1013 if (tickincr == 0) 1014 tickincr = 1; 1015 mtx_unlock_spin(&sched_lock); 1016} 1017 1018 1019/* 1020 * Scale the scheduling priority according to the "interactivity" of this 1021 * process. 1022 */ 1023static void 1024sched_priority(struct thread *td) 1025{ 1026 int score; 1027 int pri; 1028 1029 if (td->td_pri_class != PRI_TIMESHARE) 1030 return; 1031 /* 1032 * If the score is interactive we place the thread in the realtime 1033 * queue with a priority that is less than kernel and interrupt 1034 * priorities. These threads are not subject to nice restrictions. 1035 * 1036 * Scores greater than this are placed on the normal realtime queue 1037 * where the priority is partially decided by the most recent cpu 1038 * utilization and the rest is decided by nice value. 1039 */ 1040 score = sched_interact_score(td); 1041 if (score < sched_interact) { 1042 pri = PRI_MIN_REALTIME; 1043 pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1044 * score; 1045 KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 1046 ("sched_priority: invalid interactive priority %d", pri)); 1047 } else { 1048 pri = SCHED_PRI_MIN; 1049 if (td->td_sched->ts_ticks) 1050 pri += SCHED_PRI_TICKS(td->td_sched); 1051 pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1052 if (!(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE)) { 1053 static int once = 1; 1054 if (once) { 1055 printf("sched_priority: invalid priority %d", 1056 pri); 1057 printf("nice %d, ticks %d ftick %d ltick %d tick pri %d\n", 1058 td->td_proc->p_nice, 1059 td->td_sched->ts_ticks, 1060 td->td_sched->ts_ftick, 1061 td->td_sched->ts_ltick, 1062 SCHED_PRI_TICKS(td->td_sched)); 1063 once = 0; 1064 } 1065 pri = min(max(pri, PRI_MIN_TIMESHARE), 1066 PRI_MAX_TIMESHARE); 1067 } 1068 } 1069 sched_user_prio(td, pri); 1070 1071 return; 1072} 1073 1074/* 1075 * This routine enforces a maximum limit on the amount of scheduling history 1076 * kept. It is called after either the slptime or runtime is adjusted. 
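 * (Illustrative example, assuming hz = 1000: SCHED_SLP_RUN_MAX is
 * (5 * hz) << SCHED_TICK_SHIFT = 5120000, so once skg_slptime + skg_runtime
 * crosses that value both are scaled back by 4/5, or halved when the sum has
 * overshot the limit by more than 1/5th.)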
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct thread *td)
{
	int sum;

	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		td->td_sched->skg_runtime /= 2;
		td->td_sched->skg_slptime /= 2;
		return;
	}
	td->td_sched->skg_runtime = (td->td_sched->skg_runtime / 5) * 4;
	td->td_sched->skg_slptime = (td->td_sched->skg_slptime / 5) * 4;
}

/*
 * Scale back the sleep and run time history inherited from the parent when
 * a new thread is forked.
 */
static void
sched_interact_fork(struct thread *td)
{
	int ratio;
	int sum;

	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		td->td_sched->skg_runtime /= ratio;
		td->td_sched->skg_slptime /= ratio;
	}
}

/*
 * Compute the interactivity score from the ratio of voluntary sleep time to
 * run time.  Lower scores are more interactive.
 */
static int
sched_interact_score(struct thread *td)
{
	int div;

	if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) {
		div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div)));
	}
	if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) {
		div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF);
		return (td->td_sched->skg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);
}

/*
 * Called from proc0_init() to bootstrap the scheduler.
 */
void
schedinit(void)
{

	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_ltick = ticks;
	td_sched0.ts_ftick = ticks;
	td_sched0.ts_thread = &thread0;
	td_sched0.ts_state = TSS_THREAD;
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
 */
int
sched_rr_interval(void)
{

	/* Convert sched_slice to hz */
	return (hz/(realstathz/sched_slice));
}

static void
sched_pctcpu_update(struct td_sched *ts)
{

	if (ts->ts_ticks == 0)
		return;
	if (ticks - (hz / 10) < ts->ts_ltick &&
	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
		return;
	/*
	 * Adjust counters and watermark for pctcpu calc.
1179 */ 1180 if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1181 ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1182 SCHED_TICK_TARG; 1183 else 1184 ts->ts_ticks = 0; 1185 ts->ts_ltick = ticks; 1186 ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 1187} 1188 1189static void 1190sched_thread_priority(struct thread *td, u_char prio) 1191{ 1192 struct td_sched *ts; 1193 1194 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1195 td, td->td_proc->p_comm, td->td_priority, prio, curthread, 1196 curthread->td_proc->p_comm); 1197 ts = td->td_sched; 1198 mtx_assert(&sched_lock, MA_OWNED); 1199 if (td->td_priority == prio) 1200 return; 1201 1202 if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1203 /* 1204 * If the priority has been elevated due to priority 1205 * propagation, we may have to move ourselves to a new 1206 * queue. This could be optimized to not re-add in some 1207 * cases. 1208 * 1209 * Hold this td_sched on this cpu so that sched_prio() doesn't 1210 * cause excessive migration. We only want migration to 1211 * happen as the result of a wakeup. 1212 */ 1213 ts->ts_flags |= TSF_HOLD; 1214 sched_rem(td); 1215 td->td_priority = prio; 1216 sched_add(td, SRQ_BORROWING); 1217 ts->ts_flags &= ~TSF_HOLD; 1218 } else 1219 td->td_priority = prio; 1220} 1221 1222/* 1223 * Update a thread's priority when it is lent another thread's 1224 * priority. 1225 */ 1226void 1227sched_lend_prio(struct thread *td, u_char prio) 1228{ 1229 1230 td->td_flags |= TDF_BORROWING; 1231 sched_thread_priority(td, prio); 1232} 1233 1234/* 1235 * Restore a thread's priority when priority propagation is 1236 * over. The prio argument is the minimum priority the thread 1237 * needs to have to satisfy other possible priority lending 1238 * requests. If the thread's regular priority is less 1239 * important than prio, the thread will keep a priority boost 1240 * of prio. 1241 */ 1242void 1243sched_unlend_prio(struct thread *td, u_char prio) 1244{ 1245 u_char base_pri; 1246 1247 if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1248 td->td_base_pri <= PRI_MAX_TIMESHARE) 1249 base_pri = td->td_user_pri; 1250 else 1251 base_pri = td->td_base_pri; 1252 if (prio >= base_pri) { 1253 td->td_flags &= ~TDF_BORROWING; 1254 sched_thread_priority(td, base_pri); 1255 } else 1256 sched_lend_prio(td, prio); 1257} 1258 1259void 1260sched_prio(struct thread *td, u_char prio) 1261{ 1262 u_char oldprio; 1263 1264 /* First, update the base priority. */ 1265 td->td_base_pri = prio; 1266 1267 /* 1268 * If the thread is borrowing another thread's priority, don't 1269 * ever lower the priority. 1270 */ 1271 if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1272 return; 1273 1274 /* Change the real priority. */ 1275 oldprio = td->td_priority; 1276 sched_thread_priority(td, prio); 1277 1278 /* 1279 * If the thread is on a turnstile, then let the turnstile update 1280 * its state. 
1281 */ 1282 if (TD_ON_LOCK(td) && oldprio != prio) 1283 turnstile_adjust(td, oldprio); 1284} 1285 1286void 1287sched_user_prio(struct thread *td, u_char prio) 1288{ 1289 u_char oldprio; 1290 1291 td->td_base_user_pri = prio; 1292 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1293 return; 1294 oldprio = td->td_user_pri; 1295 td->td_user_pri = prio; 1296 1297 if (TD_ON_UPILOCK(td) && oldprio != prio) 1298 umtx_pi_adjust(td, oldprio); 1299} 1300 1301void 1302sched_lend_user_prio(struct thread *td, u_char prio) 1303{ 1304 u_char oldprio; 1305 1306 td->td_flags |= TDF_UBORROWING; 1307 1308 oldprio = td->td_user_pri; 1309 td->td_user_pri = prio; 1310 1311 if (TD_ON_UPILOCK(td) && oldprio != prio) 1312 umtx_pi_adjust(td, oldprio); 1313} 1314 1315void 1316sched_unlend_user_prio(struct thread *td, u_char prio) 1317{ 1318 u_char base_pri; 1319 1320 base_pri = td->td_base_user_pri; 1321 if (prio >= base_pri) { 1322 td->td_flags &= ~TDF_UBORROWING; 1323 sched_user_prio(td, base_pri); 1324 } else 1325 sched_lend_user_prio(td, prio); 1326} 1327 1328void 1329sched_switch(struct thread *td, struct thread *newtd, int flags) 1330{ 1331 struct tdq *tdq; 1332 struct td_sched *ts; 1333 1334 mtx_assert(&sched_lock, MA_OWNED); 1335 1336 tdq = TDQ_SELF(); 1337 ts = td->td_sched; 1338 td->td_lastcpu = td->td_oncpu; 1339 td->td_oncpu = NOCPU; 1340 td->td_flags &= ~TDF_NEEDRESCHED; 1341 td->td_owepreempt = 0; 1342 /* 1343 * If the thread has been assigned it may be in the process of switching 1344 * to the new cpu. This is the case in sched_bind(). 1345 */ 1346 if (td == PCPU_GET(idlethread)) { 1347 TD_SET_CAN_RUN(td); 1348 } else if ((ts->ts_flags & TSF_ASSIGNED) == 0) { 1349 /* We are ending our run so make our slot available again */ 1350 tdq_load_rem(tdq, ts); 1351 if (TD_IS_RUNNING(td)) { 1352 /* 1353 * Don't allow the thread to migrate 1354 * from a preemption. 1355 */ 1356 ts->ts_flags |= TSF_HOLD; 1357 setrunqueue(td, (flags & SW_PREEMPT) ? 1358 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1359 SRQ_OURSELF|SRQ_YIELDING); 1360 ts->ts_flags &= ~TSF_HOLD; 1361 } 1362 } 1363 if (newtd != NULL) { 1364 /* 1365 * If we bring in a thread account for it as if it had been 1366 * added to the run queue and then chosen. 1367 */ 1368 newtd->td_sched->ts_flags |= TSF_DIDRUN; 1369 TD_SET_RUNNING(newtd); 1370 tdq_load_add(TDQ_SELF(), newtd->td_sched); 1371 } else 1372 newtd = choosethread(); 1373 if (td != newtd) { 1374#ifdef HWPMC_HOOKS 1375 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1376 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1377#endif 1378 1379 cpu_switch(td, newtd); 1380#ifdef HWPMC_HOOKS 1381 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1382 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1383#endif 1384 } 1385 sched_lock.mtx_lock = (uintptr_t)td; 1386 td->td_oncpu = PCPU_GET(cpuid); 1387} 1388 1389void 1390sched_nice(struct proc *p, int nice) 1391{ 1392 struct thread *td; 1393 1394 PROC_LOCK_ASSERT(p, MA_OWNED); 1395 mtx_assert(&sched_lock, MA_OWNED); 1396 1397 p->p_nice = nice; 1398 FOREACH_THREAD_IN_PROC(p, td) { 1399 sched_priority(td); 1400 sched_prio(td, td->td_base_user_pri); 1401 } 1402} 1403 1404void 1405sched_sleep(struct thread *td) 1406{ 1407 1408 mtx_assert(&sched_lock, MA_OWNED); 1409 1410 td->td_sched->ts_slptime = ticks; 1411} 1412 1413void 1414sched_wakeup(struct thread *td) 1415{ 1416 int slptime; 1417 1418 mtx_assert(&sched_lock, MA_OWNED); 1419 1420 /* 1421 * If we slept for more than a tick update our interactivity and 1422 * priority. 
1423 */ 1424 slptime = td->td_sched->ts_slptime; 1425 td->td_sched->ts_slptime = 0; 1426 if (slptime && slptime != ticks) { 1427 int hzticks; 1428 1429 hzticks = (ticks - slptime) << SCHED_TICK_SHIFT; 1430 if (hzticks >= SCHED_SLP_RUN_MAX) { 1431 td->td_sched->skg_slptime = SCHED_SLP_RUN_MAX; 1432 td->td_sched->skg_runtime = 1; 1433 } else { 1434 td->td_sched->skg_slptime += hzticks; 1435 sched_interact_update(td); 1436 } 1437 sched_pctcpu_update(td->td_sched); 1438 sched_priority(td); 1439 } 1440 setrunqueue(td, SRQ_BORING); 1441} 1442 1443/* 1444 * Penalize the parent for creating a new child and initialize the child's 1445 * priority. 1446 */ 1447void 1448sched_fork(struct thread *td, struct thread *child) 1449{ 1450 mtx_assert(&sched_lock, MA_OWNED); 1451 sched_fork_thread(td, child); 1452 /* 1453 * Penalize the parent and child for forking. 1454 */ 1455 sched_interact_fork(child); 1456 sched_priority(child); 1457 td->td_sched->skg_runtime += tickincr; 1458 sched_interact_update(td); 1459 sched_priority(td); 1460} 1461 1462void 1463sched_fork_thread(struct thread *td, struct thread *child) 1464{ 1465 struct td_sched *ts; 1466 struct td_sched *ts2; 1467 1468 /* 1469 * Initialize child. 1470 */ 1471 sched_newthread(child); 1472 ts = td->td_sched; 1473 ts2 = child->td_sched; 1474 ts2->ts_cpu = ts->ts_cpu; 1475 ts2->ts_runq = NULL; 1476 /* 1477 * Grab our parents cpu estimation information and priority. 1478 */ 1479 ts2->ts_ticks = ts->ts_ticks; 1480 ts2->ts_ltick = ts->ts_ltick; 1481 ts2->ts_ftick = ts->ts_ftick; 1482 child->td_user_pri = td->td_user_pri; 1483 child->td_base_user_pri = td->td_base_user_pri; 1484 /* 1485 * And update interactivity score. 1486 */ 1487 ts2->skg_slptime = ts->skg_slptime; 1488 ts2->skg_runtime = ts->skg_runtime; 1489 ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 1490} 1491 1492void 1493sched_class(struct thread *td, int class) 1494{ 1495 struct tdq *tdq; 1496 struct td_sched *ts; 1497 int nclass; 1498 int oclass; 1499 1500 mtx_assert(&sched_lock, MA_OWNED); 1501 if (td->td_pri_class == class) 1502 return; 1503 1504 nclass = PRI_BASE(class); 1505 oclass = PRI_BASE(td->td_pri_class); 1506 ts = td->td_sched; 1507 if (ts->ts_state == TSS_ONRUNQ || td->td_state == TDS_RUNNING) { 1508 tdq = TDQ_CPU(ts->ts_cpu); 1509#ifdef SMP 1510 /* 1511 * On SMP if we're on the RUNQ we must adjust the transferable 1512 * count because could be changing to or from an interrupt 1513 * class. 1514 */ 1515 if (ts->ts_state == TSS_ONRUNQ) { 1516 if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 1517 tdq->tdq_transferable--; 1518 tdq->tdq_group->tdg_transferable--; 1519 } 1520 if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 1521 tdq->tdq_transferable++; 1522 tdq->tdq_group->tdg_transferable++; 1523 } 1524 } 1525#endif 1526 if (oclass == PRI_TIMESHARE) 1527 tdq->tdq_load_timeshare--; 1528 if (nclass == PRI_TIMESHARE) 1529 tdq->tdq_load_timeshare++; 1530 } 1531 1532 td->td_pri_class = class; 1533} 1534 1535/* 1536 * Return some of the child's priority and interactivity to the parent. 
1537 */ 1538void 1539sched_exit(struct proc *p, struct thread *child) 1540{ 1541 struct thread *td; 1542 1543 CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 1544 child, child->td_proc->p_comm, child->td_priority); 1545 1546 td = FIRST_THREAD_IN_PROC(p); 1547 sched_exit_thread(td, child); 1548} 1549 1550void 1551sched_exit_thread(struct thread *td, struct thread *child) 1552{ 1553 1554 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 1555 child, child->td_proc->p_comm, child->td_priority); 1556 1557 tdq_load_rem(TDQ_CPU(child->td_sched->ts_cpu), child->td_sched); 1558#ifdef KSE 1559 /* 1560 * KSE forks and exits so often that this penalty causes short-lived 1561 * threads to always be non-interactive. This causes mozilla to 1562 * crawl under load. 1563 */ 1564 if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc) 1565 return; 1566#endif 1567 /* 1568 * Give the child's runtime to the parent without returning the 1569 * sleep time as a penalty to the parent. This causes shells that 1570 * launch expensive things to mark their children as expensive. 1571 */ 1572 td->td_sched->skg_runtime += child->td_sched->skg_runtime; 1573 sched_interact_update(td); 1574 sched_priority(td); 1575} 1576 1577void 1578sched_userret(struct thread *td) 1579{ 1580 /* 1581 * XXX we cheat slightly on the locking here to avoid locking in 1582 * the usual case. Setting td_priority here is essentially an 1583 * incomplete workaround for not setting it properly elsewhere. 1584 * Now that some interrupt handlers are threads, not setting it 1585 * properly elsewhere can clobber it in the window between setting 1586 * it here and returning to user mode, so don't waste time setting 1587 * it perfectly here. 1588 */ 1589 KASSERT((td->td_flags & TDF_BORROWING) == 0, 1590 ("thread with borrowed priority returning to userland")); 1591 if (td->td_priority != td->td_user_pri) { 1592 mtx_lock_spin(&sched_lock); 1593 td->td_priority = td->td_user_pri; 1594 td->td_base_pri = td->td_user_pri; 1595 mtx_unlock_spin(&sched_lock); 1596 } 1597} 1598 1599void 1600sched_clock(struct thread *td) 1601{ 1602 struct tdq *tdq; 1603 struct td_sched *ts; 1604 1605 mtx_assert(&sched_lock, MA_OWNED); 1606#ifdef SMP 1607 sched_smp_tick(); 1608#endif 1609 tdq = TDQ_SELF(); 1610 /* 1611 * Advance the insert index once for each tick to ensure that all 1612 * threads get a chance to run. 1613 */ 1614 if (tdq->tdq_idx == tdq->tdq_ridx) { 1615 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 1616 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 1617 tdq->tdq_ridx = tdq->tdq_idx; 1618 } 1619 /* Adjust ticks for pctcpu */ 1620 ts = td->td_sched; 1621 ts->ts_ticks += tickincr; 1622 ts->ts_ltick = ticks; 1623 /* 1624 * Update if we've exceeded our desired tick threshhold by over one 1625 * second. 1626 */ 1627 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) 1628 sched_pctcpu_update(ts); 1629 /* 1630 * We only do slicing code for TIMESHARE threads. 1631 */ 1632 if (td->td_pri_class != PRI_TIMESHARE) 1633 return; 1634 /* 1635 * We used a tick; charge it to the thread so that we can compute our 1636 * interactivity. 1637 */ 1638 td->td_sched->skg_runtime += tickincr; 1639 sched_interact_update(td); 1640 /* 1641 * We used up one time slice. 1642 */ 1643 if (--ts->ts_slice > 0) 1644 return; 1645 /* 1646 * We're out of time, recompute priorities and requeue. 
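	 * (Illustrative example, assuming stathz = 128: sched_initticks()
	 * sets sched_slice to 128 / 7 = 18 stathz ticks, so a timeshare
	 * thread runs roughly 140ms before it is requeued here.)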
1647 */ 1648 sched_priority(td); 1649 tdq_load_rem(tdq, ts); 1650 ts->ts_slice = sched_slice; 1651 tdq_load_add(tdq, ts); 1652 td->td_flags |= TDF_NEEDRESCHED; 1653} 1654 1655int 1656sched_runnable(void) 1657{ 1658 struct tdq *tdq; 1659 int load; 1660 1661 load = 1; 1662 1663 tdq = TDQ_SELF(); 1664#ifdef SMP 1665 if (tdq->tdq_assigned) { 1666 mtx_lock_spin(&sched_lock); 1667 tdq_assign(tdq); 1668 mtx_unlock_spin(&sched_lock); 1669 } 1670#endif 1671 if ((curthread->td_flags & TDF_IDLETD) != 0) { 1672 if (tdq->tdq_load > 0) 1673 goto out; 1674 } else 1675 if (tdq->tdq_load - 1 > 0) 1676 goto out; 1677 load = 0; 1678out: 1679 return (load); 1680} 1681 1682struct td_sched * 1683sched_choose(void) 1684{ 1685 struct tdq *tdq; 1686 struct td_sched *ts; 1687 1688 mtx_assert(&sched_lock, MA_OWNED); 1689 tdq = TDQ_SELF(); 1690#ifdef SMP 1691restart: 1692 if (tdq->tdq_assigned) 1693 tdq_assign(tdq); 1694#endif 1695 ts = tdq_choose(tdq); 1696 if (ts) { 1697#ifdef SMP 1698 if (ts->ts_thread->td_priority <= PRI_MIN_IDLE) 1699 if (tdq_idled(tdq) == 0) 1700 goto restart; 1701#endif 1702 tdq_runq_rem(tdq, ts); 1703 ts->ts_state = TSS_THREAD; 1704 return (ts); 1705 } 1706#ifdef SMP 1707 if (tdq_idled(tdq) == 0) 1708 goto restart; 1709#endif 1710 return (NULL); 1711} 1712 1713void 1714sched_add(struct thread *td, int flags) 1715{ 1716 struct tdq *tdq; 1717 struct td_sched *ts; 1718 int preemptive; 1719 int canmigrate; 1720 int class; 1721 1722 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 1723 td, td->td_proc->p_comm, td->td_priority, curthread, 1724 curthread->td_proc->p_comm); 1725 mtx_assert(&sched_lock, MA_OWNED); 1726 tdq = TDQ_SELF(); 1727 ts = td->td_sched; 1728 ts->ts_flags &= ~TSF_INTERNAL; 1729 class = PRI_BASE(td->td_pri_class); 1730 preemptive = !(flags & SRQ_YIELDING); 1731 canmigrate = 1; 1732#ifdef SMP 1733 if (ts->ts_flags & TSF_ASSIGNED) { 1734 if (ts->ts_flags & TSF_REMOVED) 1735 ts->ts_flags &= ~TSF_REMOVED; 1736 return; 1737 } 1738 canmigrate = THREAD_CAN_MIGRATE(td); 1739 /* 1740 * Don't migrate running threads here. Force the long term balancer 1741 * to do it. 1742 */ 1743 if (ts->ts_flags & TSF_HOLD) { 1744 ts->ts_flags &= ~TSF_HOLD; 1745 canmigrate = 0; 1746 } 1747#endif 1748 KASSERT(ts->ts_state != TSS_ONRUNQ, 1749 ("sched_add: thread %p (%s) already in run queue", td, 1750 td->td_proc->p_comm)); 1751 KASSERT(td->td_proc->p_sflag & PS_INMEM, 1752 ("sched_add: process swapped out")); 1753 KASSERT(ts->ts_runq == NULL, 1754 ("sched_add: thread %p is still assigned to a run queue", td)); 1755 /* 1756 * Set the slice and pick the run queue. 1757 */ 1758 if (ts->ts_slice == 0) 1759 ts->ts_slice = sched_slice; 1760 if (class == PRI_TIMESHARE) 1761 sched_priority(td); 1762 if (td->td_priority <= PRI_MAX_REALTIME) { 1763 ts->ts_runq = &tdq->tdq_realtime; 1764 /* 1765 * If the thread is not artificially pinned and it's in 1766 * the realtime queue we directly dispatch it on this cpu 1767 * for minimum latency. Interrupt handlers may also have 1768 * to complete on the cpu that dispatched them. 1769 */ 1770 if (td->td_pinned == 0) 1771 ts->ts_cpu = PCPU_GET(cpuid); 1772 } else if (td->td_priority <= PRI_MAX_TIMESHARE) 1773 ts->ts_runq = &tdq->tdq_timeshare; 1774 else 1775 ts->ts_runq = &tdq->tdq_idle; 1776 1777#ifdef SMP 1778 /* 1779 * If this thread is pinned or bound, notify the target cpu. 
1780 */ 1781 if (!canmigrate && ts->ts_cpu != PCPU_GET(cpuid) ) { 1782 ts->ts_runq = NULL; 1783 tdq_notify(ts, ts->ts_cpu); 1784 return; 1785 } 1786 /* 1787 * If we had been idle, clear our bit in the group and potentially 1788 * the global bitmap. If not, see if we should transfer this thread. 1789 */ 1790 if ((class != PRI_IDLE && class != PRI_ITHD) && 1791 (tdq->tdq_group->tdg_idlemask & PCPU_GET(cpumask)) != 0) { 1792 /* 1793 * Check to see if our group is unidling, and if so, remove it 1794 * from the global idle mask. 1795 */ 1796 if (tdq->tdq_group->tdg_idlemask == 1797 tdq->tdq_group->tdg_cpumask) 1798 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask); 1799 /* 1800 * Now remove ourselves from the group specific idle mask. 1801 */ 1802 tdq->tdq_group->tdg_idlemask &= ~PCPU_GET(cpumask); 1803 } else if (canmigrate && tdq->tdq_load > 1) 1804 if (tdq_transfer(tdq, ts, class)) 1805 return; 1806 ts->ts_cpu = PCPU_GET(cpuid); 1807#endif 1808 if (td->td_priority < curthread->td_priority) 1809 curthread->td_flags |= TDF_NEEDRESCHED; 1810 if (preemptive && maybe_preempt(td)) 1811 return; 1812 ts->ts_state = TSS_ONRUNQ; 1813 1814 tdq_runq_add(tdq, ts, flags); 1815 tdq_load_add(tdq, ts); 1816} 1817 1818void 1819sched_rem(struct thread *td) 1820{ 1821 struct tdq *tdq; 1822 struct td_sched *ts; 1823 1824 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 1825 td, td->td_proc->p_comm, td->td_priority, curthread, 1826 curthread->td_proc->p_comm); 1827 mtx_assert(&sched_lock, MA_OWNED); 1828 ts = td->td_sched; 1829 if (ts->ts_flags & TSF_ASSIGNED) { 1830 ts->ts_flags |= TSF_REMOVED; 1831 return; 1832 } 1833 KASSERT((ts->ts_state == TSS_ONRUNQ), 1834 ("sched_rem: thread not on run queue")); 1835 1836 ts->ts_state = TSS_THREAD; 1837 tdq = TDQ_CPU(ts->ts_cpu); 1838 tdq_runq_rem(tdq, ts); 1839 tdq_load_rem(tdq, ts); 1840} 1841 1842fixpt_t 1843sched_pctcpu(struct thread *td) 1844{ 1845 fixpt_t pctcpu; 1846 struct td_sched *ts; 1847 1848 pctcpu = 0; 1849 ts = td->td_sched; 1850 if (ts == NULL) 1851 return (0); 1852 1853 mtx_lock_spin(&sched_lock); 1854 if (ts->ts_ticks) { 1855 int rtick; 1856 1857 sched_pctcpu_update(ts); 1858 /* How many rtick per second ? */ 1859 rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 1860 pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 1861 } 1862 td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick; 1863 mtx_unlock_spin(&sched_lock); 1864 1865 return (pctcpu); 1866} 1867 1868void 1869sched_bind(struct thread *td, int cpu) 1870{ 1871 struct td_sched *ts; 1872 1873 mtx_assert(&sched_lock, MA_OWNED); 1874 ts = td->td_sched; 1875 KASSERT((ts->ts_flags & TSF_BOUND) == 0, 1876 ("sched_bind: thread %p already bound.", td)); 1877 ts->ts_flags |= TSF_BOUND; 1878#ifdef SMP 1879 if (PCPU_GET(cpuid) == cpu) 1880 return; 1881 /* sched_rem without the runq_remove */ 1882 ts->ts_state = TSS_THREAD; 1883 tdq_load_rem(TDQ_CPU(ts->ts_cpu), ts); 1884 tdq_notify(ts, cpu); 1885 /* When we return from mi_switch we'll be on the correct cpu. 
*/ 1886 mi_switch(SW_VOL, NULL); 1887 sched_pin(); 1888#endif 1889} 1890 1891void 1892sched_unbind(struct thread *td) 1893{ 1894 struct td_sched *ts; 1895 1896 mtx_assert(&sched_lock, MA_OWNED); 1897 ts = td->td_sched; 1898 KASSERT(ts->ts_flags & TSF_BOUND, 1899 ("sched_unbind: thread %p not bound.", td)); 1900 mtx_assert(&sched_lock, MA_OWNED); 1901 ts->ts_flags &= ~TSF_BOUND; 1902#ifdef SMP 1903 sched_unpin(); 1904#endif 1905} 1906 1907int 1908sched_is_bound(struct thread *td) 1909{ 1910 mtx_assert(&sched_lock, MA_OWNED); 1911 return (td->td_sched->ts_flags & TSF_BOUND); 1912} 1913 1914void 1915sched_relinquish(struct thread *td) 1916{ 1917 mtx_lock_spin(&sched_lock); 1918 if (td->td_pri_class == PRI_TIMESHARE) 1919 sched_prio(td, PRI_MAX_TIMESHARE); 1920 mi_switch(SW_VOL, NULL); 1921 mtx_unlock_spin(&sched_lock); 1922} 1923 1924int 1925sched_load(void) 1926{ 1927#ifdef SMP 1928 int total; 1929 int i; 1930 1931 total = 0; 1932 for (i = 0; i <= tdg_maxid; i++) 1933 total += TDQ_GROUP(i)->tdg_load; 1934 return (total); 1935#else 1936 return (TDQ_SELF()->tdq_sysload); 1937#endif 1938} 1939 1940int 1941sched_sizeof_proc(void) 1942{ 1943 return (sizeof(struct proc)); 1944} 1945 1946int 1947sched_sizeof_thread(void) 1948{ 1949 return (sizeof(struct thread) + sizeof(struct td_sched)); 1950} 1951 1952void 1953sched_tick(void) 1954{ 1955} 1956 1957static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler"); 1958SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0, 1959 "Scheduler name"); 1960SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, ""); 1961SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, ""); 1962SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, ""); 1963SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, ""); 1964 1965/* ps compat */ 1966static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 1967SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 1968 1969 1970#define KERN_SWITCH_INCLUDE 1 1971#include "kern/kern_switch.c" 1972