sched_ule.c revision 172411
1/*- 2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27/* 28 * This file implements the ULE scheduler. ULE supports independent CPU 29 * run queues and fine grain locking. It has superior interactive 30 * performance under load even on uni-processor systems. 31 * 32 * etymology: 33 * ULE is the last three letters in schedule. It owes its name to a 34 * generic user created for a scheduling system by Paul Mikesell at 35 * Isilon Systems and a general lack of creativity on the part of the author. 36 */ 37 38#include <sys/cdefs.h> 39__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 172411 2007-10-02 01:30:18Z jeff $"); 40 41#include "opt_hwpmc_hooks.h" 42#include "opt_sched.h" 43 44#include <sys/param.h> 45#include <sys/systm.h> 46#include <sys/kdb.h> 47#include <sys/kernel.h> 48#include <sys/ktr.h> 49#include <sys/lock.h> 50#include <sys/mutex.h> 51#include <sys/proc.h> 52#include <sys/resource.h> 53#include <sys/resourcevar.h> 54#include <sys/sched.h> 55#include <sys/smp.h> 56#include <sys/sx.h> 57#include <sys/sysctl.h> 58#include <sys/sysproto.h> 59#include <sys/turnstile.h> 60#include <sys/umtx.h> 61#include <sys/vmmeter.h> 62#ifdef KTRACE 63#include <sys/uio.h> 64#include <sys/ktrace.h> 65#endif 66 67#ifdef HWPMC_HOOKS 68#include <sys/pmckern.h> 69#endif 70 71#include <machine/cpu.h> 72#include <machine/smp.h> 73 74#if !defined(__i386__) && !defined(__amd64__) 75#error "This architecture is not currently compatible with ULE" 76#endif 77 78#define KTR_ULE 0 79 80/* 81 * Thread scheduler specific section. All fields are protected 82 * by the thread lock. 83 */ 84struct td_sched { 85 TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */ 86 struct thread *ts_thread; /* Active associated thread. */ 87 struct runq *ts_runq; /* Run-queue we're queued on. */ 88 short ts_flags; /* TSF_* flags. */ 89 u_char ts_rqindex; /* Run queue index. */ 90 u_char ts_cpu; /* CPU that we have affinity for. */ 91 int ts_slice; /* Ticks of slice remaining. */ 92 u_int ts_slptime; /* Number of ticks we vol. slept */ 93 u_int ts_runtime; /* Number of ticks we were running */ 94 /* The following variables are only used for pctcpu calculation */ 95 int ts_ltick; /* Last tick that we were running on */ 96 int ts_ftick; /* First tick that we were running on */ 97 int ts_ticks; /* Tick count */ 98#ifdef SMP 99 int ts_rltick; /* Real last tick, for affinity. */ 100#endif 101}; 102/* flags kept in ts_flags */ 103#define TSF_BOUND 0x0001 /* Thread can not migrate. */ 104#define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */ 105 106static struct td_sched td_sched0; 107 108/* 109 * Cpu percentage computation macros and defines. 110 * 111 * SCHED_TICK_SECS: Number of seconds to average the cpu usage across. 112 * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across. 113 * SCHED_TICK_MAX: Maximum number of ticks before scaling back. 114 * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results. 115 * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count. 116 * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks. 117 */ 118#define SCHED_TICK_SECS 10 119#define SCHED_TICK_TARG (hz * SCHED_TICK_SECS) 120#define SCHED_TICK_MAX (SCHED_TICK_TARG + hz) 121#define SCHED_TICK_SHIFT 10 122#define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT) 123#define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz)) 124 125/* 126 * These macros determine priorities for non-interactive threads. They are 127 * assigned a priority based on their recent cpu utilization as expressed 128 * by the ratio of ticks to the tick total. NHALF priorities at the start 129 * and end of the MIN to MAX timeshare range are only reachable with negative 130 * or positive nice respectively. 131 * 132 * PRI_RANGE: Priority range for utilization dependent priorities. 133 * PRI_NRESV: Number of nice values. 134 * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total. 135 * PRI_NICE: Determines the part of the priority inherited from nice. 136 */ 137#define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN) 138#define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) 139#define SCHED_PRI_MIN (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF) 140#define SCHED_PRI_MAX (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF) 141#define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN) 142#define SCHED_PRI_TICKS(ts) \ 143 (SCHED_TICK_HZ((ts)) / \ 144 (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE)) 145#define SCHED_PRI_NICE(nice) (nice) 146 147/* 148 * These determine the interactivity of a process. Interactivity differs from 149 * cpu utilization in that it expresses the voluntary time slept vs time ran 150 * while cpu utilization includes all time not running. This more accurately 151 * models the intent of the thread. 152 * 153 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate 154 * before throttling back. 155 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. 156 * INTERACT_MAX: Maximum interactivity value. Smaller is better. 157 * INTERACT_THRESH: Threshhold for placement on the current runq. 158 */ 159#define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT) 160#define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT) 161#define SCHED_INTERACT_MAX (100) 162#define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) 163#define SCHED_INTERACT_THRESH (30) 164 165/* 166 * tickincr: Converts a stathz tick into a hz domain scaled by 167 * the shift factor. Without the shift the error rate 168 * due to rounding would be unacceptably high. 169 * realstathz: stathz is sometimes 0 and run off of hz. 170 * sched_slice: Runtime of each thread before rescheduling. 171 * preempt_thresh: Priority threshold for preemption and remote IPIs. 172 */ 173static int sched_interact = SCHED_INTERACT_THRESH; 174static int realstathz; 175static int tickincr; 176static int sched_slice; 177#ifdef PREEMPTION 178#ifdef FULL_PREEMPTION 179static int preempt_thresh = PRI_MAX_IDLE; 180#else 181static int preempt_thresh = PRI_MIN_KERN; 182#endif 183#else 184static int preempt_thresh = 0; 185#endif 186 187/* 188 * tdq - per processor runqs and statistics. All fields are protected by the 189 * tdq_lock. The load and lowpri may be accessed without to avoid excess 190 * locking in sched_pickcpu(); 191 */ 192struct tdq { 193 struct mtx *tdq_lock; /* Pointer to group lock. */ 194 struct runq tdq_realtime; /* real-time run queue. */ 195 struct runq tdq_timeshare; /* timeshare run queue. */ 196 struct runq tdq_idle; /* Queue of IDLE threads. */ 197 int tdq_load; /* Aggregate load. */ 198 u_char tdq_idx; /* Current insert index. */ 199 u_char tdq_ridx; /* Current removal index. */ 200#ifdef SMP 201 u_char tdq_lowpri; /* Lowest priority thread. */ 202 int tdq_transferable; /* Transferable thread count. */ 203 LIST_ENTRY(tdq) tdq_siblings; /* Next in tdq group. */ 204 struct tdq_group *tdq_group; /* Our processor group. */ 205#else 206 int tdq_sysload; /* For loadavg, !ITHD load. */ 207#endif 208} __aligned(64); 209 210 211#ifdef SMP 212/* 213 * tdq groups are groups of processors which can cheaply share threads. When 214 * one processor in the group goes idle it will check the runqs of the other 215 * processors in its group prior to halting and waiting for an interrupt. 216 * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA. 217 * In a numa environment we'd want an idle bitmap per group and a two tiered 218 * load balancer. 219 */ 220struct tdq_group { 221 struct mtx tdg_lock; /* Protects all fields below. */ 222 int tdg_cpus; /* Count of CPUs in this tdq group. */ 223 cpumask_t tdg_cpumask; /* Mask of cpus in this group. */ 224 cpumask_t tdg_idlemask; /* Idle cpus in this group. */ 225 cpumask_t tdg_mask; /* Bit mask for first cpu. */ 226 int tdg_load; /* Total load of this group. */ 227 int tdg_transferable; /* Transferable load of this group. */ 228 LIST_HEAD(, tdq) tdg_members; /* Linked list of all members. */ 229 char tdg_name[16]; /* lock name. */ 230} __aligned(64); 231 232#define SCHED_AFFINITY_DEFAULT (max(1, hz / 300)) 233#define SCHED_AFFINITY(ts) ((ts)->ts_rltick > ticks - affinity) 234 235/* 236 * Run-time tunables. 237 */ 238static int rebalance = 1; 239static int balance_interval = 128; /* Default set in sched_initticks(). */ 240static int pick_pri = 1; 241static int affinity; 242static int tryself = 1; 243static int steal_htt = 1; 244static int steal_idle = 1; 245static int steal_thresh = 2; 246static int topology = 0; 247 248/* 249 * One thread queue per processor. 250 */ 251static volatile cpumask_t tdq_idle; 252static int tdg_maxid; 253static struct tdq tdq_cpu[MAXCPU]; 254static struct tdq_group tdq_groups[MAXCPU]; 255static struct tdq *balance_tdq; 256static int balance_group_ticks; 257static int balance_ticks; 258 259#define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) 260#define TDQ_CPU(x) (&tdq_cpu[(x)]) 261#define TDQ_ID(x) ((int)((x) - tdq_cpu)) 262#define TDQ_GROUP(x) (&tdq_groups[(x)]) 263#define TDG_ID(x) ((int)((x) - tdq_groups)) 264#else /* !SMP */ 265static struct tdq tdq_cpu; 266static struct mtx tdq_lock; 267 268#define TDQ_ID(x) (0) 269#define TDQ_SELF() (&tdq_cpu) 270#define TDQ_CPU(x) (&tdq_cpu) 271#endif 272 273#define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type)) 274#define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t))) 275#define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f)) 276#define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t))) 277#define TDQ_LOCKPTR(t) ((t)->tdq_lock) 278 279static void sched_priority(struct thread *); 280static void sched_thread_priority(struct thread *, u_char); 281static int sched_interact_score(struct thread *); 282static void sched_interact_update(struct thread *); 283static void sched_interact_fork(struct thread *); 284static void sched_pctcpu_update(struct td_sched *); 285 286/* Operations on per processor queues */ 287static struct td_sched * tdq_choose(struct tdq *); 288static void tdq_setup(struct tdq *); 289static void tdq_load_add(struct tdq *, struct td_sched *); 290static void tdq_load_rem(struct tdq *, struct td_sched *); 291static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int); 292static __inline void tdq_runq_rem(struct tdq *, struct td_sched *); 293void tdq_print(int cpu); 294static void runq_print(struct runq *rq); 295static void tdq_add(struct tdq *, struct thread *, int); 296#ifdef SMP 297static void tdq_move(struct tdq *, struct tdq *); 298static int tdq_idled(struct tdq *); 299static void tdq_notify(struct td_sched *); 300static struct td_sched *tdq_steal(struct tdq *); 301static struct td_sched *runq_steal(struct runq *); 302static int sched_pickcpu(struct td_sched *, int); 303static void sched_balance(void); 304static void sched_balance_groups(void); 305static void sched_balance_group(struct tdq_group *); 306static void sched_balance_pair(struct tdq *, struct tdq *); 307static inline struct tdq *sched_setcpu(struct td_sched *, int, int); 308static inline struct mtx *thread_block_switch(struct thread *); 309static inline void thread_unblock_switch(struct thread *, struct mtx *); 310static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int); 311 312#define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0) 313#endif 314 315static void sched_setup(void *dummy); 316SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 317 318static void sched_initticks(void *dummy); 319SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL) 320 321/* 322 * Print the threads waiting on a run-queue. 323 */ 324static void 325runq_print(struct runq *rq) 326{ 327 struct rqhead *rqh; 328 struct td_sched *ts; 329 int pri; 330 int j; 331 int i; 332 333 for (i = 0; i < RQB_LEN; i++) { 334 printf("\t\trunq bits %d 0x%zx\n", 335 i, rq->rq_status.rqb_bits[i]); 336 for (j = 0; j < RQB_BPW; j++) 337 if (rq->rq_status.rqb_bits[i] & (1ul << j)) { 338 pri = j + (i << RQB_L2BPW); 339 rqh = &rq->rq_queues[pri]; 340 TAILQ_FOREACH(ts, rqh, ts_procq) { 341 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", 342 ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri); 343 } 344 } 345 } 346} 347 348/* 349 * Print the status of a per-cpu thread queue. Should be a ddb show cmd. 350 */ 351void 352tdq_print(int cpu) 353{ 354 struct tdq *tdq; 355 356 tdq = TDQ_CPU(cpu); 357 358 printf("tdq %d:\n", TDQ_ID(tdq)); 359 printf("\tlockptr %p\n", TDQ_LOCKPTR(tdq)); 360 printf("\tload: %d\n", tdq->tdq_load); 361 printf("\ttimeshare idx: %d\n", tdq->tdq_idx); 362 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); 363 printf("\trealtime runq:\n"); 364 runq_print(&tdq->tdq_realtime); 365 printf("\ttimeshare runq:\n"); 366 runq_print(&tdq->tdq_timeshare); 367 printf("\tidle runq:\n"); 368 runq_print(&tdq->tdq_idle); 369#ifdef SMP 370 printf("\tload transferable: %d\n", tdq->tdq_transferable); 371 printf("\tlowest priority: %d\n", tdq->tdq_lowpri); 372 printf("\tgroup: %d\n", TDG_ID(tdq->tdq_group)); 373 printf("\tLock name: %s\n", tdq->tdq_group->tdg_name); 374#endif 375} 376 377#define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS) 378/* 379 * Add a thread to the actual run-queue. Keeps transferable counts up to 380 * date with what is actually on the run-queue. Selects the correct 381 * queue position for timeshare threads. 382 */ 383static __inline void 384tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags) 385{ 386 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 387 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 388#ifdef SMP 389 if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 390 tdq->tdq_transferable++; 391 tdq->tdq_group->tdg_transferable++; 392 ts->ts_flags |= TSF_XFERABLE; 393 } 394#endif 395 if (ts->ts_runq == &tdq->tdq_timeshare) { 396 u_char pri; 397 398 pri = ts->ts_thread->td_priority; 399 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE, 400 ("Invalid priority %d on timeshare runq", pri)); 401 /* 402 * This queue contains only priorities between MIN and MAX 403 * realtime. Use the whole queue to represent these values. 404 */ 405 if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) { 406 pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ; 407 pri = (pri + tdq->tdq_idx) % RQ_NQS; 408 /* 409 * This effectively shortens the queue by one so we 410 * can have a one slot difference between idx and 411 * ridx while we wait for threads to drain. 412 */ 413 if (tdq->tdq_ridx != tdq->tdq_idx && 414 pri == tdq->tdq_ridx) 415 pri = (unsigned char)(pri - 1) % RQ_NQS; 416 } else 417 pri = tdq->tdq_ridx; 418 runq_add_pri(ts->ts_runq, ts, pri, flags); 419 } else 420 runq_add(ts->ts_runq, ts, flags); 421} 422 423/* 424 * Remove a thread from a run-queue. This typically happens when a thread 425 * is selected to run. Running threads are not on the queue and the 426 * transferable count does not reflect them. 427 */ 428static __inline void 429tdq_runq_rem(struct tdq *tdq, struct td_sched *ts) 430{ 431 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 432 KASSERT(ts->ts_runq != NULL, 433 ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread)); 434#ifdef SMP 435 if (ts->ts_flags & TSF_XFERABLE) { 436 tdq->tdq_transferable--; 437 tdq->tdq_group->tdg_transferable--; 438 ts->ts_flags &= ~TSF_XFERABLE; 439 } 440#endif 441 if (ts->ts_runq == &tdq->tdq_timeshare) { 442 if (tdq->tdq_idx != tdq->tdq_ridx) 443 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx); 444 else 445 runq_remove_idx(ts->ts_runq, ts, NULL); 446 /* 447 * For timeshare threads we update the priority here so 448 * the priority reflects the time we've been sleeping. 449 */ 450 ts->ts_ltick = ticks; 451 sched_pctcpu_update(ts); 452 sched_priority(ts->ts_thread); 453 } else 454 runq_remove(ts->ts_runq, ts); 455} 456 457/* 458 * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load 459 * for this thread to the referenced thread queue. 460 */ 461static void 462tdq_load_add(struct tdq *tdq, struct td_sched *ts) 463{ 464 int class; 465 466 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 467 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 468 class = PRI_BASE(ts->ts_thread->td_pri_class); 469 tdq->tdq_load++; 470 CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load); 471 if (class != PRI_ITHD && 472 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 473#ifdef SMP 474 tdq->tdq_group->tdg_load++; 475#else 476 tdq->tdq_sysload++; 477#endif 478} 479 480/* 481 * Remove the load from a thread that is transitioning to a sleep state or 482 * exiting. 483 */ 484static void 485tdq_load_rem(struct tdq *tdq, struct td_sched *ts) 486{ 487 int class; 488 489 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 490 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 491 class = PRI_BASE(ts->ts_thread->td_pri_class); 492 if (class != PRI_ITHD && 493 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 494#ifdef SMP 495 tdq->tdq_group->tdg_load--; 496#else 497 tdq->tdq_sysload--; 498#endif 499 KASSERT(tdq->tdq_load != 0, 500 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq))); 501 tdq->tdq_load--; 502 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 503 ts->ts_runq = NULL; 504} 505 506#ifdef SMP 507/* 508 * sched_balance is a simple CPU load balancing algorithm. It operates by 509 * finding the least loaded and most loaded cpu and equalizing their load 510 * by migrating some processes. 511 * 512 * Dealing only with two CPUs at a time has two advantages. Firstly, most 513 * installations will only have 2 cpus. Secondly, load balancing too much at 514 * once can have an unpleasant effect on the system. The scheduler rarely has 515 * enough information to make perfect decisions. So this algorithm chooses 516 * simplicity and more gradual effects on load in larger systems. 517 * 518 */ 519static void 520sched_balance() 521{ 522 struct tdq_group *high; 523 struct tdq_group *low; 524 struct tdq_group *tdg; 525 struct tdq *tdq; 526 int cnt; 527 int i; 528 529 /* 530 * Select a random time between .5 * balance_interval and 531 * 1.5 * balance_interval. 532 */ 533 balance_ticks = max(balance_interval / 2, 1); 534 balance_ticks += random() % balance_interval; 535 if (smp_started == 0 || rebalance == 0) 536 return; 537 tdq = TDQ_SELF(); 538 TDQ_UNLOCK(tdq); 539 low = high = NULL; 540 i = random() % (tdg_maxid + 1); 541 for (cnt = 0; cnt <= tdg_maxid; cnt++) { 542 tdg = TDQ_GROUP(i); 543 /* 544 * Find the CPU with the highest load that has some 545 * threads to transfer. 546 */ 547 if ((high == NULL || tdg->tdg_load > high->tdg_load) 548 && tdg->tdg_transferable) 549 high = tdg; 550 if (low == NULL || tdg->tdg_load < low->tdg_load) 551 low = tdg; 552 if (++i > tdg_maxid) 553 i = 0; 554 } 555 if (low != NULL && high != NULL && high != low) 556 sched_balance_pair(LIST_FIRST(&high->tdg_members), 557 LIST_FIRST(&low->tdg_members)); 558 TDQ_LOCK(tdq); 559} 560 561/* 562 * Balance load between CPUs in a group. Will only migrate within the group. 563 */ 564static void 565sched_balance_groups() 566{ 567 struct tdq *tdq; 568 int i; 569 570 /* 571 * Select a random time between .5 * balance_interval and 572 * 1.5 * balance_interval. 573 */ 574 balance_group_ticks = max(balance_interval / 2, 1); 575 balance_group_ticks += random() % balance_interval; 576 if (smp_started == 0 || rebalance == 0) 577 return; 578 tdq = TDQ_SELF(); 579 TDQ_UNLOCK(tdq); 580 for (i = 0; i <= tdg_maxid; i++) 581 sched_balance_group(TDQ_GROUP(i)); 582 TDQ_LOCK(tdq); 583} 584 585/* 586 * Finds the greatest imbalance between two tdqs in a group. 587 */ 588static void 589sched_balance_group(struct tdq_group *tdg) 590{ 591 struct tdq *tdq; 592 struct tdq *high; 593 struct tdq *low; 594 int load; 595 596 if (tdg->tdg_transferable == 0) 597 return; 598 low = NULL; 599 high = NULL; 600 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) { 601 load = tdq->tdq_load; 602 if (high == NULL || load > high->tdq_load) 603 high = tdq; 604 if (low == NULL || load < low->tdq_load) 605 low = tdq; 606 } 607 if (high != NULL && low != NULL && high != low) 608 sched_balance_pair(high, low); 609} 610 611/* 612 * Lock two thread queues using their address to maintain lock order. 613 */ 614static void 615tdq_lock_pair(struct tdq *one, struct tdq *two) 616{ 617 if (one < two) { 618 TDQ_LOCK(one); 619 TDQ_LOCK_FLAGS(two, MTX_DUPOK); 620 } else { 621 TDQ_LOCK(two); 622 TDQ_LOCK_FLAGS(one, MTX_DUPOK); 623 } 624} 625 626/* 627 * Unlock two thread queues. Order is not important here. 628 */ 629static void 630tdq_unlock_pair(struct tdq *one, struct tdq *two) 631{ 632 TDQ_UNLOCK(one); 633 TDQ_UNLOCK(two); 634} 635 636/* 637 * Transfer load between two imbalanced thread queues. 638 */ 639static void 640sched_balance_pair(struct tdq *high, struct tdq *low) 641{ 642 int transferable; 643 int high_load; 644 int low_load; 645 int move; 646 int diff; 647 int i; 648 649 tdq_lock_pair(high, low); 650 /* 651 * If we're transfering within a group we have to use this specific 652 * tdq's transferable count, otherwise we can steal from other members 653 * of the group. 654 */ 655 if (high->tdq_group == low->tdq_group) { 656 transferable = high->tdq_transferable; 657 high_load = high->tdq_load; 658 low_load = low->tdq_load; 659 } else { 660 transferable = high->tdq_group->tdg_transferable; 661 high_load = high->tdq_group->tdg_load; 662 low_load = low->tdq_group->tdg_load; 663 } 664 /* 665 * Determine what the imbalance is and then adjust that to how many 666 * threads we actually have to give up (transferable). 667 */ 668 if (transferable != 0) { 669 diff = high_load - low_load; 670 move = diff / 2; 671 if (diff & 0x1) 672 move++; 673 move = min(move, transferable); 674 for (i = 0; i < move; i++) 675 tdq_move(high, low); 676 /* 677 * IPI the target cpu to force it to reschedule with the new 678 * workload. 679 */ 680 ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT); 681 } 682 tdq_unlock_pair(high, low); 683 return; 684} 685 686/* 687 * Move a thread from one thread queue to another. 688 */ 689static void 690tdq_move(struct tdq *from, struct tdq *to) 691{ 692 struct td_sched *ts; 693 struct thread *td; 694 struct tdq *tdq; 695 int cpu; 696 697 TDQ_LOCK_ASSERT(from, MA_OWNED); 698 TDQ_LOCK_ASSERT(to, MA_OWNED); 699 700 tdq = from; 701 cpu = TDQ_ID(to); 702 ts = tdq_steal(tdq); 703 if (ts == NULL) { 704 struct tdq_group *tdg; 705 706 tdg = tdq->tdq_group; 707 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) { 708 if (tdq == from || tdq->tdq_transferable == 0) 709 continue; 710 ts = tdq_steal(tdq); 711 break; 712 } 713 if (ts == NULL) 714 return; 715 } 716 if (tdq == to) 717 return; 718 td = ts->ts_thread; 719 /* 720 * Although the run queue is locked the thread may be blocked. Lock 721 * it to clear this and acquire the run-queue lock. 722 */ 723 thread_lock(td); 724 /* Drop recursive lock on from acquired via thread_lock(). */ 725 TDQ_UNLOCK(from); 726 sched_rem(td); 727 ts->ts_cpu = cpu; 728 td->td_lock = TDQ_LOCKPTR(to); 729 tdq_add(to, td, SRQ_YIELDING); 730} 731 732/* 733 * This tdq has idled. Try to steal a thread from another cpu and switch 734 * to it. 735 */ 736static int 737tdq_idled(struct tdq *tdq) 738{ 739 struct tdq_group *tdg; 740 struct tdq *steal; 741 int highload; 742 int highcpu; 743 int load; 744 int cpu; 745 746 /* We don't want to be preempted while we're iterating over tdqs */ 747 spinlock_enter(); 748 tdg = tdq->tdq_group; 749 /* 750 * If we're in a cpu group, try and steal threads from another cpu in 751 * the group before idling. In a HTT group all cpus share the same 752 * run-queue lock, however, we still need a recursive lock to 753 * call tdq_move(). 754 */ 755 if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) { 756 TDQ_LOCK(tdq); 757 LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) { 758 if (steal == tdq || steal->tdq_transferable == 0) 759 continue; 760 TDQ_LOCK(steal); 761 goto steal; 762 } 763 TDQ_UNLOCK(tdq); 764 } 765 for (;;) { 766 if (steal_idle == 0) 767 break; 768 highcpu = 0; 769 highload = 0; 770 for (cpu = 0; cpu <= mp_maxid; cpu++) { 771 if (CPU_ABSENT(cpu)) 772 continue; 773 steal = TDQ_CPU(cpu); 774 load = TDQ_CPU(cpu)->tdq_transferable; 775 if (load < highload) 776 continue; 777 highload = load; 778 highcpu = cpu; 779 } 780 if (highload < steal_thresh) 781 break; 782 steal = TDQ_CPU(highcpu); 783 tdq_lock_pair(tdq, steal); 784 if (steal->tdq_transferable >= steal_thresh) 785 goto steal; 786 tdq_unlock_pair(tdq, steal); 787 break; 788 } 789 spinlock_exit(); 790 return (1); 791steal: 792 spinlock_exit(); 793 tdq_move(steal, tdq); 794 TDQ_UNLOCK(steal); 795 mi_switch(SW_VOL, NULL); 796 thread_unlock(curthread); 797 798 return (0); 799} 800 801/* 802 * Notify a remote cpu of new work. Sends an IPI if criteria are met. 803 */ 804static void 805tdq_notify(struct td_sched *ts) 806{ 807 struct thread *ctd; 808 struct pcpu *pcpu; 809 int cpri; 810 int pri; 811 int cpu; 812 813 cpu = ts->ts_cpu; 814 pri = ts->ts_thread->td_priority; 815 pcpu = pcpu_find(cpu); 816 ctd = pcpu->pc_curthread; 817 cpri = ctd->td_priority; 818 819 /* 820 * If our priority is not better than the current priority there is 821 * nothing to do. 822 */ 823 if (pri > cpri) 824 return; 825 /* 826 * Always IPI idle. 827 */ 828 if (cpri > PRI_MIN_IDLE) 829 goto sendipi; 830 /* 831 * If we're realtime or better and there is timeshare or worse running 832 * send an IPI. 833 */ 834 if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME) 835 goto sendipi; 836 /* 837 * Otherwise only IPI if we exceed the threshold. 838 */ 839 if (pri > preempt_thresh) 840 return; 841sendipi: 842 ctd->td_flags |= TDF_NEEDRESCHED; 843 ipi_selected(1 << cpu, IPI_PREEMPT); 844} 845 846/* 847 * Steals load from a timeshare queue. Honors the rotating queue head 848 * index. 849 */ 850static struct td_sched * 851runq_steal_from(struct runq *rq, u_char start) 852{ 853 struct td_sched *ts; 854 struct rqbits *rqb; 855 struct rqhead *rqh; 856 int first; 857 int bit; 858 int pri; 859 int i; 860 861 rqb = &rq->rq_status; 862 bit = start & (RQB_BPW -1); 863 pri = 0; 864 first = 0; 865again: 866 for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) { 867 if (rqb->rqb_bits[i] == 0) 868 continue; 869 if (bit != 0) { 870 for (pri = bit; pri < RQB_BPW; pri++) 871 if (rqb->rqb_bits[i] & (1ul << pri)) 872 break; 873 if (pri >= RQB_BPW) 874 continue; 875 } else 876 pri = RQB_FFS(rqb->rqb_bits[i]); 877 pri += (i << RQB_L2BPW); 878 rqh = &rq->rq_queues[pri]; 879 TAILQ_FOREACH(ts, rqh, ts_procq) { 880 if (first && THREAD_CAN_MIGRATE(ts->ts_thread)) 881 return (ts); 882 first = 1; 883 } 884 } 885 if (start != 0) { 886 start = 0; 887 goto again; 888 } 889 890 return (NULL); 891} 892 893/* 894 * Steals load from a standard linear queue. 895 */ 896static struct td_sched * 897runq_steal(struct runq *rq) 898{ 899 struct rqhead *rqh; 900 struct rqbits *rqb; 901 struct td_sched *ts; 902 int word; 903 int bit; 904 905 rqb = &rq->rq_status; 906 for (word = 0; word < RQB_LEN; word++) { 907 if (rqb->rqb_bits[word] == 0) 908 continue; 909 for (bit = 0; bit < RQB_BPW; bit++) { 910 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 911 continue; 912 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 913 TAILQ_FOREACH(ts, rqh, ts_procq) 914 if (THREAD_CAN_MIGRATE(ts->ts_thread)) 915 return (ts); 916 } 917 } 918 return (NULL); 919} 920 921/* 922 * Attempt to steal a thread in priority order from a thread queue. 923 */ 924static struct td_sched * 925tdq_steal(struct tdq *tdq) 926{ 927 struct td_sched *ts; 928 929 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 930 if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL) 931 return (ts); 932 if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL) 933 return (ts); 934 return (runq_steal(&tdq->tdq_idle)); 935} 936 937/* 938 * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the 939 * current lock and returns with the assigned queue locked. 940 */ 941static inline struct tdq * 942sched_setcpu(struct td_sched *ts, int cpu, int flags) 943{ 944 struct thread *td; 945 struct tdq *tdq; 946 947 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 948 949 tdq = TDQ_CPU(cpu); 950 td = ts->ts_thread; 951 ts->ts_cpu = cpu; 952 953 /* If the lock matches just return the queue. */ 954 if (td->td_lock == TDQ_LOCKPTR(tdq)) 955 return (tdq); 956#ifdef notyet 957 /* 958 * If the thread isn't running its lockptr is a 959 * turnstile or a sleepqueue. We can just lock_set without 960 * blocking. 961 */ 962 if (TD_CAN_RUN(td)) { 963 TDQ_LOCK(tdq); 964 thread_lock_set(td, TDQ_LOCKPTR(tdq)); 965 return (tdq); 966 } 967#endif 968 /* 969 * The hard case, migration, we need to block the thread first to 970 * prevent order reversals with other cpus locks. 971 */ 972 thread_lock_block(td); 973 TDQ_LOCK(tdq); 974 thread_lock_unblock(td, TDQ_LOCKPTR(tdq)); 975 return (tdq); 976} 977 978/* 979 * Find the thread queue running the lowest priority thread. 980 */ 981static int 982tdq_lowestpri(void) 983{ 984 struct tdq *tdq; 985 int lowpri; 986 int lowcpu; 987 int lowload; 988 int load; 989 int cpu; 990 int pri; 991 992 lowload = 0; 993 lowpri = lowcpu = 0; 994 for (cpu = 0; cpu <= mp_maxid; cpu++) { 995 if (CPU_ABSENT(cpu)) 996 continue; 997 tdq = TDQ_CPU(cpu); 998 pri = tdq->tdq_lowpri; 999 load = TDQ_CPU(cpu)->tdq_load; 1000 CTR4(KTR_ULE, 1001 "cpu %d pri %d lowcpu %d lowpri %d", 1002 cpu, pri, lowcpu, lowpri); 1003 if (pri < lowpri) 1004 continue; 1005 if (lowpri && lowpri == pri && load > lowload) 1006 continue; 1007 lowpri = pri; 1008 lowcpu = cpu; 1009 lowload = load; 1010 } 1011 1012 return (lowcpu); 1013} 1014 1015/* 1016 * Find the thread queue with the least load. 1017 */ 1018static int 1019tdq_lowestload(void) 1020{ 1021 struct tdq *tdq; 1022 int lowload; 1023 int lowpri; 1024 int lowcpu; 1025 int load; 1026 int cpu; 1027 int pri; 1028 1029 lowcpu = 0; 1030 lowload = TDQ_CPU(0)->tdq_load; 1031 lowpri = TDQ_CPU(0)->tdq_lowpri; 1032 for (cpu = 1; cpu <= mp_maxid; cpu++) { 1033 if (CPU_ABSENT(cpu)) 1034 continue; 1035 tdq = TDQ_CPU(cpu); 1036 load = tdq->tdq_load; 1037 pri = tdq->tdq_lowpri; 1038 CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d", 1039 cpu, load, lowcpu, lowload); 1040 if (load > lowload) 1041 continue; 1042 if (load == lowload && pri < lowpri) 1043 continue; 1044 lowcpu = cpu; 1045 lowload = load; 1046 lowpri = pri; 1047 } 1048 1049 return (lowcpu); 1050} 1051 1052/* 1053 * Pick the destination cpu for sched_add(). Respects affinity and makes 1054 * a determination based on load or priority of available processors. 1055 */ 1056static int 1057sched_pickcpu(struct td_sched *ts, int flags) 1058{ 1059 struct tdq *tdq; 1060 int self; 1061 int pri; 1062 int cpu; 1063 1064 cpu = self = PCPU_GET(cpuid); 1065 if (smp_started == 0) 1066 return (self); 1067 /* 1068 * Don't migrate a running thread from sched_switch(). 1069 */ 1070 if (flags & SRQ_OURSELF) { 1071 CTR1(KTR_ULE, "YIELDING %d", 1072 curthread->td_priority); 1073 return (self); 1074 } 1075 pri = ts->ts_thread->td_priority; 1076 cpu = ts->ts_cpu; 1077 /* 1078 * Regardless of affinity, if the last cpu is idle send it there. 1079 */ 1080 tdq = TDQ_CPU(cpu); 1081 if (tdq->tdq_lowpri > PRI_MIN_IDLE) { 1082 CTR5(KTR_ULE, 1083 "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d", 1084 ts->ts_cpu, ts->ts_rltick, ticks, pri, 1085 tdq->tdq_lowpri); 1086 return (ts->ts_cpu); 1087 } 1088 /* 1089 * If we have affinity, try to place it on the cpu we last ran on. 1090 */ 1091 if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) { 1092 CTR5(KTR_ULE, 1093 "affinity for %d, ltick %d ticks %d pri %d curthread %d", 1094 ts->ts_cpu, ts->ts_rltick, ticks, pri, 1095 tdq->tdq_lowpri); 1096 return (ts->ts_cpu); 1097 } 1098 /* 1099 * Look for an idle group. 1100 */ 1101 CTR1(KTR_ULE, "tdq_idle %X", tdq_idle); 1102 cpu = ffs(tdq_idle); 1103 if (cpu) 1104 return (--cpu); 1105 /* 1106 * If there are no idle cores see if we can run the thread locally. 1107 * This may improve locality among sleepers and wakers when there 1108 * is shared data. 1109 */ 1110 if (tryself && pri < curthread->td_priority) { 1111 CTR1(KTR_ULE, "tryself %d", 1112 curthread->td_priority); 1113 return (self); 1114 } 1115 /* 1116 * Now search for the cpu running the lowest priority thread with 1117 * the least load. 1118 */ 1119 if (pick_pri) 1120 cpu = tdq_lowestpri(); 1121 else 1122 cpu = tdq_lowestload(); 1123 return (cpu); 1124} 1125 1126#endif /* SMP */ 1127 1128/* 1129 * Pick the highest priority task we have and return it. 1130 */ 1131static struct td_sched * 1132tdq_choose(struct tdq *tdq) 1133{ 1134 struct td_sched *ts; 1135 1136 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1137 ts = runq_choose(&tdq->tdq_realtime); 1138 if (ts != NULL) 1139 return (ts); 1140 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 1141 if (ts != NULL) { 1142 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 1143 ("tdq_choose: Invalid priority on timeshare queue %d", 1144 ts->ts_thread->td_priority)); 1145 return (ts); 1146 } 1147 1148 ts = runq_choose(&tdq->tdq_idle); 1149 if (ts != NULL) { 1150 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 1151 ("tdq_choose: Invalid priority on idle queue %d", 1152 ts->ts_thread->td_priority)); 1153 return (ts); 1154 } 1155 1156 return (NULL); 1157} 1158 1159/* 1160 * Initialize a thread queue. 1161 */ 1162static void 1163tdq_setup(struct tdq *tdq) 1164{ 1165 1166 if (bootverbose) 1167 printf("ULE: setup cpu %d\n", TDQ_ID(tdq)); 1168 runq_init(&tdq->tdq_realtime); 1169 runq_init(&tdq->tdq_timeshare); 1170 runq_init(&tdq->tdq_idle); 1171 tdq->tdq_load = 0; 1172} 1173 1174#ifdef SMP 1175static void 1176tdg_setup(struct tdq_group *tdg) 1177{ 1178 if (bootverbose) 1179 printf("ULE: setup cpu group %d\n", TDG_ID(tdg)); 1180 snprintf(tdg->tdg_name, sizeof(tdg->tdg_name), 1181 "sched lock %d", (int)TDG_ID(tdg)); 1182 mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock", 1183 MTX_SPIN | MTX_RECURSE); 1184 LIST_INIT(&tdg->tdg_members); 1185 tdg->tdg_load = 0; 1186 tdg->tdg_transferable = 0; 1187 tdg->tdg_cpus = 0; 1188 tdg->tdg_mask = 0; 1189 tdg->tdg_cpumask = 0; 1190 tdg->tdg_idlemask = 0; 1191} 1192 1193static void 1194tdg_add(struct tdq_group *tdg, struct tdq *tdq) 1195{ 1196 if (tdg->tdg_mask == 0) 1197 tdg->tdg_mask |= 1 << TDQ_ID(tdq); 1198 tdg->tdg_cpumask |= 1 << TDQ_ID(tdq); 1199 tdg->tdg_cpus++; 1200 tdq->tdq_group = tdg; 1201 tdq->tdq_lock = &tdg->tdg_lock; 1202 LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings); 1203 if (bootverbose) 1204 printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n", 1205 TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask); 1206} 1207 1208static void 1209sched_setup_topology(void) 1210{ 1211 struct tdq_group *tdg; 1212 struct cpu_group *cg; 1213 int balance_groups; 1214 struct tdq *tdq; 1215 int i; 1216 int j; 1217 1218 topology = 1; 1219 balance_groups = 0; 1220 for (i = 0; i < smp_topology->ct_count; i++) { 1221 cg = &smp_topology->ct_group[i]; 1222 tdg = &tdq_groups[i]; 1223 /* 1224 * Initialize the group. 1225 */ 1226 tdg_setup(tdg); 1227 /* 1228 * Find all of the group members and add them. 1229 */ 1230 for (j = 0; j < MAXCPU; j++) { 1231 if ((cg->cg_mask & (1 << j)) != 0) { 1232 tdq = TDQ_CPU(j); 1233 tdq_setup(tdq); 1234 tdg_add(tdg, tdq); 1235 } 1236 } 1237 if (tdg->tdg_cpus > 1) 1238 balance_groups = 1; 1239 } 1240 tdg_maxid = smp_topology->ct_count - 1; 1241 if (balance_groups) 1242 sched_balance_groups(); 1243} 1244 1245static void 1246sched_setup_smp(void) 1247{ 1248 struct tdq_group *tdg; 1249 struct tdq *tdq; 1250 int cpus; 1251 int i; 1252 1253 for (cpus = 0, i = 0; i < MAXCPU; i++) { 1254 if (CPU_ABSENT(i)) 1255 continue; 1256 tdq = &tdq_cpu[i]; 1257 tdg = &tdq_groups[i]; 1258 /* 1259 * Setup a tdq group with one member. 1260 */ 1261 tdg_setup(tdg); 1262 tdq_setup(tdq); 1263 tdg_add(tdg, tdq); 1264 cpus++; 1265 } 1266 tdg_maxid = cpus - 1; 1267} 1268 1269/* 1270 * Fake a topology with one group containing all CPUs. 1271 */ 1272static void 1273sched_fake_topo(void) 1274{ 1275#ifdef SCHED_FAKE_TOPOLOGY 1276 static struct cpu_top top; 1277 static struct cpu_group group; 1278 1279 top.ct_count = 1; 1280 top.ct_group = &group; 1281 group.cg_mask = all_cpus; 1282 group.cg_count = mp_ncpus; 1283 group.cg_children = 0; 1284 smp_topology = ⊤ 1285#endif 1286} 1287#endif 1288 1289/* 1290 * Setup the thread queues and initialize the topology based on MD 1291 * information. 1292 */ 1293static void 1294sched_setup(void *dummy) 1295{ 1296 struct tdq *tdq; 1297 1298 tdq = TDQ_SELF(); 1299#ifdef SMP 1300 sched_fake_topo(); 1301 /* 1302 * Setup tdqs based on a topology configuration or vanilla SMP based 1303 * on mp_maxid. 1304 */ 1305 if (smp_topology == NULL) 1306 sched_setup_smp(); 1307 else 1308 sched_setup_topology(); 1309 balance_tdq = tdq; 1310 sched_balance(); 1311#else 1312 tdq_setup(tdq); 1313 mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE); 1314 tdq->tdq_lock = &tdq_lock; 1315#endif 1316 /* 1317 * To avoid divide-by-zero, we set realstathz a dummy value 1318 * in case which sched_clock() called before sched_initticks(). 1319 */ 1320 realstathz = hz; 1321 sched_slice = (realstathz/10); /* ~100ms */ 1322 tickincr = 1 << SCHED_TICK_SHIFT; 1323 1324 /* Add thread0's load since it's running. */ 1325 TDQ_LOCK(tdq); 1326 thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1327 tdq_load_add(tdq, &td_sched0); 1328 TDQ_UNLOCK(tdq); 1329} 1330 1331/* 1332 * This routine determines the tickincr after stathz and hz are setup. 1333 */ 1334/* ARGSUSED */ 1335static void 1336sched_initticks(void *dummy) 1337{ 1338 int incr; 1339 1340 realstathz = stathz ? stathz : hz; 1341 sched_slice = (realstathz/10); /* ~100ms */ 1342 1343 /* 1344 * tickincr is shifted out by 10 to avoid rounding errors due to 1345 * hz not being evenly divisible by stathz on all platforms. 1346 */ 1347 incr = (hz << SCHED_TICK_SHIFT) / realstathz; 1348 /* 1349 * This does not work for values of stathz that are more than 1350 * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1351 */ 1352 if (incr == 0) 1353 incr = 1; 1354 tickincr = incr; 1355#ifdef SMP 1356 /* 1357 * Set the default balance interval now that we know 1358 * what realstathz is. 1359 */ 1360 balance_interval = realstathz; 1361 /* 1362 * Set steal thresh to log2(mp_ncpu) but no greater than 4. This 1363 * prevents excess thrashing on large machines and excess idle on 1364 * smaller machines. 1365 */ 1366 steal_thresh = min(ffs(mp_ncpus) - 1, 4); 1367 affinity = SCHED_AFFINITY_DEFAULT; 1368#endif 1369} 1370 1371 1372/* 1373 * This is the core of the interactivity algorithm. Determines a score based 1374 * on past behavior. It is the ratio of sleep time to run time scaled to 1375 * a [0, 100] integer. This is the voluntary sleep time of a process, which 1376 * differs from the cpu usage because it does not account for time spent 1377 * waiting on a run-queue. Would be prettier if we had floating point. 1378 */ 1379static int 1380sched_interact_score(struct thread *td) 1381{ 1382 struct td_sched *ts; 1383 int div; 1384 1385 ts = td->td_sched; 1386 /* 1387 * The score is only needed if this is likely to be an interactive 1388 * task. Don't go through the expense of computing it if there's 1389 * no chance. 1390 */ 1391 if (sched_interact <= SCHED_INTERACT_HALF && 1392 ts->ts_runtime >= ts->ts_slptime) 1393 return (SCHED_INTERACT_HALF); 1394 1395 if (ts->ts_runtime > ts->ts_slptime) { 1396 div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1397 return (SCHED_INTERACT_HALF + 1398 (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1399 } 1400 if (ts->ts_slptime > ts->ts_runtime) { 1401 div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1402 return (ts->ts_runtime / div); 1403 } 1404 /* runtime == slptime */ 1405 if (ts->ts_runtime) 1406 return (SCHED_INTERACT_HALF); 1407 1408 /* 1409 * This can happen if slptime and runtime are 0. 1410 */ 1411 return (0); 1412 1413} 1414 1415/* 1416 * Scale the scheduling priority according to the "interactivity" of this 1417 * process. 1418 */ 1419static void 1420sched_priority(struct thread *td) 1421{ 1422 int score; 1423 int pri; 1424 1425 if (td->td_pri_class != PRI_TIMESHARE) 1426 return; 1427 /* 1428 * If the score is interactive we place the thread in the realtime 1429 * queue with a priority that is less than kernel and interrupt 1430 * priorities. These threads are not subject to nice restrictions. 1431 * 1432 * Scores greater than this are placed on the normal timeshare queue 1433 * where the priority is partially decided by the most recent cpu 1434 * utilization and the rest is decided by nice value. 1435 * 1436 * The nice value of the process has a linear effect on the calculated 1437 * score. Negative nice values make it easier for a thread to be 1438 * considered interactive. 1439 */ 1440 score = imax(0, sched_interact_score(td) - td->td_proc->p_nice); 1441 if (score < sched_interact) { 1442 pri = PRI_MIN_REALTIME; 1443 pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1444 * score; 1445 KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 1446 ("sched_priority: invalid interactive priority %d score %d", 1447 pri, score)); 1448 } else { 1449 pri = SCHED_PRI_MIN; 1450 if (td->td_sched->ts_ticks) 1451 pri += SCHED_PRI_TICKS(td->td_sched); 1452 pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1453 KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE, 1454 ("sched_priority: invalid priority %d: nice %d, " 1455 "ticks %d ftick %d ltick %d tick pri %d", 1456 pri, td->td_proc->p_nice, td->td_sched->ts_ticks, 1457 td->td_sched->ts_ftick, td->td_sched->ts_ltick, 1458 SCHED_PRI_TICKS(td->td_sched))); 1459 } 1460 sched_user_prio(td, pri); 1461 1462 return; 1463} 1464 1465/* 1466 * This routine enforces a maximum limit on the amount of scheduling history 1467 * kept. It is called after either the slptime or runtime is adjusted. This 1468 * function is ugly due to integer math. 1469 */ 1470static void 1471sched_interact_update(struct thread *td) 1472{ 1473 struct td_sched *ts; 1474 u_int sum; 1475 1476 ts = td->td_sched; 1477 sum = ts->ts_runtime + ts->ts_slptime; 1478 if (sum < SCHED_SLP_RUN_MAX) 1479 return; 1480 /* 1481 * This only happens from two places: 1482 * 1) We have added an unusual amount of run time from fork_exit. 1483 * 2) We have added an unusual amount of sleep time from sched_sleep(). 1484 */ 1485 if (sum > SCHED_SLP_RUN_MAX * 2) { 1486 if (ts->ts_runtime > ts->ts_slptime) { 1487 ts->ts_runtime = SCHED_SLP_RUN_MAX; 1488 ts->ts_slptime = 1; 1489 } else { 1490 ts->ts_slptime = SCHED_SLP_RUN_MAX; 1491 ts->ts_runtime = 1; 1492 } 1493 return; 1494 } 1495 /* 1496 * If we have exceeded by more than 1/5th then the algorithm below 1497 * will not bring us back into range. Dividing by two here forces 1498 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1499 */ 1500 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1501 ts->ts_runtime /= 2; 1502 ts->ts_slptime /= 2; 1503 return; 1504 } 1505 ts->ts_runtime = (ts->ts_runtime / 5) * 4; 1506 ts->ts_slptime = (ts->ts_slptime / 5) * 4; 1507} 1508 1509/* 1510 * Scale back the interactivity history when a child thread is created. The 1511 * history is inherited from the parent but the thread may behave totally 1512 * differently. For example, a shell spawning a compiler process. We want 1513 * to learn that the compiler is behaving badly very quickly. 1514 */ 1515static void 1516sched_interact_fork(struct thread *td) 1517{ 1518 int ratio; 1519 int sum; 1520 1521 sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime; 1522 if (sum > SCHED_SLP_RUN_FORK) { 1523 ratio = sum / SCHED_SLP_RUN_FORK; 1524 td->td_sched->ts_runtime /= ratio; 1525 td->td_sched->ts_slptime /= ratio; 1526 } 1527} 1528 1529/* 1530 * Called from proc0_init() to setup the scheduler fields. 1531 */ 1532void 1533schedinit(void) 1534{ 1535 1536 /* 1537 * Set up the scheduler specific parts of proc0. 1538 */ 1539 proc0.p_sched = NULL; /* XXX */ 1540 thread0.td_sched = &td_sched0; 1541 td_sched0.ts_ltick = ticks; 1542 td_sched0.ts_ftick = ticks; 1543 td_sched0.ts_thread = &thread0; 1544} 1545 1546/* 1547 * This is only somewhat accurate since given many processes of the same 1548 * priority they will switch when their slices run out, which will be 1549 * at most sched_slice stathz ticks. 1550 */ 1551int 1552sched_rr_interval(void) 1553{ 1554 1555 /* Convert sched_slice to hz */ 1556 return (hz/(realstathz/sched_slice)); 1557} 1558 1559/* 1560 * Update the percent cpu tracking information when it is requested or 1561 * the total history exceeds the maximum. We keep a sliding history of 1562 * tick counts that slowly decays. This is less precise than the 4BSD 1563 * mechanism since it happens with less regular and frequent events. 1564 */ 1565static void 1566sched_pctcpu_update(struct td_sched *ts) 1567{ 1568 1569 if (ts->ts_ticks == 0) 1570 return; 1571 if (ticks - (hz / 10) < ts->ts_ltick && 1572 SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 1573 return; 1574 /* 1575 * Adjust counters and watermark for pctcpu calc. 1576 */ 1577 if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1578 ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1579 SCHED_TICK_TARG; 1580 else 1581 ts->ts_ticks = 0; 1582 ts->ts_ltick = ticks; 1583 ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 1584} 1585 1586/* 1587 * Adjust the priority of a thread. Move it to the appropriate run-queue 1588 * if necessary. This is the back-end for several priority related 1589 * functions. 1590 */ 1591static void 1592sched_thread_priority(struct thread *td, u_char prio) 1593{ 1594 struct td_sched *ts; 1595 1596 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1597 td, td->td_proc->p_comm, td->td_priority, prio, curthread, 1598 curthread->td_proc->p_comm); 1599 ts = td->td_sched; 1600 THREAD_LOCK_ASSERT(td, MA_OWNED); 1601 if (td->td_priority == prio) 1602 return; 1603 1604 if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1605 /* 1606 * If the priority has been elevated due to priority 1607 * propagation, we may have to move ourselves to a new 1608 * queue. This could be optimized to not re-add in some 1609 * cases. 1610 */ 1611 sched_rem(td); 1612 td->td_priority = prio; 1613 sched_add(td, SRQ_BORROWING); 1614 } else { 1615#ifdef SMP 1616 struct tdq *tdq; 1617 1618 tdq = TDQ_CPU(ts->ts_cpu); 1619 if (prio < tdq->tdq_lowpri) 1620 tdq->tdq_lowpri = prio; 1621#endif 1622 td->td_priority = prio; 1623 } 1624} 1625 1626/* 1627 * Update a thread's priority when it is lent another thread's 1628 * priority. 1629 */ 1630void 1631sched_lend_prio(struct thread *td, u_char prio) 1632{ 1633 1634 td->td_flags |= TDF_BORROWING; 1635 sched_thread_priority(td, prio); 1636} 1637 1638/* 1639 * Restore a thread's priority when priority propagation is 1640 * over. The prio argument is the minimum priority the thread 1641 * needs to have to satisfy other possible priority lending 1642 * requests. If the thread's regular priority is less 1643 * important than prio, the thread will keep a priority boost 1644 * of prio. 1645 */ 1646void 1647sched_unlend_prio(struct thread *td, u_char prio) 1648{ 1649 u_char base_pri; 1650 1651 if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1652 td->td_base_pri <= PRI_MAX_TIMESHARE) 1653 base_pri = td->td_user_pri; 1654 else 1655 base_pri = td->td_base_pri; 1656 if (prio >= base_pri) { 1657 td->td_flags &= ~TDF_BORROWING; 1658 sched_thread_priority(td, base_pri); 1659 } else 1660 sched_lend_prio(td, prio); 1661} 1662 1663/* 1664 * Standard entry for setting the priority to an absolute value. 1665 */ 1666void 1667sched_prio(struct thread *td, u_char prio) 1668{ 1669 u_char oldprio; 1670 1671 /* First, update the base priority. */ 1672 td->td_base_pri = prio; 1673 1674 /* 1675 * If the thread is borrowing another thread's priority, don't 1676 * ever lower the priority. 1677 */ 1678 if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1679 return; 1680 1681 /* Change the real priority. */ 1682 oldprio = td->td_priority; 1683 sched_thread_priority(td, prio); 1684 1685 /* 1686 * If the thread is on a turnstile, then let the turnstile update 1687 * its state. 1688 */ 1689 if (TD_ON_LOCK(td) && oldprio != prio) 1690 turnstile_adjust(td, oldprio); 1691} 1692 1693/* 1694 * Set the base user priority, does not effect current running priority. 1695 */ 1696void 1697sched_user_prio(struct thread *td, u_char prio) 1698{ 1699 u_char oldprio; 1700 1701 td->td_base_user_pri = prio; 1702 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1703 return; 1704 oldprio = td->td_user_pri; 1705 td->td_user_pri = prio; 1706 1707 if (TD_ON_UPILOCK(td) && oldprio != prio) 1708 umtx_pi_adjust(td, oldprio); 1709} 1710 1711void 1712sched_lend_user_prio(struct thread *td, u_char prio) 1713{ 1714 u_char oldprio; 1715 1716 td->td_flags |= TDF_UBORROWING; 1717 1718 oldprio = td->td_user_pri; 1719 td->td_user_pri = prio; 1720 1721 if (TD_ON_UPILOCK(td) && oldprio != prio) 1722 umtx_pi_adjust(td, oldprio); 1723} 1724 1725void 1726sched_unlend_user_prio(struct thread *td, u_char prio) 1727{ 1728 u_char base_pri; 1729 1730 base_pri = td->td_base_user_pri; 1731 if (prio >= base_pri) { 1732 td->td_flags &= ~TDF_UBORROWING; 1733 sched_user_prio(td, base_pri); 1734 } else 1735 sched_lend_user_prio(td, prio); 1736} 1737 1738/* 1739 * Add the thread passed as 'newtd' to the run queue before selecting 1740 * the next thread to run. This is only used for KSE. 1741 */ 1742static void 1743sched_switchin(struct tdq *tdq, struct thread *td) 1744{ 1745#ifdef SMP 1746 spinlock_enter(); 1747 TDQ_UNLOCK(tdq); 1748 thread_lock(td); 1749 spinlock_exit(); 1750 sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING); 1751#else 1752 td->td_lock = TDQ_LOCKPTR(tdq); 1753#endif 1754 tdq_add(tdq, td, SRQ_YIELDING); 1755 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1756} 1757 1758/* 1759 * Handle migration from sched_switch(). This happens only for 1760 * cpu binding. 1761 */ 1762static struct mtx * 1763sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1764{ 1765 struct tdq *tdn; 1766 1767 tdn = TDQ_CPU(td->td_sched->ts_cpu); 1768#ifdef SMP 1769 /* 1770 * Do the lock dance required to avoid LOR. We grab an extra 1771 * spinlock nesting to prevent preemption while we're 1772 * not holding either run-queue lock. 1773 */ 1774 spinlock_enter(); 1775 thread_block_switch(td); /* This releases the lock on tdq. */ 1776 TDQ_LOCK(tdn); 1777 tdq_add(tdn, td, flags); 1778 tdq_notify(td->td_sched); 1779 /* 1780 * After we unlock tdn the new cpu still can't switch into this 1781 * thread until we've unblocked it in cpu_switch(). The lock 1782 * pointers may match in the case of HTT cores. Don't unlock here 1783 * or we can deadlock when the other CPU runs the IPI handler. 1784 */ 1785 if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1786 TDQ_UNLOCK(tdn); 1787 TDQ_LOCK(tdq); 1788 } 1789 spinlock_exit(); 1790#endif 1791 return (TDQ_LOCKPTR(tdn)); 1792} 1793 1794/* 1795 * Block a thread for switching. Similar to thread_block() but does not 1796 * bump the spin count. 1797 */ 1798static inline struct mtx * 1799thread_block_switch(struct thread *td) 1800{ 1801 struct mtx *lock; 1802 1803 THREAD_LOCK_ASSERT(td, MA_OWNED); 1804 lock = td->td_lock; 1805 td->td_lock = &blocked_lock; 1806 mtx_unlock_spin(lock); 1807 1808 return (lock); 1809} 1810 1811/* 1812 * Release a thread that was blocked with thread_block_switch(). 1813 */ 1814static inline void 1815thread_unblock_switch(struct thread *td, struct mtx *mtx) 1816{ 1817 atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1818 (uintptr_t)mtx); 1819} 1820 1821/* 1822 * Switch threads. This function has to handle threads coming in while 1823 * blocked for some reason, running, or idle. It also must deal with 1824 * migrating a thread from one queue to another as running threads may 1825 * be assigned elsewhere via binding. 1826 */ 1827void 1828sched_switch(struct thread *td, struct thread *newtd, int flags) 1829{ 1830 struct tdq *tdq; 1831 struct td_sched *ts; 1832 struct mtx *mtx; 1833 int srqflag; 1834 int cpuid; 1835 1836 THREAD_LOCK_ASSERT(td, MA_OWNED); 1837 1838 cpuid = PCPU_GET(cpuid); 1839 tdq = TDQ_CPU(cpuid); 1840 ts = td->td_sched; 1841 mtx = td->td_lock; 1842#ifdef SMP 1843 ts->ts_rltick = ticks; 1844 if (newtd && newtd->td_priority < tdq->tdq_lowpri) 1845 tdq->tdq_lowpri = newtd->td_priority; 1846#endif 1847 td->td_lastcpu = td->td_oncpu; 1848 td->td_oncpu = NOCPU; 1849 td->td_flags &= ~TDF_NEEDRESCHED; 1850 td->td_owepreempt = 0; 1851 /* 1852 * The lock pointer in an idle thread should never change. Reset it 1853 * to CAN_RUN as well. 1854 */ 1855 if (TD_IS_IDLETHREAD(td)) { 1856 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1857 TD_SET_CAN_RUN(td); 1858 } else if (TD_IS_RUNNING(td)) { 1859 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1860 tdq_load_rem(tdq, ts); 1861 srqflag = (flags & SW_PREEMPT) ? 1862 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1863 SRQ_OURSELF|SRQ_YIELDING; 1864 if (ts->ts_cpu == cpuid) 1865 tdq_add(tdq, td, srqflag); 1866 else 1867 mtx = sched_switch_migrate(tdq, td, srqflag); 1868 } else { 1869 /* This thread must be going to sleep. */ 1870 TDQ_LOCK(tdq); 1871 mtx = thread_block_switch(td); 1872 tdq_load_rem(tdq, ts); 1873 } 1874 /* 1875 * We enter here with the thread blocked and assigned to the 1876 * appropriate cpu run-queue or sleep-queue and with the current 1877 * thread-queue locked. 1878 */ 1879 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1880 /* 1881 * If KSE assigned a new thread just add it here and let choosethread 1882 * select the best one. 1883 */ 1884 if (newtd != NULL) 1885 sched_switchin(tdq, newtd); 1886 newtd = choosethread(); 1887 /* 1888 * Call the MD code to switch contexts if necessary. 1889 */ 1890 if (td != newtd) { 1891#ifdef HWPMC_HOOKS 1892 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1893 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1894#endif 1895 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1896 cpu_switch(td, newtd, mtx); 1897 /* 1898 * We may return from cpu_switch on a different cpu. However, 1899 * we always return with td_lock pointing to the current cpu's 1900 * run queue lock. 1901 */ 1902 cpuid = PCPU_GET(cpuid); 1903 tdq = TDQ_CPU(cpuid); 1904#ifdef HWPMC_HOOKS 1905 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1906 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1907#endif 1908 } else 1909 thread_unblock_switch(td, mtx); 1910 /* 1911 * Assert that all went well and return. 1912 */ 1913#ifdef SMP 1914 /* We should always get here with the lowest priority td possible */ 1915 tdq->tdq_lowpri = td->td_priority; 1916#endif 1917 TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1918 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1919 td->td_oncpu = cpuid; 1920} 1921 1922/* 1923 * Adjust thread priorities as a result of a nice request. 1924 */ 1925void 1926sched_nice(struct proc *p, int nice) 1927{ 1928 struct thread *td; 1929 1930 PROC_LOCK_ASSERT(p, MA_OWNED); 1931 PROC_SLOCK_ASSERT(p, MA_OWNED); 1932 1933 p->p_nice = nice; 1934 FOREACH_THREAD_IN_PROC(p, td) { 1935 thread_lock(td); 1936 sched_priority(td); 1937 sched_prio(td, td->td_base_user_pri); 1938 thread_unlock(td); 1939 } 1940} 1941 1942/* 1943 * Record the sleep time for the interactivity scorer. 1944 */ 1945void 1946sched_sleep(struct thread *td) 1947{ 1948 1949 THREAD_LOCK_ASSERT(td, MA_OWNED); 1950 1951 td->td_slptick = ticks; 1952} 1953 1954/* 1955 * Schedule a thread to resume execution and record how long it voluntarily 1956 * slept. We also update the pctcpu, interactivity, and priority. 1957 */ 1958void 1959sched_wakeup(struct thread *td) 1960{ 1961 struct td_sched *ts; 1962 int slptick; 1963 1964 THREAD_LOCK_ASSERT(td, MA_OWNED); 1965 ts = td->td_sched; 1966 /* 1967 * If we slept for more than a tick update our interactivity and 1968 * priority. 1969 */ 1970 slptick = td->td_slptick; 1971 td->td_slptick = 0; 1972 if (slptick && slptick != ticks) { 1973 u_int hzticks; 1974 1975 hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1976 ts->ts_slptime += hzticks; 1977 sched_interact_update(td); 1978 sched_pctcpu_update(ts); 1979 sched_priority(td); 1980 } 1981 /* Reset the slice value after we sleep. */ 1982 ts->ts_slice = sched_slice; 1983 sched_add(td, SRQ_BORING); 1984} 1985 1986/* 1987 * Penalize the parent for creating a new child and initialize the child's 1988 * priority. 1989 */ 1990void 1991sched_fork(struct thread *td, struct thread *child) 1992{ 1993 THREAD_LOCK_ASSERT(td, MA_OWNED); 1994 sched_fork_thread(td, child); 1995 /* 1996 * Penalize the parent and child for forking. 1997 */ 1998 sched_interact_fork(child); 1999 sched_priority(child); 2000 td->td_sched->ts_runtime += tickincr; 2001 sched_interact_update(td); 2002 sched_priority(td); 2003} 2004 2005/* 2006 * Fork a new thread, may be within the same process. 2007 */ 2008void 2009sched_fork_thread(struct thread *td, struct thread *child) 2010{ 2011 struct td_sched *ts; 2012 struct td_sched *ts2; 2013 2014 /* 2015 * Initialize child. 2016 */ 2017 THREAD_LOCK_ASSERT(td, MA_OWNED); 2018 sched_newthread(child); 2019 child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 2020 ts = td->td_sched; 2021 ts2 = child->td_sched; 2022 ts2->ts_cpu = ts->ts_cpu; 2023 ts2->ts_runq = NULL; 2024 /* 2025 * Grab our parents cpu estimation information and priority. 2026 */ 2027 ts2->ts_ticks = ts->ts_ticks; 2028 ts2->ts_ltick = ts->ts_ltick; 2029 ts2->ts_ftick = ts->ts_ftick; 2030 child->td_user_pri = td->td_user_pri; 2031 child->td_base_user_pri = td->td_base_user_pri; 2032 /* 2033 * And update interactivity score. 2034 */ 2035 ts2->ts_slptime = ts->ts_slptime; 2036 ts2->ts_runtime = ts->ts_runtime; 2037 ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 2038} 2039 2040/* 2041 * Adjust the priority class of a thread. 2042 */ 2043void 2044sched_class(struct thread *td, int class) 2045{ 2046 2047 THREAD_LOCK_ASSERT(td, MA_OWNED); 2048 if (td->td_pri_class == class) 2049 return; 2050 2051#ifdef SMP 2052 /* 2053 * On SMP if we're on the RUNQ we must adjust the transferable 2054 * count because could be changing to or from an interrupt 2055 * class. 2056 */ 2057 if (TD_ON_RUNQ(td)) { 2058 struct tdq *tdq; 2059 2060 tdq = TDQ_CPU(td->td_sched->ts_cpu); 2061 if (THREAD_CAN_MIGRATE(td)) { 2062 tdq->tdq_transferable--; 2063 tdq->tdq_group->tdg_transferable--; 2064 } 2065 td->td_pri_class = class; 2066 if (THREAD_CAN_MIGRATE(td)) { 2067 tdq->tdq_transferable++; 2068 tdq->tdq_group->tdg_transferable++; 2069 } 2070 } 2071#endif 2072 td->td_pri_class = class; 2073} 2074 2075/* 2076 * Return some of the child's priority and interactivity to the parent. 2077 */ 2078void 2079sched_exit(struct proc *p, struct thread *child) 2080{ 2081 struct thread *td; 2082 2083 CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 2084 child, child->td_proc->p_comm, child->td_priority); 2085 2086 PROC_SLOCK_ASSERT(p, MA_OWNED); 2087 td = FIRST_THREAD_IN_PROC(p); 2088 sched_exit_thread(td, child); 2089} 2090 2091/* 2092 * Penalize another thread for the time spent on this one. This helps to 2093 * worsen the priority and interactivity of processes which schedule batch 2094 * jobs such as make. This has little effect on the make process itself but 2095 * causes new processes spawned by it to receive worse scores immediately. 2096 */ 2097void 2098sched_exit_thread(struct thread *td, struct thread *child) 2099{ 2100 2101 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 2102 child, child->td_proc->p_comm, child->td_priority); 2103 2104#ifdef KSE 2105 /* 2106 * KSE forks and exits so often that this penalty causes short-lived 2107 * threads to always be non-interactive. This causes mozilla to 2108 * crawl under load. 2109 */ 2110 if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc) 2111 return; 2112#endif 2113 /* 2114 * Give the child's runtime to the parent without returning the 2115 * sleep time as a penalty to the parent. This causes shells that 2116 * launch expensive things to mark their children as expensive. 2117 */ 2118 thread_lock(td); 2119 td->td_sched->ts_runtime += child->td_sched->ts_runtime; 2120 sched_interact_update(td); 2121 sched_priority(td); 2122 thread_unlock(td); 2123} 2124 2125/* 2126 * Fix priorities on return to user-space. Priorities may be elevated due 2127 * to static priorities in msleep() or similar. 2128 */ 2129void 2130sched_userret(struct thread *td) 2131{ 2132 /* 2133 * XXX we cheat slightly on the locking here to avoid locking in 2134 * the usual case. Setting td_priority here is essentially an 2135 * incomplete workaround for not setting it properly elsewhere. 2136 * Now that some interrupt handlers are threads, not setting it 2137 * properly elsewhere can clobber it in the window between setting 2138 * it here and returning to user mode, so don't waste time setting 2139 * it perfectly here. 2140 */ 2141 KASSERT((td->td_flags & TDF_BORROWING) == 0, 2142 ("thread with borrowed priority returning to userland")); 2143 if (td->td_priority != td->td_user_pri) { 2144 thread_lock(td); 2145 td->td_priority = td->td_user_pri; 2146 td->td_base_pri = td->td_user_pri; 2147 thread_unlock(td); 2148 } 2149} 2150 2151/* 2152 * Handle a stathz tick. This is really only relevant for timeshare 2153 * threads. 2154 */ 2155void 2156sched_clock(struct thread *td) 2157{ 2158 struct tdq *tdq; 2159 struct td_sched *ts; 2160 2161 THREAD_LOCK_ASSERT(td, MA_OWNED); 2162 tdq = TDQ_SELF(); 2163#ifdef SMP 2164 /* 2165 * We run the long term load balancer infrequently on the first cpu. 2166 */ 2167 if (balance_tdq == tdq) { 2168 if (balance_ticks && --balance_ticks == 0) 2169 sched_balance(); 2170 if (balance_group_ticks && --balance_group_ticks == 0) 2171 sched_balance_groups(); 2172 } 2173#endif 2174 /* 2175 * Advance the insert index once for each tick to ensure that all 2176 * threads get a chance to run. 2177 */ 2178 if (tdq->tdq_idx == tdq->tdq_ridx) { 2179 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 2180 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 2181 tdq->tdq_ridx = tdq->tdq_idx; 2182 } 2183 ts = td->td_sched; 2184 /* 2185 * We only do slicing code for TIMESHARE threads. 2186 */ 2187 if (td->td_pri_class != PRI_TIMESHARE) 2188 return; 2189 /* 2190 * We used a tick; charge it to the thread so that we can compute our 2191 * interactivity. 2192 */ 2193 td->td_sched->ts_runtime += tickincr; 2194 sched_interact_update(td); 2195 /* 2196 * We used up one time slice. 2197 */ 2198 if (--ts->ts_slice > 0) 2199 return; 2200 /* 2201 * We're out of time, recompute priorities and requeue. 2202 */ 2203 sched_priority(td); 2204 td->td_flags |= TDF_NEEDRESCHED; 2205} 2206 2207/* 2208 * Called once per hz tick. Used for cpu utilization information. This 2209 * is easier than trying to scale based on stathz. 2210 */ 2211void 2212sched_tick(void) 2213{ 2214 struct td_sched *ts; 2215 2216 ts = curthread->td_sched; 2217 /* Adjust ticks for pctcpu */ 2218 ts->ts_ticks += 1 << SCHED_TICK_SHIFT; 2219 ts->ts_ltick = ticks; 2220 /* 2221 * Update if we've exceeded our desired tick threshhold by over one 2222 * second. 2223 */ 2224 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) 2225 sched_pctcpu_update(ts); 2226} 2227 2228/* 2229 * Return whether the current CPU has runnable tasks. Used for in-kernel 2230 * cooperative idle threads. 2231 */ 2232int 2233sched_runnable(void) 2234{ 2235 struct tdq *tdq; 2236 int load; 2237 2238 load = 1; 2239 2240 tdq = TDQ_SELF(); 2241 if ((curthread->td_flags & TDF_IDLETD) != 0) { 2242 if (tdq->tdq_load > 0) 2243 goto out; 2244 } else 2245 if (tdq->tdq_load - 1 > 0) 2246 goto out; 2247 load = 0; 2248out: 2249 return (load); 2250} 2251 2252/* 2253 * Choose the highest priority thread to run. The thread is removed from 2254 * the run-queue while running however the load remains. For SMP we set 2255 * the tdq in the global idle bitmask if it idles here. 2256 */ 2257struct thread * 2258sched_choose(void) 2259{ 2260#ifdef SMP 2261 struct tdq_group *tdg; 2262#endif 2263 struct td_sched *ts; 2264 struct tdq *tdq; 2265 2266 tdq = TDQ_SELF(); 2267 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2268 ts = tdq_choose(tdq); 2269 if (ts) { 2270 tdq_runq_rem(tdq, ts); 2271 return (ts->ts_thread); 2272 } 2273#ifdef SMP 2274 /* 2275 * We only set the idled bit when all of the cpus in the group are 2276 * idle. Otherwise we could get into a situation where a thread bounces 2277 * back and forth between two idle cores on seperate physical CPUs. 2278 */ 2279 tdg = tdq->tdq_group; 2280 tdg->tdg_idlemask |= PCPU_GET(cpumask); 2281 if (tdg->tdg_idlemask == tdg->tdg_cpumask) 2282 atomic_set_int(&tdq_idle, tdg->tdg_mask); 2283 tdq->tdq_lowpri = PRI_MAX_IDLE; 2284#endif 2285 return (PCPU_GET(idlethread)); 2286} 2287 2288/* 2289 * Set owepreempt if necessary. Preemption never happens directly in ULE, 2290 * we always request it once we exit a critical section. 2291 */ 2292static inline void 2293sched_setpreempt(struct thread *td) 2294{ 2295 struct thread *ctd; 2296 int cpri; 2297 int pri; 2298 2299 ctd = curthread; 2300 pri = td->td_priority; 2301 cpri = ctd->td_priority; 2302 if (td->td_priority < ctd->td_priority) 2303 curthread->td_flags |= TDF_NEEDRESCHED; 2304 if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 2305 return; 2306 /* 2307 * Always preempt IDLE threads. Otherwise only if the preempting 2308 * thread is an ithread. 2309 */ 2310 if (pri > preempt_thresh && cpri < PRI_MIN_IDLE) 2311 return; 2312 ctd->td_owepreempt = 1; 2313 return; 2314} 2315 2316/* 2317 * Add a thread to a thread queue. Initializes priority, slice, runq, and 2318 * add it to the appropriate queue. This is the internal function called 2319 * when the tdq is predetermined. 2320 */ 2321void 2322tdq_add(struct tdq *tdq, struct thread *td, int flags) 2323{ 2324 struct td_sched *ts; 2325 int class; 2326#ifdef SMP 2327 int cpumask; 2328#endif 2329 2330 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2331 KASSERT((td->td_inhibitors == 0), 2332 ("sched_add: trying to run inhibited thread")); 2333 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 2334 ("sched_add: bad thread state")); 2335 KASSERT(td->td_flags & TDF_INMEM, 2336 ("sched_add: thread swapped out")); 2337 2338 ts = td->td_sched; 2339 class = PRI_BASE(td->td_pri_class); 2340 TD_SET_RUNQ(td); 2341 if (ts->ts_slice == 0) 2342 ts->ts_slice = sched_slice; 2343 /* 2344 * Pick the run queue based on priority. 2345 */ 2346 if (td->td_priority <= PRI_MAX_REALTIME) 2347 ts->ts_runq = &tdq->tdq_realtime; 2348 else if (td->td_priority <= PRI_MAX_TIMESHARE) 2349 ts->ts_runq = &tdq->tdq_timeshare; 2350 else 2351 ts->ts_runq = &tdq->tdq_idle; 2352#ifdef SMP 2353 cpumask = 1 << ts->ts_cpu; 2354 /* 2355 * If we had been idle, clear our bit in the group and potentially 2356 * the global bitmap. 2357 */ 2358 if ((class != PRI_IDLE && class != PRI_ITHD) && 2359 (tdq->tdq_group->tdg_idlemask & cpumask) != 0) { 2360 /* 2361 * Check to see if our group is unidling, and if so, remove it 2362 * from the global idle mask. 2363 */ 2364 if (tdq->tdq_group->tdg_idlemask == 2365 tdq->tdq_group->tdg_cpumask) 2366 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask); 2367 /* 2368 * Now remove ourselves from the group specific idle mask. 2369 */ 2370 tdq->tdq_group->tdg_idlemask &= ~cpumask; 2371 } 2372 if (td->td_priority < tdq->tdq_lowpri) 2373 tdq->tdq_lowpri = td->td_priority; 2374#endif 2375 tdq_runq_add(tdq, ts, flags); 2376 tdq_load_add(tdq, ts); 2377} 2378 2379/* 2380 * Select the target thread queue and add a thread to it. Request 2381 * preemption or IPI a remote processor if required. 2382 */ 2383void 2384sched_add(struct thread *td, int flags) 2385{ 2386 struct td_sched *ts; 2387 struct tdq *tdq; 2388#ifdef SMP 2389 int cpuid; 2390 int cpu; 2391#endif 2392 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 2393 td, td->td_proc->p_comm, td->td_priority, curthread, 2394 curthread->td_proc->p_comm); 2395 THREAD_LOCK_ASSERT(td, MA_OWNED); 2396 ts = td->td_sched; 2397 /* 2398 * Recalculate the priority before we select the target cpu or 2399 * run-queue. 2400 */ 2401 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2402 sched_priority(td); 2403#ifdef SMP 2404 cpuid = PCPU_GET(cpuid); 2405 /* 2406 * Pick the destination cpu and if it isn't ours transfer to the 2407 * target cpu. 2408 */ 2409 if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td)) 2410 cpu = cpuid; 2411 else if (!THREAD_CAN_MIGRATE(td)) 2412 cpu = ts->ts_cpu; 2413 else 2414 cpu = sched_pickcpu(ts, flags); 2415 tdq = sched_setcpu(ts, cpu, flags); 2416 tdq_add(tdq, td, flags); 2417 if (cpu != cpuid) { 2418 tdq_notify(ts); 2419 return; 2420 } 2421#else 2422 tdq = TDQ_SELF(); 2423 TDQ_LOCK(tdq); 2424 /* 2425 * Now that the thread is moving to the run-queue, set the lock 2426 * to the scheduler's lock. 2427 */ 2428 thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2429 tdq_add(tdq, td, flags); 2430#endif 2431 if (!(flags & SRQ_YIELDING)) 2432 sched_setpreempt(td); 2433} 2434 2435/* 2436 * Remove a thread from a run-queue without running it. This is used 2437 * when we're stealing a thread from a remote queue. Otherwise all threads 2438 * exit by calling sched_exit_thread() and sched_throw() themselves. 2439 */ 2440void 2441sched_rem(struct thread *td) 2442{ 2443 struct tdq *tdq; 2444 struct td_sched *ts; 2445 2446 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 2447 td, td->td_proc->p_comm, td->td_priority, curthread, 2448 curthread->td_proc->p_comm); 2449 ts = td->td_sched; 2450 tdq = TDQ_CPU(ts->ts_cpu); 2451 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2452 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2453 KASSERT(TD_ON_RUNQ(td), 2454 ("sched_rem: thread not on run queue")); 2455 tdq_runq_rem(tdq, ts); 2456 tdq_load_rem(tdq, ts); 2457 TD_SET_CAN_RUN(td); 2458} 2459 2460/* 2461 * Fetch cpu utilization information. Updates on demand. 2462 */ 2463fixpt_t 2464sched_pctcpu(struct thread *td) 2465{ 2466 fixpt_t pctcpu; 2467 struct td_sched *ts; 2468 2469 pctcpu = 0; 2470 ts = td->td_sched; 2471 if (ts == NULL) 2472 return (0); 2473 2474 thread_lock(td); 2475 if (ts->ts_ticks) { 2476 int rtick; 2477 2478 sched_pctcpu_update(ts); 2479 /* How many rtick per second ? */ 2480 rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2481 pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 2482 } 2483 thread_unlock(td); 2484 2485 return (pctcpu); 2486} 2487 2488/* 2489 * Bind a thread to a target cpu. 2490 */ 2491void 2492sched_bind(struct thread *td, int cpu) 2493{ 2494 struct td_sched *ts; 2495 2496 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 2497 ts = td->td_sched; 2498 if (ts->ts_flags & TSF_BOUND) 2499 sched_unbind(td); 2500 ts->ts_flags |= TSF_BOUND; 2501#ifdef SMP 2502 sched_pin(); 2503 if (PCPU_GET(cpuid) == cpu) 2504 return; 2505 ts->ts_cpu = cpu; 2506 /* When we return from mi_switch we'll be on the correct cpu. */ 2507 mi_switch(SW_VOL, NULL); 2508#endif 2509} 2510 2511/* 2512 * Release a bound thread. 2513 */ 2514void 2515sched_unbind(struct thread *td) 2516{ 2517 struct td_sched *ts; 2518 2519 THREAD_LOCK_ASSERT(td, MA_OWNED); 2520 ts = td->td_sched; 2521 if ((ts->ts_flags & TSF_BOUND) == 0) 2522 return; 2523 ts->ts_flags &= ~TSF_BOUND; 2524#ifdef SMP 2525 sched_unpin(); 2526#endif 2527} 2528 2529int 2530sched_is_bound(struct thread *td) 2531{ 2532 THREAD_LOCK_ASSERT(td, MA_OWNED); 2533 return (td->td_sched->ts_flags & TSF_BOUND); 2534} 2535 2536/* 2537 * Basic yield call. 2538 */ 2539void 2540sched_relinquish(struct thread *td) 2541{ 2542 thread_lock(td); 2543 if (td->td_pri_class == PRI_TIMESHARE) 2544 sched_prio(td, PRI_MAX_TIMESHARE); 2545 SCHED_STAT_INC(switch_relinquish); 2546 mi_switch(SW_VOL, NULL); 2547 thread_unlock(td); 2548} 2549 2550/* 2551 * Return the total system load. 2552 */ 2553int 2554sched_load(void) 2555{ 2556#ifdef SMP 2557 int total; 2558 int i; 2559 2560 total = 0; 2561 for (i = 0; i <= tdg_maxid; i++) 2562 total += TDQ_GROUP(i)->tdg_load; 2563 return (total); 2564#else 2565 return (TDQ_SELF()->tdq_sysload); 2566#endif 2567} 2568 2569int 2570sched_sizeof_proc(void) 2571{ 2572 return (sizeof(struct proc)); 2573} 2574 2575int 2576sched_sizeof_thread(void) 2577{ 2578 return (sizeof(struct thread) + sizeof(struct td_sched)); 2579} 2580 2581/* 2582 * The actual idle process. 2583 */ 2584void 2585sched_idletd(void *dummy) 2586{ 2587 struct thread *td; 2588 struct tdq *tdq; 2589 2590 td = curthread; 2591 tdq = TDQ_SELF(); 2592 mtx_assert(&Giant, MA_NOTOWNED); 2593 /* ULE relies on preemption for idle interruption. */ 2594 for (;;) { 2595#ifdef SMP 2596 if (tdq_idled(tdq)) 2597 cpu_idle(); 2598#else 2599 cpu_idle(); 2600#endif 2601 } 2602} 2603 2604/* 2605 * A CPU is entering for the first time or a thread is exiting. 2606 */ 2607void 2608sched_throw(struct thread *td) 2609{ 2610 struct thread *newtd; 2611 struct tdq *tdq; 2612 2613 tdq = TDQ_SELF(); 2614 if (td == NULL) { 2615 /* Correct spinlock nesting and acquire the correct lock. */ 2616 TDQ_LOCK(tdq); 2617 spinlock_exit(); 2618 } else { 2619 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2620 tdq_load_rem(tdq, td->td_sched); 2621 } 2622 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 2623 newtd = choosethread(); 2624 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 2625 PCPU_SET(switchtime, cpu_ticks()); 2626 PCPU_SET(switchticks, ticks); 2627 cpu_throw(td, newtd); /* doesn't return */ 2628} 2629 2630/* 2631 * This is called from fork_exit(). Just acquire the correct locks and 2632 * let fork do the rest of the work. 2633 */ 2634void 2635sched_fork_exit(struct thread *td) 2636{ 2637 struct td_sched *ts; 2638 struct tdq *tdq; 2639 int cpuid; 2640 2641 /* 2642 * Finish setting up thread glue so that it begins execution in a 2643 * non-nested critical section with the scheduler lock held. 2644 */ 2645 cpuid = PCPU_GET(cpuid); 2646 tdq = TDQ_CPU(cpuid); 2647 ts = td->td_sched; 2648 if (TD_IS_IDLETHREAD(td)) 2649 td->td_lock = TDQ_LOCKPTR(tdq); 2650 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2651 td->td_oncpu = cpuid; 2652 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 2653} 2654 2655static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, 2656 "Scheduler"); 2657SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, 2658 "Scheduler name"); 2659SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 2660 "Slice size for timeshare threads"); 2661SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, 2662 "Interactivity score threshold"); 2663SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh, 2664 0,"Min priority for preemption, lower priorities have greater precedence"); 2665#ifdef SMP 2666SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, 2667 "Pick the target cpu based on priority rather than load."); 2668SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, 2669 "Number of hz ticks to keep thread affinity for"); 2670SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, ""); 2671SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, 2672 "Enables the long-term load balancer"); 2673SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW, 2674 &balance_interval, 0, 2675 "Average frequency in stathz ticks to run the long-term balancer"); 2676SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, 2677 "Steals work from another hyper-threaded core on idle"); 2678SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, 2679 "Attempts to steal work from other cores before idling"); 2680SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, 2681 "Minimum load on remote cpu before we'll steal"); 2682SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0, 2683 "True when a topology has been specified by the MD code."); 2684#endif 2685 2686/* ps compat. All cpu percentages from ULE are weighted. */ 2687static int ccpu = 0; 2688SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 2689 2690 2691#define KERN_SWITCH_INCLUDE 1 2692#include "kern/kern_switch.c" 2693