sched_ule.c revision 177368
1/*- 2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27/* 28 * This file implements the ULE scheduler. ULE supports independent CPU 29 * run queues and fine grain locking. It has superior interactive 30 * performance under load even on uni-processor systems. 31 * 32 * etymology: 33 * ULE is the last three letters in schedule. It owes its name to a 34 * generic user created for a scheduling system by Paul Mikesell at 35 * Isilon Systems and a general lack of creativity on the part of the author. 36 */ 37 38#include <sys/cdefs.h> 39__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 177368 2008-03-19 06:19:01Z jeff $"); 40 41#include "opt_hwpmc_hooks.h" 42#include "opt_sched.h" 43 44#include <sys/param.h> 45#include <sys/systm.h> 46#include <sys/kdb.h> 47#include <sys/kernel.h> 48#include <sys/ktr.h> 49#include <sys/lock.h> 50#include <sys/mutex.h> 51#include <sys/proc.h> 52#include <sys/resource.h> 53#include <sys/resourcevar.h> 54#include <sys/sched.h> 55#include <sys/smp.h> 56#include <sys/sx.h> 57#include <sys/sysctl.h> 58#include <sys/sysproto.h> 59#include <sys/turnstile.h> 60#include <sys/umtx.h> 61#include <sys/vmmeter.h> 62#include <sys/cpuset.h> 63#ifdef KTRACE 64#include <sys/uio.h> 65#include <sys/ktrace.h> 66#endif 67 68#ifdef HWPMC_HOOKS 69#include <sys/pmckern.h> 70#endif 71 72#include <machine/cpu.h> 73#include <machine/smp.h> 74 75#if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__) 76#error "This architecture is not currently compatible with ULE" 77#endif 78 79#define KTR_ULE 0 80 81/* 82 * Thread scheduler specific section. All fields are protected 83 * by the thread lock. 84 */ 85struct td_sched { 86 TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */ 87 struct thread *ts_thread; /* Active associated thread. */ 88 struct runq *ts_runq; /* Run-queue we're queued on. */ 89 short ts_flags; /* TSF_* flags. */ 90 u_char ts_rqindex; /* Run queue index. */ 91 u_char ts_cpu; /* CPU that we have affinity for. */ 92 int ts_rltick; /* Real last tick, for affinity. */ 93 int ts_slice; /* Ticks of slice remaining. */ 94 u_int ts_slptime; /* Number of ticks we vol. 
slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread cannot migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice = 1;
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
#else
static int preempt_thresh = PRI_MIN_KERN;
#endif
#else
static int preempt_thresh = 0;
#endif
static int static_boost = 1;

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be accessed without the lock to avoid
 * excess locking in sched_pickcpu().
 */
struct tdq {
	/* Ordered to improve efficiency of cpu_search() and switch(). */
	struct mtx	tdq_lock;		/* run queue lock. */
	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
	int		tdq_load;		/* Aggregate load. */
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
	int		tdq_transferable;	/* Transferable thread count. */
	u_char		tdq_lowpri;		/* Lowest priority thread. */
	u_char		tdq_ipipending;		/* IPI pending. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	char		tdq_name[sizeof("sched lock") + 6];
} __aligned(64);


#ifdef SMP
struct cpu_group *cpu_top;

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
#define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_interval = 128;	/* Default set in sched_initticks(). */
static int affinity;
static int steal_htt = 1;
static int steal_idle = 1;
static int steal_thresh = 2;

/*
 * One thread queue per processor.
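 * TDQ_SELF() below resolves the current cpu's queue via PCPU_GET(cpuid),
 * while TDQ_CPU(x) and TDQ_ID(x) map between a cpu id and its tdq; on
 * non-SMP kernels all of these collapse to a single static tdq_cpu.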
231 */ 232static struct tdq tdq_cpu[MAXCPU]; 233static struct tdq *balance_tdq; 234static int balance_ticks; 235 236#define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) 237#define TDQ_CPU(x) (&tdq_cpu[(x)]) 238#define TDQ_ID(x) ((int)((x) - tdq_cpu)) 239#else /* !SMP */ 240static struct tdq tdq_cpu; 241 242#define TDQ_ID(x) (0) 243#define TDQ_SELF() (&tdq_cpu) 244#define TDQ_CPU(x) (&tdq_cpu) 245#endif 246 247#define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type)) 248#define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t))) 249#define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f)) 250#define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t))) 251#define TDQ_LOCKPTR(t) (&(t)->tdq_lock) 252 253static void sched_priority(struct thread *); 254static void sched_thread_priority(struct thread *, u_char); 255static int sched_interact_score(struct thread *); 256static void sched_interact_update(struct thread *); 257static void sched_interact_fork(struct thread *); 258static void sched_pctcpu_update(struct td_sched *); 259 260/* Operations on per processor queues */ 261static struct td_sched * tdq_choose(struct tdq *); 262static void tdq_setup(struct tdq *); 263static void tdq_load_add(struct tdq *, struct td_sched *); 264static void tdq_load_rem(struct tdq *, struct td_sched *); 265static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int); 266static __inline void tdq_runq_rem(struct tdq *, struct td_sched *); 267static inline int sched_shouldpreempt(int, int, int); 268void tdq_print(int cpu); 269static void runq_print(struct runq *rq); 270static void tdq_add(struct tdq *, struct thread *, int); 271#ifdef SMP 272static int tdq_move(struct tdq *, struct tdq *); 273static int tdq_idled(struct tdq *); 274static void tdq_notify(struct tdq *, struct td_sched *); 275static struct td_sched *tdq_steal(struct tdq *, int); 276static struct td_sched *runq_steal(struct runq *, int); 277static int sched_pickcpu(struct td_sched *, int); 278static void sched_balance(void); 279static int sched_balance_pair(struct tdq *, struct tdq *); 280static inline struct tdq *sched_setcpu(struct td_sched *, int, int); 281static inline struct mtx *thread_block_switch(struct thread *); 282static inline void thread_unblock_switch(struct thread *, struct mtx *); 283static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int); 284#endif 285 286static void sched_setup(void *dummy); 287SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL); 288 289static void sched_initticks(void *dummy); 290SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, 291 NULL); 292 293/* 294 * Print the threads waiting on a run-queue. 295 */ 296static void 297runq_print(struct runq *rq) 298{ 299 struct rqhead *rqh; 300 struct td_sched *ts; 301 int pri; 302 int j; 303 int i; 304 305 for (i = 0; i < RQB_LEN; i++) { 306 printf("\t\trunq bits %d 0x%zx\n", 307 i, rq->rq_status.rqb_bits[i]); 308 for (j = 0; j < RQB_BPW; j++) 309 if (rq->rq_status.rqb_bits[i] & (1ul << j)) { 310 pri = j + (i << RQB_L2BPW); 311 rqh = &rq->rq_queues[pri]; 312 TAILQ_FOREACH(ts, rqh, ts_procq) { 313 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", 314 ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri); 315 } 316 } 317 } 318} 319 320/* 321 * Print the status of a per-cpu thread queue. Should be a ddb show cmd. 
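 * For example, tdq_print(0) dumps cpu 0's lock, load, timeshare insert and
 * removal indices, and the contents of its realtime, timeshare, and idle
 * run queues.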
322 */ 323void 324tdq_print(int cpu) 325{ 326 struct tdq *tdq; 327 328 tdq = TDQ_CPU(cpu); 329 330 printf("tdq %d:\n", TDQ_ID(tdq)); 331 printf("\tlock %p\n", TDQ_LOCKPTR(tdq)); 332 printf("\tLock name: %s\n", tdq->tdq_name); 333 printf("\tload: %d\n", tdq->tdq_load); 334 printf("\ttimeshare idx: %d\n", tdq->tdq_idx); 335 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); 336 printf("\trealtime runq:\n"); 337 runq_print(&tdq->tdq_realtime); 338 printf("\ttimeshare runq:\n"); 339 runq_print(&tdq->tdq_timeshare); 340 printf("\tidle runq:\n"); 341 runq_print(&tdq->tdq_idle); 342 printf("\tload transferable: %d\n", tdq->tdq_transferable); 343 printf("\tlowest priority: %d\n", tdq->tdq_lowpri); 344} 345 346static inline int 347sched_shouldpreempt(int pri, int cpri, int remote) 348{ 349 /* 350 * If the new priority is not better than the current priority there is 351 * nothing to do. 352 */ 353 if (pri >= cpri) 354 return (0); 355 /* 356 * Always preempt idle. 357 */ 358 if (cpri >= PRI_MIN_IDLE) 359 return (1); 360 /* 361 * If preemption is disabled don't preempt others. 362 */ 363 if (preempt_thresh == 0) 364 return (0); 365 /* 366 * Preempt if we exceed the threshold. 367 */ 368 if (pri <= preempt_thresh) 369 return (1); 370 /* 371 * If we're realtime or better and there is timeshare or worse running 372 * preempt only remote processors. 373 */ 374 if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME) 375 return (1); 376 return (0); 377} 378 379#define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS) 380/* 381 * Add a thread to the actual run-queue. Keeps transferable counts up to 382 * date with what is actually on the run-queue. Selects the correct 383 * queue position for timeshare threads. 384 */ 385static __inline void 386tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags) 387{ 388 u_char pri; 389 390 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 391 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 392 393 TD_SET_RUNQ(ts->ts_thread); 394 if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 395 tdq->tdq_transferable++; 396 ts->ts_flags |= TSF_XFERABLE; 397 } 398 pri = ts->ts_thread->td_priority; 399 if (pri <= PRI_MAX_REALTIME) { 400 ts->ts_runq = &tdq->tdq_realtime; 401 } else if (pri <= PRI_MAX_TIMESHARE) { 402 ts->ts_runq = &tdq->tdq_timeshare; 403 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE, 404 ("Invalid priority %d on timeshare runq", pri)); 405 /* 406 * This queue contains only priorities between MIN and MAX 407 * realtime. Use the whole queue to represent these values. 408 */ 409 if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) { 410 pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ; 411 pri = (pri + tdq->tdq_idx) % RQ_NQS; 412 /* 413 * This effectively shortens the queue by one so we 414 * can have a one slot difference between idx and 415 * ridx while we wait for threads to drain. 416 */ 417 if (tdq->tdq_ridx != tdq->tdq_idx && 418 pri == tdq->tdq_ridx) 419 pri = (unsigned char)(pri - 1) % RQ_NQS; 420 } else 421 pri = tdq->tdq_ridx; 422 runq_add_pri(ts->ts_runq, ts, pri, flags); 423 return; 424 } else 425 ts->ts_runq = &tdq->tdq_idle; 426 runq_add(ts->ts_runq, ts, flags); 427} 428 429/* 430 * Remove a thread from a run-queue. This typically happens when a thread 431 * is selected to run. Running threads are not on the queue and the 432 * transferable count does not reflect them. 
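 * Removal from the timeshare queue honors the rotating head (tdq_ridx)
 * whenever it differs from the insert index, keeping it consistent with
 * tdq_runq_add() above.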
433 */ 434static __inline void 435tdq_runq_rem(struct tdq *tdq, struct td_sched *ts) 436{ 437 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 438 KASSERT(ts->ts_runq != NULL, 439 ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread)); 440 if (ts->ts_flags & TSF_XFERABLE) { 441 tdq->tdq_transferable--; 442 ts->ts_flags &= ~TSF_XFERABLE; 443 } 444 if (ts->ts_runq == &tdq->tdq_timeshare) { 445 if (tdq->tdq_idx != tdq->tdq_ridx) 446 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx); 447 else 448 runq_remove_idx(ts->ts_runq, ts, NULL); 449 } else 450 runq_remove(ts->ts_runq, ts); 451} 452 453/* 454 * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load 455 * for this thread to the referenced thread queue. 456 */ 457static void 458tdq_load_add(struct tdq *tdq, struct td_sched *ts) 459{ 460 int class; 461 462 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 463 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 464 class = PRI_BASE(ts->ts_thread->td_pri_class); 465 tdq->tdq_load++; 466 CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load); 467 if (class != PRI_ITHD && 468 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 469 tdq->tdq_sysload++; 470} 471 472/* 473 * Remove the load from a thread that is transitioning to a sleep state or 474 * exiting. 475 */ 476static void 477tdq_load_rem(struct tdq *tdq, struct td_sched *ts) 478{ 479 int class; 480 481 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 482 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 483 class = PRI_BASE(ts->ts_thread->td_pri_class); 484 if (class != PRI_ITHD && 485 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 486 tdq->tdq_sysload--; 487 KASSERT(tdq->tdq_load != 0, 488 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq))); 489 tdq->tdq_load--; 490 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 491 ts->ts_runq = NULL; 492} 493 494/* 495 * Set lowpri to its exact value by searching the run-queue and 496 * evaluating curthread. curthread may be passed as an optimization. 497 */ 498static void 499tdq_setlowpri(struct tdq *tdq, struct thread *ctd) 500{ 501 struct td_sched *ts; 502 struct thread *td; 503 504 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 505 if (ctd == NULL) 506 ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread; 507 ts = tdq_choose(tdq); 508 if (ts) 509 td = ts->ts_thread; 510 if (ts == NULL || td->td_priority > ctd->td_priority) 511 tdq->tdq_lowpri = ctd->td_priority; 512 else 513 tdq->tdq_lowpri = td->td_priority; 514} 515 516#ifdef SMP 517struct cpu_search { 518 cpumask_t cs_mask; /* Mask of valid cpus. */ 519 u_int cs_load; 520 u_int cs_cpu; 521 int cs_limit; /* Min priority for low min load for high. */ 522}; 523 524#define CPU_SEARCH_LOWEST 0x1 525#define CPU_SEARCH_HIGHEST 0x2 526#define CPU_SEARCH_BOTH (CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST) 527 528#define CPUMASK_FOREACH(cpu, mask) \ 529 for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++) \ 530 if ((mask) & 1 << (cpu)) 531 532static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low, 533 struct cpu_search *high, const int match); 534int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low); 535int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high); 536int cpu_search_both(struct cpu_group *cg, struct cpu_search *low, 537 struct cpu_search *high); 538 539/* 540 * This routine compares according to the match argument and should be 541 * reduced in actual instantiations via constant propagation and dead code 542 * elimination. 
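 * For example, cpu_search_lowest() below passes the constant
 * CPU_SEARCH_LOWEST, so the compiler can drop the CPU_SEARCH_HIGHEST
 * branches entirely in that instantiation.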
543 */ 544static __inline int 545cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high, 546 const int match) 547{ 548 struct tdq *tdq; 549 550 tdq = TDQ_CPU(cpu); 551 if (match & CPU_SEARCH_LOWEST) 552 if (low->cs_mask & (1 << cpu) && 553 tdq->tdq_load < low->cs_load && 554 tdq->tdq_lowpri > low->cs_limit) { 555 low->cs_cpu = cpu; 556 low->cs_load = tdq->tdq_load; 557 } 558 if (match & CPU_SEARCH_HIGHEST) 559 if (high->cs_mask & (1 << cpu) && 560 tdq->tdq_load >= high->cs_limit && 561 tdq->tdq_load > high->cs_load && 562 tdq->tdq_transferable) { 563 high->cs_cpu = cpu; 564 high->cs_load = tdq->tdq_load; 565 } 566 return (tdq->tdq_load); 567} 568 569/* 570 * Search the tree of cpu_groups for the lowest or highest loaded cpu 571 * according to the match argument. This routine actually compares the 572 * load on all paths through the tree and finds the least loaded cpu on 573 * the least loaded path, which may differ from the least loaded cpu in 574 * the system. This balances work among caches and busses. 575 * 576 * This inline is instantiated in three forms below using constants for the 577 * match argument. It is reduced to the minimum set for each case. It is 578 * also recursive to the depth of the tree. 579 */ 580static __inline int 581cpu_search(struct cpu_group *cg, struct cpu_search *low, 582 struct cpu_search *high, const int match) 583{ 584 int total; 585 586 total = 0; 587 if (cg->cg_children) { 588 struct cpu_search lgroup; 589 struct cpu_search hgroup; 590 struct cpu_group *child; 591 u_int lload; 592 int hload; 593 int load; 594 int i; 595 596 lload = -1; 597 hload = -1; 598 for (i = 0; i < cg->cg_children; i++) { 599 child = &cg->cg_child[i]; 600 if (match & CPU_SEARCH_LOWEST) { 601 lgroup = *low; 602 lgroup.cs_load = -1; 603 } 604 if (match & CPU_SEARCH_HIGHEST) { 605 hgroup = *high; 606 lgroup.cs_load = 0; 607 } 608 switch (match) { 609 case CPU_SEARCH_LOWEST: 610 load = cpu_search_lowest(child, &lgroup); 611 break; 612 case CPU_SEARCH_HIGHEST: 613 load = cpu_search_highest(child, &hgroup); 614 break; 615 case CPU_SEARCH_BOTH: 616 load = cpu_search_both(child, &lgroup, &hgroup); 617 break; 618 } 619 total += load; 620 if (match & CPU_SEARCH_LOWEST) 621 if (load < lload || low->cs_cpu == -1) { 622 *low = lgroup; 623 lload = load; 624 } 625 if (match & CPU_SEARCH_HIGHEST) 626 if (load > hload || high->cs_cpu == -1) { 627 hload = load; 628 *high = hgroup; 629 } 630 } 631 } else { 632 int cpu; 633 634 CPUMASK_FOREACH(cpu, cg->cg_mask) 635 total += cpu_compare(cpu, low, high, match); 636 } 637 return (total); 638} 639 640/* 641 * cpu_search instantiations must pass constants to maintain the inline 642 * optimization. 643 */ 644int 645cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low) 646{ 647 return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST); 648} 649 650int 651cpu_search_highest(struct cpu_group *cg, struct cpu_search *high) 652{ 653 return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST); 654} 655 656int 657cpu_search_both(struct cpu_group *cg, struct cpu_search *low, 658 struct cpu_search *high) 659{ 660 return cpu_search(cg, low, high, CPU_SEARCH_BOTH); 661} 662 663/* 664 * Find the cpu with the least load via the least loaded path that has a 665 * lowpri greater than pri pri. A pri of -1 indicates any priority is 666 * acceptable. 
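 * sched_pickcpu() uses this as, e.g., sched_lowest(cg, mask, pri) to find
 * the least loaded cpu in an affinity group whose running priority is
 * weaker than pri, falling back to sched_lowest(cpu_top, mask, -1) to
 * accept any cpu in the system.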
667 */ 668static inline int 669sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri) 670{ 671 struct cpu_search low; 672 673 low.cs_cpu = -1; 674 low.cs_load = -1; 675 low.cs_mask = mask; 676 low.cs_limit = pri; 677 cpu_search_lowest(cg, &low); 678 return low.cs_cpu; 679} 680 681/* 682 * Find the cpu with the highest load via the highest loaded path. 683 */ 684static inline int 685sched_highest(struct cpu_group *cg, cpumask_t mask, int minload) 686{ 687 struct cpu_search high; 688 689 high.cs_cpu = -1; 690 high.cs_load = 0; 691 high.cs_mask = mask; 692 high.cs_limit = minload; 693 cpu_search_highest(cg, &high); 694 return high.cs_cpu; 695} 696 697/* 698 * Simultaneously find the highest and lowest loaded cpu reachable via 699 * cg. 700 */ 701static inline void 702sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu) 703{ 704 struct cpu_search high; 705 struct cpu_search low; 706 707 low.cs_cpu = -1; 708 low.cs_limit = -1; 709 low.cs_load = -1; 710 low.cs_mask = mask; 711 high.cs_load = 0; 712 high.cs_cpu = -1; 713 high.cs_limit = -1; 714 high.cs_mask = mask; 715 cpu_search_both(cg, &low, &high); 716 *lowcpu = low.cs_cpu; 717 *highcpu = high.cs_cpu; 718 return; 719} 720 721static void 722sched_balance_group(struct cpu_group *cg) 723{ 724 cpumask_t mask; 725 int high; 726 int low; 727 int i; 728 729 mask = -1; 730 for (;;) { 731 sched_both(cg, mask, &low, &high); 732 if (low == high || low == -1 || high == -1) 733 break; 734 if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low))) 735 break; 736 /* 737 * If we failed to move any threads determine which cpu 738 * to kick out of the set and try again. 739 */ 740 if (TDQ_CPU(high)->tdq_transferable == 0) 741 mask &= ~(1 << high); 742 else 743 mask &= ~(1 << low); 744 } 745 746 for (i = 0; i < cg->cg_children; i++) 747 sched_balance_group(&cg->cg_child[i]); 748} 749 750static void 751sched_balance() 752{ 753 struct tdq *tdq; 754 755 /* 756 * Select a random time between .5 * balance_interval and 757 * 1.5 * balance_interval. 758 */ 759 balance_ticks = max(balance_interval / 2, 1); 760 balance_ticks += random() % balance_interval; 761 if (smp_started == 0 || rebalance == 0) 762 return; 763 tdq = TDQ_SELF(); 764 TDQ_UNLOCK(tdq); 765 sched_balance_group(cpu_top); 766 TDQ_LOCK(tdq); 767} 768 769/* 770 * Lock two thread queues using their address to maintain lock order. 771 */ 772static void 773tdq_lock_pair(struct tdq *one, struct tdq *two) 774{ 775 if (one < two) { 776 TDQ_LOCK(one); 777 TDQ_LOCK_FLAGS(two, MTX_DUPOK); 778 } else { 779 TDQ_LOCK(two); 780 TDQ_LOCK_FLAGS(one, MTX_DUPOK); 781 } 782} 783 784/* 785 * Unlock two thread queues. Order is not important here. 786 */ 787static void 788tdq_unlock_pair(struct tdq *one, struct tdq *two) 789{ 790 TDQ_UNLOCK(one); 791 TDQ_UNLOCK(two); 792} 793 794/* 795 * Transfer load between two imbalanced thread queues. 796 */ 797static int 798sched_balance_pair(struct tdq *high, struct tdq *low) 799{ 800 int transferable; 801 int high_load; 802 int low_load; 803 int moved; 804 int move; 805 int diff; 806 int i; 807 808 tdq_lock_pair(high, low); 809 transferable = high->tdq_transferable; 810 high_load = high->tdq_load; 811 low_load = low->tdq_load; 812 moved = 0; 813 /* 814 * Determine what the imbalance is and then adjust that to how many 815 * threads we actually have to give up (transferable). 
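 * For example, loads of 7 and 2 differ by 5, so up to 3 threads (half the
 * difference, rounded up) are moved, limited by the transferable count.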
816 */ 817 if (transferable != 0) { 818 diff = high_load - low_load; 819 move = diff / 2; 820 if (diff & 0x1) 821 move++; 822 move = min(move, transferable); 823 for (i = 0; i < move; i++) 824 moved += tdq_move(high, low); 825 /* 826 * IPI the target cpu to force it to reschedule with the new 827 * workload. 828 */ 829 ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT); 830 } 831 tdq_unlock_pair(high, low); 832 return (moved); 833} 834 835/* 836 * Move a thread from one thread queue to another. 837 */ 838static int 839tdq_move(struct tdq *from, struct tdq *to) 840{ 841 struct td_sched *ts; 842 struct thread *td; 843 struct tdq *tdq; 844 int cpu; 845 846 TDQ_LOCK_ASSERT(from, MA_OWNED); 847 TDQ_LOCK_ASSERT(to, MA_OWNED); 848 849 tdq = from; 850 cpu = TDQ_ID(to); 851 ts = tdq_steal(tdq, cpu); 852 if (ts == NULL) 853 return (0); 854 td = ts->ts_thread; 855 /* 856 * Although the run queue is locked the thread may be blocked. Lock 857 * it to clear this and acquire the run-queue lock. 858 */ 859 thread_lock(td); 860 /* Drop recursive lock on from acquired via thread_lock(). */ 861 TDQ_UNLOCK(from); 862 sched_rem(td); 863 ts->ts_cpu = cpu; 864 td->td_lock = TDQ_LOCKPTR(to); 865 tdq_add(to, td, SRQ_YIELDING); 866 return (1); 867} 868 869/* 870 * This tdq has idled. Try to steal a thread from another cpu and switch 871 * to it. 872 */ 873static int 874tdq_idled(struct tdq *tdq) 875{ 876 struct cpu_group *cg; 877 struct tdq *steal; 878 cpumask_t mask; 879 int thresh; 880 int cpu; 881 882 if (smp_started == 0 || steal_idle == 0) 883 return (1); 884 mask = -1; 885 mask &= ~PCPU_GET(cpumask); 886 /* We don't want to be preempted while we're iterating. */ 887 spinlock_enter(); 888 for (cg = tdq->tdq_cg; cg != NULL; ) { 889 if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0) 890 thresh = steal_thresh; 891 else 892 thresh = 1; 893 cpu = sched_highest(cg, mask, thresh); 894 if (cpu == -1) { 895 cg = cg->cg_parent; 896 continue; 897 } 898 steal = TDQ_CPU(cpu); 899 mask &= ~(1 << cpu); 900 tdq_lock_pair(tdq, steal); 901 if (steal->tdq_load < thresh || steal->tdq_transferable == 0) { 902 tdq_unlock_pair(tdq, steal); 903 continue; 904 } 905 /* 906 * If a thread was added while interrupts were disabled don't 907 * steal one here. If we fail to acquire one due to affinity 908 * restrictions loop again with this cpu removed from the 909 * set. 910 */ 911 if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) { 912 tdq_unlock_pair(tdq, steal); 913 continue; 914 } 915 spinlock_exit(); 916 TDQ_UNLOCK(steal); 917 mi_switch(SW_VOL, NULL); 918 thread_unlock(curthread); 919 920 return (0); 921 } 922 spinlock_exit(); 923 return (1); 924} 925 926/* 927 * Notify a remote cpu of new work. Sends an IPI if criteria are met. 928 */ 929static void 930tdq_notify(struct tdq *tdq, struct td_sched *ts) 931{ 932 int cpri; 933 int pri; 934 int cpu; 935 936 if (tdq->tdq_ipipending) 937 return; 938 cpu = ts->ts_cpu; 939 pri = ts->ts_thread->td_priority; 940 cpri = pcpu_find(cpu)->pc_curthread->td_priority; 941 if (!sched_shouldpreempt(pri, cpri, 1)) 942 return; 943 tdq->tdq_ipipending = 1; 944 ipi_selected(1 << cpu, IPI_PREEMPT); 945} 946 947/* 948 * Steals load from a timeshare queue. Honors the rotating queue head 949 * index. 
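 * The search starts at the supplied index (tdq_ridx for the caller) and
 * wraps around; the first thread encountered is passed over in favor of
 * later ones, so the thread about to run next on that cpu tends to stay
 * put.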
950 */ 951static struct td_sched * 952runq_steal_from(struct runq *rq, int cpu, u_char start) 953{ 954 struct td_sched *ts; 955 struct rqbits *rqb; 956 struct rqhead *rqh; 957 int first; 958 int bit; 959 int pri; 960 int i; 961 962 rqb = &rq->rq_status; 963 bit = start & (RQB_BPW -1); 964 pri = 0; 965 first = 0; 966again: 967 for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) { 968 if (rqb->rqb_bits[i] == 0) 969 continue; 970 if (bit != 0) { 971 for (pri = bit; pri < RQB_BPW; pri++) 972 if (rqb->rqb_bits[i] & (1ul << pri)) 973 break; 974 if (pri >= RQB_BPW) 975 continue; 976 } else 977 pri = RQB_FFS(rqb->rqb_bits[i]); 978 pri += (i << RQB_L2BPW); 979 rqh = &rq->rq_queues[pri]; 980 TAILQ_FOREACH(ts, rqh, ts_procq) { 981 if (first && THREAD_CAN_MIGRATE(ts->ts_thread) && 982 THREAD_CAN_SCHED(ts->ts_thread, cpu)) 983 return (ts); 984 first = 1; 985 } 986 } 987 if (start != 0) { 988 start = 0; 989 goto again; 990 } 991 992 return (NULL); 993} 994 995/* 996 * Steals load from a standard linear queue. 997 */ 998static struct td_sched * 999runq_steal(struct runq *rq, int cpu) 1000{ 1001 struct rqhead *rqh; 1002 struct rqbits *rqb; 1003 struct td_sched *ts; 1004 int word; 1005 int bit; 1006 1007 rqb = &rq->rq_status; 1008 for (word = 0; word < RQB_LEN; word++) { 1009 if (rqb->rqb_bits[word] == 0) 1010 continue; 1011 for (bit = 0; bit < RQB_BPW; bit++) { 1012 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 1013 continue; 1014 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 1015 TAILQ_FOREACH(ts, rqh, ts_procq) 1016 if (THREAD_CAN_MIGRATE(ts->ts_thread) && 1017 THREAD_CAN_SCHED(ts->ts_thread, cpu)) 1018 return (ts); 1019 } 1020 } 1021 return (NULL); 1022} 1023 1024/* 1025 * Attempt to steal a thread in priority order from a thread queue. 1026 */ 1027static struct td_sched * 1028tdq_steal(struct tdq *tdq, int cpu) 1029{ 1030 struct td_sched *ts; 1031 1032 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1033 if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL) 1034 return (ts); 1035 if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx)) 1036 != NULL) 1037 return (ts); 1038 return (runq_steal(&tdq->tdq_idle, cpu)); 1039} 1040 1041/* 1042 * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the 1043 * current lock and returns with the assigned queue locked. 1044 */ 1045static inline struct tdq * 1046sched_setcpu(struct td_sched *ts, int cpu, int flags) 1047{ 1048 struct thread *td; 1049 struct tdq *tdq; 1050 1051 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 1052 1053 tdq = TDQ_CPU(cpu); 1054 td = ts->ts_thread; 1055 ts->ts_cpu = cpu; 1056 1057 /* If the lock matches just return the queue. */ 1058 if (td->td_lock == TDQ_LOCKPTR(tdq)) 1059 return (tdq); 1060#ifdef notyet 1061 /* 1062 * If the thread isn't running its lockptr is a 1063 * turnstile or a sleepqueue. We can just lock_set without 1064 * blocking. 1065 */ 1066 if (TD_CAN_RUN(td)) { 1067 TDQ_LOCK(tdq); 1068 thread_lock_set(td, TDQ_LOCKPTR(tdq)); 1069 return (tdq); 1070 } 1071#endif 1072 /* 1073 * The hard case, migration, we need to block the thread first to 1074 * prevent order reversals with other cpus locks. 
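 * Blocking points td_lock at blocked_lock and drops the old lock, so the
 * destination queue's lock is never acquired while another run-queue lock
 * is held.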
1075 */ 1076 thread_lock_block(td); 1077 TDQ_LOCK(tdq); 1078 thread_lock_unblock(td, TDQ_LOCKPTR(tdq)); 1079 return (tdq); 1080} 1081 1082static int 1083sched_pickcpu(struct td_sched *ts, int flags) 1084{ 1085 struct cpu_group *cg; 1086 struct thread *td; 1087 struct tdq *tdq; 1088 cpumask_t mask; 1089 int self; 1090 int pri; 1091 int cpu; 1092 1093 self = PCPU_GET(cpuid); 1094 td = ts->ts_thread; 1095 if (smp_started == 0) 1096 return (self); 1097 /* 1098 * Don't migrate a running thread from sched_switch(). 1099 */ 1100 if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td)) 1101 return (ts->ts_cpu); 1102 /* 1103 * Prefer to run interrupt threads on the processors that generate 1104 * the interrupt. 1105 */ 1106 if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) && 1107 curthread->td_intr_nesting_level) 1108 ts->ts_cpu = self; 1109 /* 1110 * If the thread can run on the last cpu and the affinity has not 1111 * expired or it is idle run it there. 1112 */ 1113 pri = td->td_priority; 1114 tdq = TDQ_CPU(ts->ts_cpu); 1115 if (THREAD_CAN_SCHED(td, ts->ts_cpu)) { 1116 if (tdq->tdq_lowpri > PRI_MIN_IDLE) 1117 return (ts->ts_cpu); 1118 if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri) 1119 return (ts->ts_cpu); 1120 } 1121 /* 1122 * Search for the highest level in the tree that still has affinity. 1123 */ 1124 cg = NULL; 1125 for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent) 1126 if (SCHED_AFFINITY(ts, cg->cg_level)) 1127 break; 1128 cpu = -1; 1129 mask = td->td_cpuset->cs_mask.__bits[0]; 1130 if (cg) 1131 cpu = sched_lowest(cg, mask, pri); 1132 if (cpu == -1) 1133 cpu = sched_lowest(cpu_top, mask, -1); 1134 /* 1135 * Compare the lowest loaded cpu to current cpu. 1136 */ 1137 if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri && 1138 TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE) 1139 cpu = self; 1140 KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu.")); 1141 return (cpu); 1142} 1143#endif 1144 1145/* 1146 * Pick the highest priority task we have and return it. 1147 */ 1148static struct td_sched * 1149tdq_choose(struct tdq *tdq) 1150{ 1151 struct td_sched *ts; 1152 1153 TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1154 ts = runq_choose(&tdq->tdq_realtime); 1155 if (ts != NULL) 1156 return (ts); 1157 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 1158 if (ts != NULL) { 1159 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 1160 ("tdq_choose: Invalid priority on timeshare queue %d", 1161 ts->ts_thread->td_priority)); 1162 return (ts); 1163 } 1164 1165 ts = runq_choose(&tdq->tdq_idle); 1166 if (ts != NULL) { 1167 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 1168 ("tdq_choose: Invalid priority on idle queue %d", 1169 ts->ts_thread->td_priority)); 1170 return (ts); 1171 } 1172 1173 return (NULL); 1174} 1175 1176/* 1177 * Initialize a thread queue. 
1178 */ 1179static void 1180tdq_setup(struct tdq *tdq) 1181{ 1182 1183 if (bootverbose) 1184 printf("ULE: setup cpu %d\n", TDQ_ID(tdq)); 1185 runq_init(&tdq->tdq_realtime); 1186 runq_init(&tdq->tdq_timeshare); 1187 runq_init(&tdq->tdq_idle); 1188 snprintf(tdq->tdq_name, sizeof(tdq->tdq_name), 1189 "sched lock %d", (int)TDQ_ID(tdq)); 1190 mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", 1191 MTX_SPIN | MTX_RECURSE); 1192} 1193 1194#ifdef SMP 1195static void 1196sched_setup_smp(void) 1197{ 1198 struct tdq *tdq; 1199 int i; 1200 1201 cpu_top = smp_topo(); 1202 for (i = 0; i < MAXCPU; i++) { 1203 if (CPU_ABSENT(i)) 1204 continue; 1205 tdq = TDQ_CPU(i); 1206 tdq_setup(tdq); 1207 tdq->tdq_cg = smp_topo_find(cpu_top, i); 1208 if (tdq->tdq_cg == NULL) 1209 panic("Can't find cpu group for %d\n", i); 1210 } 1211 balance_tdq = TDQ_SELF(); 1212 sched_balance(); 1213} 1214#endif 1215 1216/* 1217 * Setup the thread queues and initialize the topology based on MD 1218 * information. 1219 */ 1220static void 1221sched_setup(void *dummy) 1222{ 1223 struct tdq *tdq; 1224 1225 tdq = TDQ_SELF(); 1226#ifdef SMP 1227 sched_setup_smp(); 1228#else 1229 tdq_setup(tdq); 1230#endif 1231 /* 1232 * To avoid divide-by-zero, we set realstathz a dummy value 1233 * in case which sched_clock() called before sched_initticks(). 1234 */ 1235 realstathz = hz; 1236 sched_slice = (realstathz/10); /* ~100ms */ 1237 tickincr = 1 << SCHED_TICK_SHIFT; 1238 1239 /* Add thread0's load since it's running. */ 1240 TDQ_LOCK(tdq); 1241 thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1242 tdq_load_add(tdq, &td_sched0); 1243 tdq->tdq_lowpri = thread0.td_priority; 1244 TDQ_UNLOCK(tdq); 1245} 1246 1247/* 1248 * This routine determines the tickincr after stathz and hz are setup. 1249 */ 1250/* ARGSUSED */ 1251static void 1252sched_initticks(void *dummy) 1253{ 1254 int incr; 1255 1256 realstathz = stathz ? stathz : hz; 1257 sched_slice = (realstathz/10); /* ~100ms */ 1258 1259 /* 1260 * tickincr is shifted out by 10 to avoid rounding errors due to 1261 * hz not being evenly divisible by stathz on all platforms. 1262 */ 1263 incr = (hz << SCHED_TICK_SHIFT) / realstathz; 1264 /* 1265 * This does not work for values of stathz that are more than 1266 * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1267 */ 1268 if (incr == 0) 1269 incr = 1; 1270 tickincr = incr; 1271#ifdef SMP 1272 /* 1273 * Set the default balance interval now that we know 1274 * what realstathz is. 1275 */ 1276 balance_interval = realstathz; 1277 /* 1278 * Set steal thresh to log2(mp_ncpu) but no greater than 4. This 1279 * prevents excess thrashing on large machines and excess idle on 1280 * smaller machines. 1281 */ 1282 steal_thresh = min(ffs(mp_ncpus) - 1, 3); 1283 affinity = SCHED_AFFINITY_DEFAULT; 1284#endif 1285} 1286 1287 1288/* 1289 * This is the core of the interactivity algorithm. Determines a score based 1290 * on past behavior. It is the ratio of sleep time to run time scaled to 1291 * a [0, 100] integer. This is the voluntary sleep time of a process, which 1292 * differs from the cpu usage because it does not account for time spent 1293 * waiting on a run-queue. Would be prettier if we had floating point. 1294 */ 1295static int 1296sched_interact_score(struct thread *td) 1297{ 1298 struct td_sched *ts; 1299 int div; 1300 1301 ts = td->td_sched; 1302 /* 1303 * The score is only needed if this is likely to be an interactive 1304 * task. Don't go through the expense of computing it if there's 1305 * no chance. 
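 * For example, a thread that has slept three times longer than it has run
 * scores roughly 16 (interactive), while one that has run three times
 * longer than it has slept scores roughly 84.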
1306 */ 1307 if (sched_interact <= SCHED_INTERACT_HALF && 1308 ts->ts_runtime >= ts->ts_slptime) 1309 return (SCHED_INTERACT_HALF); 1310 1311 if (ts->ts_runtime > ts->ts_slptime) { 1312 div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1313 return (SCHED_INTERACT_HALF + 1314 (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1315 } 1316 if (ts->ts_slptime > ts->ts_runtime) { 1317 div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1318 return (ts->ts_runtime / div); 1319 } 1320 /* runtime == slptime */ 1321 if (ts->ts_runtime) 1322 return (SCHED_INTERACT_HALF); 1323 1324 /* 1325 * This can happen if slptime and runtime are 0. 1326 */ 1327 return (0); 1328 1329} 1330 1331/* 1332 * Scale the scheduling priority according to the "interactivity" of this 1333 * process. 1334 */ 1335static void 1336sched_priority(struct thread *td) 1337{ 1338 int score; 1339 int pri; 1340 1341 if (td->td_pri_class != PRI_TIMESHARE) 1342 return; 1343 /* 1344 * If the score is interactive we place the thread in the realtime 1345 * queue with a priority that is less than kernel and interrupt 1346 * priorities. These threads are not subject to nice restrictions. 1347 * 1348 * Scores greater than this are placed on the normal timeshare queue 1349 * where the priority is partially decided by the most recent cpu 1350 * utilization and the rest is decided by nice value. 1351 * 1352 * The nice value of the process has a linear effect on the calculated 1353 * score. Negative nice values make it easier for a thread to be 1354 * considered interactive. 1355 */ 1356 score = imax(0, sched_interact_score(td) - td->td_proc->p_nice); 1357 if (score < sched_interact) { 1358 pri = PRI_MIN_REALTIME; 1359 pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1360 * score; 1361 KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 1362 ("sched_priority: invalid interactive priority %d score %d", 1363 pri, score)); 1364 } else { 1365 pri = SCHED_PRI_MIN; 1366 if (td->td_sched->ts_ticks) 1367 pri += SCHED_PRI_TICKS(td->td_sched); 1368 pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1369 KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE, 1370 ("sched_priority: invalid priority %d: nice %d, " 1371 "ticks %d ftick %d ltick %d tick pri %d", 1372 pri, td->td_proc->p_nice, td->td_sched->ts_ticks, 1373 td->td_sched->ts_ftick, td->td_sched->ts_ltick, 1374 SCHED_PRI_TICKS(td->td_sched))); 1375 } 1376 sched_user_prio(td, pri); 1377 1378 return; 1379} 1380 1381/* 1382 * This routine enforces a maximum limit on the amount of scheduling history 1383 * kept. It is called after either the slptime or runtime is adjusted. This 1384 * function is ugly due to integer math. 1385 */ 1386static void 1387sched_interact_update(struct thread *td) 1388{ 1389 struct td_sched *ts; 1390 u_int sum; 1391 1392 ts = td->td_sched; 1393 sum = ts->ts_runtime + ts->ts_slptime; 1394 if (sum < SCHED_SLP_RUN_MAX) 1395 return; 1396 /* 1397 * This only happens from two places: 1398 * 1) We have added an unusual amount of run time from fork_exit. 1399 * 2) We have added an unusual amount of sleep time from sched_sleep(). 1400 */ 1401 if (sum > SCHED_SLP_RUN_MAX * 2) { 1402 if (ts->ts_runtime > ts->ts_slptime) { 1403 ts->ts_runtime = SCHED_SLP_RUN_MAX; 1404 ts->ts_slptime = 1; 1405 } else { 1406 ts->ts_slptime = SCHED_SLP_RUN_MAX; 1407 ts->ts_runtime = 1; 1408 } 1409 return; 1410 } 1411 /* 1412 * If we have exceeded by more than 1/5th then the algorithm below 1413 * will not bring us back into range. 
Dividing by two here forces 1414 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1415 */ 1416 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1417 ts->ts_runtime /= 2; 1418 ts->ts_slptime /= 2; 1419 return; 1420 } 1421 ts->ts_runtime = (ts->ts_runtime / 5) * 4; 1422 ts->ts_slptime = (ts->ts_slptime / 5) * 4; 1423} 1424 1425/* 1426 * Scale back the interactivity history when a child thread is created. The 1427 * history is inherited from the parent but the thread may behave totally 1428 * differently. For example, a shell spawning a compiler process. We want 1429 * to learn that the compiler is behaving badly very quickly. 1430 */ 1431static void 1432sched_interact_fork(struct thread *td) 1433{ 1434 int ratio; 1435 int sum; 1436 1437 sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime; 1438 if (sum > SCHED_SLP_RUN_FORK) { 1439 ratio = sum / SCHED_SLP_RUN_FORK; 1440 td->td_sched->ts_runtime /= ratio; 1441 td->td_sched->ts_slptime /= ratio; 1442 } 1443} 1444 1445/* 1446 * Called from proc0_init() to setup the scheduler fields. 1447 */ 1448void 1449schedinit(void) 1450{ 1451 1452 /* 1453 * Set up the scheduler specific parts of proc0. 1454 */ 1455 proc0.p_sched = NULL; /* XXX */ 1456 thread0.td_sched = &td_sched0; 1457 td_sched0.ts_ltick = ticks; 1458 td_sched0.ts_ftick = ticks; 1459 td_sched0.ts_thread = &thread0; 1460 td_sched0.ts_slice = sched_slice; 1461} 1462 1463/* 1464 * This is only somewhat accurate since given many processes of the same 1465 * priority they will switch when their slices run out, which will be 1466 * at most sched_slice stathz ticks. 1467 */ 1468int 1469sched_rr_interval(void) 1470{ 1471 1472 /* Convert sched_slice to hz */ 1473 return (hz/(realstathz/sched_slice)); 1474} 1475 1476/* 1477 * Update the percent cpu tracking information when it is requested or 1478 * the total history exceeds the maximum. We keep a sliding history of 1479 * tick counts that slowly decays. This is less precise than the 4BSD 1480 * mechanism since it happens with less regular and frequent events. 1481 */ 1482static void 1483sched_pctcpu_update(struct td_sched *ts) 1484{ 1485 1486 if (ts->ts_ticks == 0) 1487 return; 1488 if (ticks - (hz / 10) < ts->ts_ltick && 1489 SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 1490 return; 1491 /* 1492 * Adjust counters and watermark for pctcpu calc. 1493 */ 1494 if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1495 ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1496 SCHED_TICK_TARG; 1497 else 1498 ts->ts_ticks = 0; 1499 ts->ts_ltick = ticks; 1500 ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 1501} 1502 1503/* 1504 * Adjust the priority of a thread. Move it to the appropriate run-queue 1505 * if necessary. This is the back-end for several priority related 1506 * functions. 1507 */ 1508static void 1509sched_thread_priority(struct thread *td, u_char prio) 1510{ 1511 struct td_sched *ts; 1512 struct tdq *tdq; 1513 int oldpri; 1514 1515 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1516 td, td->td_name, td->td_priority, prio, curthread, 1517 curthread->td_name); 1518 ts = td->td_sched; 1519 THREAD_LOCK_ASSERT(td, MA_OWNED); 1520 if (td->td_priority == prio) 1521 return; 1522 1523 if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1524 /* 1525 * If the priority has been elevated due to priority 1526 * propagation, we may have to move ourselves to a new 1527 * queue. This could be optimized to not re-add in some 1528 * cases. 
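 * The thread is removed and re-added with SRQ_BORROWING so that it is
 * queued according to its new, elevated priority.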
1529 */ 1530 sched_rem(td); 1531 td->td_priority = prio; 1532 sched_add(td, SRQ_BORROWING); 1533 return; 1534 } 1535 tdq = TDQ_CPU(ts->ts_cpu); 1536 oldpri = td->td_priority; 1537 td->td_priority = prio; 1538 if (TD_IS_RUNNING(td)) { 1539 if (prio < tdq->tdq_lowpri) 1540 tdq->tdq_lowpri = prio; 1541 else if (tdq->tdq_lowpri == oldpri) 1542 tdq_setlowpri(tdq, td); 1543 } 1544} 1545 1546/* 1547 * Update a thread's priority when it is lent another thread's 1548 * priority. 1549 */ 1550void 1551sched_lend_prio(struct thread *td, u_char prio) 1552{ 1553 1554 td->td_flags |= TDF_BORROWING; 1555 sched_thread_priority(td, prio); 1556} 1557 1558/* 1559 * Restore a thread's priority when priority propagation is 1560 * over. The prio argument is the minimum priority the thread 1561 * needs to have to satisfy other possible priority lending 1562 * requests. If the thread's regular priority is less 1563 * important than prio, the thread will keep a priority boost 1564 * of prio. 1565 */ 1566void 1567sched_unlend_prio(struct thread *td, u_char prio) 1568{ 1569 u_char base_pri; 1570 1571 if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1572 td->td_base_pri <= PRI_MAX_TIMESHARE) 1573 base_pri = td->td_user_pri; 1574 else 1575 base_pri = td->td_base_pri; 1576 if (prio >= base_pri) { 1577 td->td_flags &= ~TDF_BORROWING; 1578 sched_thread_priority(td, base_pri); 1579 } else 1580 sched_lend_prio(td, prio); 1581} 1582 1583/* 1584 * Standard entry for setting the priority to an absolute value. 1585 */ 1586void 1587sched_prio(struct thread *td, u_char prio) 1588{ 1589 u_char oldprio; 1590 1591 /* First, update the base priority. */ 1592 td->td_base_pri = prio; 1593 1594 /* 1595 * If the thread is borrowing another thread's priority, don't 1596 * ever lower the priority. 1597 */ 1598 if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1599 return; 1600 1601 /* Change the real priority. */ 1602 oldprio = td->td_priority; 1603 sched_thread_priority(td, prio); 1604 1605 /* 1606 * If the thread is on a turnstile, then let the turnstile update 1607 * its state. 1608 */ 1609 if (TD_ON_LOCK(td) && oldprio != prio) 1610 turnstile_adjust(td, oldprio); 1611} 1612 1613/* 1614 * Set the base user priority, does not effect current running priority. 1615 */ 1616void 1617sched_user_prio(struct thread *td, u_char prio) 1618{ 1619 u_char oldprio; 1620 1621 td->td_base_user_pri = prio; 1622 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1623 return; 1624 oldprio = td->td_user_pri; 1625 td->td_user_pri = prio; 1626} 1627 1628void 1629sched_lend_user_prio(struct thread *td, u_char prio) 1630{ 1631 u_char oldprio; 1632 1633 THREAD_LOCK_ASSERT(td, MA_OWNED); 1634 td->td_flags |= TDF_UBORROWING; 1635 oldprio = td->td_user_pri; 1636 td->td_user_pri = prio; 1637} 1638 1639void 1640sched_unlend_user_prio(struct thread *td, u_char prio) 1641{ 1642 u_char base_pri; 1643 1644 THREAD_LOCK_ASSERT(td, MA_OWNED); 1645 base_pri = td->td_base_user_pri; 1646 if (prio >= base_pri) { 1647 td->td_flags &= ~TDF_UBORROWING; 1648 sched_user_prio(td, base_pri); 1649 } else { 1650 sched_lend_user_prio(td, prio); 1651 } 1652} 1653 1654/* 1655 * Add the thread passed as 'newtd' to the run queue before selecting 1656 * the next thread to run. This is only used for KSE. 
1657 */ 1658static void 1659sched_switchin(struct tdq *tdq, struct thread *td) 1660{ 1661#ifdef SMP 1662 spinlock_enter(); 1663 TDQ_UNLOCK(tdq); 1664 thread_lock(td); 1665 spinlock_exit(); 1666 sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING); 1667#else 1668 td->td_lock = TDQ_LOCKPTR(tdq); 1669#endif 1670 tdq_add(tdq, td, SRQ_YIELDING); 1671 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1672} 1673 1674/* 1675 * Block a thread for switching. Similar to thread_block() but does not 1676 * bump the spin count. 1677 */ 1678static inline struct mtx * 1679thread_block_switch(struct thread *td) 1680{ 1681 struct mtx *lock; 1682 1683 THREAD_LOCK_ASSERT(td, MA_OWNED); 1684 lock = td->td_lock; 1685 td->td_lock = &blocked_lock; 1686 mtx_unlock_spin(lock); 1687 1688 return (lock); 1689} 1690 1691/* 1692 * Handle migration from sched_switch(). This happens only for 1693 * cpu binding. 1694 */ 1695static struct mtx * 1696sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1697{ 1698 struct tdq *tdn; 1699 1700 tdn = TDQ_CPU(td->td_sched->ts_cpu); 1701#ifdef SMP 1702 tdq_load_rem(tdq, td->td_sched); 1703 /* 1704 * Do the lock dance required to avoid LOR. We grab an extra 1705 * spinlock nesting to prevent preemption while we're 1706 * not holding either run-queue lock. 1707 */ 1708 spinlock_enter(); 1709 thread_block_switch(td); /* This releases the lock on tdq. */ 1710 TDQ_LOCK(tdn); 1711 tdq_add(tdn, td, flags); 1712 tdq_notify(tdn, td->td_sched); 1713 /* 1714 * After we unlock tdn the new cpu still can't switch into this 1715 * thread until we've unblocked it in cpu_switch(). The lock 1716 * pointers may match in the case of HTT cores. Don't unlock here 1717 * or we can deadlock when the other CPU runs the IPI handler. 1718 */ 1719 if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1720 TDQ_UNLOCK(tdn); 1721 TDQ_LOCK(tdq); 1722 } 1723 spinlock_exit(); 1724#endif 1725 return (TDQ_LOCKPTR(tdn)); 1726} 1727 1728/* 1729 * Release a thread that was blocked with thread_block_switch(). 1730 */ 1731static inline void 1732thread_unblock_switch(struct thread *td, struct mtx *mtx) 1733{ 1734 atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1735 (uintptr_t)mtx); 1736} 1737 1738/* 1739 * Switch threads. This function has to handle threads coming in while 1740 * blocked for some reason, running, or idle. It also must deal with 1741 * migrating a thread from one queue to another as running threads may 1742 * be assigned elsewhere via binding. 1743 */ 1744void 1745sched_switch(struct thread *td, struct thread *newtd, int flags) 1746{ 1747 struct tdq *tdq; 1748 struct td_sched *ts; 1749 struct mtx *mtx; 1750 int srqflag; 1751 int cpuid; 1752 1753 THREAD_LOCK_ASSERT(td, MA_OWNED); 1754 1755 cpuid = PCPU_GET(cpuid); 1756 tdq = TDQ_CPU(cpuid); 1757 ts = td->td_sched; 1758 mtx = td->td_lock; 1759 ts->ts_rltick = ticks; 1760 td->td_lastcpu = td->td_oncpu; 1761 td->td_oncpu = NOCPU; 1762 td->td_flags &= ~TDF_NEEDRESCHED; 1763 td->td_owepreempt = 0; 1764 /* 1765 * The lock pointer in an idle thread should never change. Reset it 1766 * to CAN_RUN as well. 1767 */ 1768 if (TD_IS_IDLETHREAD(td)) { 1769 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1770 TD_SET_CAN_RUN(td); 1771 } else if (TD_IS_RUNNING(td)) { 1772 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1773 srqflag = (flags & SW_PREEMPT) ? 
1774 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1775 SRQ_OURSELF|SRQ_YIELDING; 1776 if (ts->ts_cpu == cpuid) 1777 tdq_runq_add(tdq, ts, srqflag); 1778 else 1779 mtx = sched_switch_migrate(tdq, td, srqflag); 1780 } else { 1781 /* This thread must be going to sleep. */ 1782 TDQ_LOCK(tdq); 1783 mtx = thread_block_switch(td); 1784 tdq_load_rem(tdq, ts); 1785 } 1786 /* 1787 * We enter here with the thread blocked and assigned to the 1788 * appropriate cpu run-queue or sleep-queue and with the current 1789 * thread-queue locked. 1790 */ 1791 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1792 /* 1793 * If KSE assigned a new thread just add it here and let choosethread 1794 * select the best one. 1795 */ 1796 if (newtd != NULL) 1797 sched_switchin(tdq, newtd); 1798 newtd = choosethread(); 1799 /* 1800 * Call the MD code to switch contexts if necessary. 1801 */ 1802 if (td != newtd) { 1803#ifdef HWPMC_HOOKS 1804 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1805 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1806#endif 1807 lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 1808 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1809 cpu_switch(td, newtd, mtx); 1810 /* 1811 * We may return from cpu_switch on a different cpu. However, 1812 * we always return with td_lock pointing to the current cpu's 1813 * run queue lock. 1814 */ 1815 cpuid = PCPU_GET(cpuid); 1816 tdq = TDQ_CPU(cpuid); 1817 lock_profile_obtain_lock_success( 1818 &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 1819#ifdef HWPMC_HOOKS 1820 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1821 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1822#endif 1823 } else 1824 thread_unblock_switch(td, mtx); 1825 /* 1826 * We should always get here with the lowest priority td possible. 1827 */ 1828 tdq->tdq_lowpri = td->td_priority; 1829 /* 1830 * Assert that all went well and return. 1831 */ 1832 TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1833 MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1834 td->td_oncpu = cpuid; 1835} 1836 1837/* 1838 * Adjust thread priorities as a result of a nice request. 1839 */ 1840void 1841sched_nice(struct proc *p, int nice) 1842{ 1843 struct thread *td; 1844 1845 PROC_LOCK_ASSERT(p, MA_OWNED); 1846 1847 p->p_nice = nice; 1848 FOREACH_THREAD_IN_PROC(p, td) { 1849 thread_lock(td); 1850 sched_priority(td); 1851 sched_prio(td, td->td_base_user_pri); 1852 thread_unlock(td); 1853 } 1854} 1855 1856/* 1857 * Record the sleep time for the interactivity scorer. 1858 */ 1859void 1860sched_sleep(struct thread *td, int prio) 1861{ 1862 1863 THREAD_LOCK_ASSERT(td, MA_OWNED); 1864 1865 td->td_slptick = ticks; 1866 if (TD_IS_SUSPENDED(td) || prio <= PSOCK) 1867 td->td_flags |= TDF_CANSWAP; 1868 if (static_boost && prio) 1869 sched_prio(td, prio); 1870} 1871 1872/* 1873 * Schedule a thread to resume execution and record how long it voluntarily 1874 * slept. We also update the pctcpu, interactivity, and priority. 1875 */ 1876void 1877sched_wakeup(struct thread *td) 1878{ 1879 struct td_sched *ts; 1880 int slptick; 1881 1882 THREAD_LOCK_ASSERT(td, MA_OWNED); 1883 ts = td->td_sched; 1884 td->td_flags &= ~TDF_CANSWAP; 1885 /* 1886 * If we slept for more than a tick update our interactivity and 1887 * priority. 1888 */ 1889 slptick = td->td_slptick; 1890 td->td_slptick = 0; 1891 if (slptick && slptick != ticks) { 1892 u_int hzticks; 1893 1894 hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1895 ts->ts_slptime += hzticks; 1896 sched_interact_update(td); 1897 sched_pctcpu_update(ts); 1898 } 1899 /* Reset the slice value after we sleep. 
*/ 1900 ts->ts_slice = sched_slice; 1901 sched_add(td, SRQ_BORING); 1902} 1903 1904/* 1905 * Penalize the parent for creating a new child and initialize the child's 1906 * priority. 1907 */ 1908void 1909sched_fork(struct thread *td, struct thread *child) 1910{ 1911 THREAD_LOCK_ASSERT(td, MA_OWNED); 1912 sched_fork_thread(td, child); 1913 /* 1914 * Penalize the parent and child for forking. 1915 */ 1916 sched_interact_fork(child); 1917 sched_priority(child); 1918 td->td_sched->ts_runtime += tickincr; 1919 sched_interact_update(td); 1920 sched_priority(td); 1921} 1922 1923/* 1924 * Fork a new thread, may be within the same process. 1925 */ 1926void 1927sched_fork_thread(struct thread *td, struct thread *child) 1928{ 1929 struct td_sched *ts; 1930 struct td_sched *ts2; 1931 1932 /* 1933 * Initialize child. 1934 */ 1935 THREAD_LOCK_ASSERT(td, MA_OWNED); 1936 sched_newthread(child); 1937 child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1938 child->td_cpuset = cpuset_ref(td->td_cpuset); 1939 ts = td->td_sched; 1940 ts2 = child->td_sched; 1941 ts2->ts_cpu = ts->ts_cpu; 1942 ts2->ts_runq = NULL; 1943 /* 1944 * Grab our parents cpu estimation information and priority. 1945 */ 1946 ts2->ts_ticks = ts->ts_ticks; 1947 ts2->ts_ltick = ts->ts_ltick; 1948 ts2->ts_ftick = ts->ts_ftick; 1949 child->td_user_pri = td->td_user_pri; 1950 child->td_base_user_pri = td->td_base_user_pri; 1951 /* 1952 * And update interactivity score. 1953 */ 1954 ts2->ts_slptime = ts->ts_slptime; 1955 ts2->ts_runtime = ts->ts_runtime; 1956 ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 1957} 1958 1959/* 1960 * Adjust the priority class of a thread. 1961 */ 1962void 1963sched_class(struct thread *td, int class) 1964{ 1965 1966 THREAD_LOCK_ASSERT(td, MA_OWNED); 1967 if (td->td_pri_class == class) 1968 return; 1969 /* 1970 * On SMP if we're on the RUNQ we must adjust the transferable 1971 * count because could be changing to or from an interrupt 1972 * class. 1973 */ 1974 if (TD_ON_RUNQ(td)) { 1975 struct tdq *tdq; 1976 1977 tdq = TDQ_CPU(td->td_sched->ts_cpu); 1978 if (THREAD_CAN_MIGRATE(td)) 1979 tdq->tdq_transferable--; 1980 td->td_pri_class = class; 1981 if (THREAD_CAN_MIGRATE(td)) 1982 tdq->tdq_transferable++; 1983 } 1984 td->td_pri_class = class; 1985} 1986 1987/* 1988 * Return some of the child's priority and interactivity to the parent. 1989 */ 1990void 1991sched_exit(struct proc *p, struct thread *child) 1992{ 1993 struct thread *td; 1994 1995 CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 1996 child, child->td_name, child->td_priority); 1997 1998 PROC_LOCK_ASSERT(p, MA_OWNED); 1999 td = FIRST_THREAD_IN_PROC(p); 2000 sched_exit_thread(td, child); 2001} 2002 2003/* 2004 * Penalize another thread for the time spent on this one. This helps to 2005 * worsen the priority and interactivity of processes which schedule batch 2006 * jobs such as make. This has little effect on the make process itself but 2007 * causes new processes spawned by it to receive worse scores immediately. 2008 */ 2009void 2010sched_exit_thread(struct thread *td, struct thread *child) 2011{ 2012 2013 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 2014 child, child->td_name, child->td_priority); 2015 2016 /* 2017 * Give the child's runtime to the parent without returning the 2018 * sleep time as a penalty to the parent. This causes shells that 2019 * launch expensive things to mark their children as expensive. 
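 * For example, a shell that repeatedly runs a compiler absorbs the
 * compiler's runtime here, so children it forks later inherit a less
 * interactive history via sched_interact_fork().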

void
sched_preempt(struct thread *td)
{
    struct tdq *tdq;

    thread_lock(td);
    tdq = TDQ_SELF();
    TDQ_LOCK_ASSERT(tdq, MA_OWNED);
    tdq->tdq_ipipending = 0;
    if (td->td_priority > tdq->tdq_lowpri) {
        if (td->td_critnest > 1)
            td->td_owepreempt = 1;
        else
            mi_switch(SW_INVOL | SW_PREEMPT, NULL);
    }
    thread_unlock(td);
}

/*
 * Fix priorities on return to user-space.  Priorities may be elevated due
 * to static priorities in msleep() or similar.
 */
void
sched_userret(struct thread *td)
{
    /*
     * XXX we cheat slightly on the locking here to avoid locking in
     * the usual case.  Setting td_priority here is essentially an
     * incomplete workaround for not setting it properly elsewhere.
     * Now that some interrupt handlers are threads, not setting it
     * properly elsewhere can clobber it in the window between setting
     * it here and returning to user mode, so don't waste time setting
     * it perfectly here.
     */
    KASSERT((td->td_flags & TDF_BORROWING) == 0,
        ("thread with borrowed priority returning to userland"));
    if (td->td_priority != td->td_user_pri) {
        thread_lock(td);
        td->td_priority = td->td_user_pri;
        td->td_base_pri = td->td_user_pri;
        tdq_setlowpri(TDQ_SELF(), td);
        thread_unlock(td);
    }
}

/*
 * Handle a stathz tick.  This is really only relevant for timeshare
 * threads.
 */
void
sched_clock(struct thread *td)
{
    struct tdq *tdq;
    struct td_sched *ts;

    THREAD_LOCK_ASSERT(td, MA_OWNED);
    tdq = TDQ_SELF();
#ifdef SMP
    /*
     * We run the long term load balancer infrequently on the first cpu.
     */
    if (balance_tdq == tdq) {
        if (balance_ticks && --balance_ticks == 0)
            sched_balance();
    }
#endif
    /*
     * Advance the insert index once for each tick to ensure that all
     * threads get a chance to run.
     */
    if (tdq->tdq_idx == tdq->tdq_ridx) {
        tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
        if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
            tdq->tdq_ridx = tdq->tdq_idx;
    }
    ts = td->td_sched;
    if (td->td_pri_class & PRI_FIFO_BIT)
        return;
    if (td->td_pri_class == PRI_TIMESHARE) {
        /*
         * We used a tick; charge it to the thread so
         * that we can compute our interactivity.
         */
        td->td_sched->ts_runtime += tickincr;
        sched_interact_update(td);
        sched_priority(td);
    }
    /*
     * We used up one time slice.
     */
    if (--ts->ts_slice > 0)
        return;
    /*
     * We're out of time, force a requeue at userret().
     */
    ts->ts_slice = sched_slice;
    td->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Called once per hz tick.  Used for cpu utilization information.  This
 * is easier than trying to scale based on stathz.
 */
void
sched_tick(void)
{
    struct td_sched *ts;

    ts = curthread->td_sched;
    /* Adjust ticks for pctcpu */
    ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
    ts->ts_ltick = ticks;
    /*
     * Update if we've exceeded our desired tick threshold by over one
     * second.
     */
    if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
        sched_pctcpu_update(ts);
}
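
/*
 * Illustrative arithmetic (assuming hz = 1000): each hz tick above adds
 * 1 << SCHED_TICK_SHIFT to ts_ticks, so a thread that stays on cpu for the
 * whole averaging window accumulates roughly (SCHED_TICK_SECS * hz) << 10.
 * sched_pctcpu() later undoes the shift with SCHED_TICK_HZ() and divides by
 * SCHED_TICK_SECS, giving about hz runnable ticks per second and hence a
 * pctcpu near 100%.  The ts_ftick check above forces a rescale once the
 * recorded window exceeds SCHED_TICK_MAX, keeping the history bounded.
 */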

/*
 * Return whether the current CPU has runnable tasks.  Used for in-kernel
 * cooperative idle threads.
 */
int
sched_runnable(void)
{
    struct tdq *tdq;
    int load;

    load = 1;

    tdq = TDQ_SELF();
    if ((curthread->td_flags & TDF_IDLETD) != 0) {
        if (tdq->tdq_load > 0)
            goto out;
    } else
        if (tdq->tdq_load - 1 > 0)
            goto out;
    load = 0;
out:
    return (load);
}

/*
 * Choose the highest priority thread to run.  The thread is removed from
 * the run-queue while running; however, the load remains.  For SMP we set
 * the tdq in the global idle bitmask if it idles here.
 */
struct thread *
sched_choose(void)
{
    struct td_sched *ts;
    struct tdq *tdq;

    tdq = TDQ_SELF();
    TDQ_LOCK_ASSERT(tdq, MA_OWNED);
    ts = tdq_choose(tdq);
    if (ts) {
        ts->ts_ltick = ticks;
        tdq_runq_rem(tdq, ts);
        return (ts->ts_thread);
    }
    return (PCPU_GET(idlethread));
}

/*
 * Set owepreempt if necessary.  Preemption never happens directly in ULE;
 * we always request it once we exit a critical section.
 */
static inline void
sched_setpreempt(struct thread *td)
{
    struct thread *ctd;
    int cpri;
    int pri;

    THREAD_LOCK_ASSERT(curthread, MA_OWNED);

    ctd = curthread;
    pri = td->td_priority;
    cpri = ctd->td_priority;
    if (pri < cpri)
        ctd->td_flags |= TDF_NEEDRESCHED;
    if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
        return;
    if (!sched_shouldpreempt(pri, cpri, 0))
        return;
    ctd->td_owepreempt = 1;
}

/*
 * Add a thread to a thread queue.  Select the appropriate runq and add the
 * thread to it.  This is the internal function called when the tdq is
 * predetermined.
 */
void
tdq_add(struct tdq *tdq, struct thread *td, int flags)
{
    struct td_sched *ts;

    TDQ_LOCK_ASSERT(tdq, MA_OWNED);
    KASSERT((td->td_inhibitors == 0),
        ("sched_add: trying to run inhibited thread"));
    KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
        ("sched_add: bad thread state"));
    KASSERT(td->td_flags & TDF_INMEM,
        ("sched_add: thread swapped out"));

    ts = td->td_sched;
    if (td->td_priority < tdq->tdq_lowpri)
        tdq->tdq_lowpri = td->td_priority;
    tdq_runq_add(tdq, ts, flags);
    tdq_load_add(tdq, ts);
}

/*
 * Select the target thread queue and add a thread to it.  Request
 * preemption or IPI a remote processor if required.
 */
void
sched_add(struct thread *td, int flags)
{
    struct tdq *tdq;
#ifdef SMP
    struct td_sched *ts;
    int cpu;
#endif
    CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
        td, td->td_name, td->td_priority, curthread,
        curthread->td_name);
    THREAD_LOCK_ASSERT(td, MA_OWNED);
    /*
     * Recalculate the priority before we select the target cpu or
     * run-queue.
     */
    if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
        sched_priority(td);
#ifdef SMP
    /*
     * Pick the destination cpu and if it isn't ours transfer to the
     * target cpu.
     */
    ts = td->td_sched;
    cpu = sched_pickcpu(ts, flags);
    tdq = sched_setcpu(ts, cpu, flags);
    tdq_add(tdq, td, flags);
    if (cpu != PCPU_GET(cpuid)) {
        tdq_notify(tdq, ts);
        return;
    }
#else
    tdq = TDQ_SELF();
    TDQ_LOCK(tdq);
    /*
     * Now that the thread is moving to the run-queue, set the lock
     * to the scheduler's lock.
     */
    thread_lock_set(td, TDQ_LOCKPTR(tdq));
    tdq_add(tdq, td, flags);
#endif
    if (!(flags & SRQ_YIELDING))
        sched_setpreempt(td);
}
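
/*
 * Example of the preemption decision above, with hypothetical priorities
 * (lower is better): if a thread at priority 50 is added while curthread
 * runs at 180, pri < cpri so TDF_NEEDRESCHED is set, and if
 * sched_shouldpreempt() agrees, td_owepreempt makes the switch happen as
 * soon as curthread leaves its critical section.  A thread added with
 * SRQ_YIELDING (as the switch path itself does) skips the check entirely.
 */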

/*
 * Remove a thread from a run-queue without running it.  This is used
 * when we're stealing a thread from a remote queue.  Otherwise all threads
 * exit by calling sched_exit_thread() and sched_throw() themselves.
 */
void
sched_rem(struct thread *td)
{
    struct tdq *tdq;
    struct td_sched *ts;

    CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
        td, td->td_name, td->td_priority, curthread,
        curthread->td_name);
    ts = td->td_sched;
    tdq = TDQ_CPU(ts->ts_cpu);
    TDQ_LOCK_ASSERT(tdq, MA_OWNED);
    MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
    KASSERT(TD_ON_RUNQ(td),
        ("sched_rem: thread not on run queue"));
    tdq_runq_rem(tdq, ts);
    tdq_load_rem(tdq, ts);
    TD_SET_CAN_RUN(td);
    if (td->td_priority == tdq->tdq_lowpri)
        tdq_setlowpri(tdq, NULL);
}

/*
 * Fetch cpu utilization information.  Updates on demand.
 */
fixpt_t
sched_pctcpu(struct thread *td)
{
    fixpt_t pctcpu;
    struct td_sched *ts;

    pctcpu = 0;
    ts = td->td_sched;
    if (ts == NULL)
        return (0);

    thread_lock(td);
    if (ts->ts_ticks) {
        int rtick;

        sched_pctcpu_update(ts);
        /* How many rtick per second? */
        rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
        pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
    }
    thread_unlock(td);

    return (pctcpu);
}

/*
 * Enforce affinity settings for a thread.  Called after adjustments to
 * cpumask.
 */
void
sched_affinity(struct thread *td)
{
#ifdef SMP
    struct td_sched *ts;
    int cpu;

    THREAD_LOCK_ASSERT(td, MA_OWNED);
    ts = td->td_sched;
    if (THREAD_CAN_SCHED(td, ts->ts_cpu))
        return;
    if (!TD_IS_RUNNING(td))
        return;
    td->td_flags |= TDF_NEEDRESCHED;
    if (!THREAD_CAN_MIGRATE(td))
        return;
    /*
     * Assign the new cpu and force a switch before returning to
     * userspace.  If the target thread is not running locally send
     * an ipi to force the issue.
     */
    cpu = ts->ts_cpu;
    ts->ts_cpu = sched_pickcpu(ts, 0);
    if (cpu != PCPU_GET(cpuid))
        ipi_selected(1 << cpu, IPI_PREEMPT);
#endif
}

/*
 * Bind a thread to a target cpu.
 */
void
sched_bind(struct thread *td, int cpu)
{
    struct td_sched *ts;

    THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
    ts = td->td_sched;
    if (ts->ts_flags & TSF_BOUND)
        sched_unbind(td);
    ts->ts_flags |= TSF_BOUND;
    sched_pin();
    if (PCPU_GET(cpuid) == cpu)
        return;
    ts->ts_cpu = cpu;
    /* When we return from mi_switch we'll be on the correct cpu. */
    mi_switch(SW_VOL, NULL);
}
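
/*
 * A typical consumer of sched_bind()/sched_unbind(), shown only as a
 * hypothetical sketch (the function below is not part of this file): run a
 * piece of per-cpu work pinned to a particular cpu.  The thread lock must
 * be held, and not recursed, around each call.
 */
#if 0
static void
example_run_on_cpu(int cpu)
{
    struct thread *td = curthread;

    thread_lock(td);
    sched_bind(td, cpu);	/* may mi_switch() to migrate */
    thread_unlock(td);

    /* ... per-cpu work runs here, bound to 'cpu' ... */

    thread_lock(td);
    sched_unbind(td);
    thread_unlock(td);
}
#endif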

/*
 * Release a bound thread.
 */
void
sched_unbind(struct thread *td)
{
    struct td_sched *ts;

    THREAD_LOCK_ASSERT(td, MA_OWNED);
    ts = td->td_sched;
    if ((ts->ts_flags & TSF_BOUND) == 0)
        return;
    ts->ts_flags &= ~TSF_BOUND;
    sched_unpin();
}

int
sched_is_bound(struct thread *td)
{
    THREAD_LOCK_ASSERT(td, MA_OWNED);
    return (td->td_sched->ts_flags & TSF_BOUND);
}

/*
 * Basic yield call.
 */
void
sched_relinquish(struct thread *td)
{
    thread_lock(td);
    SCHED_STAT_INC(switch_relinquish);
    mi_switch(SW_VOL, NULL);
    thread_unlock(td);
}

/*
 * Return the total system load.
 */
int
sched_load(void)
{
#ifdef SMP
    int total;
    int i;

    total = 0;
    for (i = 0; i <= mp_maxid; i++)
        total += TDQ_CPU(i)->tdq_sysload;
    return (total);
#else
    return (TDQ_SELF()->tdq_sysload);
#endif
}

int
sched_sizeof_proc(void)
{
    return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
    return (sizeof(struct thread) + sizeof(struct td_sched));
}

/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{
    struct thread *td;
    struct tdq *tdq;

    td = curthread;
    tdq = TDQ_SELF();
    mtx_assert(&Giant, MA_NOTOWNED);
    /* ULE relies on preemption for idle interruption. */
    for (;;) {
#ifdef SMP
        if (tdq_idled(tdq))
            cpu_idle();
#else
        cpu_idle();
#endif
    }
}

/*
 * A CPU is entering for the first time or a thread is exiting.
 */
void
sched_throw(struct thread *td)
{
    struct thread *newtd;
    struct tdq *tdq;

    tdq = TDQ_SELF();
    if (td == NULL) {
        /* Correct spinlock nesting and acquire the correct lock. */
        TDQ_LOCK(tdq);
        spinlock_exit();
    } else {
        MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
        tdq_load_rem(tdq, td->td_sched);
        lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
    }
    KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
    newtd = choosethread();
    TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
    PCPU_SET(switchtime, cpu_ticks());
    PCPU_SET(switchticks, ticks);
    cpu_throw(td, newtd);	/* doesn't return */
}

/*
 * This is called from fork_exit().  Just acquire the correct locks and
 * let fork do the rest of the work.
 */
void
sched_fork_exit(struct thread *td)
{
    struct td_sched *ts;
    struct tdq *tdq;
    int cpuid;

    /*
     * Finish setting up thread glue so that it begins execution in a
     * non-nested critical section with the scheduler lock held.
     */
    cpuid = PCPU_GET(cpuid);
    tdq = TDQ_CPU(cpuid);
    ts = td->td_sched;
    if (TD_IS_IDLETHREAD(td))
        td->td_lock = TDQ_LOCKPTR(tdq);
    MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
    td->td_oncpu = cpuid;
    TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
    lock_profile_obtain_lock_success(
        &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
    tdq->tdq_lowpri = td->td_priority;
}

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
    "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
    "Scheduler name");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
    "Slice size for timeshare threads");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
    "Interactivity score threshold");
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
    0, "Min priority for preemption, lower priorities have greater precedence");
SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost,
    0, "Controls whether static kernel priorities are assigned to sleeping threads.");
#ifdef SMP
SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
    "Number of hz ticks to keep thread affinity for");
SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
    "Enables the long-term load balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
    &balance_interval, 0,
    "Average frequency in stathz ticks to run the long-term balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
    "Steals work from another hyper-threaded core on idle");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
    "Attempts to steal work from other cores before idling");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
    "Minimum load on remote cpu before we'll steal");
#endif

/* ps compat.  All cpu percentages from ULE are weighted. */
static int ccpu = 0;
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");


#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"
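
/*
 * Illustrative usage (not part of the source): the CTLFLAG_RW knobs above
 * can be inspected or adjusted from userland with sysctl(8), e.g.:
 *
 *    sysctl kern.sched.interact
 *    sysctl kern.sched.slice=<new value>
 *
 * Appropriate values depend on hz and the workload; these lines only show
 * the mechanism.
 */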