sched_ule.c revision 177902
/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine grain locking.  It has superior interactive
 * performance under load even on uni-processor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 177902 2008-04-04 01:04:43Z jeff $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__)
#error "This architecture is not currently compatible with ULE"
#endif

#define	KTR_ULE	0

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_rltick;	/* Real last tick, for affinity. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

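/*
 * Worked example (illustrative only; assumes hz = 1000 and the stock
 * priority ranges, neither of which is guaranteed here):  a timeshare
 * thread that ran for about half of the last SCHED_TICK_SECS window has
 * SCHED_TICK_HZ() of roughly 5 * hz and SCHED_TICK_TOTAL() of roughly
 * 10 * hz, so SCHED_PRI_TICKS() resolves to about half of
 * SCHED_PRI_RANGE.  Its priority becomes SCHED_PRI_MIN plus that
 * utilization term plus SCHED_PRI_NICE(nice), i.e. heavier cpu use and
 * positive nice both push the thread toward PRI_MAX_TIMESHARE.
 * Interactivity is judged separately against SCHED_INTERACT_THRESH (30)
 * in sched_priority() below.
 */
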
/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice = 1;
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
#else
static int preempt_thresh = PRI_MIN_KERN;
#endif
#else
static int preempt_thresh = 0;
#endif
static int static_boost = 1;

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be read without the lock to avoid
 * excess locking in sched_pickcpu().
 */
struct tdq {
	/* Ordered to improve efficiency of cpu_search() and switch(). */
	struct mtx	tdq_lock;		/* run queue lock. */
	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
	int		tdq_load;		/* Aggregate load. */
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
	int		tdq_transferable;	/* Transferable thread count. */
	u_char		tdq_lowpri;		/* Lowest priority thread. */
	u_char		tdq_ipipending;		/* IPI pending. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	char		tdq_name[sizeof("sched lock") + 6];
} __aligned(64);


#ifdef SMP
struct cpu_group *cpu_top;

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
#define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_interval = 128;	/* Default set in sched_initticks(). */
static int affinity;
static int steal_htt = 1;
static int steal_idle = 1;
static int steal_thresh = 2;

/*
 * One thread queue per processor.
 */
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq	*balance_tdq;
static int balance_ticks;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_ID(x)	(0)
#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct thread *tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct thread *);
static void tdq_load_rem(struct tdq *, struct thread *);
static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
static __inline void tdq_runq_rem(struct tdq *, struct thread *);
static inline int sched_shouldpreempt(int, int, int);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, struct thread *);
static struct thread *tdq_steal(struct tdq *, int);
static struct thread *runq_steal(struct runq *, int);
static int sched_pickcpu(struct thread *, int);
static void sched_balance(void);
static int sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct thread *, int, int);
static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
    NULL);

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(td, rqh, td_runq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    td, td->td_name, td->td_priority,
					    td->td_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq %d:\n", TDQ_ID(tdq));
	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
	printf("\tLock name:      %s\n", tdq->tdq_name);
	printf("\tload:           %d\n", tdq->tdq_load);
	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
}

static inline int
sched_shouldpreempt(int pri, int cpri, int remote)
{
	/*
	 * If the new priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri >= cpri)
		return (0);
	/*
	 * Always preempt idle.
	 */
	if (cpri >= PRI_MIN_IDLE)
		return (1);
	/*
	 * If preemption is disabled don't preempt others.
	 */
	if (preempt_thresh == 0)
		return (0);
	/*
	 * Preempt if we exceed the threshold.
	 */
	if (pri <= preempt_thresh)
		return (1);
	/*
	 * If we're realtime or better and there is timeshare or worse running
	 * preempt only remote processors.
	 */
	if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
		return (1);
	return (0);
}

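/*
 * In other words, with the default preempt_thresh (PRI_MIN_KERN when
 * PREEMPTION is configured): an incoming interrupt or kernel-priority
 * thread preempts whatever is running, any thread preempts the idle
 * thread, a timeshare thread never preempts another running timeshare
 * thread, and a realtime thread additionally preempts a remote cpu that
 * is only running timeshare work.  With preempt_thresh at 0 only the
 * idle case preempts.
 */
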
#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
{
	struct td_sched *ts;
	u_char pri;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	pri = td->td_priority;
	ts = td->td_sched;
	TD_SET_RUNQ(td);
	if (THREAD_CAN_MIGRATE(td)) {
		tdq->tdq_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
	if (pri <= PRI_MAX_REALTIME) {
		ts->ts_runq = &tdq->tdq_realtime;
	} else if (pri <= PRI_MAX_TIMESHARE) {
		ts->ts_runq = &tdq->tdq_timeshare;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
		    ("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * realtime.  Use the whole queue to represent these values.
		 */
		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, td, pri, flags);
		return;
	} else
		ts->ts_runq = &tdq->tdq_idle;
	runq_add(ts->ts_runq, td, flags);
}

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", td));
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, td, NULL);
	} else
		runq_remove(ts->ts_runq, td);
}

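/*
 * Example of the circular timeshare indexing above (illustrative;
 * assumes the usual RQ_NQS of 64 run-queue buckets): a thread whose
 * priority maps to offset 20 is inserted at slot (20 + tdq_idx) % 64,
 * so the meaning of each bucket rotates as tdq_idx advances and busier
 * threads naturally age toward the back.  Threads re-queued because of
 * preemption or priority borrowing go to tdq_ridx instead so they are
 * picked up again immediately, and an insert that would collide with
 * tdq_ridx is backed off by one slot to keep the two indices
 * distinguishable.
 */
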
/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct thread *td)
{

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	tdq->tdq_load++;
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload++;
	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
}

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));

	tdq->tdq_load--;
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
}

/*
 * Set lowpri to its exact value by searching the run-queue and
 * evaluating curthread.  curthread may be passed as an optimization.
 */
static void
tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if (ctd == NULL)
		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
	td = tdq_choose(tdq);
	if (td == NULL || td->td_priority > ctd->td_priority)
		tdq->tdq_lowpri = ctd->td_priority;
	else
		tdq->tdq_lowpri = td->td_priority;
}

#ifdef SMP
struct cpu_search {
	cpumask_t cs_mask;	/* Mask of valid cpus. */
	u_int	cs_load;
	u_int	cs_cpu;
	int	cs_limit;	/* Min priority for low, min load for high. */
};

#define	CPU_SEARCH_LOWEST	0x1
#define	CPU_SEARCH_HIGHEST	0x2
#define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)

#define	CPUMASK_FOREACH(cpu, mask)				\
	for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++)	\
		if ((mask) & 1 << (cpu))

static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match);
int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low);
int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high);
int cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high);

/*
 * This routine compares according to the match argument and should be
 * reduced in actual instantiations via constant propagation and dead code
 * elimination.
 */
static __inline int
cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high,
    const int match)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);
	if (match & CPU_SEARCH_LOWEST)
		if (low->cs_mask & (1 << cpu) &&
		    tdq->tdq_load < low->cs_load &&
		    tdq->tdq_lowpri > low->cs_limit) {
			low->cs_cpu = cpu;
			low->cs_load = tdq->tdq_load;
		}
	if (match & CPU_SEARCH_HIGHEST)
		if (high->cs_mask & (1 << cpu) &&
		    tdq->tdq_load >= high->cs_limit &&
		    tdq->tdq_load > high->cs_load &&
		    tdq->tdq_transferable) {
			high->cs_cpu = cpu;
			high->cs_load = tdq->tdq_load;
		}
	return (tdq->tdq_load);
}

/*
 * Search the tree of cpu_groups for the lowest or highest loaded cpu
 * according to the match argument.  This routine actually compares the
 * load on all paths through the tree and finds the least loaded cpu on
 * the least loaded path, which may differ from the least loaded cpu in
 * the system.  This balances work among caches and busses.
 *
 * This inline is instantiated in three forms below using constants for the
 * match argument.  It is reduced to the minimum set for each case.  It is
 * also recursive to the depth of the tree.
 */
static __inline int
cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match)
{
	int total;

	total = 0;
	if (cg->cg_children) {
		struct cpu_search lgroup;
		struct cpu_search hgroup;
		struct cpu_group *child;
		u_int lload;
		int hload;
		int load;
		int i;

		lload = -1;
		hload = -1;
		for (i = 0; i < cg->cg_children; i++) {
			child = &cg->cg_child[i];
			if (match & CPU_SEARCH_LOWEST) {
				lgroup = *low;
				lgroup.cs_load = -1;
			}
			if (match & CPU_SEARCH_HIGHEST) {
				hgroup = *high;
				hgroup.cs_load = 0;
			}
			switch (match) {
			case CPU_SEARCH_LOWEST:
				load = cpu_search_lowest(child, &lgroup);
				break;
			case CPU_SEARCH_HIGHEST:
				load = cpu_search_highest(child, &hgroup);
				break;
			case CPU_SEARCH_BOTH:
				load = cpu_search_both(child, &lgroup, &hgroup);
				break;
			}
			total += load;
			if (match & CPU_SEARCH_LOWEST)
				if (load < lload || low->cs_cpu == -1) {
					*low = lgroup;
					lload = load;
				}
			if (match & CPU_SEARCH_HIGHEST)
				if (load > hload || high->cs_cpu == -1) {
					hload = load;
					*high = hgroup;
				}
		}
	} else {
		int cpu;

		CPUMASK_FOREACH(cpu, cg->cg_mask)
			total += cpu_compare(cpu, low, high, match);
	}
	return (total);
}

/*
 * cpu_search instantiations must pass constants to maintain the inline
 * optimization.
 */
int
cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low)
{
	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
}

int
cpu_search_highest(struct cpu_group *cg, struct cpu_search *high)
{
	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
}

int
cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high)
{
	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
}

/*
 * Find the cpu with the least load via the least loaded path that has a
 * lowpri greater than pri.  A pri of -1 indicates any priority is
 * acceptable.
 */
static inline int
sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri)
{
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	low.cs_limit = pri;
	cpu_search_lowest(cg, &low);
	return low.cs_cpu;
}

/*
 * Find the cpu with the highest load via the highest loaded path.
 */
static inline int
sched_highest(struct cpu_group *cg, cpumask_t mask, int minload)
{
	struct cpu_search high;

	high.cs_cpu = -1;
	high.cs_load = 0;
	high.cs_mask = mask;
	high.cs_limit = minload;
	cpu_search_highest(cg, &high);
	return high.cs_cpu;
}

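/*
 * The cs_limit argument gives these helpers their two meanings:
 * sched_lowest(cg, mask, pri) only reports a cpu whose tdq_lowpri is
 * greater than pri (i.e. it is running something less important), while
 * sched_highest(cg, mask, minload) only reports a cpu with at least
 * minload and a transferable thread.  For example, a hypothetical
 * caller that wants the globally least loaded cpu regardless of
 * priority would use sched_lowest(cpu_top, mask, -1), which is how
 * sched_pickcpu() below falls back when no affinity match is found.
 */
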
/*
 * Simultaneously find the highest and lowest loaded cpu reachable via
 * cg.
 */
static inline void
sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu)
{
	struct cpu_search high;
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_limit = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	high.cs_load = 0;
	high.cs_cpu = -1;
	high.cs_limit = -1;
	high.cs_mask = mask;
	cpu_search_both(cg, &low, &high);
	*lowcpu = low.cs_cpu;
	*highcpu = high.cs_cpu;
	return;
}

static void
sched_balance_group(struct cpu_group *cg)
{
	cpumask_t mask;
	int high;
	int low;
	int i;

	mask = -1;
	for (;;) {
		sched_both(cg, mask, &low, &high);
		if (low == high || low == -1 || high == -1)
			break;
		if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low)))
			break;
		/*
		 * If we failed to move any threads determine which cpu
		 * to kick out of the set and try again.
		 */
		if (TDQ_CPU(high)->tdq_transferable == 0)
			mask &= ~(1 << high);
		else
			mask &= ~(1 << low);
	}

	for (i = 0; i < cg->cg_children; i++)
		sched_balance_group(&cg->cg_child[i]);
}

static void
sched_balance()
{
	struct tdq *tdq;

	/*
	 * Select a random time between .5 * balance_interval and
	 * 1.5 * balance_interval.
	 */
	balance_ticks = max(balance_interval / 2, 1);
	balance_ticks += random() % balance_interval;
	if (smp_started == 0 || rebalance == 0)
		return;
	tdq = TDQ_SELF();
	TDQ_UNLOCK(tdq);
	sched_balance_group(cpu_top);
	TDQ_LOCK(tdq);
}

/*
 * Lock two thread queues using their address to maintain lock order.
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
	if (one < two) {
		TDQ_LOCK(one);
		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
	}
}

/*
 * Unlock two thread queues.  Order is not important here.
 */
static void
tdq_unlock_pair(struct tdq *one, struct tdq *two)
{
	TDQ_UNLOCK(one);
	TDQ_UNLOCK(two);
}

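/*
 * Example of the balancing math below (illustrative numbers): with a
 * high-queue load of 7 and a low-queue load of 2 the difference is 5,
 * so move = 5 / 2 rounded up = 3 threads, clamped to the transferable
 * count; the queues end up at 4 and 5 and the low cpu is sent
 * IPI_PREEMPT so it notices its new work.
 */
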
/*
 * Transfer load between two imbalanced thread queues.
 */
static int
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int moved;
	int move;
	int diff;
	int i;

	tdq_lock_pair(high, low);
	transferable = high->tdq_transferable;
	high_load = high->tdq_load;
	low_load = low->tdq_load;
	moved = 0;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	if (transferable != 0) {
		diff = high_load - low_load;
		move = diff / 2;
		if (diff & 0x1)
			move++;
		move = min(move, transferable);
		for (i = 0; i < move; i++)
			moved += tdq_move(high, low);
		/*
		 * IPI the target cpu to force it to reschedule with the new
		 * workload.
		 */
		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
	}
	tdq_unlock_pair(high, low);
	return (moved);
}

/*
 * Move a thread from one thread queue to another.
 */
static int
tdq_move(struct tdq *from, struct tdq *to)
{
	struct td_sched *ts;
	struct thread *td;
	struct tdq *tdq;
	int cpu;

	TDQ_LOCK_ASSERT(from, MA_OWNED);
	TDQ_LOCK_ASSERT(to, MA_OWNED);

	tdq = from;
	cpu = TDQ_ID(to);
	td = tdq_steal(tdq, cpu);
	if (td == NULL)
		return (0);
	ts = td->td_sched;
	/*
	 * Although the run queue is locked the thread may be blocked.  Lock
	 * it to clear this and acquire the run-queue lock.
	 */
	thread_lock(td);
	/* Drop recursive lock on from acquired via thread_lock(). */
	TDQ_UNLOCK(from);
	sched_rem(td);
	ts->ts_cpu = cpu;
	td->td_lock = TDQ_LOCKPTR(to);
	tdq_add(to, td, SRQ_YIELDING);
	return (1);
}

/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
	struct cpu_group *cg;
	struct tdq *steal;
	cpumask_t mask;
	int thresh;
	int cpu;

	if (smp_started == 0 || steal_idle == 0)
		return (1);
	mask = -1;
	mask &= ~PCPU_GET(cpumask);
	/* We don't want to be preempted while we're iterating. */
	spinlock_enter();
	for (cg = tdq->tdq_cg; cg != NULL; ) {
		if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0)
			thresh = steal_thresh;
		else
			thresh = 1;
		cpu = sched_highest(cg, mask, thresh);
		if (cpu == -1) {
			cg = cg->cg_parent;
			continue;
		}
		steal = TDQ_CPU(cpu);
		mask &= ~(1 << cpu);
		tdq_lock_pair(tdq, steal);
		if (steal->tdq_load < thresh || steal->tdq_transferable == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		/*
		 * If a thread was added while interrupts were disabled don't
		 * steal one here.  If we fail to acquire one due to affinity
		 * restrictions loop again with this cpu removed from the
		 * set.
		 */
		if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		spinlock_exit();
		TDQ_UNLOCK(steal);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);

		return (0);
	}
	spinlock_exit();
	return (1);
}

/*
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct tdq *tdq, struct thread *td)
{
	int cpri;
	int pri;
	int cpu;

	if (tdq->tdq_ipipending)
		return;
	cpu = td->td_sched->ts_cpu;
	pri = td->td_priority;
	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
	if (!sched_shouldpreempt(pri, cpri, 1))
		return;
	tdq->tdq_ipipending = 1;
	ipi_selected(1 << cpu, IPI_PREEMPT);
}

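/*
 * Note that tdq_notify() reuses sched_shouldpreempt() with remote set,
 * so under the default threshold waking a kernel- or realtime-priority
 * thread IPIs a cpu that is running timeshare or idle work, while
 * waking an ordinary timeshare thread on a busy cpu sends nothing and
 * the thread simply waits its turn there.  The tdq_ipipending flag
 * keeps a burst of wakeups from generating a matching burst of IPIs.
 */
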
/*
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
static struct thread *
runq_steal_from(struct runq *rq, int cpu, u_char start)
{
	struct rqbits *rqb;
	struct rqhead *rqh;
	struct thread *td;
	int first;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW - 1);
	pri = 0;
	first = 0;
again:
	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
		if (rqb->rqb_bits[i] == 0)
			continue;
		if (bit != 0) {
			for (pri = bit; pri < RQB_BPW; pri++)
				if (rqb->rqb_bits[i] & (1ul << pri))
					break;
			if (pri >= RQB_BPW)
				continue;
		} else
			pri = RQB_FFS(rqb->rqb_bits[i]);
		pri += (i << RQB_L2BPW);
		rqh = &rq->rq_queues[pri];
		TAILQ_FOREACH(td, rqh, td_runq) {
			if (first && THREAD_CAN_MIGRATE(td) &&
			    THREAD_CAN_SCHED(td, cpu))
				return (td);
			first = 1;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}

	return (NULL);
}

/*
 * Steals load from a standard linear queue.
 */
static struct thread *
runq_steal(struct runq *rq, int cpu)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct thread *td;
	int word;
	int bit;

	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(td, rqh, td_runq)
				if (THREAD_CAN_MIGRATE(td) &&
				    THREAD_CAN_SCHED(td, cpu))
					return (td);
		}
	}
	return (NULL);
}

/*
 * Attempt to steal a thread in priority order from a thread queue.
 */
static struct thread *
tdq_steal(struct tdq *tdq, int cpu)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
		return (td);
	if ((td = runq_steal_from(&tdq->tdq_timeshare,
	    cpu, tdq->tdq_ridx)) != NULL)
		return (td);
	return (runq_steal(&tdq->tdq_idle, cpu));
}

/*
 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
 * current lock and returns with the assigned queue locked.
 */
static inline struct tdq *
sched_setcpu(struct thread *td, int cpu, int flags)
{
	struct tdq *tdq;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	tdq = TDQ_CPU(cpu);
	td->td_sched->ts_cpu = cpu;
	/*
	 * If the lock matches just return the queue.
	 */
	if (td->td_lock == TDQ_LOCKPTR(tdq))
		return (tdq);
#ifdef notyet
	/*
	 * If the thread isn't running its lockptr is a
	 * turnstile or a sleepqueue.  We can just lock_set without
	 * blocking.
	 */
	if (TD_CAN_RUN(td)) {
		TDQ_LOCK(tdq);
		thread_lock_set(td, TDQ_LOCKPTR(tdq));
		return (tdq);
	}
#endif
	/*
	 * The hard case, migration, we need to block the thread first to
	 * prevent order reversals with other cpus locks.
	 */
	thread_lock_block(td);
	TDQ_LOCK(tdq);
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
	return (tdq);
}

static int
sched_pickcpu(struct thread *td, int flags)
{
	struct cpu_group *cg;
	struct td_sched *ts;
	struct tdq *tdq;
	cpumask_t mask;
	int self;
	int pri;
	int cpu;

	self = PCPU_GET(cpuid);
	ts = td->td_sched;
	if (smp_started == 0)
		return (self);
	/*
	 * Don't migrate a running thread from sched_switch().
	 */
	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
		return (ts->ts_cpu);
	/*
	 * Prefer to run interrupt threads on the processors that generate
	 * the interrupt.
	 */
	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
	    curthread->td_intr_nesting_level)
		ts->ts_cpu = self;
	/*
	 * If the thread can run on the last cpu and the affinity has not
	 * expired or it is idle run it there.
	 */
	pri = td->td_priority;
	tdq = TDQ_CPU(ts->ts_cpu);
	if (THREAD_CAN_SCHED(td, ts->ts_cpu)) {
		if (tdq->tdq_lowpri > PRI_MIN_IDLE)
			return (ts->ts_cpu);
		if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri)
			return (ts->ts_cpu);
	}
	/*
	 * Search for the highest level in the tree that still has affinity.
	 */
	cg = NULL;
	for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent)
		if (SCHED_AFFINITY(ts, cg->cg_level))
			break;
	cpu = -1;
	mask = td->td_cpuset->cs_mask.__bits[0];
	if (cg)
		cpu = sched_lowest(cg, mask, pri);
	if (cpu == -1)
		cpu = sched_lowest(cpu_top, mask, -1);
	/*
	 * Compare the lowest loaded cpu to current cpu.
	 */
	if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri &&
	    TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
		cpu = self;
	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
	return (cpu);
}
#endif

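/*
 * Putting sched_pickcpu() together, the search order is: stay put when
 * migration is forbidden, prefer the current cpu for interrupt threads,
 * reuse the last cpu if it is idle or its affinity is still warm and it
 * is running something less important, otherwise walk up the topology
 * to the widest level that still has affinity and take the least loaded
 * cpu on the least loaded path, falling back to a global search and
 * finally to the current cpu when that looks strictly better.  As a
 * hypothetical example, a thread whose last core is now busy will
 * usually land on that core's idle SMT or cache sibling rather than on
 * a distant package.
 */
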
/*
 * Pick the highest priority task we have and return it.
 */
static struct thread *
tdq_choose(struct tdq *tdq)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	td = runq_choose(&tdq->tdq_realtime);
	if (td != NULL)
		return (td);
	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
	if (td != NULL) {
		KASSERT(td->td_priority >= PRI_MIN_TIMESHARE,
		    ("tdq_choose: Invalid priority on timeshare queue %d",
		    td->td_priority));
		return (td);
	}
	td = runq_choose(&tdq->tdq_idle);
	if (td != NULL) {
		KASSERT(td->td_priority >= PRI_MIN_IDLE,
		    ("tdq_choose: Invalid priority on idle queue %d",
		    td->td_priority));
		return (td);
	}

	return (NULL);
}

/*
 * Initialize a thread queue.
 */
static void
tdq_setup(struct tdq *tdq)
{

	if (bootverbose)
		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
	runq_init(&tdq->tdq_realtime);
	runq_init(&tdq->tdq_timeshare);
	runq_init(&tdq->tdq_idle);
	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
	    "sched lock %d", (int)TDQ_ID(tdq));
	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
	    MTX_SPIN | MTX_RECURSE);
}

#ifdef SMP
static void
sched_setup_smp(void)
{
	struct tdq *tdq;
	int i;

	cpu_top = smp_topo();
	for (i = 0; i < MAXCPU; i++) {
		if (CPU_ABSENT(i))
			continue;
		tdq = TDQ_CPU(i);
		tdq_setup(tdq);
		tdq->tdq_cg = smp_topo_find(cpu_top, i);
		if (tdq->tdq_cg == NULL)
			panic("Can't find cpu group for %d\n", i);
	}
	balance_tdq = TDQ_SELF();
	sched_balance();
}
#endif

/*
 * Setup the thread queues and initialize the topology based on MD
 * information.
 */
static void
sched_setup(void *dummy)
{
	struct tdq *tdq;

	tdq = TDQ_SELF();
#ifdef SMP
	sched_setup_smp();
#else
	tdq_setup(tdq);
#endif
	/*
	 * To avoid divide-by-zero, we set realstathz to a dummy value in
	 * case sched_clock() is called before sched_initticks().
	 */
	realstathz = hz;
	sched_slice = (realstathz/10);	/* ~100ms */
	tickincr = 1 << SCHED_TICK_SHIFT;

	/* Add thread0's load since it's running. */
	TDQ_LOCK(tdq);
	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
	tdq_load_add(tdq, &thread0);
	tdq->tdq_lowpri = thread0.td_priority;
	TDQ_UNLOCK(tdq);
}

/*
 * This routine determines the tickincr after stathz and hz are setup.
 */
/* ARGSUSED */
static void
sched_initticks(void *dummy)
{
	int incr;

	realstathz = stathz ? stathz : hz;
	sched_slice = (realstathz/10);	/* ~100ms */

	/*
	 * tickincr is shifted out by 10 to avoid rounding errors due to
	 * hz not being evenly divisible by stathz on all platforms.
	 */
	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
	/*
	 * This does not work for values of stathz that are more than
	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
	 */
	if (incr == 0)
		incr = 1;
	tickincr = incr;
#ifdef SMP
	/*
	 * Set the default balance interval now that we know
	 * what realstathz is.
	 */
	balance_interval = realstathz;
	/*
	 * Set steal thresh to log2(mp_ncpu) but no greater than 4.  This
	 * prevents excess thrashing on large machines and excess idle on
	 * smaller machines.
	 */
	steal_thresh = min(ffs(mp_ncpus) - 1, 3);
	affinity = SCHED_AFFINITY_DEFAULT;
#endif
}

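/*
 * Worked example (illustrative; hz and stathz are platform dependent):
 * with hz = 1000 and stathz = 128, tickincr = (1000 << 10) / 128 = 8000,
 * i.e. each stathz tick credits about 7.8 hz ticks of cpu time in the
 * shifted fixed-point form, and sched_slice = 128 / 10 = 12 stathz
 * ticks, which is the roughly 100ms quantum that sched_rr_interval()
 * reports.  On an 8-cpu machine steal_thresh becomes
 * min(ffs(8) - 1, 3) = 3.
 */
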

/*
 * This is the core of the interactivity algorithm.  Determines a score based
 * on past behavior.  It is the ratio of sleep time to run time scaled to
 * a [0, 100] integer.  This is the voluntary sleep time of a process, which
 * differs from the cpu usage because it does not account for time spent
 * waiting on a run-queue.  Would be prettier if we had floating point.
 */
static int
sched_interact_score(struct thread *td)
{
	struct td_sched *ts;
	int div;

	ts = td->td_sched;
	/*
	 * The score is only needed if this is likely to be an interactive
	 * task.  Don't go through the expense of computing it if there's
	 * no chance.
	 */
	if (sched_interact <= SCHED_INTERACT_HALF &&
	    ts->ts_runtime >= ts->ts_slptime)
		return (SCHED_INTERACT_HALF);

	if (ts->ts_runtime > ts->ts_slptime) {
		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
	}
	if (ts->ts_slptime > ts->ts_runtime) {
		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
		return (ts->ts_runtime / div);
	}
	/* runtime == slptime */
	if (ts->ts_runtime)
		return (SCHED_INTERACT_HALF);

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct thread *td)
{
	int score;
	int pri;

	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * If the score is interactive we place the thread in the realtime
	 * queue with a priority that is less than kernel and interrupt
	 * priorities.  These threads are not subject to nice restrictions.
	 *
	 * Scores greater than this are placed on the normal timeshare queue
	 * where the priority is partially decided by the most recent cpu
	 * utilization and the rest is decided by nice value.
	 *
	 * The nice value of the process has a linear effect on the calculated
	 * score.  Negative nice values make it easier for a thread to be
	 * considered interactive.
	 */
	score = imax(0, sched_interact_score(td) - td->td_proc->p_nice);
	if (score < sched_interact) {
		pri = PRI_MIN_REALTIME;
		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
		    * score;
		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
		    ("sched_priority: invalid interactive priority %d score %d",
		    pri, score));
	} else {
		pri = SCHED_PRI_MIN;
		if (td->td_sched->ts_ticks)
			pri += SCHED_PRI_TICKS(td->td_sched);
		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
		    ("sched_priority: invalid priority %d: nice %d, "
		    "ticks %d ftick %d ltick %d tick pri %d",
		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
		    SCHED_PRI_TICKS(td->td_sched)));
	}
	sched_user_prio(td, pri);

	return;
}

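/*
 * Worked example of the scoring above (illustrative): a thread that
 * sleeps three times as long as it runs scores roughly
 * ts_runtime / (ts_slptime / 50) ~= 16, which is under
 * SCHED_INTERACT_THRESH (30), so sched_priority() gives it a priority
 * in the realtime range scaled by the score.  Invert the ratio and the
 * score becomes roughly 50 + (50 - 16) = 84; the thread then falls
 * through to the timeshare calculation, where SCHED_PRI_TICKS() and the
 * nice value decide its slot.
 */
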
/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.  This
 * function is ugly due to integer math.
 */
static void
sched_interact_update(struct thread *td)
{
	struct td_sched *ts;
	u_int sum;

	ts = td->td_sched;
	sum = ts->ts_runtime + ts->ts_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * This only happens from two places:
	 * 1) We have added an unusual amount of run time from fork_exit.
	 * 2) We have added an unusual amount of sleep time from sched_sleep().
	 */
	if (sum > SCHED_SLP_RUN_MAX * 2) {
		if (ts->ts_runtime > ts->ts_slptime) {
			ts->ts_runtime = SCHED_SLP_RUN_MAX;
			ts->ts_slptime = 1;
		} else {
			ts->ts_slptime = SCHED_SLP_RUN_MAX;
			ts->ts_runtime = 1;
		}
		return;
	}
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		ts->ts_runtime /= 2;
		ts->ts_slptime /= 2;
		return;
	}
	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
}

/*
 * Scale back the interactivity history when a child thread is created.  The
 * history is inherited from the parent but the thread may behave totally
 * differently.  For example, a shell spawning a compiler process.  We want
 * to learn that the compiler is behaving badly very quickly.
 */
static void
sched_interact_fork(struct thread *td)
{
	int ratio;
	int sum;

	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		td->td_sched->ts_runtime /= ratio;
		td->td_sched->ts_slptime /= ratio;
	}
}

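/*
 * For scale (illustrative; assumes hz = 1000): SCHED_SLP_RUN_MAX is
 * (5 * hz) << 10 = 5,120,000 fixed-point units, i.e. about five seconds
 * of combined sleep plus run history, so sched_interact_update() keeps
 * roughly the last five seconds of behavior relevant by scaling both
 * terms back to 4/5 (or clamping outright in the pathological cases).
 * At fork time sched_interact_fork() squeezes the child's inherited
 * history down to SCHED_SLP_RUN_FORK, about half a second.
 */
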
1459113357Sjeff */ 1460109864Sjeffint 1461109864Sjeffsched_rr_interval(void) 1462109864Sjeff{ 1463165762Sjeff 1464165762Sjeff /* Convert sched_slice to hz */ 1465165762Sjeff return (hz/(realstathz/sched_slice)); 1466109864Sjeff} 1467109864Sjeff 1468171482Sjeff/* 1469171482Sjeff * Update the percent cpu tracking information when it is requested or 1470171482Sjeff * the total history exceeds the maximum. We keep a sliding history of 1471171482Sjeff * tick counts that slowly decays. This is less precise than the 4BSD 1472171482Sjeff * mechanism since it happens with less regular and frequent events. 1473171482Sjeff */ 1474121790Sjeffstatic void 1475164936Sjuliansched_pctcpu_update(struct td_sched *ts) 1476109864Sjeff{ 1477165762Sjeff 1478165762Sjeff if (ts->ts_ticks == 0) 1479165762Sjeff return; 1480165796Sjeff if (ticks - (hz / 10) < ts->ts_ltick && 1481165796Sjeff SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 1482165796Sjeff return; 1483109864Sjeff /* 1484109864Sjeff * Adjust counters and watermark for pctcpu calc. 1485116365Sjeff */ 1486165762Sjeff if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1487164936Sjulian ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1488165762Sjeff SCHED_TICK_TARG; 1489165762Sjeff else 1490164936Sjulian ts->ts_ticks = 0; 1491164936Sjulian ts->ts_ltick = ticks; 1492165762Sjeff ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 1493109864Sjeff} 1494109864Sjeff 1495171482Sjeff/* 1496171482Sjeff * Adjust the priority of a thread. Move it to the appropriate run-queue 1497171482Sjeff * if necessary. This is the back-end for several priority related 1498171482Sjeff * functions. 1499171482Sjeff */ 1500165762Sjeffstatic void 1501139453Sjhbsched_thread_priority(struct thread *td, u_char prio) 1502109864Sjeff{ 1503164936Sjulian struct td_sched *ts; 1504177009Sjeff struct tdq *tdq; 1505177009Sjeff int oldpri; 1506109864Sjeff 1507139316Sjeff CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1508173600Sjulian td, td->td_name, td->td_priority, prio, curthread, 1509173600Sjulian curthread->td_name); 1510164936Sjulian ts = td->td_sched; 1511170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1512139453Sjhb if (td->td_priority == prio) 1513139453Sjhb return; 1514177376Sjeff /* 1515177376Sjeff * If the priority has been elevated due to priority 1516177376Sjeff * propagation, we may have to move ourselves to a new 1517177376Sjeff * queue. This could be optimized to not re-add in some 1518177376Sjeff * cases. 1519177376Sjeff */ 1520165766Sjeff if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1521165762Sjeff sched_rem(td); 1522165762Sjeff td->td_priority = prio; 1523171482Sjeff sched_add(td, SRQ_BORROWING); 1524177009Sjeff return; 1525177009Sjeff } 1526177376Sjeff /* 1527177376Sjeff * If the thread is currently running we may have to adjust the lowpri 1528177376Sjeff * information so other cpus are aware of our current priority. 1529177376Sjeff */ 1530177009Sjeff if (TD_IS_RUNNING(td)) { 1531177376Sjeff tdq = TDQ_CPU(ts->ts_cpu); 1532177376Sjeff oldpri = td->td_priority; 1533177376Sjeff td->td_priority = prio; 1534176735Sjeff if (prio < tdq->tdq_lowpri) 1535171482Sjeff tdq->tdq_lowpri = prio; 1536176735Sjeff else if (tdq->tdq_lowpri == oldpri) 1537176735Sjeff tdq_setlowpri(tdq, td); 1538177376Sjeff return; 1539177009Sjeff } 1540177376Sjeff td->td_priority = prio; 1541109864Sjeff} 1542109864Sjeff 1543139453Sjhb/* 1544139453Sjhb * Update a thread's priority when it is lent another thread's 1545139453Sjhb * priority. 
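 *
 * For illustration, a hypothetical caller propagating a waiter's
 * priority to a lock owner (the turnstile code is the real caller, with
 * the owner's thread lock held) would pair the calls like this:
 *
 *	if (ex_waiter->td_priority < ex_owner->td_priority)
 *		sched_lend_prio(ex_owner, ex_waiter->td_priority);
 *
 * and, once the lock is released, hand back the loan while still
 * honoring any remaining waiters:
 *
 *	sched_unlend_prio(ex_owner, ex_highest_remaining_waiter_prio);
 *
 * The ex_* names are invented for the example.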
1546139453Sjhb */ 1547109864Sjeffvoid 1548139453Sjhbsched_lend_prio(struct thread *td, u_char prio) 1549139453Sjhb{ 1550139453Sjhb 1551139453Sjhb td->td_flags |= TDF_BORROWING; 1552139453Sjhb sched_thread_priority(td, prio); 1553139453Sjhb} 1554139453Sjhb 1555139453Sjhb/* 1556139453Sjhb * Restore a thread's priority when priority propagation is 1557139453Sjhb * over. The prio argument is the minimum priority the thread 1558139453Sjhb * needs to have to satisfy other possible priority lending 1559139453Sjhb * requests. If the thread's regular priority is less 1560139453Sjhb * important than prio, the thread will keep a priority boost 1561139453Sjhb * of prio. 1562139453Sjhb */ 1563139453Sjhbvoid 1564139453Sjhbsched_unlend_prio(struct thread *td, u_char prio) 1565139453Sjhb{ 1566139453Sjhb u_char base_pri; 1567139453Sjhb 1568139453Sjhb if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1569139453Sjhb td->td_base_pri <= PRI_MAX_TIMESHARE) 1570163709Sjb base_pri = td->td_user_pri; 1571139453Sjhb else 1572139453Sjhb base_pri = td->td_base_pri; 1573139453Sjhb if (prio >= base_pri) { 1574139455Sjhb td->td_flags &= ~TDF_BORROWING; 1575139453Sjhb sched_thread_priority(td, base_pri); 1576139453Sjhb } else 1577139453Sjhb sched_lend_prio(td, prio); 1578139453Sjhb} 1579139453Sjhb 1580171482Sjeff/* 1581171482Sjeff * Standard entry for setting the priority to an absolute value. 1582171482Sjeff */ 1583139453Sjhbvoid 1584139453Sjhbsched_prio(struct thread *td, u_char prio) 1585139453Sjhb{ 1586139453Sjhb u_char oldprio; 1587139453Sjhb 1588139453Sjhb /* First, update the base priority. */ 1589139453Sjhb td->td_base_pri = prio; 1590139453Sjhb 1591139453Sjhb /* 1592139455Sjhb * If the thread is borrowing another thread's priority, don't 1593139453Sjhb * ever lower the priority. 1594139453Sjhb */ 1595139453Sjhb if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1596139453Sjhb return; 1597139453Sjhb 1598139453Sjhb /* Change the real priority. */ 1599139453Sjhb oldprio = td->td_priority; 1600139453Sjhb sched_thread_priority(td, prio); 1601139453Sjhb 1602139453Sjhb /* 1603139453Sjhb * If the thread is on a turnstile, then let the turnstile update 1604139453Sjhb * its state. 1605139453Sjhb */ 1606139453Sjhb if (TD_ON_LOCK(td) && oldprio != prio) 1607139453Sjhb turnstile_adjust(td, oldprio); 1608139453Sjhb} 1609139455Sjhb 1610171482Sjeff/* 1611171482Sjeff * Set the base user priority, does not effect current running priority. 
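 *
 * For illustration, the user-level counterparts below are paired the
 * same way as the kernel lending above; a hypothetical caller (the
 * priority-inheriting user mutex code is the expected one) boosts and
 * later releases the owner with the thread lock held:
 *
 *	thread_lock(ex_owner);
 *	sched_lend_user_prio(ex_owner, ex_waiter_user_pri);
 *	thread_unlock(ex_owner);
 *
 *	thread_lock(ex_owner);
 *	sched_unlend_user_prio(ex_owner, ex_owner->td_base_user_pri);
 *	thread_unlock(ex_owner);
 *
 * Passing td_base_user_pri as the floor says no waiter still justifies
 * a boost, so the base user priority is restored.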
1612171482Sjeff */ 1613139453Sjhbvoid 1614163709Sjbsched_user_prio(struct thread *td, u_char prio) 1615161599Sdavidxu{ 1616161599Sdavidxu u_char oldprio; 1617161599Sdavidxu 1618163709Sjb td->td_base_user_pri = prio; 1619164939Sjulian if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1620164939Sjulian return; 1621163709Sjb oldprio = td->td_user_pri; 1622163709Sjb td->td_user_pri = prio; 1623161599Sdavidxu} 1624161599Sdavidxu 1625161599Sdavidxuvoid 1626161599Sdavidxusched_lend_user_prio(struct thread *td, u_char prio) 1627161599Sdavidxu{ 1628161599Sdavidxu u_char oldprio; 1629161599Sdavidxu 1630174536Sdavidxu THREAD_LOCK_ASSERT(td, MA_OWNED); 1631161599Sdavidxu td->td_flags |= TDF_UBORROWING; 1632164091Smaxim oldprio = td->td_user_pri; 1633163709Sjb td->td_user_pri = prio; 1634161599Sdavidxu} 1635161599Sdavidxu 1636161599Sdavidxuvoid 1637161599Sdavidxusched_unlend_user_prio(struct thread *td, u_char prio) 1638161599Sdavidxu{ 1639161599Sdavidxu u_char base_pri; 1640161599Sdavidxu 1641174536Sdavidxu THREAD_LOCK_ASSERT(td, MA_OWNED); 1642163709Sjb base_pri = td->td_base_user_pri; 1643161599Sdavidxu if (prio >= base_pri) { 1644161599Sdavidxu td->td_flags &= ~TDF_UBORROWING; 1645163709Sjb sched_user_prio(td, base_pri); 1646174536Sdavidxu } else { 1647161599Sdavidxu sched_lend_user_prio(td, prio); 1648174536Sdavidxu } 1649161599Sdavidxu} 1650161599Sdavidxu 1651171482Sjeff/* 1652174847Swkoszek * Block a thread for switching. Similar to thread_block() but does not 1653174847Swkoszek * bump the spin count. 1654174847Swkoszek */ 1655174847Swkoszekstatic inline struct mtx * 1656174847Swkoszekthread_block_switch(struct thread *td) 1657174847Swkoszek{ 1658174847Swkoszek struct mtx *lock; 1659174847Swkoszek 1660174847Swkoszek THREAD_LOCK_ASSERT(td, MA_OWNED); 1661174847Swkoszek lock = td->td_lock; 1662174847Swkoszek td->td_lock = &blocked_lock; 1663174847Swkoszek mtx_unlock_spin(lock); 1664174847Swkoszek 1665174847Swkoszek return (lock); 1666174847Swkoszek} 1667174847Swkoszek 1668174847Swkoszek/* 1669171713Sjeff * Handle migration from sched_switch(). This happens only for 1670171713Sjeff * cpu binding. 1671171713Sjeff */ 1672171713Sjeffstatic struct mtx * 1673171713Sjeffsched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1674171713Sjeff{ 1675171713Sjeff struct tdq *tdn; 1676171713Sjeff 1677171713Sjeff tdn = TDQ_CPU(td->td_sched->ts_cpu); 1678171713Sjeff#ifdef SMP 1679177435Sjeff tdq_load_rem(tdq, td); 1680171713Sjeff /* 1681171713Sjeff * Do the lock dance required to avoid LOR. We grab an extra 1682171713Sjeff * spinlock nesting to prevent preemption while we're 1683171713Sjeff * not holding either run-queue lock. 1684171713Sjeff */ 1685171713Sjeff spinlock_enter(); 1686171713Sjeff thread_block_switch(td); /* This releases the lock on tdq. */ 1687171713Sjeff TDQ_LOCK(tdn); 1688171713Sjeff tdq_add(tdn, td, flags); 1689177435Sjeff tdq_notify(tdn, td); 1690171713Sjeff /* 1691171713Sjeff * After we unlock tdn the new cpu still can't switch into this 1692171713Sjeff * thread until we've unblocked it in cpu_switch(). The lock 1693171713Sjeff * pointers may match in the case of HTT cores. Don't unlock here 1694171713Sjeff * or we can deadlock when the other CPU runs the IPI handler. 
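 *
 * For illustration, the interleaving being avoided is roughly:
 *
 *	this cpu			target cpu (IPI from tdq_notify())
 *	--------			----------
 *	TDQ_UNLOCK(tdn)			locks the shared tdq lock
 *	TDQ_LOCK(tdq) - same lock,	picks td to run, spins on td_lock
 *	spins forever			(still &blocked_lock) forever
 *
 * Neither side can make progress because unblocking td happens only in
 * cpu_switch(), which this cpu never reaches.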
1695171713Sjeff */ 1696171713Sjeff if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1697171713Sjeff TDQ_UNLOCK(tdn); 1698171713Sjeff TDQ_LOCK(tdq); 1699171713Sjeff } 1700171713Sjeff spinlock_exit(); 1701171713Sjeff#endif 1702171713Sjeff return (TDQ_LOCKPTR(tdn)); 1703171713Sjeff} 1704171713Sjeff 1705171713Sjeff/* 1706171482Sjeff * Release a thread that was blocked with thread_block_switch(). 1707171482Sjeff */ 1708171482Sjeffstatic inline void 1709171482Sjeffthread_unblock_switch(struct thread *td, struct mtx *mtx) 1710171482Sjeff{ 1711171482Sjeff atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1712171482Sjeff (uintptr_t)mtx); 1713171482Sjeff} 1714171482Sjeff 1715171482Sjeff/* 1716171482Sjeff * Switch threads. This function has to handle threads coming in while 1717171482Sjeff * blocked for some reason, running, or idle. It also must deal with 1718171482Sjeff * migrating a thread from one queue to another as running threads may 1719171482Sjeff * be assigned elsewhere via binding. 1720171482Sjeff */ 1721161599Sdavidxuvoid 1722135051Sjuliansched_switch(struct thread *td, struct thread *newtd, int flags) 1723109864Sjeff{ 1724165627Sjeff struct tdq *tdq; 1725164936Sjulian struct td_sched *ts; 1726171482Sjeff struct mtx *mtx; 1727171713Sjeff int srqflag; 1728171482Sjeff int cpuid; 1729109864Sjeff 1730170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1731177376Sjeff KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument")); 1732109864Sjeff 1733171482Sjeff cpuid = PCPU_GET(cpuid); 1734171482Sjeff tdq = TDQ_CPU(cpuid); 1735164936Sjulian ts = td->td_sched; 1736171713Sjeff mtx = td->td_lock; 1737171482Sjeff ts->ts_rltick = ticks; 1738133555Sjeff td->td_lastcpu = td->td_oncpu; 1739113339Sjulian td->td_oncpu = NOCPU; 1740132266Sjhb td->td_flags &= ~TDF_NEEDRESCHED; 1741144777Sups td->td_owepreempt = 0; 1742123434Sjeff /* 1743171482Sjeff * The lock pointer in an idle thread should never change. Reset it 1744171482Sjeff * to CAN_RUN as well. 1745123434Sjeff */ 1746167327Sjulian if (TD_IS_IDLETHREAD(td)) { 1747171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1748139334Sjeff TD_SET_CAN_RUN(td); 1749170293Sjeff } else if (TD_IS_RUNNING(td)) { 1750171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1751171713Sjeff srqflag = (flags & SW_PREEMPT) ? 1752170293Sjeff SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1753171713Sjeff SRQ_OURSELF|SRQ_YIELDING; 1754171713Sjeff if (ts->ts_cpu == cpuid) 1755177435Sjeff tdq_runq_add(tdq, td, srqflag); 1756171713Sjeff else 1757171713Sjeff mtx = sched_switch_migrate(tdq, td, srqflag); 1758171482Sjeff } else { 1759171482Sjeff /* This thread must be going to sleep. */ 1760171482Sjeff TDQ_LOCK(tdq); 1761171482Sjeff mtx = thread_block_switch(td); 1762177435Sjeff tdq_load_rem(tdq, td); 1763171482Sjeff } 1764171482Sjeff /* 1765171482Sjeff * We enter here with the thread blocked and assigned to the 1766171482Sjeff * appropriate cpu run-queue or sleep-queue and with the current 1767171482Sjeff * thread-queue locked. 1768171482Sjeff */ 1769171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1770171482Sjeff newtd = choosethread(); 1771171482Sjeff /* 1772171482Sjeff * Call the MD code to switch contexts if necessary. 
1773171482Sjeff */ 1774145256Sjkoshy if (td != newtd) { 1775145256Sjkoshy#ifdef HWPMC_HOOKS 1776145256Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1777145256Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1778145256Sjkoshy#endif 1779174629Sjeff lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 1780172411Sjeff TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1781171482Sjeff cpu_switch(td, newtd, mtx); 1782171482Sjeff /* 1783171482Sjeff * We may return from cpu_switch on a different cpu. However, 1784171482Sjeff * we always return with td_lock pointing to the current cpu's 1785171482Sjeff * run queue lock. 1786171482Sjeff */ 1787171482Sjeff cpuid = PCPU_GET(cpuid); 1788171482Sjeff tdq = TDQ_CPU(cpuid); 1789174629Sjeff lock_profile_obtain_lock_success( 1790174629Sjeff &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 1791145256Sjkoshy#ifdef HWPMC_HOOKS 1792145256Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1793145256Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1794145256Sjkoshy#endif 1795171482Sjeff } else 1796171482Sjeff thread_unblock_switch(td, mtx); 1797171482Sjeff /* 1798171482Sjeff * Assert that all went well and return. 1799171482Sjeff */ 1800171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1801171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1802171482Sjeff td->td_oncpu = cpuid; 1803109864Sjeff} 1804109864Sjeff 1805171482Sjeff/* 1806171482Sjeff * Adjust thread priorities as a result of a nice request. 1807171482Sjeff */ 1808109864Sjeffvoid 1809130551Sjuliansched_nice(struct proc *p, int nice) 1810109864Sjeff{ 1811109864Sjeff struct thread *td; 1812109864Sjeff 1813130551Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 1814165762Sjeff 1815130551Sjulian p->p_nice = nice; 1816163709Sjb FOREACH_THREAD_IN_PROC(p, td) { 1817170293Sjeff thread_lock(td); 1818163709Sjb sched_priority(td); 1819165762Sjeff sched_prio(td, td->td_base_user_pri); 1820170293Sjeff thread_unlock(td); 1821130551Sjulian } 1822109864Sjeff} 1823109864Sjeff 1824171482Sjeff/* 1825171482Sjeff * Record the sleep time for the interactivity scorer. 1826171482Sjeff */ 1827109864Sjeffvoid 1828177085Sjeffsched_sleep(struct thread *td, int prio) 1829109864Sjeff{ 1830165762Sjeff 1831170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1832109864Sjeff 1833172264Sjeff td->td_slptick = ticks; 1834177085Sjeff if (TD_IS_SUSPENDED(td) || prio <= PSOCK) 1835177085Sjeff td->td_flags |= TDF_CANSWAP; 1836177085Sjeff if (static_boost && prio) 1837177085Sjeff sched_prio(td, prio); 1838109864Sjeff} 1839109864Sjeff 1840171482Sjeff/* 1841171482Sjeff * Schedule a thread to resume execution and record how long it voluntarily 1842171482Sjeff * slept. We also update the pctcpu, interactivity, and priority. 1843171482Sjeff */ 1844109864Sjeffvoid 1845109864Sjeffsched_wakeup(struct thread *td) 1846109864Sjeff{ 1847166229Sjeff struct td_sched *ts; 1848171482Sjeff int slptick; 1849165762Sjeff 1850170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1851166229Sjeff ts = td->td_sched; 1852177085Sjeff td->td_flags &= ~TDF_CANSWAP; 1853109864Sjeff /* 1854165762Sjeff * If we slept for more than a tick update our interactivity and 1855165762Sjeff * priority. 
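 *
 * For illustration, a thread that slept for 25 ticks is credited below
 * with
 *
 *	hzticks = 25 << SCHED_TICK_SHIFT
 *
 * of sleep time; the shift keeps the stored sleep history in the same
 * fixed-point units as the run time charged in sched_clock(), so the
 * interactivity score compares like with like.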
1856109864Sjeff */ 1857172264Sjeff slptick = td->td_slptick; 1858172264Sjeff td->td_slptick = 0; 1859171482Sjeff if (slptick && slptick != ticks) { 1860166208Sjeff u_int hzticks; 1861109864Sjeff 1862171482Sjeff hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1863171482Sjeff ts->ts_slptime += hzticks; 1864165819Sjeff sched_interact_update(td); 1865166229Sjeff sched_pctcpu_update(ts); 1866109864Sjeff } 1867166229Sjeff /* Reset the slice value after we sleep. */ 1868166229Sjeff ts->ts_slice = sched_slice; 1869166190Sjeff sched_add(td, SRQ_BORING); 1870109864Sjeff} 1871109864Sjeff 1872109864Sjeff/* 1873109864Sjeff * Penalize the parent for creating a new child and initialize the child's 1874109864Sjeff * priority. 1875109864Sjeff */ 1876109864Sjeffvoid 1877163709Sjbsched_fork(struct thread *td, struct thread *child) 1878109864Sjeff{ 1879170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1880164936Sjulian sched_fork_thread(td, child); 1881165762Sjeff /* 1882165762Sjeff * Penalize the parent and child for forking. 1883165762Sjeff */ 1884165762Sjeff sched_interact_fork(child); 1885165762Sjeff sched_priority(child); 1886171482Sjeff td->td_sched->ts_runtime += tickincr; 1887165762Sjeff sched_interact_update(td); 1888165762Sjeff sched_priority(td); 1889164936Sjulian} 1890109864Sjeff 1891171482Sjeff/* 1892171482Sjeff * Fork a new thread, may be within the same process. 1893171482Sjeff */ 1894164936Sjulianvoid 1895164936Sjuliansched_fork_thread(struct thread *td, struct thread *child) 1896164936Sjulian{ 1897164936Sjulian struct td_sched *ts; 1898164936Sjulian struct td_sched *ts2; 1899164936Sjulian 1900177426Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1901165762Sjeff /* 1902165762Sjeff * Initialize child. 1903165762Sjeff */ 1904177426Sjeff ts = td->td_sched; 1905177426Sjeff ts2 = child->td_sched; 1906171482Sjeff child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1907176735Sjeff child->td_cpuset = cpuset_ref(td->td_cpuset); 1908164936Sjulian ts2->ts_cpu = ts->ts_cpu; 1909177426Sjeff ts2->ts_flags = 0; 1910165762Sjeff /* 1911165762Sjeff * Grab our parents cpu estimation information and priority. 1912165762Sjeff */ 1913164936Sjulian ts2->ts_ticks = ts->ts_ticks; 1914164936Sjulian ts2->ts_ltick = ts->ts_ltick; 1915164936Sjulian ts2->ts_ftick = ts->ts_ftick; 1916165762Sjeff child->td_user_pri = td->td_user_pri; 1917165762Sjeff child->td_base_user_pri = td->td_base_user_pri; 1918165762Sjeff /* 1919165762Sjeff * And update interactivity score. 1920165762Sjeff */ 1921171482Sjeff ts2->ts_slptime = ts->ts_slptime; 1922171482Sjeff ts2->ts_runtime = ts->ts_runtime; 1923165762Sjeff ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 1924113357Sjeff} 1925113357Sjeff 1926171482Sjeff/* 1927171482Sjeff * Adjust the priority class of a thread. 1928171482Sjeff */ 1929113357Sjeffvoid 1930163709Sjbsched_class(struct thread *td, int class) 1931113357Sjeff{ 1932113357Sjeff 1933170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1934163709Sjb if (td->td_pri_class == class) 1935113357Sjeff return; 1936163709Sjb td->td_pri_class = class; 1937109864Sjeff} 1938109864Sjeff 1939109864Sjeff/* 1940109864Sjeff * Return some of the child's priority and interactivity to the parent. 
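 *
 * For illustration, after sched_fork_thread() above the child's
 * scheduler state relates to the parent's as follows:
 *
 *	inherited:	ts_cpu, the ts_ticks/ts_ltick/ts_ftick cpu
 *			estimate, ts_runtime/ts_slptime, td_user_pri
 *	reset:		ts_flags = 0, ts_slice = 1 (re-evaluate quickly)
 *
 * The donation below runs the other way: the child's run time is handed
 * back to the parent without the matching sleep time, which is how the
 * cost of expensive children shows up in the parent's score and,
 * through inheritance, in the children it forks afterwards.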
1941109864Sjeff */ 1942109864Sjeffvoid 1943164939Sjuliansched_exit(struct proc *p, struct thread *child) 1944109864Sjeff{ 1945165762Sjeff struct thread *td; 1946164939Sjulian 1947163709Sjb CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 1948173600Sjulian child, child->td_name, child->td_priority); 1949113372Sjeff 1950177368Sjeff PROC_LOCK_ASSERT(p, MA_OWNED); 1951165762Sjeff td = FIRST_THREAD_IN_PROC(p); 1952165762Sjeff sched_exit_thread(td, child); 1953113372Sjeff} 1954113372Sjeff 1955171482Sjeff/* 1956171482Sjeff * Penalize another thread for the time spent on this one. This helps to 1957171482Sjeff * worsen the priority and interactivity of processes which schedule batch 1958171482Sjeff * jobs such as make. This has little effect on the make process itself but 1959171482Sjeff * causes new processes spawned by it to receive worse scores immediately. 1960171482Sjeff */ 1961113372Sjeffvoid 1962164939Sjuliansched_exit_thread(struct thread *td, struct thread *child) 1963164936Sjulian{ 1964165762Sjeff 1965164939Sjulian CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 1966173600Sjulian child, child->td_name, child->td_priority); 1967164939Sjulian 1968165762Sjeff /* 1969165762Sjeff * Give the child's runtime to the parent without returning the 1970165762Sjeff * sleep time as a penalty to the parent. This causes shells that 1971165762Sjeff * launch expensive things to mark their children as expensive. 1972165762Sjeff */ 1973170293Sjeff thread_lock(td); 1974171482Sjeff td->td_sched->ts_runtime += child->td_sched->ts_runtime; 1975164939Sjulian sched_interact_update(td); 1976165762Sjeff sched_priority(td); 1977170293Sjeff thread_unlock(td); 1978164936Sjulian} 1979164936Sjulian 1980177005Sjeffvoid 1981177005Sjeffsched_preempt(struct thread *td) 1982177005Sjeff{ 1983177005Sjeff struct tdq *tdq; 1984177005Sjeff 1985177005Sjeff thread_lock(td); 1986177005Sjeff tdq = TDQ_SELF(); 1987177005Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1988177005Sjeff tdq->tdq_ipipending = 0; 1989177005Sjeff if (td->td_priority > tdq->tdq_lowpri) { 1990177005Sjeff if (td->td_critnest > 1) 1991177005Sjeff td->td_owepreempt = 1; 1992177005Sjeff else 1993177005Sjeff mi_switch(SW_INVOL | SW_PREEMPT, NULL); 1994177005Sjeff } 1995177005Sjeff thread_unlock(td); 1996177005Sjeff} 1997177005Sjeff 1998171482Sjeff/* 1999171482Sjeff * Fix priorities on return to user-space. Priorities may be elevated due 2000171482Sjeff * to static priorities in msleep() or similar. 2001171482Sjeff */ 2002164936Sjulianvoid 2003164936Sjuliansched_userret(struct thread *td) 2004164936Sjulian{ 2005164936Sjulian /* 2006164936Sjulian * XXX we cheat slightly on the locking here to avoid locking in 2007164936Sjulian * the usual case. Setting td_priority here is essentially an 2008164936Sjulian * incomplete workaround for not setting it properly elsewhere. 2009164936Sjulian * Now that some interrupt handlers are threads, not setting it 2010164936Sjulian * properly elsewhere can clobber it in the window between setting 2011164936Sjulian * it here and returning to user mode, so don't waste time setting 2012164936Sjulian * it perfectly here. 
2013164936Sjulian */ 2014164936Sjulian KASSERT((td->td_flags & TDF_BORROWING) == 0, 2015164936Sjulian ("thread with borrowed priority returning to userland")); 2016164936Sjulian if (td->td_priority != td->td_user_pri) { 2017170293Sjeff thread_lock(td); 2018164936Sjulian td->td_priority = td->td_user_pri; 2019164936Sjulian td->td_base_pri = td->td_user_pri; 2020177005Sjeff tdq_setlowpri(TDQ_SELF(), td); 2021170293Sjeff thread_unlock(td); 2022164936Sjulian } 2023164936Sjulian} 2024164936Sjulian 2025171482Sjeff/* 2026171482Sjeff * Handle a stathz tick. This is really only relevant for timeshare 2027171482Sjeff * threads. 2028171482Sjeff */ 2029164936Sjulianvoid 2030121127Sjeffsched_clock(struct thread *td) 2031109864Sjeff{ 2032164936Sjulian struct tdq *tdq; 2033164936Sjulian struct td_sched *ts; 2034109864Sjeff 2035171482Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2036164936Sjulian tdq = TDQ_SELF(); 2037172409Sjeff#ifdef SMP 2038133427Sjeff /* 2039172409Sjeff * We run the long term load balancer infrequently on the first cpu. 2040172409Sjeff */ 2041172409Sjeff if (balance_tdq == tdq) { 2042172409Sjeff if (balance_ticks && --balance_ticks == 0) 2043172409Sjeff sched_balance(); 2044172409Sjeff } 2045172409Sjeff#endif 2046172409Sjeff /* 2047165766Sjeff * Advance the insert index once for each tick to ensure that all 2048165766Sjeff * threads get a chance to run. 2049133427Sjeff */ 2050165766Sjeff if (tdq->tdq_idx == tdq->tdq_ridx) { 2051165766Sjeff tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 2052165766Sjeff if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 2053165766Sjeff tdq->tdq_ridx = tdq->tdq_idx; 2054165766Sjeff } 2055165766Sjeff ts = td->td_sched; 2056175104Sjeff if (td->td_pri_class & PRI_FIFO_BIT) 2057113357Sjeff return; 2058175104Sjeff if (td->td_pri_class == PRI_TIMESHARE) { 2059175104Sjeff /* 2060175104Sjeff * We used a tick; charge it to the thread so 2061175104Sjeff * that we can compute our interactivity. 2062175104Sjeff */ 2063175104Sjeff td->td_sched->ts_runtime += tickincr; 2064175104Sjeff sched_interact_update(td); 2065177009Sjeff sched_priority(td); 2066175104Sjeff } 2067113357Sjeff /* 2068109864Sjeff * We used up one time slice. 2069109864Sjeff */ 2070164936Sjulian if (--ts->ts_slice > 0) 2071113357Sjeff return; 2072109864Sjeff /* 2073177009Sjeff * We're out of time, force a requeue at userret(). 2074109864Sjeff */ 2075177009Sjeff ts->ts_slice = sched_slice; 2076113357Sjeff td->td_flags |= TDF_NEEDRESCHED; 2077109864Sjeff} 2078109864Sjeff 2079171482Sjeff/* 2080171482Sjeff * Called once per hz tick. Used for cpu utilization information. This 2081171482Sjeff * is easier than trying to scale based on stathz. 2082171482Sjeff */ 2083171482Sjeffvoid 2084171482Sjeffsched_tick(void) 2085171482Sjeff{ 2086171482Sjeff struct td_sched *ts; 2087171482Sjeff 2088171482Sjeff ts = curthread->td_sched; 2089171482Sjeff /* Adjust ticks for pctcpu */ 2090171482Sjeff ts->ts_ticks += 1 << SCHED_TICK_SHIFT; 2091171482Sjeff ts->ts_ltick = ticks; 2092171482Sjeff /* 2093171482Sjeff * Update if we've exceeded our desired tick threshhold by over one 2094171482Sjeff * second. 2095171482Sjeff */ 2096171482Sjeff if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) 2097171482Sjeff sched_pctcpu_update(ts); 2098171482Sjeff} 2099171482Sjeff 2100171482Sjeff/* 2101171482Sjeff * Return whether the current CPU has runnable tasks. Used for in-kernel 2102171482Sjeff * cooperative idle threads. 
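 *
 * For illustration, a cooperative caller (hypothetical ex_idle_spin())
 * would poll rather than block:
 *
 *	while (!sched_runnable())
 *		cpu_spinwait();
 *	thread_lock(curthread);
 *	mi_switch(SW_VOL, NULL);
 *	thread_unlock(curthread);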
2103171482Sjeff */ 2104109864Sjeffint 2105109864Sjeffsched_runnable(void) 2106109864Sjeff{ 2107164936Sjulian struct tdq *tdq; 2108115998Sjeff int load; 2109109864Sjeff 2110115998Sjeff load = 1; 2111115998Sjeff 2112164936Sjulian tdq = TDQ_SELF(); 2113121605Sjeff if ((curthread->td_flags & TDF_IDLETD) != 0) { 2114165620Sjeff if (tdq->tdq_load > 0) 2115121605Sjeff goto out; 2116121605Sjeff } else 2117165620Sjeff if (tdq->tdq_load - 1 > 0) 2118121605Sjeff goto out; 2119115998Sjeff load = 0; 2120115998Sjeffout: 2121115998Sjeff return (load); 2122109864Sjeff} 2123109864Sjeff 2124171482Sjeff/* 2125171482Sjeff * Choose the highest priority thread to run. The thread is removed from 2126171482Sjeff * the run-queue while running however the load remains. For SMP we set 2127171482Sjeff * the tdq in the global idle bitmask if it idles here. 2128171482Sjeff */ 2129166190Sjeffstruct thread * 2130109970Sjeffsched_choose(void) 2131109970Sjeff{ 2132177435Sjeff struct thread *td; 2133164936Sjulian struct tdq *tdq; 2134109970Sjeff 2135164936Sjulian tdq = TDQ_SELF(); 2136171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2137177435Sjeff td = tdq_choose(tdq); 2138177435Sjeff if (td) { 2139177435Sjeff td->td_sched->ts_ltick = ticks; 2140177435Sjeff tdq_runq_rem(tdq, td); 2141177435Sjeff return (td); 2142109864Sjeff } 2143176735Sjeff return (PCPU_GET(idlethread)); 2144109864Sjeff} 2145109864Sjeff 2146171482Sjeff/* 2147171482Sjeff * Set owepreempt if necessary. Preemption never happens directly in ULE, 2148171482Sjeff * we always request it once we exit a critical section. 2149171482Sjeff */ 2150171482Sjeffstatic inline void 2151171482Sjeffsched_setpreempt(struct thread *td) 2152166190Sjeff{ 2153166190Sjeff struct thread *ctd; 2154166190Sjeff int cpri; 2155166190Sjeff int pri; 2156166190Sjeff 2157177005Sjeff THREAD_LOCK_ASSERT(curthread, MA_OWNED); 2158177005Sjeff 2159166190Sjeff ctd = curthread; 2160166190Sjeff pri = td->td_priority; 2161166190Sjeff cpri = ctd->td_priority; 2162177005Sjeff if (pri < cpri) 2163177005Sjeff ctd->td_flags |= TDF_NEEDRESCHED; 2164166190Sjeff if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 2165171482Sjeff return; 2166177005Sjeff if (!sched_shouldpreempt(pri, cpri, 0)) 2167171482Sjeff return; 2168171482Sjeff ctd->td_owepreempt = 1; 2169166190Sjeff} 2170166190Sjeff 2171171482Sjeff/* 2172177009Sjeff * Add a thread to a thread queue. Select the appropriate runq and add the 2173177009Sjeff * thread to it. This is the internal function called when the tdq is 2174177009Sjeff * predetermined. 2175171482Sjeff */ 2176109864Sjeffvoid 2177171482Sjefftdq_add(struct tdq *tdq, struct thread *td, int flags) 2178109864Sjeff{ 2179109864Sjeff 2180171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2181166190Sjeff KASSERT((td->td_inhibitors == 0), 2182166190Sjeff ("sched_add: trying to run inhibited thread")); 2183166190Sjeff KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 2184166190Sjeff ("sched_add: bad thread state")); 2185172207Sjeff KASSERT(td->td_flags & TDF_INMEM, 2186172207Sjeff ("sched_add: thread swapped out")); 2187171482Sjeff 2188171482Sjeff if (td->td_priority < tdq->tdq_lowpri) 2189171482Sjeff tdq->tdq_lowpri = td->td_priority; 2190177435Sjeff tdq_runq_add(tdq, td, flags); 2191177435Sjeff tdq_load_add(tdq, td); 2192171482Sjeff} 2193171482Sjeff 2194171482Sjeff/* 2195171482Sjeff * Select the target thread queue and add a thread to it. Request 2196171482Sjeff * preemption or IPI a remote processor if required. 
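 *
 * For illustration, the callers in this file use the srq flags to say
 * why the thread is being queued:
 *
 *	sched_add(td, SRQ_BORING)	ordinary wakeup (sched_wakeup())
 *	sched_add(td, SRQ_BORROWING)	requeue after a priority change
 *					(sched_thread_priority())
 *	flags with SRQ_YIELDING set	the caller is about to switch
 *					anyway, so sched_setpreempt() is
 *					skipped at the end of sched_add()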
2197171482Sjeff */ 2198171482Sjeffvoid 2199171482Sjeffsched_add(struct thread *td, int flags) 2200171482Sjeff{ 2201171482Sjeff struct tdq *tdq; 2202171482Sjeff#ifdef SMP 2203171482Sjeff int cpu; 2204171482Sjeff#endif 2205171482Sjeff CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 2206173600Sjulian td, td->td_name, td->td_priority, curthread, 2207173600Sjulian curthread->td_name); 2208171482Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2209166108Sjeff /* 2210171482Sjeff * Recalculate the priority before we select the target cpu or 2211171482Sjeff * run-queue. 2212166108Sjeff */ 2213171482Sjeff if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2214171482Sjeff sched_priority(td); 2215171482Sjeff#ifdef SMP 2216171482Sjeff /* 2217171482Sjeff * Pick the destination cpu and if it isn't ours transfer to the 2218171482Sjeff * target cpu. 2219171482Sjeff */ 2220177435Sjeff cpu = sched_pickcpu(td, flags); 2221177435Sjeff tdq = sched_setcpu(td, cpu, flags); 2222171482Sjeff tdq_add(tdq, td, flags); 2223177009Sjeff if (cpu != PCPU_GET(cpuid)) { 2224177435Sjeff tdq_notify(tdq, td); 2225166108Sjeff return; 2226166108Sjeff } 2227171482Sjeff#else 2228171482Sjeff tdq = TDQ_SELF(); 2229171482Sjeff TDQ_LOCK(tdq); 2230171482Sjeff /* 2231171482Sjeff * Now that the thread is moving to the run-queue, set the lock 2232171482Sjeff * to the scheduler's lock. 2233171482Sjeff */ 2234171482Sjeff thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2235171482Sjeff tdq_add(tdq, td, flags); 2236166108Sjeff#endif 2237171482Sjeff if (!(flags & SRQ_YIELDING)) 2238171482Sjeff sched_setpreempt(td); 2239109864Sjeff} 2240109864Sjeff 2241171482Sjeff/* 2242171482Sjeff * Remove a thread from a run-queue without running it. This is used 2243171482Sjeff * when we're stealing a thread from a remote queue. Otherwise all threads 2244171482Sjeff * exit by calling sched_exit_thread() and sched_throw() themselves. 2245171482Sjeff */ 2246109864Sjeffvoid 2247121127Sjeffsched_rem(struct thread *td) 2248109864Sjeff{ 2249164936Sjulian struct tdq *tdq; 2250113357Sjeff 2251139316Sjeff CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 2252173600Sjulian td, td->td_name, td->td_priority, curthread, 2253173600Sjulian curthread->td_name); 2254177435Sjeff tdq = TDQ_CPU(td->td_sched->ts_cpu); 2255171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2256171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2257166190Sjeff KASSERT(TD_ON_RUNQ(td), 2258164936Sjulian ("sched_rem: thread not on run queue")); 2259177435Sjeff tdq_runq_rem(tdq, td); 2260177435Sjeff tdq_load_rem(tdq, td); 2261166190Sjeff TD_SET_CAN_RUN(td); 2262176735Sjeff if (td->td_priority == tdq->tdq_lowpri) 2263176735Sjeff tdq_setlowpri(tdq, NULL); 2264109864Sjeff} 2265109864Sjeff 2266171482Sjeff/* 2267171482Sjeff * Fetch cpu utilization information. Updates on demand. 2268171482Sjeff */ 2269109864Sjefffixpt_t 2270121127Sjeffsched_pctcpu(struct thread *td) 2271109864Sjeff{ 2272109864Sjeff fixpt_t pctcpu; 2273164936Sjulian struct td_sched *ts; 2274109864Sjeff 2275109864Sjeff pctcpu = 0; 2276164936Sjulian ts = td->td_sched; 2277164936Sjulian if (ts == NULL) 2278121290Sjeff return (0); 2279109864Sjeff 2280170293Sjeff thread_lock(td); 2281164936Sjulian if (ts->ts_ticks) { 2282109864Sjeff int rtick; 2283109864Sjeff 2284165796Sjeff sched_pctcpu_update(ts); 2285109864Sjeff /* How many rtick per second ? 
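 *
 * For illustration, with the usual FSCALE of 2048 (FSHIFT = 11) and
 * hz = 1000, a thread that ran rtick = 500 of the last hz ticks yields
 *
 *	pctcpu = (2048 * ((2048 * 500) / 1000)) >> 11 = 1024 = FSCALE / 2
 *
 * i.e. 50% cpu in the fixpt_t units that ps(1) and top(1) expect.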
*/ 2286165762Sjeff rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2287165762Sjeff pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 2288109864Sjeff } 2289170293Sjeff thread_unlock(td); 2290109864Sjeff 2291109864Sjeff return (pctcpu); 2292109864Sjeff} 2293109864Sjeff 2294176735Sjeff/* 2295176735Sjeff * Enforce affinity settings for a thread. Called after adjustments to 2296176735Sjeff * cpumask. 2297176735Sjeff */ 2298176729Sjeffvoid 2299176729Sjeffsched_affinity(struct thread *td) 2300176729Sjeff{ 2301176735Sjeff#ifdef SMP 2302176735Sjeff struct td_sched *ts; 2303176735Sjeff int cpu; 2304176735Sjeff 2305176735Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2306176735Sjeff ts = td->td_sched; 2307176735Sjeff if (THREAD_CAN_SCHED(td, ts->ts_cpu)) 2308176735Sjeff return; 2309176735Sjeff if (!TD_IS_RUNNING(td)) 2310176735Sjeff return; 2311176735Sjeff td->td_flags |= TDF_NEEDRESCHED; 2312176735Sjeff if (!THREAD_CAN_MIGRATE(td)) 2313176735Sjeff return; 2314176735Sjeff /* 2315176735Sjeff * Assign the new cpu and force a switch before returning to 2316176735Sjeff * userspace. If the target thread is not running locally send 2317176735Sjeff * an ipi to force the issue. 2318176735Sjeff */ 2319176735Sjeff cpu = ts->ts_cpu; 2320177435Sjeff ts->ts_cpu = sched_pickcpu(td, 0); 2321176735Sjeff if (cpu != PCPU_GET(cpuid)) 2322176735Sjeff ipi_selected(1 << cpu, IPI_PREEMPT); 2323176735Sjeff#endif 2324176729Sjeff} 2325176729Sjeff 2326171482Sjeff/* 2327171482Sjeff * Bind a thread to a target cpu. 2328171482Sjeff */ 2329122038Sjeffvoid 2330122038Sjeffsched_bind(struct thread *td, int cpu) 2331122038Sjeff{ 2332164936Sjulian struct td_sched *ts; 2333122038Sjeff 2334171713Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 2335164936Sjulian ts = td->td_sched; 2336166137Sjeff if (ts->ts_flags & TSF_BOUND) 2337166152Sjeff sched_unbind(td); 2338164936Sjulian ts->ts_flags |= TSF_BOUND; 2339166137Sjeff sched_pin(); 2340123433Sjeff if (PCPU_GET(cpuid) == cpu) 2341122038Sjeff return; 2342166137Sjeff ts->ts_cpu = cpu; 2343122038Sjeff /* When we return from mi_switch we'll be on the correct cpu. */ 2344131527Sphk mi_switch(SW_VOL, NULL); 2345122038Sjeff} 2346122038Sjeff 2347171482Sjeff/* 2348171482Sjeff * Release a bound thread. 2349171482Sjeff */ 2350122038Sjeffvoid 2351122038Sjeffsched_unbind(struct thread *td) 2352122038Sjeff{ 2353165762Sjeff struct td_sched *ts; 2354165762Sjeff 2355170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2356165762Sjeff ts = td->td_sched; 2357166137Sjeff if ((ts->ts_flags & TSF_BOUND) == 0) 2358166137Sjeff return; 2359165762Sjeff ts->ts_flags &= ~TSF_BOUND; 2360165762Sjeff sched_unpin(); 2361122038Sjeff} 2362122038Sjeff 2363109864Sjeffint 2364145256Sjkoshysched_is_bound(struct thread *td) 2365145256Sjkoshy{ 2366170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2367164936Sjulian return (td->td_sched->ts_flags & TSF_BOUND); 2368145256Sjkoshy} 2369145256Sjkoshy 2370171482Sjeff/* 2371171482Sjeff * Basic yield call. 2372171482Sjeff */ 2373159630Sdavidxuvoid 2374159630Sdavidxusched_relinquish(struct thread *td) 2375159630Sdavidxu{ 2376170293Sjeff thread_lock(td); 2377170293Sjeff SCHED_STAT_INC(switch_relinquish); 2378159630Sdavidxu mi_switch(SW_VOL, NULL); 2379170293Sjeff thread_unlock(td); 2380159630Sdavidxu} 2381159630Sdavidxu 2382171482Sjeff/* 2383171482Sjeff * Return the total system load. 
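 *
 * For illustration, the value is the sum of each cpu's tdq_sysload, so
 * a 4-cpu machine with one runnable non-interrupt thread per cpu
 * reports roughly 4; the periodic loadav() computation in kern_synch.c
 * is the main consumer, feeding the familiar load averages.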
2384171482Sjeff */ 2385145256Sjkoshyint 2386125289Sjeffsched_load(void) 2387125289Sjeff{ 2388125289Sjeff#ifdef SMP 2389125289Sjeff int total; 2390125289Sjeff int i; 2391125289Sjeff 2392125289Sjeff total = 0; 2393176735Sjeff for (i = 0; i <= mp_maxid; i++) 2394176735Sjeff total += TDQ_CPU(i)->tdq_sysload; 2395125289Sjeff return (total); 2396125289Sjeff#else 2397165620Sjeff return (TDQ_SELF()->tdq_sysload); 2398125289Sjeff#endif 2399125289Sjeff} 2400125289Sjeff 2401125289Sjeffint 2402109864Sjeffsched_sizeof_proc(void) 2403109864Sjeff{ 2404109864Sjeff return (sizeof(struct proc)); 2405109864Sjeff} 2406109864Sjeff 2407109864Sjeffint 2408109864Sjeffsched_sizeof_thread(void) 2409109864Sjeff{ 2410109864Sjeff return (sizeof(struct thread) + sizeof(struct td_sched)); 2411109864Sjeff} 2412159570Sdavidxu 2413166190Sjeff/* 2414166190Sjeff * The actual idle process. 2415166190Sjeff */ 2416166190Sjeffvoid 2417166190Sjeffsched_idletd(void *dummy) 2418166190Sjeff{ 2419166190Sjeff struct thread *td; 2420171482Sjeff struct tdq *tdq; 2421166190Sjeff 2422166190Sjeff td = curthread; 2423171482Sjeff tdq = TDQ_SELF(); 2424166190Sjeff mtx_assert(&Giant, MA_NOTOWNED); 2425171482Sjeff /* ULE relies on preemption for idle interruption. */ 2426171482Sjeff for (;;) { 2427171482Sjeff#ifdef SMP 2428171482Sjeff if (tdq_idled(tdq)) 2429171482Sjeff cpu_idle(); 2430171482Sjeff#else 2431166190Sjeff cpu_idle(); 2432171482Sjeff#endif 2433171482Sjeff } 2434166190Sjeff} 2435166190Sjeff 2436170293Sjeff/* 2437170293Sjeff * A CPU is entering for the first time or a thread is exiting. 2438170293Sjeff */ 2439170293Sjeffvoid 2440170293Sjeffsched_throw(struct thread *td) 2441170293Sjeff{ 2442172411Sjeff struct thread *newtd; 2443171482Sjeff struct tdq *tdq; 2444171482Sjeff 2445171482Sjeff tdq = TDQ_SELF(); 2446170293Sjeff if (td == NULL) { 2447171482Sjeff /* Correct spinlock nesting and acquire the correct lock. */ 2448171482Sjeff TDQ_LOCK(tdq); 2449170293Sjeff spinlock_exit(); 2450170293Sjeff } else { 2451171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2452177435Sjeff tdq_load_rem(tdq, td); 2453174629Sjeff lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 2454170293Sjeff } 2455170293Sjeff KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 2456172411Sjeff newtd = choosethread(); 2457172411Sjeff TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 2458170293Sjeff PCPU_SET(switchtime, cpu_ticks()); 2459170293Sjeff PCPU_SET(switchticks, ticks); 2460172411Sjeff cpu_throw(td, newtd); /* doesn't return */ 2461170293Sjeff} 2462170293Sjeff 2463171482Sjeff/* 2464171482Sjeff * This is called from fork_exit(). Just acquire the correct locks and 2465171482Sjeff * let fork do the rest of the work. 2466171482Sjeff */ 2467170293Sjeffvoid 2468170600Sjeffsched_fork_exit(struct thread *td) 2469170293Sjeff{ 2470171482Sjeff struct td_sched *ts; 2471171482Sjeff struct tdq *tdq; 2472171482Sjeff int cpuid; 2473170293Sjeff 2474170293Sjeff /* 2475170293Sjeff * Finish setting up thread glue so that it begins execution in a 2476171482Sjeff * non-nested critical section with the scheduler lock held. 
2477170293Sjeff */ 2478171482Sjeff cpuid = PCPU_GET(cpuid); 2479171482Sjeff tdq = TDQ_CPU(cpuid); 2480171482Sjeff ts = td->td_sched; 2481171482Sjeff if (TD_IS_IDLETHREAD(td)) 2482171482Sjeff td->td_lock = TDQ_LOCKPTR(tdq); 2483171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2484171482Sjeff td->td_oncpu = cpuid; 2485172411Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 2486174629Sjeff lock_profile_obtain_lock_success( 2487174629Sjeff &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 2488170293Sjeff} 2489170293Sjeff 2490177435SjeffSYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler"); 2491171482SjeffSYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, 2492165762Sjeff "Scheduler name"); 2493171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 2494171482Sjeff "Slice size for timeshare threads"); 2495171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, 2496171482Sjeff "Interactivity score threshold"); 2497171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh, 2498171482Sjeff 0,"Min priority for preemption, lower priorities have greater precedence"); 2499177085SjeffSYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 2500177085Sjeff 0,"Controls whether static kernel priorities are assigned to sleeping threads."); 2501166108Sjeff#ifdef SMP 2502171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, 2503171482Sjeff "Number of hz ticks to keep thread affinity for"); 2504171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, 2505171482Sjeff "Enables the long-term load balancer"); 2506172409SjeffSYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW, 2507172409Sjeff &balance_interval, 0, 2508172409Sjeff "Average frequency in stathz ticks to run the long-term balancer"); 2509171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, 2510171482Sjeff "Steals work from another hyper-threaded core on idle"); 2511171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, 2512171482Sjeff "Attempts to steal work from other cores before idling"); 2513171506SjeffSYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, 2514171506Sjeff "Minimum load on remote cpu before we'll steal"); 2515166108Sjeff#endif 2516165762Sjeff 2517172264Sjeff/* ps compat. All cpu percentages from ULE are weighted. */ 2518172293Sjeffstatic int ccpu = 0; 2519165762SjeffSYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 2520
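/*
 * For illustration only (not part of this file): the tunables exported
 * above can be read and adjusted from user space with sysctlbyname(3).
 * The hypothetical sketch below prints the current time-share slice and
 * interactivity threshold.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int slice, interact;
	size_t len;

	len = sizeof(slice);
	if (sysctlbyname("kern.sched.slice", &slice, &len, NULL, 0) == -1)
		err(1, "kern.sched.slice");
	len = sizeof(interact);
	if (sysctlbyname("kern.sched.interact", &interact, &len, NULL, 0) == -1)
		err(1, "kern.sched.interact");
	printf("slice: %d stathz ticks, interact threshold: %d\n",
	    slice, interact);
	return (0);
}
#endif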