sched_ule.c revision 177435
/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine grain locking.  It has superior interactive
 * performance under load even on uni-processor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 177435 2008-03-20 05:51:16Z jeff $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__)
#error "This architecture is not currently compatible with ULE"
#endif

#define	KTR_ULE	0

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_rltick;	/* Real last tick, for affinity. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
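 *
 * As an illustration (hz is a run-time tunable, the value here is only an
 * assumption): with hz = 1000, SCHED_TICK_TARG is 10000 ticks and
 * SCHED_TICK_MAX is 11000, so pctcpu is averaged over roughly the last ten
 * seconds and ts_ticks is rescaled once its window grows past that.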
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
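 *
 * Worked example with purely illustrative values: if hz = 1000 and
 * stathz = 128, sched_initticks() below computes
 * tickincr = (1000 << 10) / 128 = 8000, i.e. each stathz tick is charged
 * as 8000 units of shifted hz time.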
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice = 1;
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
#else
static int preempt_thresh = PRI_MIN_KERN;
#endif
#else
static int preempt_thresh = 0;
#endif
static int static_boost = 1;

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be accessed unlocked to avoid excess
 * locking in sched_pickcpu();
 */
struct tdq {
	/* Ordered to improve efficiency of cpu_search() and switch(). */
	struct mtx	tdq_lock;		/* run queue lock. */
	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
	int		tdq_load;		/* Aggregate load. */
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
	int		tdq_transferable;	/* Transferable thread count. */
	u_char		tdq_lowpri;		/* Lowest priority thread. */
	u_char		tdq_ipipending;		/* IPI pending. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	char		tdq_name[sizeof("sched lock") + 6];
} __aligned(64);


#ifdef SMP
struct cpu_group *cpu_top;

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
#define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_interval = 128;	/* Default set in sched_initticks(). */
static int affinity;
static int steal_htt = 1;
static int steal_idle = 1;
static int steal_thresh = 2;

/*
 * One thread queue per processor.
 */
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq	*balance_tdq;
static int balance_ticks;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_ID(x)	(0)
#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct thread *tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct thread *);
static void tdq_load_rem(struct tdq *, struct thread *);
static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
static __inline void tdq_runq_rem(struct tdq *, struct thread *);
static inline int sched_shouldpreempt(int, int, int);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, struct thread *);
static struct thread *tdq_steal(struct tdq *, int);
static struct thread *runq_steal(struct runq *, int);
static int sched_pickcpu(struct thread *, int);
static void sched_balance(void);
static int sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct thread *, int, int);
static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
    NULL);

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(td, rqh, td_runq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    td, td->td_name, td->td_priority,
					    td->td_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq %d:\n", TDQ_ID(tdq));
	printf("\tlock %p\n", TDQ_LOCKPTR(tdq));
	printf("\tLock name: %s\n", tdq->tdq_name);
	printf("\tload: %d\n", tdq->tdq_load);
	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
	printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
}

static inline int
sched_shouldpreempt(int pri, int cpri, int remote)
{
	/*
	 * If the new priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri >= cpri)
		return (0);
	/*
	 * Always preempt idle.
	 */
	if (cpri >= PRI_MIN_IDLE)
		return (1);
	/*
	 * If preemption is disabled don't preempt others.
	 */
	if (preempt_thresh == 0)
		return (0);
	/*
	 * Preempt if we exceed the threshold.
	 */
	if (pri <= preempt_thresh)
		return (1);
	/*
	 * If we're realtime or better and there is timeshare or worse running
	 * preempt only remote processors.
	 */
	if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
		return (1);
	return (0);
}

#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
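 *
 * The timeshare queue is treated as a circular buffer: the priority is
 * scaled down to a queue index and then rotated by tdq_idx, so a newly
 * added thread lands behind the slots currently being drained at tdq_ridx
 * and only runs after the work that was already queued ahead of it.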
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
{
	struct td_sched *ts;
	u_char pri;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	pri = td->td_priority;
	ts = td->td_sched;
	TD_SET_RUNQ(td);
	if (THREAD_CAN_MIGRATE(td)) {
		tdq->tdq_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
	if (pri <= PRI_MAX_REALTIME) {
		ts->ts_runq = &tdq->tdq_realtime;
	} else if (pri <= PRI_MAX_TIMESHARE) {
		ts->ts_runq = &tdq->tdq_timeshare;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
		    ("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * realtime.  Use the whole queue to represent these values.
		 */
		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, td, pri, flags);
		return;
	} else
		ts->ts_runq = &tdq->tdq_idle;
	runq_add(ts->ts_runq, td, flags);
}

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", td));
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, td, NULL);
	} else
		runq_remove(ts->ts_runq, td);
}

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;
	int class;

	ts = td->td_sched;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	class = PRI_BASE(td->td_pri_class);
	tdq->tdq_load++;
	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload++;
}

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;
	int class;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	class = PRI_BASE(td->td_pri_class);
	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload--;
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
	tdq->tdq_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
}

/*
 * Set lowpri to its exact value by searching the run-queue and
 * evaluating curthread.  curthread may be passed as an optimization.
 */
static void
tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if (ctd == NULL)
		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
	td = tdq_choose(tdq);
	if (td == NULL || td->td_priority > ctd->td_priority)
		tdq->tdq_lowpri = ctd->td_priority;
	else
		tdq->tdq_lowpri = td->td_priority;
}

#ifdef SMP
struct cpu_search {
	cpumask_t cs_mask;	/* Mask of valid cpus. */
	u_int	cs_load;
	u_int	cs_cpu;
	int	cs_limit;	/* Min priority for low, min load for high. */
};

#define	CPU_SEARCH_LOWEST	0x1
#define	CPU_SEARCH_HIGHEST	0x2
#define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)

#define	CPUMASK_FOREACH(cpu, mask)				\
	for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++)	\
		if ((mask) & 1 << (cpu))

static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match);
int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low);
int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high);
int cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high);

/*
 * This routine compares according to the match argument and should be
 * reduced in actual instantiations via constant propagation and dead code
 * elimination.
 */
static __inline int
cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high,
    const int match)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);
	if (match & CPU_SEARCH_LOWEST)
		if (low->cs_mask & (1 << cpu) &&
		    tdq->tdq_load < low->cs_load &&
		    tdq->tdq_lowpri > low->cs_limit) {
			low->cs_cpu = cpu;
			low->cs_load = tdq->tdq_load;
		}
	if (match & CPU_SEARCH_HIGHEST)
		if (high->cs_mask & (1 << cpu) &&
		    tdq->tdq_load >= high->cs_limit &&
		    tdq->tdq_load > high->cs_load &&
		    tdq->tdq_transferable) {
			high->cs_cpu = cpu;
			high->cs_load = tdq->tdq_load;
		}
	return (tdq->tdq_load);
}

/*
 * Search the tree of cpu_groups for the lowest or highest loaded cpu
 * according to the match argument.  This routine actually compares the
 * load on all paths through the tree and finds the least loaded cpu on
 * the least loaded path, which may differ from the least loaded cpu in
 * the system.  This balances work among caches and busses.
 *
 * This inline is instantiated in three forms below using constants for the
 * match argument.  It is reduced to the minimum set for each case.  It is
 * also recursive to the depth of the tree.
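 *
 * For instance, on a hypothetical two-package topology where each package
 * group contains two cores, each package's load is the sum reported by its
 * children; the search follows the less loaded package and then picks the
 * least loaded core inside it, even if the other package happens to contain
 * an individually less loaded core.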
 */
static __inline int
cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match)
{
	int total;

	total = 0;
	if (cg->cg_children) {
		struct cpu_search lgroup;
		struct cpu_search hgroup;
		struct cpu_group *child;
		u_int lload;
		int hload;
		int load;
		int i;

		lload = -1;
		hload = -1;
		for (i = 0; i < cg->cg_children; i++) {
			child = &cg->cg_child[i];
			if (match & CPU_SEARCH_LOWEST) {
				lgroup = *low;
				lgroup.cs_load = -1;
			}
			if (match & CPU_SEARCH_HIGHEST) {
				hgroup = *high;
				lgroup.cs_load = 0;
			}
			switch (match) {
			case CPU_SEARCH_LOWEST:
				load = cpu_search_lowest(child, &lgroup);
				break;
			case CPU_SEARCH_HIGHEST:
				load = cpu_search_highest(child, &hgroup);
				break;
			case CPU_SEARCH_BOTH:
				load = cpu_search_both(child, &lgroup, &hgroup);
				break;
			}
			total += load;
			if (match & CPU_SEARCH_LOWEST)
				if (load < lload || low->cs_cpu == -1) {
					*low = lgroup;
					lload = load;
				}
			if (match & CPU_SEARCH_HIGHEST)
				if (load > hload || high->cs_cpu == -1) {
					hload = load;
					*high = hgroup;
				}
		}
	} else {
		int cpu;

		CPUMASK_FOREACH(cpu, cg->cg_mask)
			total += cpu_compare(cpu, low, high, match);
	}
	return (total);
}

/*
 * cpu_search instantiations must pass constants to maintain the inline
 * optimization.
 */
int
cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low)
{
	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
}

int
cpu_search_highest(struct cpu_group *cg, struct cpu_search *high)
{
	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
}

int
cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high)
{
	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
}

/*
 * Find the cpu with the least load via the least loaded path that has a
 * lowpri greater than pri.  A pri of -1 indicates any priority is
 * acceptable.
 */
static inline int
sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri)
{
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	low.cs_limit = pri;
	cpu_search_lowest(cg, &low);
	return low.cs_cpu;
}

/*
 * Find the cpu with the highest load via the highest loaded path.
 */
static inline int
sched_highest(struct cpu_group *cg, cpumask_t mask, int minload)
{
	struct cpu_search high;

	high.cs_cpu = -1;
	high.cs_load = 0;
	high.cs_mask = mask;
	high.cs_limit = minload;
	cpu_search_highest(cg, &high);
	return high.cs_cpu;
}

/*
 * Simultaneously find the highest and lowest loaded cpu reachable via
 * cg.
 */
static inline void
sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu)
{
	struct cpu_search high;
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_limit = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	high.cs_load = 0;
	high.cs_cpu = -1;
	high.cs_limit = -1;
	high.cs_mask = mask;
	cpu_search_both(cg, &low, &high);
	*lowcpu = low.cs_cpu;
	*highcpu = high.cs_cpu;
	return;
}

static void
sched_balance_group(struct cpu_group *cg)
{
	cpumask_t mask;
	int high;
	int low;
	int i;

	mask = -1;
	for (;;) {
		sched_both(cg, mask, &low, &high);
		if (low == high || low == -1 || high == -1)
			break;
		if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low)))
			break;
		/*
		 * If we failed to move any threads determine which cpu
		 * to kick out of the set and try again.
		 */
		if (TDQ_CPU(high)->tdq_transferable == 0)
			mask &= ~(1 << high);
		else
			mask &= ~(1 << low);
	}

	for (i = 0; i < cg->cg_children; i++)
		sched_balance_group(&cg->cg_child[i]);
}

static void
sched_balance()
{
	struct tdq *tdq;

	/*
	 * Select a random time between .5 * balance_interval and
	 * 1.5 * balance_interval.
	 */
	balance_ticks = max(balance_interval / 2, 1);
	balance_ticks += random() % balance_interval;
	if (smp_started == 0 || rebalance == 0)
		return;
	tdq = TDQ_SELF();
	TDQ_UNLOCK(tdq);
	sched_balance_group(cpu_top);
	TDQ_LOCK(tdq);
}

/*
 * Lock two thread queues using their address to maintain lock order.
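 *
 * Always taking the lower-addressed queue's lock first gives every CPU the
 * same global ordering, so two CPUs balancing against each other cannot
 * each hold one lock while spinning on the other.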
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
	if (one < two) {
		TDQ_LOCK(one);
		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
	}
}

/*
 * Unlock two thread queues.  Order is not important here.
 */
static void
tdq_unlock_pair(struct tdq *one, struct tdq *two)
{
	TDQ_UNLOCK(one);
	TDQ_UNLOCK(two);
}

/*
 * Transfer load between two imbalanced thread queues.
 */
static int
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int moved;
	int move;
	int diff;
	int i;

	tdq_lock_pair(high, low);
	transferable = high->tdq_transferable;
	high_load = high->tdq_load;
	low_load = low->tdq_load;
	moved = 0;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	if (transferable != 0) {
		diff = high_load - low_load;
		move = diff / 2;
		if (diff & 0x1)
			move++;
		move = min(move, transferable);
		for (i = 0; i < move; i++)
			moved += tdq_move(high, low);
		/*
		 * IPI the target cpu to force it to reschedule with the new
		 * workload.
		 */
		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
	}
	tdq_unlock_pair(high, low);
	return (moved);
}

/*
 * Move a thread from one thread queue to another.
 */
static int
tdq_move(struct tdq *from, struct tdq *to)
{
	struct td_sched *ts;
	struct thread *td;
	struct tdq *tdq;
	int cpu;

	TDQ_LOCK_ASSERT(from, MA_OWNED);
	TDQ_LOCK_ASSERT(to, MA_OWNED);

	tdq = from;
	cpu = TDQ_ID(to);
	td = tdq_steal(tdq, cpu);
	if (td == NULL)
		return (0);
	ts = td->td_sched;
	/*
	 * Although the run queue is locked the thread may be blocked.  Lock
	 * it to clear this and acquire the run-queue lock.
	 */
	thread_lock(td);
	/* Drop recursive lock on from acquired via thread_lock(). */
	TDQ_UNLOCK(from);
	sched_rem(td);
	ts->ts_cpu = cpu;
	td->td_lock = TDQ_LOCKPTR(to);
	tdq_add(to, td, SRQ_YIELDING);
	return (1);
}

/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
	struct cpu_group *cg;
	struct tdq *steal;
	cpumask_t mask;
	int thresh;
	int cpu;

	if (smp_started == 0 || steal_idle == 0)
		return (1);
	mask = -1;
	mask &= ~PCPU_GET(cpumask);
	/* We don't want to be preempted while we're iterating. */
	spinlock_enter();
	for (cg = tdq->tdq_cg; cg != NULL; ) {
		if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0)
			thresh = steal_thresh;
		else
			thresh = 1;
		cpu = sched_highest(cg, mask, thresh);
		if (cpu == -1) {
			cg = cg->cg_parent;
			continue;
		}
		steal = TDQ_CPU(cpu);
		mask &= ~(1 << cpu);
		tdq_lock_pair(tdq, steal);
		if (steal->tdq_load < thresh || steal->tdq_transferable == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		/*
		 * If a thread was added while interrupts were disabled don't
		 * steal one here.  If we fail to acquire one due to affinity
		 * restrictions loop again with this cpu removed from the
		 * set.
		 */
		if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		spinlock_exit();
		TDQ_UNLOCK(steal);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);

		return (0);
	}
	spinlock_exit();
	return (1);
}

/*
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct tdq *tdq, struct thread *td)
{
	int cpri;
	int pri;
	int cpu;

	if (tdq->tdq_ipipending)
		return;
	cpu = td->td_sched->ts_cpu;
	pri = td->td_priority;
	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
	if (!sched_shouldpreempt(pri, cpri, 1))
		return;
	tdq->tdq_ipipending = 1;
	ipi_selected(1 << cpu, IPI_PREEMPT);
}

/*
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
static struct thread *
runq_steal_from(struct runq *rq, int cpu, u_char start)
{
	struct rqbits *rqb;
	struct rqhead *rqh;
	struct thread *td;
	int first;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW -1);
	pri = 0;
	first = 0;
again:
	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
		if (rqb->rqb_bits[i] == 0)
			continue;
		if (bit != 0) {
			for (pri = bit; pri < RQB_BPW; pri++)
				if (rqb->rqb_bits[i] & (1ul << pri))
					break;
			if (pri >= RQB_BPW)
				continue;
		} else
			pri = RQB_FFS(rqb->rqb_bits[i]);
		pri += (i << RQB_L2BPW);
		rqh = &rq->rq_queues[pri];
		TAILQ_FOREACH(td, rqh, td_runq) {
			if (first && THREAD_CAN_MIGRATE(td) &&
			    THREAD_CAN_SCHED(td, cpu))
				return (td);
			first = 1;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}

	return (NULL);
}

/*
 * Steals load from a standard linear queue.
 */
static struct thread *
runq_steal(struct runq *rq, int cpu)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct thread *td;
	int word;
	int bit;

	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(td, rqh, td_runq)
				if (THREAD_CAN_MIGRATE(td) &&
				    THREAD_CAN_SCHED(td, cpu))
					return (td);
		}
	}
	return (NULL);
}

/*
 * Attempt to steal a thread in priority order from a thread queue.
 */
static struct thread *
tdq_steal(struct tdq *tdq, int cpu)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
		return (td);
	if ((td = runq_steal_from(&tdq->tdq_timeshare,
	    cpu, tdq->tdq_ridx)) != NULL)
		return (td);
	return (runq_steal(&tdq->tdq_idle, cpu));
}

/*
 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
 * current lock and returns with the assigned queue locked.
 */
static inline struct tdq *
sched_setcpu(struct thread *td, int cpu, int flags)
{

	struct tdq *tdq;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	tdq = TDQ_CPU(cpu);
	td->td_sched->ts_cpu = cpu;
	/*
	 * If the lock matches just return the queue.
	 */
	if (td->td_lock == TDQ_LOCKPTR(tdq))
		return (tdq);
#ifdef notyet
	/*
	 * If the thread isn't running its lockptr is a
	 * turnstile or a sleepqueue.  We can just lock_set without
	 * blocking.
	 */
	if (TD_CAN_RUN(td)) {
		TDQ_LOCK(tdq);
		thread_lock_set(td, TDQ_LOCKPTR(tdq));
		return (tdq);
	}
#endif
	/*
	 * The hard case, migration, we need to block the thread first to
	 * prevent order reversals with other cpus locks.
	 */
	thread_lock_block(td);
	TDQ_LOCK(tdq);
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
	return (tdq);
}

static int
sched_pickcpu(struct thread *td, int flags)
{
	struct cpu_group *cg;
	struct td_sched *ts;
	struct tdq *tdq;
	cpumask_t mask;
	int self;
	int pri;
	int cpu;

	self = PCPU_GET(cpuid);
	ts = td->td_sched;
	if (smp_started == 0)
		return (self);
	/*
	 * Don't migrate a running thread from sched_switch().
	 */
	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
		return (ts->ts_cpu);
	/*
	 * Prefer to run interrupt threads on the processors that generate
	 * the interrupt.
	 */
	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
	    curthread->td_intr_nesting_level)
		ts->ts_cpu = self;
	/*
	 * If the thread can run on the last cpu and the affinity has not
	 * expired or it is idle run it there.
	 */
	pri = td->td_priority;
	tdq = TDQ_CPU(ts->ts_cpu);
	if (THREAD_CAN_SCHED(td, ts->ts_cpu)) {
		if (tdq->tdq_lowpri > PRI_MIN_IDLE)
			return (ts->ts_cpu);
		if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri)
			return (ts->ts_cpu);
	}
	/*
	 * Search for the highest level in the tree that still has affinity.
	 */
	cg = NULL;
	for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent)
		if (SCHED_AFFINITY(ts, cg->cg_level))
			break;
	cpu = -1;
	mask = td->td_cpuset->cs_mask.__bits[0];
	if (cg)
		cpu = sched_lowest(cg, mask, pri);
	if (cpu == -1)
		cpu = sched_lowest(cpu_top, mask, -1);
	/*
	 * Compare the lowest loaded cpu to current cpu.
	 */
	if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri &&
	    TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
		cpu = self;
	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
	return (cpu);
}
#endif

/*
 * Pick the highest priority task we have and return it.
 */
static struct thread *
tdq_choose(struct tdq *tdq)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	td = runq_choose(&tdq->tdq_realtime);
	if (td != NULL)
		return (td);
	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
	if (td != NULL) {
		KASSERT(td->td_priority >= PRI_MIN_TIMESHARE,
		    ("tdq_choose: Invalid priority on timeshare queue %d",
		    td->td_priority));
		return (td);
	}
	td = runq_choose(&tdq->tdq_idle);
	if (td != NULL) {
		KASSERT(td->td_priority >= PRI_MIN_IDLE,
		    ("tdq_choose: Invalid priority on idle queue %d",
		    td->td_priority));
		return (td);
	}

	return (NULL);
}

/*
 * Initialize a thread queue.
 */
static void
tdq_setup(struct tdq *tdq)
{

	if (bootverbose)
		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
	runq_init(&tdq->tdq_realtime);
	runq_init(&tdq->tdq_timeshare);
	runq_init(&tdq->tdq_idle);
	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
	    "sched lock %d", (int)TDQ_ID(tdq));
	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
	    MTX_SPIN | MTX_RECURSE);
}

#ifdef SMP
static void
sched_setup_smp(void)
{
	struct tdq *tdq;
	int i;

	cpu_top = smp_topo();
	for (i = 0; i < MAXCPU; i++) {
		if (CPU_ABSENT(i))
			continue;
		tdq = TDQ_CPU(i);
		tdq_setup(tdq);
		tdq->tdq_cg = smp_topo_find(cpu_top, i);
		if (tdq->tdq_cg == NULL)
			panic("Can't find cpu group for %d\n", i);
	}
	balance_tdq = TDQ_SELF();
	sched_balance();
}
#endif

/*
 * Setup the thread queues and initialize the topology based on MD
 * information.
 */
static void
sched_setup(void *dummy)
{
	struct tdq *tdq;

	tdq = TDQ_SELF();
#ifdef SMP
	sched_setup_smp();
#else
	tdq_setup(tdq);
#endif
	/*
	 * To avoid divide-by-zero, we set realstathz to a dummy value
	 * in case sched_clock() is called before sched_initticks().
	 */
	realstathz = hz;
	sched_slice = (realstathz/10);	/* ~100ms */
	tickincr = 1 << SCHED_TICK_SHIFT;

	/* Add thread0's load since it's running. */
	TDQ_LOCK(tdq);
	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
	tdq_load_add(tdq, &thread0);
	tdq->tdq_lowpri = thread0.td_priority;
	TDQ_UNLOCK(tdq);
}

/*
 * This routine determines the tickincr after stathz and hz are setup.
 */
/* ARGSUSED */
static void
sched_initticks(void *dummy)
{
	int incr;

	realstathz = stathz ? stathz : hz;
	sched_slice = (realstathz/10);	/* ~100ms */

	/*
	 * tickincr is shifted out by 10 to avoid rounding errors due to
	 * hz not being evenly divisible by stathz on all platforms.
	 */
	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
	/*
	 * This does not work for values of stathz that are more than
	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
	 */
	if (incr == 0)
		incr = 1;
	tickincr = incr;
#ifdef SMP
	/*
	 * Set the default balance interval now that we know
	 * what realstathz is.
	 */
	balance_interval = realstathz;
	/*
	 * Set steal thresh to log2(mp_ncpu) but no greater than 4.  This
	 * prevents excess thrashing on large machines and excess idle on
	 * smaller machines.
	 */
	steal_thresh = min(ffs(mp_ncpus) - 1, 3);
	affinity = SCHED_AFFINITY_DEFAULT;
#endif
}


/*
 * This is the core of the interactivity algorithm.  Determines a score based
 * on past behavior.  It is the ratio of sleep time to run time scaled to
 * a [0, 100] integer.  This is the voluntary sleep time of a process, which
 * differs from the cpu usage because it does not account for time spent
 * waiting on a run-queue.  Would be prettier if we had floating point.
 */
static int
sched_interact_score(struct thread *td)
{
	struct td_sched *ts;
	int div;

	ts = td->td_sched;
	/*
	 * The score is only needed if this is likely to be an interactive
	 * task.  Don't go through the expense of computing it if there's
	 * no chance.
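	 *
	 * As a rough illustration with made-up numbers: a thread that has
	 * slept four times as long as it has run (say ts_slptime = 4000000
	 * and ts_runtime = 1000000) scores 1000000 / (4000000 / 50) = 12,
	 * well on the interactive side of the default threshold of 30,
	 * while a thread that mostly runs scores above 50.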
	 */
	if (sched_interact <= SCHED_INTERACT_HALF &&
	    ts->ts_runtime >= ts->ts_slptime)
		return (SCHED_INTERACT_HALF);

	if (ts->ts_runtime > ts->ts_slptime) {
		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
	}
	if (ts->ts_slptime > ts->ts_runtime) {
		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
		return (ts->ts_runtime / div);
	}
	/* runtime == slptime */
	if (ts->ts_runtime)
		return (SCHED_INTERACT_HALF);

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct thread *td)
{
	int score;
	int pri;

	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * If the score is interactive we place the thread in the realtime
	 * queue with a priority that is less than kernel and interrupt
	 * priorities.  These threads are not subject to nice restrictions.
	 *
	 * Scores greater than this are placed on the normal timeshare queue
	 * where the priority is partially decided by the most recent cpu
	 * utilization and the rest is decided by nice value.
	 *
	 * The nice value of the process has a linear effect on the calculated
	 * score.  Negative nice values make it easier for a thread to be
	 * considered interactive.
	 */
	score = imax(0, sched_interact_score(td) - td->td_proc->p_nice);
	if (score < sched_interact) {
		pri = PRI_MIN_REALTIME;
		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
		    * score;
		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
		    ("sched_priority: invalid interactive priority %d score %d",
		    pri, score));
	} else {
		pri = SCHED_PRI_MIN;
		if (td->td_sched->ts_ticks)
			pri += SCHED_PRI_TICKS(td->td_sched);
		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
		    ("sched_priority: invalid priority %d: nice %d, "
		    "ticks %d ftick %d ltick %d tick pri %d",
		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
		    SCHED_PRI_TICKS(td->td_sched)));
	}
	sched_user_prio(td, pri);

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.  This
 * function is ugly due to integer math.
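 *
 * With hz = 1000 (illustrative only), SCHED_SLP_RUN_MAX is
 * (1000 * 5) << 10 = 5120000, so roughly five seconds of combined sleep and
 * run history is the most that is ever retained before the scaling below
 * kicks in.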
 */
static void
sched_interact_update(struct thread *td)
{
	struct td_sched *ts;
	u_int sum;

	ts = td->td_sched;
	sum = ts->ts_runtime + ts->ts_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * This only happens from two places:
	 * 1) We have added an unusual amount of run time from fork_exit.
	 * 2) We have added an unusual amount of sleep time from sched_sleep().
	 */
	if (sum > SCHED_SLP_RUN_MAX * 2) {
		if (ts->ts_runtime > ts->ts_slptime) {
			ts->ts_runtime = SCHED_SLP_RUN_MAX;
			ts->ts_slptime = 1;
		} else {
			ts->ts_slptime = SCHED_SLP_RUN_MAX;
			ts->ts_runtime = 1;
		}
		return;
	}
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		ts->ts_runtime /= 2;
		ts->ts_slptime /= 2;
		return;
	}
	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
}

/*
 * Scale back the interactivity history when a child thread is created.  The
 * history is inherited from the parent but the thread may behave totally
 * differently.  For example, a shell spawning a compiler process.  We want
 * to learn that the compiler is behaving badly very quickly.
 */
static void
sched_interact_fork(struct thread *td)
{
	int ratio;
	int sum;

	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		td->td_sched->ts_runtime /= ratio;
		td->td_sched->ts_slptime /= ratio;
	}
}

/*
 * Called from proc0_init() to setup the scheduler fields.
 */
void
schedinit(void)
{

	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_ltick = ticks;
	td_sched0.ts_ftick = ticks;
	td_sched0.ts_slice = sched_slice;
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
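 *
 * For example (values are assumptions for illustration): with hz = 1000,
 * realstathz = 128 and therefore sched_slice = 12, the result is
 * 1000 / (128 / 12) = 100 ticks, i.e. roughly 100ms.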
1465113357Sjeff */ 1466109864Sjeffint 1467109864Sjeffsched_rr_interval(void) 1468109864Sjeff{ 1469165762Sjeff 1470165762Sjeff /* Convert sched_slice to hz */ 1471165762Sjeff return (hz/(realstathz/sched_slice)); 1472109864Sjeff} 1473109864Sjeff 1474171482Sjeff/* 1475171482Sjeff * Update the percent cpu tracking information when it is requested or 1476171482Sjeff * the total history exceeds the maximum. We keep a sliding history of 1477171482Sjeff * tick counts that slowly decays. This is less precise than the 4BSD 1478171482Sjeff * mechanism since it happens with less regular and frequent events. 1479171482Sjeff */ 1480121790Sjeffstatic void 1481164936Sjuliansched_pctcpu_update(struct td_sched *ts) 1482109864Sjeff{ 1483165762Sjeff 1484165762Sjeff if (ts->ts_ticks == 0) 1485165762Sjeff return; 1486165796Sjeff if (ticks - (hz / 10) < ts->ts_ltick && 1487165796Sjeff SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 1488165796Sjeff return; 1489109864Sjeff /* 1490109864Sjeff * Adjust counters and watermark for pctcpu calc. 1491116365Sjeff */ 1492165762Sjeff if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1493164936Sjulian ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1494165762Sjeff SCHED_TICK_TARG; 1495165762Sjeff else 1496164936Sjulian ts->ts_ticks = 0; 1497164936Sjulian ts->ts_ltick = ticks; 1498165762Sjeff ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 1499109864Sjeff} 1500109864Sjeff 1501171482Sjeff/* 1502171482Sjeff * Adjust the priority of a thread. Move it to the appropriate run-queue 1503171482Sjeff * if necessary. This is the back-end for several priority related 1504171482Sjeff * functions. 1505171482Sjeff */ 1506165762Sjeffstatic void 1507139453Sjhbsched_thread_priority(struct thread *td, u_char prio) 1508109864Sjeff{ 1509164936Sjulian struct td_sched *ts; 1510177009Sjeff struct tdq *tdq; 1511177009Sjeff int oldpri; 1512109864Sjeff 1513139316Sjeff CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1514173600Sjulian td, td->td_name, td->td_priority, prio, curthread, 1515173600Sjulian curthread->td_name); 1516164936Sjulian ts = td->td_sched; 1517170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1518139453Sjhb if (td->td_priority == prio) 1519139453Sjhb return; 1520177376Sjeff /* 1521177376Sjeff * If the priority has been elevated due to priority 1522177376Sjeff * propagation, we may have to move ourselves to a new 1523177376Sjeff * queue. This could be optimized to not re-add in some 1524177376Sjeff * cases. 1525177376Sjeff */ 1526165766Sjeff if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1527165762Sjeff sched_rem(td); 1528165762Sjeff td->td_priority = prio; 1529171482Sjeff sched_add(td, SRQ_BORROWING); 1530177009Sjeff return; 1531177009Sjeff } 1532177376Sjeff /* 1533177376Sjeff * If the thread is currently running we may have to adjust the lowpri 1534177376Sjeff * information so other cpus are aware of our current priority. 1535177376Sjeff */ 1536177009Sjeff if (TD_IS_RUNNING(td)) { 1537177376Sjeff tdq = TDQ_CPU(ts->ts_cpu); 1538177376Sjeff oldpri = td->td_priority; 1539177376Sjeff td->td_priority = prio; 1540176735Sjeff if (prio < tdq->tdq_lowpri) 1541171482Sjeff tdq->tdq_lowpri = prio; 1542176735Sjeff else if (tdq->tdq_lowpri == oldpri) 1543176735Sjeff tdq_setlowpri(tdq, td); 1544177376Sjeff return; 1545177009Sjeff } 1546177376Sjeff td->td_priority = prio; 1547109864Sjeff} 1548109864Sjeff 1549139453Sjhb/* 1550139453Sjhb * Update a thread's priority when it is lent another thread's 1551139453Sjhb * priority. 
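 *
 * Conceptually the lend/unlend pair is used like this (a sketch only;
 * the real callers are the turnstile code, and "waiter_prio" and
 * "next_prio" are hypothetical names):
 *
 *	thread_lock(owner);
 *	sched_lend_prio(owner, waiter_prio);	(owner now runs at waiter_prio)
 *	thread_unlock(owner);
 *	...
 *	thread_lock(owner);
 *	sched_unlend_prio(owner, next_prio);	(boost dropped once possible)
 *	thread_unlock(owner);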
1552139453Sjhb */ 1553109864Sjeffvoid 1554139453Sjhbsched_lend_prio(struct thread *td, u_char prio) 1555139453Sjhb{ 1556139453Sjhb 1557139453Sjhb td->td_flags |= TDF_BORROWING; 1558139453Sjhb sched_thread_priority(td, prio); 1559139453Sjhb} 1560139453Sjhb 1561139453Sjhb/* 1562139453Sjhb * Restore a thread's priority when priority propagation is 1563139453Sjhb * over. The prio argument is the minimum priority the thread 1564139453Sjhb * needs to have to satisfy other possible priority lending 1565139453Sjhb * requests. If the thread's regular priority is less 1566139453Sjhb * important than prio, the thread will keep a priority boost 1567139453Sjhb * of prio. 1568139453Sjhb */ 1569139453Sjhbvoid 1570139453Sjhbsched_unlend_prio(struct thread *td, u_char prio) 1571139453Sjhb{ 1572139453Sjhb u_char base_pri; 1573139453Sjhb 1574139453Sjhb if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1575139453Sjhb td->td_base_pri <= PRI_MAX_TIMESHARE) 1576163709Sjb base_pri = td->td_user_pri; 1577139453Sjhb else 1578139453Sjhb base_pri = td->td_base_pri; 1579139453Sjhb if (prio >= base_pri) { 1580139455Sjhb td->td_flags &= ~TDF_BORROWING; 1581139453Sjhb sched_thread_priority(td, base_pri); 1582139453Sjhb } else 1583139453Sjhb sched_lend_prio(td, prio); 1584139453Sjhb} 1585139453Sjhb 1586171482Sjeff/* 1587171482Sjeff * Standard entry for setting the priority to an absolute value. 1588171482Sjeff */ 1589139453Sjhbvoid 1590139453Sjhbsched_prio(struct thread *td, u_char prio) 1591139453Sjhb{ 1592139453Sjhb u_char oldprio; 1593139453Sjhb 1594139453Sjhb /* First, update the base priority. */ 1595139453Sjhb td->td_base_pri = prio; 1596139453Sjhb 1597139453Sjhb /* 1598139455Sjhb * If the thread is borrowing another thread's priority, don't 1599139453Sjhb * ever lower the priority. 1600139453Sjhb */ 1601139453Sjhb if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1602139453Sjhb return; 1603139453Sjhb 1604139453Sjhb /* Change the real priority. */ 1605139453Sjhb oldprio = td->td_priority; 1606139453Sjhb sched_thread_priority(td, prio); 1607139453Sjhb 1608139453Sjhb /* 1609139453Sjhb * If the thread is on a turnstile, then let the turnstile update 1610139453Sjhb * its state. 1611139453Sjhb */ 1612139453Sjhb if (TD_ON_LOCK(td) && oldprio != prio) 1613139453Sjhb turnstile_adjust(td, oldprio); 1614139453Sjhb} 1615139455Sjhb 1616171482Sjeff/* 1617171482Sjeff * Set the base user priority, does not effect current running priority. 
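 * The new value is picked up the next time the thread returns to
 * user-space, where sched_userret() does, in effect:
 *
 *	if (td->td_priority != td->td_user_pri) {
 *		td->td_priority = td->td_user_pri;
 *		td->td_base_pri = td->td_user_pri;
 *	}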
1618171482Sjeff */ 1619139453Sjhbvoid 1620163709Sjbsched_user_prio(struct thread *td, u_char prio) 1621161599Sdavidxu{ 1622161599Sdavidxu u_char oldprio; 1623161599Sdavidxu 1624163709Sjb td->td_base_user_pri = prio; 1625164939Sjulian if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1626164939Sjulian return; 1627163709Sjb oldprio = td->td_user_pri; 1628163709Sjb td->td_user_pri = prio; 1629161599Sdavidxu} 1630161599Sdavidxu 1631161599Sdavidxuvoid 1632161599Sdavidxusched_lend_user_prio(struct thread *td, u_char prio) 1633161599Sdavidxu{ 1634161599Sdavidxu u_char oldprio; 1635161599Sdavidxu 1636174536Sdavidxu THREAD_LOCK_ASSERT(td, MA_OWNED); 1637161599Sdavidxu td->td_flags |= TDF_UBORROWING; 1638164091Smaxim oldprio = td->td_user_pri; 1639163709Sjb td->td_user_pri = prio; 1640161599Sdavidxu} 1641161599Sdavidxu 1642161599Sdavidxuvoid 1643161599Sdavidxusched_unlend_user_prio(struct thread *td, u_char prio) 1644161599Sdavidxu{ 1645161599Sdavidxu u_char base_pri; 1646161599Sdavidxu 1647174536Sdavidxu THREAD_LOCK_ASSERT(td, MA_OWNED); 1648163709Sjb base_pri = td->td_base_user_pri; 1649161599Sdavidxu if (prio >= base_pri) { 1650161599Sdavidxu td->td_flags &= ~TDF_UBORROWING; 1651163709Sjb sched_user_prio(td, base_pri); 1652174536Sdavidxu } else { 1653161599Sdavidxu sched_lend_user_prio(td, prio); 1654174536Sdavidxu } 1655161599Sdavidxu} 1656161599Sdavidxu 1657171482Sjeff/* 1658174847Swkoszek * Block a thread for switching. Similar to thread_block() but does not 1659174847Swkoszek * bump the spin count. 1660174847Swkoszek */ 1661174847Swkoszekstatic inline struct mtx * 1662174847Swkoszekthread_block_switch(struct thread *td) 1663174847Swkoszek{ 1664174847Swkoszek struct mtx *lock; 1665174847Swkoszek 1666174847Swkoszek THREAD_LOCK_ASSERT(td, MA_OWNED); 1667174847Swkoszek lock = td->td_lock; 1668174847Swkoszek td->td_lock = &blocked_lock; 1669174847Swkoszek mtx_unlock_spin(lock); 1670174847Swkoszek 1671174847Swkoszek return (lock); 1672174847Swkoszek} 1673174847Swkoszek 1674174847Swkoszek/* 1675171713Sjeff * Handle migration from sched_switch(). This happens only for 1676171713Sjeff * cpu binding. 1677171713Sjeff */ 1678171713Sjeffstatic struct mtx * 1679171713Sjeffsched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1680171713Sjeff{ 1681171713Sjeff struct tdq *tdn; 1682171713Sjeff 1683171713Sjeff tdn = TDQ_CPU(td->td_sched->ts_cpu); 1684171713Sjeff#ifdef SMP 1685177435Sjeff tdq_load_rem(tdq, td); 1686171713Sjeff /* 1687171713Sjeff * Do the lock dance required to avoid LOR. We grab an extra 1688171713Sjeff * spinlock nesting to prevent preemption while we're 1689171713Sjeff * not holding either run-queue lock. 1690171713Sjeff */ 1691171713Sjeff spinlock_enter(); 1692171713Sjeff thread_block_switch(td); /* This releases the lock on tdq. */ 1693171713Sjeff TDQ_LOCK(tdn); 1694171713Sjeff tdq_add(tdn, td, flags); 1695177435Sjeff tdq_notify(tdn, td); 1696171713Sjeff /* 1697171713Sjeff * After we unlock tdn the new cpu still can't switch into this 1698171713Sjeff * thread until we've unblocked it in cpu_switch(). The lock 1699171713Sjeff * pointers may match in the case of HTT cores. Don't unlock here 1700171713Sjeff * or we can deadlock when the other CPU runs the IPI handler. 
1701171713Sjeff */ 1702171713Sjeff if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1703171713Sjeff TDQ_UNLOCK(tdn); 1704171713Sjeff TDQ_LOCK(tdq); 1705171713Sjeff } 1706171713Sjeff spinlock_exit(); 1707171713Sjeff#endif 1708171713Sjeff return (TDQ_LOCKPTR(tdn)); 1709171713Sjeff} 1710171713Sjeff 1711171713Sjeff/* 1712171482Sjeff * Release a thread that was blocked with thread_block_switch(). 1713171482Sjeff */ 1714171482Sjeffstatic inline void 1715171482Sjeffthread_unblock_switch(struct thread *td, struct mtx *mtx) 1716171482Sjeff{ 1717171482Sjeff atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1718171482Sjeff (uintptr_t)mtx); 1719171482Sjeff} 1720171482Sjeff 1721171482Sjeff/* 1722171482Sjeff * Switch threads. This function has to handle threads coming in while 1723171482Sjeff * blocked for some reason, running, or idle. It also must deal with 1724171482Sjeff * migrating a thread from one queue to another as running threads may 1725171482Sjeff * be assigned elsewhere via binding. 1726171482Sjeff */ 1727161599Sdavidxuvoid 1728135051Sjuliansched_switch(struct thread *td, struct thread *newtd, int flags) 1729109864Sjeff{ 1730165627Sjeff struct tdq *tdq; 1731164936Sjulian struct td_sched *ts; 1732171482Sjeff struct mtx *mtx; 1733171713Sjeff int srqflag; 1734171482Sjeff int cpuid; 1735109864Sjeff 1736170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1737177376Sjeff KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument")); 1738109864Sjeff 1739171482Sjeff cpuid = PCPU_GET(cpuid); 1740171482Sjeff tdq = TDQ_CPU(cpuid); 1741164936Sjulian ts = td->td_sched; 1742171713Sjeff mtx = td->td_lock; 1743171482Sjeff ts->ts_rltick = ticks; 1744133555Sjeff td->td_lastcpu = td->td_oncpu; 1745113339Sjulian td->td_oncpu = NOCPU; 1746132266Sjhb td->td_flags &= ~TDF_NEEDRESCHED; 1747144777Sups td->td_owepreempt = 0; 1748123434Sjeff /* 1749171482Sjeff * The lock pointer in an idle thread should never change. Reset it 1750171482Sjeff * to CAN_RUN as well. 1751123434Sjeff */ 1752167327Sjulian if (TD_IS_IDLETHREAD(td)) { 1753171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1754139334Sjeff TD_SET_CAN_RUN(td); 1755170293Sjeff } else if (TD_IS_RUNNING(td)) { 1756171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1757171713Sjeff srqflag = (flags & SW_PREEMPT) ? 1758170293Sjeff SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1759171713Sjeff SRQ_OURSELF|SRQ_YIELDING; 1760171713Sjeff if (ts->ts_cpu == cpuid) 1761177435Sjeff tdq_runq_add(tdq, td, srqflag); 1762171713Sjeff else 1763171713Sjeff mtx = sched_switch_migrate(tdq, td, srqflag); 1764171482Sjeff } else { 1765171482Sjeff /* This thread must be going to sleep. */ 1766171482Sjeff TDQ_LOCK(tdq); 1767171482Sjeff mtx = thread_block_switch(td); 1768177435Sjeff tdq_load_rem(tdq, td); 1769171482Sjeff } 1770171482Sjeff /* 1771171482Sjeff * We enter here with the thread blocked and assigned to the 1772171482Sjeff * appropriate cpu run-queue or sleep-queue and with the current 1773171482Sjeff * thread-queue locked. 1774171482Sjeff */ 1775171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1776171482Sjeff newtd = choosethread(); 1777171482Sjeff /* 1778171482Sjeff * Call the MD code to switch contexts if necessary. 
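	 * cpu_switch() is only reached when choosethread() picked a
	 * different thread; if we selected ourselves again the only work
	 * left is to hand td_lock back via thread_unblock_switch().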
1779171482Sjeff */ 1780145256Sjkoshy if (td != newtd) { 1781145256Sjkoshy#ifdef HWPMC_HOOKS 1782145256Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1783145256Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1784145256Sjkoshy#endif 1785174629Sjeff lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 1786172411Sjeff TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1787171482Sjeff cpu_switch(td, newtd, mtx); 1788171482Sjeff /* 1789171482Sjeff * We may return from cpu_switch on a different cpu. However, 1790171482Sjeff * we always return with td_lock pointing to the current cpu's 1791171482Sjeff * run queue lock. 1792171482Sjeff */ 1793171482Sjeff cpuid = PCPU_GET(cpuid); 1794171482Sjeff tdq = TDQ_CPU(cpuid); 1795174629Sjeff lock_profile_obtain_lock_success( 1796174629Sjeff &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 1797145256Sjkoshy#ifdef HWPMC_HOOKS 1798145256Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1799145256Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1800145256Sjkoshy#endif 1801171482Sjeff } else 1802171482Sjeff thread_unblock_switch(td, mtx); 1803171482Sjeff /* 1804171482Sjeff * Assert that all went well and return. 1805171482Sjeff */ 1806171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1807171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1808171482Sjeff td->td_oncpu = cpuid; 1809109864Sjeff} 1810109864Sjeff 1811171482Sjeff/* 1812171482Sjeff * Adjust thread priorities as a result of a nice request. 1813171482Sjeff */ 1814109864Sjeffvoid 1815130551Sjuliansched_nice(struct proc *p, int nice) 1816109864Sjeff{ 1817109864Sjeff struct thread *td; 1818109864Sjeff 1819130551Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 1820165762Sjeff 1821130551Sjulian p->p_nice = nice; 1822163709Sjb FOREACH_THREAD_IN_PROC(p, td) { 1823170293Sjeff thread_lock(td); 1824163709Sjb sched_priority(td); 1825165762Sjeff sched_prio(td, td->td_base_user_pri); 1826170293Sjeff thread_unlock(td); 1827130551Sjulian } 1828109864Sjeff} 1829109864Sjeff 1830171482Sjeff/* 1831171482Sjeff * Record the sleep time for the interactivity scorer. 1832171482Sjeff */ 1833109864Sjeffvoid 1834177085Sjeffsched_sleep(struct thread *td, int prio) 1835109864Sjeff{ 1836165762Sjeff 1837170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1838109864Sjeff 1839172264Sjeff td->td_slptick = ticks; 1840177085Sjeff if (TD_IS_SUSPENDED(td) || prio <= PSOCK) 1841177085Sjeff td->td_flags |= TDF_CANSWAP; 1842177085Sjeff if (static_boost && prio) 1843177085Sjeff sched_prio(td, prio); 1844109864Sjeff} 1845109864Sjeff 1846171482Sjeff/* 1847171482Sjeff * Schedule a thread to resume execution and record how long it voluntarily 1848171482Sjeff * slept. We also update the pctcpu, interactivity, and priority. 1849171482Sjeff */ 1850109864Sjeffvoid 1851109864Sjeffsched_wakeup(struct thread *td) 1852109864Sjeff{ 1853166229Sjeff struct td_sched *ts; 1854171482Sjeff int slptick; 1855165762Sjeff 1856170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1857166229Sjeff ts = td->td_sched; 1858177085Sjeff td->td_flags &= ~TDF_CANSWAP; 1859109864Sjeff /* 1860165762Sjeff * If we slept for more than a tick update our interactivity and 1861165762Sjeff * priority. 
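	 * The credit added is (ticks - slptick) << SCHED_TICK_SHIFT, i.e.
	 * the hz ticks spent asleep converted to the same fixed-point
	 * units as ts_runtime.  With hz = 1000 (an illustrative value), a
	 * 250ms sleep adds 250 << SCHED_TICK_SHIFT to ts_slptime before
	 * sched_interact_update() re-clamps the history.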
1862109864Sjeff */ 1863172264Sjeff slptick = td->td_slptick; 1864172264Sjeff td->td_slptick = 0; 1865171482Sjeff if (slptick && slptick != ticks) { 1866166208Sjeff u_int hzticks; 1867109864Sjeff 1868171482Sjeff hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1869171482Sjeff ts->ts_slptime += hzticks; 1870165819Sjeff sched_interact_update(td); 1871166229Sjeff sched_pctcpu_update(ts); 1872109864Sjeff } 1873166229Sjeff /* Reset the slice value after we sleep. */ 1874166229Sjeff ts->ts_slice = sched_slice; 1875166190Sjeff sched_add(td, SRQ_BORING); 1876109864Sjeff} 1877109864Sjeff 1878109864Sjeff/* 1879109864Sjeff * Penalize the parent for creating a new child and initialize the child's 1880109864Sjeff * priority. 1881109864Sjeff */ 1882109864Sjeffvoid 1883163709Sjbsched_fork(struct thread *td, struct thread *child) 1884109864Sjeff{ 1885170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1886164936Sjulian sched_fork_thread(td, child); 1887165762Sjeff /* 1888165762Sjeff * Penalize the parent and child for forking. 1889165762Sjeff */ 1890165762Sjeff sched_interact_fork(child); 1891165762Sjeff sched_priority(child); 1892171482Sjeff td->td_sched->ts_runtime += tickincr; 1893165762Sjeff sched_interact_update(td); 1894165762Sjeff sched_priority(td); 1895164936Sjulian} 1896109864Sjeff 1897171482Sjeff/* 1898171482Sjeff * Fork a new thread, may be within the same process. 1899171482Sjeff */ 1900164936Sjulianvoid 1901164936Sjuliansched_fork_thread(struct thread *td, struct thread *child) 1902164936Sjulian{ 1903164936Sjulian struct td_sched *ts; 1904164936Sjulian struct td_sched *ts2; 1905164936Sjulian 1906177426Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1907165762Sjeff /* 1908165762Sjeff * Initialize child. 1909165762Sjeff */ 1910177426Sjeff ts = td->td_sched; 1911177426Sjeff ts2 = child->td_sched; 1912171482Sjeff child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1913176735Sjeff child->td_cpuset = cpuset_ref(td->td_cpuset); 1914164936Sjulian ts2->ts_cpu = ts->ts_cpu; 1915177426Sjeff ts2->ts_flags = 0; 1916165762Sjeff /* 1917165762Sjeff * Grab our parents cpu estimation information and priority. 1918165762Sjeff */ 1919164936Sjulian ts2->ts_ticks = ts->ts_ticks; 1920164936Sjulian ts2->ts_ltick = ts->ts_ltick; 1921164936Sjulian ts2->ts_ftick = ts->ts_ftick; 1922165762Sjeff child->td_user_pri = td->td_user_pri; 1923165762Sjeff child->td_base_user_pri = td->td_base_user_pri; 1924165762Sjeff /* 1925165762Sjeff * And update interactivity score. 1926165762Sjeff */ 1927171482Sjeff ts2->ts_slptime = ts->ts_slptime; 1928171482Sjeff ts2->ts_runtime = ts->ts_runtime; 1929165762Sjeff ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 1930113357Sjeff} 1931113357Sjeff 1932171482Sjeff/* 1933171482Sjeff * Adjust the priority class of a thread. 1934171482Sjeff */ 1935113357Sjeffvoid 1936163709Sjbsched_class(struct thread *td, int class) 1937113357Sjeff{ 1938113357Sjeff 1939170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 1940163709Sjb if (td->td_pri_class == class) 1941113357Sjeff return; 1942163709Sjb td->td_pri_class = class; 1943109864Sjeff} 1944109864Sjeff 1945109864Sjeff/* 1946109864Sjeff * Return some of the child's priority and interactivity to the parent. 
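 * In practice this is a penalty: only the child's accumulated run time
 * is charged to the parent (sleep time is deliberately excluded), so a
 * parent whose children burn a lot of cpu sees its own interactivity
 * score worsen; see sched_exit_thread() below.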
1947109864Sjeff */ 1948109864Sjeffvoid 1949164939Sjuliansched_exit(struct proc *p, struct thread *child) 1950109864Sjeff{ 1951165762Sjeff struct thread *td; 1952164939Sjulian 1953163709Sjb CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 1954173600Sjulian child, child->td_name, child->td_priority); 1955113372Sjeff 1956177368Sjeff PROC_LOCK_ASSERT(p, MA_OWNED); 1957165762Sjeff td = FIRST_THREAD_IN_PROC(p); 1958165762Sjeff sched_exit_thread(td, child); 1959113372Sjeff} 1960113372Sjeff 1961171482Sjeff/* 1962171482Sjeff * Penalize another thread for the time spent on this one. This helps to 1963171482Sjeff * worsen the priority and interactivity of processes which schedule batch 1964171482Sjeff * jobs such as make. This has little effect on the make process itself but 1965171482Sjeff * causes new processes spawned by it to receive worse scores immediately. 1966171482Sjeff */ 1967113372Sjeffvoid 1968164939Sjuliansched_exit_thread(struct thread *td, struct thread *child) 1969164936Sjulian{ 1970165762Sjeff 1971164939Sjulian CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 1972173600Sjulian child, child->td_name, child->td_priority); 1973164939Sjulian 1974165762Sjeff /* 1975165762Sjeff * Give the child's runtime to the parent without returning the 1976165762Sjeff * sleep time as a penalty to the parent. This causes shells that 1977165762Sjeff * launch expensive things to mark their children as expensive. 1978165762Sjeff */ 1979170293Sjeff thread_lock(td); 1980171482Sjeff td->td_sched->ts_runtime += child->td_sched->ts_runtime; 1981164939Sjulian sched_interact_update(td); 1982165762Sjeff sched_priority(td); 1983170293Sjeff thread_unlock(td); 1984164936Sjulian} 1985164936Sjulian 1986177005Sjeffvoid 1987177005Sjeffsched_preempt(struct thread *td) 1988177005Sjeff{ 1989177005Sjeff struct tdq *tdq; 1990177005Sjeff 1991177005Sjeff thread_lock(td); 1992177005Sjeff tdq = TDQ_SELF(); 1993177005Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1994177005Sjeff tdq->tdq_ipipending = 0; 1995177005Sjeff if (td->td_priority > tdq->tdq_lowpri) { 1996177005Sjeff if (td->td_critnest > 1) 1997177005Sjeff td->td_owepreempt = 1; 1998177005Sjeff else 1999177005Sjeff mi_switch(SW_INVOL | SW_PREEMPT, NULL); 2000177005Sjeff } 2001177005Sjeff thread_unlock(td); 2002177005Sjeff} 2003177005Sjeff 2004171482Sjeff/* 2005171482Sjeff * Fix priorities on return to user-space. Priorities may be elevated due 2006171482Sjeff * to static priorities in msleep() or similar. 2007171482Sjeff */ 2008164936Sjulianvoid 2009164936Sjuliansched_userret(struct thread *td) 2010164936Sjulian{ 2011164936Sjulian /* 2012164936Sjulian * XXX we cheat slightly on the locking here to avoid locking in 2013164936Sjulian * the usual case. Setting td_priority here is essentially an 2014164936Sjulian * incomplete workaround for not setting it properly elsewhere. 2015164936Sjulian * Now that some interrupt handlers are threads, not setting it 2016164936Sjulian * properly elsewhere can clobber it in the window between setting 2017164936Sjulian * it here and returning to user mode, so don't waste time setting 2018164936Sjulian * it perfectly here. 
2019164936Sjulian */ 2020164936Sjulian KASSERT((td->td_flags & TDF_BORROWING) == 0, 2021164936Sjulian ("thread with borrowed priority returning to userland")); 2022164936Sjulian if (td->td_priority != td->td_user_pri) { 2023170293Sjeff thread_lock(td); 2024164936Sjulian td->td_priority = td->td_user_pri; 2025164936Sjulian td->td_base_pri = td->td_user_pri; 2026177005Sjeff tdq_setlowpri(TDQ_SELF(), td); 2027170293Sjeff thread_unlock(td); 2028164936Sjulian } 2029164936Sjulian} 2030164936Sjulian 2031171482Sjeff/* 2032171482Sjeff * Handle a stathz tick. This is really only relevant for timeshare 2033171482Sjeff * threads. 2034171482Sjeff */ 2035164936Sjulianvoid 2036121127Sjeffsched_clock(struct thread *td) 2037109864Sjeff{ 2038164936Sjulian struct tdq *tdq; 2039164936Sjulian struct td_sched *ts; 2040109864Sjeff 2041171482Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2042164936Sjulian tdq = TDQ_SELF(); 2043172409Sjeff#ifdef SMP 2044133427Sjeff /* 2045172409Sjeff * We run the long term load balancer infrequently on the first cpu. 2046172409Sjeff */ 2047172409Sjeff if (balance_tdq == tdq) { 2048172409Sjeff if (balance_ticks && --balance_ticks == 0) 2049172409Sjeff sched_balance(); 2050172409Sjeff } 2051172409Sjeff#endif 2052172409Sjeff /* 2053165766Sjeff * Advance the insert index once for each tick to ensure that all 2054165766Sjeff * threads get a chance to run. 2055133427Sjeff */ 2056165766Sjeff if (tdq->tdq_idx == tdq->tdq_ridx) { 2057165766Sjeff tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 2058165766Sjeff if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 2059165766Sjeff tdq->tdq_ridx = tdq->tdq_idx; 2060165766Sjeff } 2061165766Sjeff ts = td->td_sched; 2062175104Sjeff if (td->td_pri_class & PRI_FIFO_BIT) 2063113357Sjeff return; 2064175104Sjeff if (td->td_pri_class == PRI_TIMESHARE) { 2065175104Sjeff /* 2066175104Sjeff * We used a tick; charge it to the thread so 2067175104Sjeff * that we can compute our interactivity. 2068175104Sjeff */ 2069175104Sjeff td->td_sched->ts_runtime += tickincr; 2070175104Sjeff sched_interact_update(td); 2071177009Sjeff sched_priority(td); 2072175104Sjeff } 2073113357Sjeff /* 2074109864Sjeff * We used up one time slice. 2075109864Sjeff */ 2076164936Sjulian if (--ts->ts_slice > 0) 2077113357Sjeff return; 2078109864Sjeff /* 2079177009Sjeff * We're out of time, force a requeue at userret(). 2080109864Sjeff */ 2081177009Sjeff ts->ts_slice = sched_slice; 2082113357Sjeff td->td_flags |= TDF_NEEDRESCHED; 2083109864Sjeff} 2084109864Sjeff 2085171482Sjeff/* 2086171482Sjeff * Called once per hz tick. Used for cpu utilization information. This 2087171482Sjeff * is easier than trying to scale based on stathz. 2088171482Sjeff */ 2089171482Sjeffvoid 2090171482Sjeffsched_tick(void) 2091171482Sjeff{ 2092171482Sjeff struct td_sched *ts; 2093171482Sjeff 2094171482Sjeff ts = curthread->td_sched; 2095171482Sjeff /* Adjust ticks for pctcpu */ 2096171482Sjeff ts->ts_ticks += 1 << SCHED_TICK_SHIFT; 2097171482Sjeff ts->ts_ltick = ticks; 2098171482Sjeff /* 2099171482Sjeff * Update if we've exceeded our desired tick threshhold by over one 2100171482Sjeff * second. 2101171482Sjeff */ 2102171482Sjeff if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) 2103171482Sjeff sched_pctcpu_update(ts); 2104171482Sjeff} 2105171482Sjeff 2106171482Sjeff/* 2107171482Sjeff * Return whether the current CPU has runnable tasks. Used for in-kernel 2108171482Sjeff * cooperative idle threads. 
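 * A cooperative idle loop might poll it like this (a sketch only, not
 * the loop actually used by sched_idletd() below):
 *
 *	while (!sched_runnable())
 *		cpu_spinwait();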
2109171482Sjeff */ 2110109864Sjeffint 2111109864Sjeffsched_runnable(void) 2112109864Sjeff{ 2113164936Sjulian struct tdq *tdq; 2114115998Sjeff int load; 2115109864Sjeff 2116115998Sjeff load = 1; 2117115998Sjeff 2118164936Sjulian tdq = TDQ_SELF(); 2119121605Sjeff if ((curthread->td_flags & TDF_IDLETD) != 0) { 2120165620Sjeff if (tdq->tdq_load > 0) 2121121605Sjeff goto out; 2122121605Sjeff } else 2123165620Sjeff if (tdq->tdq_load - 1 > 0) 2124121605Sjeff goto out; 2125115998Sjeff load = 0; 2126115998Sjeffout: 2127115998Sjeff return (load); 2128109864Sjeff} 2129109864Sjeff 2130171482Sjeff/* 2131171482Sjeff * Choose the highest priority thread to run. The thread is removed from 2132171482Sjeff * the run-queue while running however the load remains. For SMP we set 2133171482Sjeff * the tdq in the global idle bitmask if it idles here. 2134171482Sjeff */ 2135166190Sjeffstruct thread * 2136109970Sjeffsched_choose(void) 2137109970Sjeff{ 2138177435Sjeff struct thread *td; 2139164936Sjulian struct tdq *tdq; 2140109970Sjeff 2141164936Sjulian tdq = TDQ_SELF(); 2142171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2143177435Sjeff td = tdq_choose(tdq); 2144177435Sjeff if (td) { 2145177435Sjeff td->td_sched->ts_ltick = ticks; 2146177435Sjeff tdq_runq_rem(tdq, td); 2147177435Sjeff return (td); 2148109864Sjeff } 2149176735Sjeff return (PCPU_GET(idlethread)); 2150109864Sjeff} 2151109864Sjeff 2152171482Sjeff/* 2153171482Sjeff * Set owepreempt if necessary. Preemption never happens directly in ULE, 2154171482Sjeff * we always request it once we exit a critical section. 2155171482Sjeff */ 2156171482Sjeffstatic inline void 2157171482Sjeffsched_setpreempt(struct thread *td) 2158166190Sjeff{ 2159166190Sjeff struct thread *ctd; 2160166190Sjeff int cpri; 2161166190Sjeff int pri; 2162166190Sjeff 2163177005Sjeff THREAD_LOCK_ASSERT(curthread, MA_OWNED); 2164177005Sjeff 2165166190Sjeff ctd = curthread; 2166166190Sjeff pri = td->td_priority; 2167166190Sjeff cpri = ctd->td_priority; 2168177005Sjeff if (pri < cpri) 2169177005Sjeff ctd->td_flags |= TDF_NEEDRESCHED; 2170166190Sjeff if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 2171171482Sjeff return; 2172177005Sjeff if (!sched_shouldpreempt(pri, cpri, 0)) 2173171482Sjeff return; 2174171482Sjeff ctd->td_owepreempt = 1; 2175166190Sjeff} 2176166190Sjeff 2177171482Sjeff/* 2178177009Sjeff * Add a thread to a thread queue. Select the appropriate runq and add the 2179177009Sjeff * thread to it. This is the internal function called when the tdq is 2180177009Sjeff * predetermined. 2181171482Sjeff */ 2182109864Sjeffvoid 2183171482Sjefftdq_add(struct tdq *tdq, struct thread *td, int flags) 2184109864Sjeff{ 2185109864Sjeff 2186171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2187166190Sjeff KASSERT((td->td_inhibitors == 0), 2188166190Sjeff ("sched_add: trying to run inhibited thread")); 2189166190Sjeff KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 2190166190Sjeff ("sched_add: bad thread state")); 2191172207Sjeff KASSERT(td->td_flags & TDF_INMEM, 2192172207Sjeff ("sched_add: thread swapped out")); 2193171482Sjeff 2194171482Sjeff if (td->td_priority < tdq->tdq_lowpri) 2195171482Sjeff tdq->tdq_lowpri = td->td_priority; 2196177435Sjeff tdq_runq_add(tdq, td, flags); 2197177435Sjeff tdq_load_add(tdq, td); 2198171482Sjeff} 2199171482Sjeff 2200171482Sjeff/* 2201171482Sjeff * Select the target thread queue and add a thread to it. Request 2202171482Sjeff * preemption or IPI a remote processor if required. 
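 * On SMP the sequence is roughly: recompute the priority for timeshare
 * threads, sched_pickcpu() to choose a target cpu, sched_setcpu() to
 * acquire that cpu's queue lock, tdq_add() to enqueue, and finally
 * either tdq_notify() to IPI a remote cpu or sched_setpreempt() when
 * the thread was queued locally.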
2203171482Sjeff */ 2204171482Sjeffvoid 2205171482Sjeffsched_add(struct thread *td, int flags) 2206171482Sjeff{ 2207171482Sjeff struct tdq *tdq; 2208171482Sjeff#ifdef SMP 2209171482Sjeff int cpu; 2210171482Sjeff#endif 2211171482Sjeff CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 2212173600Sjulian td, td->td_name, td->td_priority, curthread, 2213173600Sjulian curthread->td_name); 2214171482Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2215166108Sjeff /* 2216171482Sjeff * Recalculate the priority before we select the target cpu or 2217171482Sjeff * run-queue. 2218166108Sjeff */ 2219171482Sjeff if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2220171482Sjeff sched_priority(td); 2221171482Sjeff#ifdef SMP 2222171482Sjeff /* 2223171482Sjeff * Pick the destination cpu and if it isn't ours transfer to the 2224171482Sjeff * target cpu. 2225171482Sjeff */ 2226177435Sjeff cpu = sched_pickcpu(td, flags); 2227177435Sjeff tdq = sched_setcpu(td, cpu, flags); 2228171482Sjeff tdq_add(tdq, td, flags); 2229177009Sjeff if (cpu != PCPU_GET(cpuid)) { 2230177435Sjeff tdq_notify(tdq, td); 2231166108Sjeff return; 2232166108Sjeff } 2233171482Sjeff#else 2234171482Sjeff tdq = TDQ_SELF(); 2235171482Sjeff TDQ_LOCK(tdq); 2236171482Sjeff /* 2237171482Sjeff * Now that the thread is moving to the run-queue, set the lock 2238171482Sjeff * to the scheduler's lock. 2239171482Sjeff */ 2240171482Sjeff thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2241171482Sjeff tdq_add(tdq, td, flags); 2242166108Sjeff#endif 2243171482Sjeff if (!(flags & SRQ_YIELDING)) 2244171482Sjeff sched_setpreempt(td); 2245109864Sjeff} 2246109864Sjeff 2247171482Sjeff/* 2248171482Sjeff * Remove a thread from a run-queue without running it. This is used 2249171482Sjeff * when we're stealing a thread from a remote queue. Otherwise all threads 2250171482Sjeff * exit by calling sched_exit_thread() and sched_throw() themselves. 2251171482Sjeff */ 2252109864Sjeffvoid 2253121127Sjeffsched_rem(struct thread *td) 2254109864Sjeff{ 2255164936Sjulian struct tdq *tdq; 2256113357Sjeff 2257139316Sjeff CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 2258173600Sjulian td, td->td_name, td->td_priority, curthread, 2259173600Sjulian curthread->td_name); 2260177435Sjeff tdq = TDQ_CPU(td->td_sched->ts_cpu); 2261171482Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2262171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2263166190Sjeff KASSERT(TD_ON_RUNQ(td), 2264164936Sjulian ("sched_rem: thread not on run queue")); 2265177435Sjeff tdq_runq_rem(tdq, td); 2266177435Sjeff tdq_load_rem(tdq, td); 2267166190Sjeff TD_SET_CAN_RUN(td); 2268176735Sjeff if (td->td_priority == tdq->tdq_lowpri) 2269176735Sjeff tdq_setlowpri(tdq, NULL); 2270109864Sjeff} 2271109864Sjeff 2272171482Sjeff/* 2273171482Sjeff * Fetch cpu utilization information. Updates on demand. 2274171482Sjeff */ 2275109864Sjefffixpt_t 2276121127Sjeffsched_pctcpu(struct thread *td) 2277109864Sjeff{ 2278109864Sjeff fixpt_t pctcpu; 2279164936Sjulian struct td_sched *ts; 2280109864Sjeff 2281109864Sjeff pctcpu = 0; 2282164936Sjulian ts = td->td_sched; 2283164936Sjulian if (ts == NULL) 2284121290Sjeff return (0); 2285109864Sjeff 2286170293Sjeff thread_lock(td); 2287164936Sjulian if (ts->ts_ticks) { 2288109864Sjeff int rtick; 2289109864Sjeff 2290165796Sjeff sched_pctcpu_update(ts); 2291109864Sjeff /* How many rtick per second ? 
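		 * ts_ticks advances by 1 << SCHED_TICK_SHIFT per hz tick
		 * (see sched_tick()), so SCHED_TICK_HZ(ts) recovers the hz
		 * tick count for the window and dividing by SCHED_TICK_SECS
		 * yields ticks per second, capped at hz; the next line turns
		 * that into a fixed-point fraction of hz.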
*/ 2292165762Sjeff rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2293165762Sjeff pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 2294109864Sjeff } 2295170293Sjeff thread_unlock(td); 2296109864Sjeff 2297109864Sjeff return (pctcpu); 2298109864Sjeff} 2299109864Sjeff 2300176735Sjeff/* 2301176735Sjeff * Enforce affinity settings for a thread. Called after adjustments to 2302176735Sjeff * cpumask. 2303176735Sjeff */ 2304176729Sjeffvoid 2305176729Sjeffsched_affinity(struct thread *td) 2306176729Sjeff{ 2307176735Sjeff#ifdef SMP 2308176735Sjeff struct td_sched *ts; 2309176735Sjeff int cpu; 2310176735Sjeff 2311176735Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2312176735Sjeff ts = td->td_sched; 2313176735Sjeff if (THREAD_CAN_SCHED(td, ts->ts_cpu)) 2314176735Sjeff return; 2315176735Sjeff if (!TD_IS_RUNNING(td)) 2316176735Sjeff return; 2317176735Sjeff td->td_flags |= TDF_NEEDRESCHED; 2318176735Sjeff if (!THREAD_CAN_MIGRATE(td)) 2319176735Sjeff return; 2320176735Sjeff /* 2321176735Sjeff * Assign the new cpu and force a switch before returning to 2322176735Sjeff * userspace. If the target thread is not running locally send 2323176735Sjeff * an ipi to force the issue. 2324176735Sjeff */ 2325176735Sjeff cpu = ts->ts_cpu; 2326177435Sjeff ts->ts_cpu = sched_pickcpu(td, 0); 2327176735Sjeff if (cpu != PCPU_GET(cpuid)) 2328176735Sjeff ipi_selected(1 << cpu, IPI_PREEMPT); 2329176735Sjeff#endif 2330176729Sjeff} 2331176729Sjeff 2332171482Sjeff/* 2333171482Sjeff * Bind a thread to a target cpu. 2334171482Sjeff */ 2335122038Sjeffvoid 2336122038Sjeffsched_bind(struct thread *td, int cpu) 2337122038Sjeff{ 2338164936Sjulian struct td_sched *ts; 2339122038Sjeff 2340171713Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 2341164936Sjulian ts = td->td_sched; 2342166137Sjeff if (ts->ts_flags & TSF_BOUND) 2343166152Sjeff sched_unbind(td); 2344164936Sjulian ts->ts_flags |= TSF_BOUND; 2345166137Sjeff sched_pin(); 2346123433Sjeff if (PCPU_GET(cpuid) == cpu) 2347122038Sjeff return; 2348166137Sjeff ts->ts_cpu = cpu; 2349122038Sjeff /* When we return from mi_switch we'll be on the correct cpu. */ 2350131527Sphk mi_switch(SW_VOL, NULL); 2351122038Sjeff} 2352122038Sjeff 2353171482Sjeff/* 2354171482Sjeff * Release a bound thread. 2355171482Sjeff */ 2356122038Sjeffvoid 2357122038Sjeffsched_unbind(struct thread *td) 2358122038Sjeff{ 2359165762Sjeff struct td_sched *ts; 2360165762Sjeff 2361170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2362165762Sjeff ts = td->td_sched; 2363166137Sjeff if ((ts->ts_flags & TSF_BOUND) == 0) 2364166137Sjeff return; 2365165762Sjeff ts->ts_flags &= ~TSF_BOUND; 2366165762Sjeff sched_unpin(); 2367122038Sjeff} 2368122038Sjeff 2369109864Sjeffint 2370145256Sjkoshysched_is_bound(struct thread *td) 2371145256Sjkoshy{ 2372170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 2373164936Sjulian return (td->td_sched->ts_flags & TSF_BOUND); 2374145256Sjkoshy} 2375145256Sjkoshy 2376171482Sjeff/* 2377171482Sjeff * Basic yield call. 2378171482Sjeff */ 2379159630Sdavidxuvoid 2380159630Sdavidxusched_relinquish(struct thread *td) 2381159630Sdavidxu{ 2382170293Sjeff thread_lock(td); 2383170293Sjeff SCHED_STAT_INC(switch_relinquish); 2384159630Sdavidxu mi_switch(SW_VOL, NULL); 2385170293Sjeff thread_unlock(td); 2386159630Sdavidxu} 2387159630Sdavidxu 2388171482Sjeff/* 2389171482Sjeff * Return the total system load. 
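 * On SMP this is the sum of tdq_sysload over all cpus; for example,
 * four cpus reporting 2, 0, 1 and 3 give a system load of 6.  On UP it
 * is simply the local queue's tdq_sysload.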
2390171482Sjeff */ 2391145256Sjkoshyint 2392125289Sjeffsched_load(void) 2393125289Sjeff{ 2394125289Sjeff#ifdef SMP 2395125289Sjeff int total; 2396125289Sjeff int i; 2397125289Sjeff 2398125289Sjeff total = 0; 2399176735Sjeff for (i = 0; i <= mp_maxid; i++) 2400176735Sjeff total += TDQ_CPU(i)->tdq_sysload; 2401125289Sjeff return (total); 2402125289Sjeff#else 2403165620Sjeff return (TDQ_SELF()->tdq_sysload); 2404125289Sjeff#endif 2405125289Sjeff} 2406125289Sjeff 2407125289Sjeffint 2408109864Sjeffsched_sizeof_proc(void) 2409109864Sjeff{ 2410109864Sjeff return (sizeof(struct proc)); 2411109864Sjeff} 2412109864Sjeff 2413109864Sjeffint 2414109864Sjeffsched_sizeof_thread(void) 2415109864Sjeff{ 2416109864Sjeff return (sizeof(struct thread) + sizeof(struct td_sched)); 2417109864Sjeff} 2418159570Sdavidxu 2419166190Sjeff/* 2420166190Sjeff * The actual idle process. 2421166190Sjeff */ 2422166190Sjeffvoid 2423166190Sjeffsched_idletd(void *dummy) 2424166190Sjeff{ 2425166190Sjeff struct thread *td; 2426171482Sjeff struct tdq *tdq; 2427166190Sjeff 2428166190Sjeff td = curthread; 2429171482Sjeff tdq = TDQ_SELF(); 2430166190Sjeff mtx_assert(&Giant, MA_NOTOWNED); 2431171482Sjeff /* ULE relies on preemption for idle interruption. */ 2432171482Sjeff for (;;) { 2433171482Sjeff#ifdef SMP 2434171482Sjeff if (tdq_idled(tdq)) 2435171482Sjeff cpu_idle(); 2436171482Sjeff#else 2437166190Sjeff cpu_idle(); 2438171482Sjeff#endif 2439171482Sjeff } 2440166190Sjeff} 2441166190Sjeff 2442170293Sjeff/* 2443170293Sjeff * A CPU is entering for the first time or a thread is exiting. 2444170293Sjeff */ 2445170293Sjeffvoid 2446170293Sjeffsched_throw(struct thread *td) 2447170293Sjeff{ 2448172411Sjeff struct thread *newtd; 2449171482Sjeff struct tdq *tdq; 2450171482Sjeff 2451171482Sjeff tdq = TDQ_SELF(); 2452170293Sjeff if (td == NULL) { 2453171482Sjeff /* Correct spinlock nesting and acquire the correct lock. */ 2454171482Sjeff TDQ_LOCK(tdq); 2455170293Sjeff spinlock_exit(); 2456170293Sjeff } else { 2457171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2458177435Sjeff tdq_load_rem(tdq, td); 2459174629Sjeff lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 2460170293Sjeff } 2461170293Sjeff KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 2462172411Sjeff newtd = choosethread(); 2463172411Sjeff TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 2464170293Sjeff PCPU_SET(switchtime, cpu_ticks()); 2465170293Sjeff PCPU_SET(switchticks, ticks); 2466172411Sjeff cpu_throw(td, newtd); /* doesn't return */ 2467170293Sjeff} 2468170293Sjeff 2469171482Sjeff/* 2470171482Sjeff * This is called from fork_exit(). Just acquire the correct locks and 2471171482Sjeff * let fork do the rest of the work. 2472171482Sjeff */ 2473170293Sjeffvoid 2474170600Sjeffsched_fork_exit(struct thread *td) 2475170293Sjeff{ 2476171482Sjeff struct td_sched *ts; 2477171482Sjeff struct tdq *tdq; 2478171482Sjeff int cpuid; 2479170293Sjeff 2480170293Sjeff /* 2481170293Sjeff * Finish setting up thread glue so that it begins execution in a 2482171482Sjeff * non-nested critical section with the scheduler lock held. 
2483170293Sjeff */ 2484171482Sjeff cpuid = PCPU_GET(cpuid); 2485171482Sjeff tdq = TDQ_CPU(cpuid); 2486171482Sjeff ts = td->td_sched; 2487171482Sjeff if (TD_IS_IDLETHREAD(td)) 2488171482Sjeff td->td_lock = TDQ_LOCKPTR(tdq); 2489171482Sjeff MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2490171482Sjeff td->td_oncpu = cpuid; 2491172411Sjeff TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 2492174629Sjeff lock_profile_obtain_lock_success( 2493174629Sjeff &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 2494170293Sjeff} 2495170293Sjeff 2496177435SjeffSYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler"); 2497171482SjeffSYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, 2498165762Sjeff "Scheduler name"); 2499171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 2500171482Sjeff "Slice size for timeshare threads"); 2501171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, 2502171482Sjeff "Interactivity score threshold"); 2503171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh, 2504171482Sjeff 0,"Min priority for preemption, lower priorities have greater precedence"); 2505177085SjeffSYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 2506177085Sjeff 0,"Controls whether static kernel priorities are assigned to sleeping threads."); 2507166108Sjeff#ifdef SMP 2508171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, 2509171482Sjeff "Number of hz ticks to keep thread affinity for"); 2510171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, 2511171482Sjeff "Enables the long-term load balancer"); 2512172409SjeffSYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW, 2513172409Sjeff &balance_interval, 0, 2514172409Sjeff "Average frequency in stathz ticks to run the long-term balancer"); 2515171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, 2516171482Sjeff "Steals work from another hyper-threaded core on idle"); 2517171482SjeffSYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, 2518171482Sjeff "Attempts to steal work from other cores before idling"); 2519171506SjeffSYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, 2520171506Sjeff "Minimum load on remote cpu before we'll steal"); 2521166108Sjeff#endif 2522165762Sjeff 2523172264Sjeff/* ps compat. All cpu percentages from ULE are weighted. */ 2524172293Sjeffstatic int ccpu = 0; 2525165762SjeffSYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 2526
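/*
 * The knobs above live under the kern.sched sysctl tree and can be read
 * (and, where CTLFLAG_RW, written) from user-space.  A minimal sketch
 * using sysctl(3) -- illustrative only, not part of the kernel build:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int slice;
 *		size_t len = sizeof(slice);
 *
 *		if (sysctlbyname("kern.sched.slice", &slice, &len, NULL, 0) == 0)
 *			printf("kern.sched.slice: %d\n", slice);
 *		return (0);
 *	}
 */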