sched_4bsd.c revision 217078
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 217078 2011-01-06 22:24:00Z jhb $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
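/*
 * Illustrative note (added; not from the original source): assuming the
 * stock header values PRIO_MIN = -20, PRIO_MAX = 20 and RQ_PPQ = 4, the
 * uniprocessor setting INVERSE_ESTCPU_WEIGHT = 8 makes ESTCPULIM() clamp
 * td_estcpu at 8 * (1 * 40 - 4) + 8 - 1 = 295, i.e. just below the value
 * whose priority contribution would push a timesharing thread past the
 * bottom of its priority range.
 */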
#define	TS_NAME_LEN	(MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))

/*
 * The schedulable entity that runs a context.
 * This is  an extension to the thread structure and is tailored to
 * the requirements of this scheduler
 */
struct td_sched {
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	int		ts_slptime;	/* (j) Seconds !RUNNING. */
	int		ts_flags;
	struct runq	*ts_runq;	/* runq the thread is currently on */
#ifdef KTR
	char		ts_name[TS_NAME_LEN];
#endif
};

/* flags kept in td_flags */
#define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */

/* flags kept in ts_flags */
#define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */

#define SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

static struct td_sched td_sched0;
struct mtx sched_lock;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static void	setup_runqs(void);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	sched_pickcpu(struct thread *td);
static int	forward_wakeup(int cpunum);
static void	kick_other_cpu(int pri, int cpuid);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
    &sched_kp);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];
#endif

struct pcpuidlestat {
	u_int idlecalls;
	u_int oldidlecalls;
};
static DPCPU_DEFINE(struct pcpuidlestat, idlestat);

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");
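/*
 * Illustrative usage note (added; not from the original source): the
 * handler above exports the quantum in microseconds while sched_quantum
 * is kept in ticks.  Assuming hz = 1000 (so tick = 1000 us) and the
 * default SCHED_QUANTUM = hz / 10 = 100 ticks:
 *
 *	sysctl kern.sched.quantum         -> reports 100000 (us)
 *	sysctl kern.sched.quantum=50000   -> sched_quantum = 50 ticks,
 *	                                     hogticks = 100 ticks
 *
 * Requests smaller than one tick are rejected with EINVAL.
 */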

#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
	   &forward_wakeup_use_single, 0,
	   "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
	   &forward_wakeup_use_htt, 0,
	   "account for htt");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "allow threads to share a quantum");
#endif

static __inline void
sched_load_add(void)
{

	sched_tdcnt++;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{

	sched_tdcnt--;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}
/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * This function is called when a thread is about to be put on run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the new thread should be immediately preempted to.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		td_estcpu *= decay;
 * will compute
 * 	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
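/*
 * Illustrative worked example (added; not from the original source),
 * assuming FSCALE = 2048 (FSHIFT = 11) from <sys/param.h>:
 *
 * With a load average of 1.0, loadfactor() yields loadfac = 2 * FSCALE,
 * so each once-per-second decay_cpu() pass multiplies the estimate by
 * 2*FSCALE / (2*FSCALE + FSCALE) = 2/3.  After five seconds an estcpu of
 * 100 has decayed to roughly 100 * (2/3)^5 ~= 13, i.e. about 90% of the
 * history is forgotten, matching the "90% in 5 * loadav seconds" goal
 * stated above.  Similarly ccpu =~ 0.951 * FSCALE, so ts_pctcpu loses
 * about 5% per second and roughly 95% over 60 seconds.
 */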

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t loadfac;
	unsigned int newcpu;

	ts = td->td_sched;
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}

/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}
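/*
 * Illustrative worked example (added; not from the original source),
 * assuming the stock priority constants PUSER = 160, PRIO_MIN = -20 and
 * the UP value INVERSE_ESTCPU_WEIGHT = 8:
 *
 * A nice 0 thread with td_estcpu = 80 gets
 *	newpriority = 160 + 80 / 8 + 1 * (0 - (-20)) = 190,
 * while the same thread reniced to -20 gets 160 + 10 + 0 = 170 and is
 * therefore preferred, since lower numeric values are better priorities.
 * The min/max clamp keeps the result inside
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 */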

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct pcpuidlestat *stat;
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}

	/*
	 * Force a context switch if the current thread has used up a full
	 * quantum (default quantum is 100ms).
	 */
	if (!TD_IS_IDLETHREAD(td) &&
	    ticks - PCPU_GET(switchticks) >= sched_quantum)
		td->td_flags |= TDF_NEEDRESCHED;

	stat = DPCPU_PTR(idlestat);
	stat->oldidlecalls = stat->idlecalls;
	stat->idlecalls = 0;
}

/*
 * Charge child's scheduling CPU usage to parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
	    "prio:td", td->td_priority);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
	    "prio:td", child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	thread_lock(child);
	if ((child->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();
	thread_unlock(child);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	struct td_sched *ts;

	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	childtd->td_priority = childtd->td_base_pri;
	ts = childtd->td_sched;
	bzero(ts, sizeof(*ts));
	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{

	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	if (td != curthread && prio > td->td_priority) {
		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
		    prio, KTR_ATTR_LINKED, sched_tdname(td));
	}
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}
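/*
 * Illustrative note (added; not from the original source): priorities are
 * "better" when numerically lower.  Suppose a thread sleeping at a kernel
 * priority of td_base_pri = 100 (outside the timesharing range, so
 * base_pri above resolves to td_base_pri) was lent priority 80 through a
 * turnstile.  When the propagation ends:
 *
 *	sched_unlend_prio(td, 90)   -> 90 < 100, keep a borrowed boost of 90
 *	sched_unlend_prio(td, 120)  -> 120 >= 100, clear TDF_BORROWING and
 *	                               return to the base priority of 100
 */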

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_lend_user_pri <= prio)
		return;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_lend_user_pri = prio;
	td->td_user_pri = min(prio, td->td_base_user_pri);
	if (td->td_priority > td->td_user_pri)
		sched_prio(td, td->td_user_pri);
	else if (td->td_priority != td->td_user_pri)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, int pri)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td->td_sched->ts_slptime = 0;
	if (pri)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
		td->td_flags |= TDF_CANSWAP;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct mtx *tmtx;
	struct td_sched *ts;
	struct proc *p;

	tmtx = NULL;
	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 * Block the td_lock in order to avoid breaking the critical path.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		tmtx = thread_lock_block(td);
	}

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();

	if (newtd) {
		MPASS(newtd->td_lock == &sched_lock);
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
	}

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;

	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		idle_cpus_mask &= ~PCPU_GET(cpumask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
		    ("trying to run inhibited thread"));
		newtd->td_flags |= TDF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_flags & TDF_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
		MPASS(newtd->td_lock == &sched_lock);
	}

	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		/* I feel sleepy */
		lock_profile_release_lock(&sched_lock.lock_object);
#ifdef KDTRACE_HOOKS
		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (dtrace_vtime_active)
			(*dtrace_vtime_switch_func)(newtd);
#endif

		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    0, 0, __FILE__, __LINE__);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed. All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 *
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		idle_cpus_mask |= PCPU_GET(cpumask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptick = 0;
	ts->ts_slptime = 0;
	sched_add(td, SRQ_BORING);
}

#ifdef SMP
static int
forward_wakeup(int cpunum)
{
	struct pcpu *pc;
	cpumask_t dontuse, id, map, map2, map3, me;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);

	/* Don't bother if we should be doing it ourself. */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different. */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n", map,
				    map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}

	/* If we only allow a specific CPU, then mask off all the others. */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try to choose an idle die. */
		if (forward_wakeup_use_htt) {
			map2 =  (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* Set only one bit. */
		if (forward_wakeup_use_single) {
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((map & id) == 0)
				continue;
			if (cpu_idle_wakeup(pc->pc_cpuid))
				map &= ~id;
		}
		if (map)
			ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu;
	int cpri;

	pcpu = pcpu_find(cpuid);
	if (idle_cpus_mask & pcpu->pc_cpumask) {
		forward_wakeups_delivered++;
		if (!cpu_idle_wakeup(cpuid))
			ipi_cpu(cpuid, IPI_AST);
		return;
	}

	cpri = pcpu->pc_curthread->td_priority;
	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_cpu(cpuid, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_cpu(cpuid, IPI_AST);
	return;
}
#endif /* SMP */

#ifdef SMP
static int
sched_pickcpu(struct thread *td)
{
	int best, cpu;

	mtx_assert(&sched_lock, MA_OWNED);

	if (THREAD_CAN_SCHED(td, td->td_lastcpu))
		best = td->td_lastcpu;
	else
		best = NOCPU;
	CPU_FOREACH(cpu) {
		if (!THREAD_CAN_SCHED(td, cpu))
			continue;

		if (best == NOCPU)
			best = cpu;
		else if (runq_length[cpu] < runq_length[best])
			best = cpu;
	}
	KASSERT(best != NOCPU, ("no valid CPUs"));

	return (best);
}
#endif

void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
	struct td_sched *ts;
	int forwarded = 0;
	int cpu;
	int single_cpu = 0;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));

	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));

	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (td->td_flags & TDF_BOUND) {
		/* Find CPU from bound runq. */
		KASSERT(SKE_RUNQ_PCPU(ts),
		    ("sched_add: bound td_sched not on cpu runq"));
		cpu = ts->ts_runq - &runq_pcpu[0];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (ts->ts_flags & TSF_AFFINITY) {
		/* Find a valid CPU for our cpuset */
		cpu = sched_pickcpu(td);
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
		    td);
		cpu = NOCPU;
		ts->ts_runq = &runq;
	}

	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			cpumask_t me = PCPU_GET(cpumask);
			cpumask_t idle = idle_cpus_mask & me;

			if (!idle && ((flags & SRQ_INTR) == 0) &&
			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
				forwarded = forward_wakeup(cpu);
		}

		if (!forwarded) {
			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
				return;
			else
				maybe_resched(td);
		}
	}

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, td, flags);
	if (cpu != NOCPU)
		runq_length[cpu]++;
}
#else /* SMP */
{
	struct td_sched *ts;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
("sched_add: thread swapped out")); 1319187357Sjeff KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", 1320187357Sjeff "prio:%d", td->td_priority, KTR_ATTR_LINKED, 1321187357Sjeff sched_tdname(curthread)); 1322187357Sjeff KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", 1323187357Sjeff KTR_ATTR_LINKED, sched_tdname(td)); 1324180879Sjhb 1325170293Sjeff /* 1326170293Sjeff * Now that the thread is moving to the run-queue, set the lock 1327170293Sjeff * to the scheduler's lock. 1328170293Sjeff */ 1329170293Sjeff if (td->td_lock != &sched_lock) { 1330170293Sjeff mtx_lock_spin(&sched_lock); 1331170293Sjeff thread_lock_set(td, &sched_lock); 1332170293Sjeff } 1333166188Sjeff TD_SET_RUNQ(td); 1334164936Sjulian CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td); 1335164936Sjulian ts->ts_runq = &runq; 1336134591Sjulian 1337180879Sjhb /* 1338180879Sjhb * If we are yielding (on the way out anyhow) or the thread 1339180879Sjhb * being saved is US, then don't try be smart about preemption 1340180879Sjhb * or kicking off another CPU as it won't help and may hinder. 1341180879Sjhb * In the YIEDLING case, we are about to run whoever is being 1342180879Sjhb * put in the queue anyhow, and in the OURSELF case, we are 1343180879Sjhb * puting ourself on the run queue which also only happens 1344180879Sjhb * when we are about to yield. 1345134591Sjulian */ 1346180879Sjhb if ((flags & SRQ_YIELDING) == 0) { 1347147182Sups if (maybe_preempt(td)) 1348147182Sups return; 1349180879Sjhb } 1350198854Sattilio if ((td->td_flags & TDF_NOLOAD) == 0) 1351139317Sjeff sched_load_add(); 1352177435Sjeff runq_add(ts->ts_runq, td, flags); 1353132118Sjhb maybe_resched(td); 1354104964Sjeff} 1355147182Sups#endif /* SMP */ 1356147182Sups 1357104964Sjeffvoid 1358121127Sjeffsched_rem(struct thread *td) 1359104964Sjeff{ 1360164936Sjulian struct td_sched *ts; 1361121127Sjeff 1362164936Sjulian ts = td->td_sched; 1363172207Sjeff KASSERT(td->td_flags & TDF_INMEM, 1364172207Sjeff ("sched_rem: thread swapped out")); 1365166188Sjeff KASSERT(TD_ON_RUNQ(td), 1366164936Sjulian ("sched_rem: thread not on run queue")); 1367104964Sjeff mtx_assert(&sched_lock, MA_OWNED); 1368187357Sjeff KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem", 1369187357Sjeff "prio:%d", td->td_priority, KTR_ATTR_LINKED, 1370187357Sjeff sched_tdname(curthread)); 1371104964Sjeff 1372198854Sattilio if ((td->td_flags & TDF_NOLOAD) == 0) 1373139317Sjeff sched_load_rem(); 1374180923Sjhb#ifdef SMP 1375180923Sjhb if (ts->ts_runq != &runq) 1376180923Sjhb runq_length[ts->ts_runq - runq_pcpu]--; 1377180923Sjhb#endif 1378177435Sjeff runq_remove(ts->ts_runq, td); 1379166188Sjeff TD_SET_CAN_RUN(td); 1380104964Sjeff} 1381104964Sjeff 1382135295Sjulian/* 1383180879Sjhb * Select threads to run. Note that running threads still consume a 1384180879Sjhb * slot. 
 */
struct thread *
sched_choose(void)
{
	struct thread *td;
	struct runq *rq;

	mtx_assert(&sched_lock,  MA_OWNED);
#ifdef SMP
	struct thread *tdcpu;

	rq = &runq;
	td = runq_choose_fuzz(&runq, runq_fuzz);
	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (td == NULL ||
	    (tdcpu != NULL &&
	     tdcpu->td_priority < td->td_priority)) {
		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
		     PCPU_GET(cpuid));
		td = tdcpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
	}

#else
	rq = &runq;
	td = runq_choose(&runq);
#endif

	if (td) {
#ifdef SMP
		if (td == tdcpu)
			runq_length[PCPU_GET(cpuid)]--;
#endif
		runq_remove(rq, td);
		td->td_flags |= TDF_DIDRUN;

		KASSERT(td->td_flags & TDF_INMEM,
		    ("sched_choose: thread swapped out"));
		return (td);
	}
	return (PCPU_GET(idlethread));
}

void
sched_preempt(struct thread *td)
{
	thread_lock(td);
	if (td->td_critnest > 1)
		td->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
	thread_unlock(td);
}

void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
void
sched_preempt(struct thread *td)
{
	thread_lock(td);
	if (td->td_critnest > 1)
		td->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
	thread_unlock(td);
}

void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		thread_lock(td);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		thread_unlock(td);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));

	ts = td->td_sched;

	td->td_flags |= TDF_BOUND;
#ifdef SMP
	ts->ts_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
	td->td_flags &= ~TDF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	return (td->td_flags & TDF_BOUND);
}

void
sched_relinquish(struct thread *td)
{
	thread_lock(td);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
}

int
sched_load(void)
{
	return (sched_tdcnt);
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	return (ts->ts_pctcpu);
}
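
/*
 * Nothing to do on every tick for this scheduler; 4BSD does its CPU
 * usage accounting from sched_clock(), so this hook is a no-op and
 * exists only to satisfy the common scheduler interface.
 */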
void
sched_tick(int cnt)
{
}

/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{
	struct pcpuidlestat *stat;

	stat = DPCPU_PTR(idlestat);
	for (;;) {
		mtx_assert(&Giant, MA_NOTOWNED);

		while (sched_runnable() == 0) {
			cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
			stat->idlecalls++;
		}

		mtx_lock_spin(&sched_lock);
		mi_switch(SW_VOL | SWT_IDLE, NULL);
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * A CPU is entering for the first time or a thread is exiting.
 */
void
sched_throw(struct thread *td)
{
	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	if (td == NULL) {
		mtx_lock_spin(&sched_lock);
		spinlock_exit();
	} else {
		lock_profile_release_lock(&sched_lock.lock_object);
		MPASS(td->td_lock == &sched_lock);
	}
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
	PCPU_SET(switchtime, cpu_ticks());
	PCPU_SET(switchticks, ticks);
	cpu_throw(td, choosethread());	/* doesn't return */
}

void
sched_fork_exit(struct thread *td)
{

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td->td_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_lock = (uintptr_t)td;
	lock_profile_obtain_lock_success(&sched_lock.lock_object,
	    0, 0, __FILE__, __LINE__);
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}

char *
sched_tdname(struct thread *td)
{
#ifdef KTR
	struct td_sched *ts;

	ts = td->td_sched;
	if (ts->ts_name[0] == '\0')
		snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "%s tid %d", td->td_name, td->td_tid);
	return (ts->ts_name);
#else
	return (td->td_name);
#endif
}
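
/*
 * React to a change in the thread's allowed CPU set.  Recompute the
 * TSF_AFFINITY flag and, if the thread is now restricted, move it onto
 * an allowed per-CPU run queue or force it off a CPU it may no longer
 * run on.  Pinned and bound threads are left alone.
 */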
void
sched_affinity(struct thread *td)
{
#ifdef SMP
	struct td_sched *ts;
	int cpu;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Set the TSF_AFFINITY flag if there is at least one CPU this
	 * thread can't run on.
	 */
	ts = td->td_sched;
	ts->ts_flags &= ~TSF_AFFINITY;
	CPU_FOREACH(cpu) {
		if (!THREAD_CAN_SCHED(td, cpu)) {
			ts->ts_flags |= TSF_AFFINITY;
			break;
		}
	}

	/*
	 * If this thread can run on all CPUs, nothing else to do.
	 */
	if (!(ts->ts_flags & TSF_AFFINITY))
		return;

	/* Pinned threads and bound threads should be left alone. */
	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
		return;

	switch (td->td_state) {
	case TDS_RUNQ:
		/*
		 * If we are on a per-CPU runqueue that is in the set,
		 * then nothing needs to be done.
		 */
		if (ts->ts_runq != &runq &&
		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
			return;

		/* Put this thread on a valid per-CPU runqueue. */
		sched_rem(td);
		sched_add(td, SRQ_BORING);
		break;
	case TDS_RUNNING:
		/*
		 * See if our current CPU is in the set.  If not, force a
		 * context switch on the CPU the thread is running on.
		 */
		if (THREAD_CAN_SCHED(td, td->td_oncpu))
			return;

		td->td_flags |= TDF_NEEDRESCHED;
		if (td != curthread)
			ipi_cpu(td->td_oncpu, IPI_AST);
		break;
	default:
		break;
	}
#endif
}