sched_4bsd.c revision 232700
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 232700 2012-03-08 19:41:05Z jhb $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
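/*
 * Worked example (illustrative only, assuming the stock values of
 * PRIO_MAX - PRIO_MIN == 40 and RQ_PPQ == 4): on a uniprocessor,
 * INVERSE_ESTCPU_WEIGHT is 8 and NICE_WEIGHT is 1, so
 *
 *	ESTCPULIM(e) == min(e, 8 * (1 * 40 - 4) + 8 - 1) == min(e, 295)
 *
 * i.e. td_estcpu saturates near 295 rather than growing without bound,
 * which keeps the priority derived from it in resetpriority() bounded.
 */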

#define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))

/*
 * The schedulable entity that runs a context.
 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler
 */
struct td_sched {
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	int		ts_slptime;	/* (j) Seconds !RUNNING. */
	int		ts_flags;
	struct runq	*ts_runq;	/* runq the thread is currently on */
#ifdef KTR
	char		ts_name[TS_NAME_LEN];
#endif
};

/* flags kept in td_flags */
#define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */

/* flags kept in ts_flags */
#define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */

#define SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

static struct td_sched td_sched0;
struct mtx sched_lock;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static void	setup_runqs(void);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	sched_pickcpu(struct thread *td);
static int	forward_wakeup(int cpunum);
static void	kick_other_cpu(int pri, int cpuid);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
    &sched_kp);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];

static cpuset_t idle_cpus_mask;
#endif

struct pcpuidlestat {
	u_int idlecalls;
	u_int oldidlecalls;
};
static DPCPU_DEFINE(struct pcpuidlestat, idlestat);

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");

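/*
 * Example of the unit conversion above (illustrative only, assuming a
 * kernel built with hz == 1000, i.e. tick == 1000 microseconds):
 *
 *	# sysctl kern.sched.quantum=20000
 *
 * passes new_val == 20000 us through sysctl_kern_quantum(), giving
 * sched_quantum == 20 ticks and hogticks == 40 ticks, while the default
 * SCHED_QUANTUM of hz / 10 corresponds to a 100 ms quantum.
 */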
#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL,
    "Kernel SMP");

static int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle cpus");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "allow threads to share a quantum");
#endif

static __inline void
sched_load_add(void)
{

	sched_tdcnt++;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{

	sched_tdcnt--;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}
/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * This function is called when a thread is about to be put on run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the new thread should be immediately preempted to.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		td_estcpu *= decay;
 * will compute
 * 	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	    solving for factor,
 *	    ln(factor) =~ (-2.30/5*loadav), or
 *	    factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	        exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	    solving for power,
 *	    power*ln(b/(b+1)) =~ -2.30, or
 *	    power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

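/*
 * Worked example of the macros above (illustrative only): with a load
 * average of 1.00, loadfactor() gives loadfac == 2 * FSCALE, so each
 * pass of decay_cpu() scales td_estcpu by
 *
 *	(2 * FSCALE) / (2 * FSCALE + FSCALE) == 2/3
 *
 * and (2/3)^5.68 ~= 0.1, matching the "power: 5.68" entry in the table
 * above: after roughly 5 * loadavg invocations of schedcpu() the thread
 * has "forgotten" 90% of its accumulated CPU usage.
 */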
/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			continue;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t loadfac;
	unsigned int newcpu;

	ts = td->td_sched;
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}

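/*
 * Worked example of the loop above (illustrative only): a thread that
 * wakes with ts_slptime == 5 and does not hit the zeroing threshold
 * takes the else branch; after the initial decrement the while loop
 * runs three times, so with a decay factor of 2/3 (load average 1.00,
 * as in the decay_cpu() example earlier) its td_estcpu is scaled by
 * (2/3)^3, i.e. to roughly 30% of its old value.
 */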
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}

/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct pcpuidlestat *stat;
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}

	/*
	 * Force a context switch if the current thread has used up a full
	 * quantum (default quantum is 100ms).
	 */
	if (!TD_IS_IDLETHREAD(td) &&
	    ticks - PCPU_GET(switchticks) >= sched_quantum)
		td->td_flags |= TDF_NEEDRESCHED;

	stat = DPCPU_PTR(idlestat);
	stat->oldidlecalls = stat->idlecalls;
	stat->idlecalls = 0;
}

/*
 * Charge child's scheduling CPU usage to parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
	    "prio:%d", td->td_priority);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
	    "prio:%d", child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	thread_lock(child);
	if ((child->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();
	thread_unlock(child);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	struct td_sched *ts;

	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	childtd->td_priority = childtd->td_base_pri;
	ts = childtd->td_sched;
	bzero(ts, sizeof(*ts));
	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{


	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	if (td != curthread && prio > td->td_priority) {
		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
		    prio, KTR_ATTR_LINKED, sched_tdname(td));
	}
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_lend_user_pri <= prio)
		return;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_lend_user_pri = prio;
	td->td_user_pri = min(prio, td->td_base_user_pri);
	if (td->td_priority > td->td_user_pri)
		sched_prio(td, td->td_user_pri);
	else if (td->td_priority != td->td_user_pri)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, int pri)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td->td_sched->ts_slptime = 0;
	if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
		td->td_flags |= TDF_CANSWAP;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct mtx *tmtx;
	struct td_sched *ts;
	struct proc *p;

	tmtx = NULL;
	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 * Block the td_lock in order to avoid breaking the critical path.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		tmtx = thread_lock_block(td);
	}

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();

	td->td_lastcpu = td->td_oncpu;
	if (!(flags & SW_PREEMPT))
		td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;

	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
		    ("trying to run inhibited thread"));
		newtd->td_flags |= TDF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_flags & TDF_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
		MPASS(newtd->td_lock == &sched_lock);
	}

	if (td != newtd) {
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		/* I feel sleepy */
		lock_profile_release_lock(&sched_lock.lock_object);
#ifdef KDTRACE_HOOKS
		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (dtrace_vtime_active)
			(*dtrace_vtime_switch_func)(newtd);
#endif

		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    0, 0, __FILE__, __LINE__);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed.  All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 *
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptick = 0;
	ts->ts_slptime = 0;
	sched_add(td, SRQ_BORING);
}

#ifdef SMP
static int
forward_wakeup(int cpunum)
{
	struct pcpu *pc;
	cpuset_t dontuse, map, map2;
	u_int id, me;
	int iscpuset;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpuid);

	/* Don't bother if we should be doing it ourself. */
	if (CPU_ISSET(me, &idle_cpus_mask) &&
	    (cpunum == NOCPU || me == cpunum))
		return (0);

	CPU_SETOF(me, &dontuse);
	CPU_OR(&dontuse, &stopped_cpus);
	CPU_OR(&dontuse, &hlt_cpus_mask);
	CPU_ZERO(&map2);
	if (forward_wakeup_use_loop) {
		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpuid;
			if (!CPU_ISSET(id, &dontuse) &&
			    pc->pc_curthread == pc->pc_idlethread) {
				CPU_SET(id, &map2);
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = idle_cpus_mask;
		CPU_NAND(&map, &dontuse);

		/* If they are both on, compare and use loop if different. */
		if (forward_wakeup_use_loop) {
			if (CPU_CMP(&map, &map2)) {
				printf("map != map2, loop method preferred\n");
				map = map2;
			}
		}
	} else {
		map = map2;
	}

	/* If we only allow a specific CPU, then mask off all the others. */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
		iscpuset = CPU_ISSET(cpunum, &map);
		if (iscpuset == 0)
			CPU_ZERO(&map);
		else
			CPU_SETOF(cpunum, &map);
	}
	if (!CPU_EMPTY(&map)) {
		forward_wakeups_delivered++;
		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpuid;
			if (!CPU_ISSET(id, &map))
				continue;
			if (cpu_idle_wakeup(pc->pc_cpuid))
				CPU_CLR(id, &map);
		}
		if (!CPU_EMPTY(&map))
			ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu;
	int cpri;

	pcpu = pcpu_find(cpuid);
	if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
		forward_wakeups_delivered++;
		if (!cpu_idle_wakeup(cpuid))
			ipi_cpu(cpuid, IPI_AST);
		return;
	}

	cpri = pcpu->pc_curthread->td_priority;
	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_cpu(cpuid, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_cpu(cpuid, IPI_AST);
	return;
}
#endif /* SMP */

#ifdef SMP
static int
sched_pickcpu(struct thread *td)
{
	int best, cpu;

	mtx_assert(&sched_lock, MA_OWNED);

	if (THREAD_CAN_SCHED(td, td->td_lastcpu))
		best = td->td_lastcpu;
	else
		best = NOCPU;
	CPU_FOREACH(cpu) {
		if (!THREAD_CAN_SCHED(td, cpu))
			continue;

		if (best == NOCPU)
			best = cpu;
		else if (runq_length[cpu] < runq_length[best])
			best = cpu;
	}
	KASSERT(best != NOCPU, ("no valid CPUs"));

	return (best);
}
#endif

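/*
 * Worked example of the selection above (illustrative only): a thread
 * whose last CPU is still allowed starts with that CPU as the candidate,
 * but the CPU_FOREACH() scan can still move it to a less loaded one.
 * With an affinity mask allowing CPUs 1-3 and per-CPU run queue lengths
 * of { 2, 0, 1, 3 }, the thread lands on CPU 1, the allowed CPU with the
 * shortest queue.
 */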
void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
	cpuset_t tidlemsk;
	struct td_sched *ts;
	u_int cpu, cpuid;
	int forwarded = 0;
	int single_cpu = 0;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));

	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));


	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);

	/*
	 * If SMP is started and the thread is pinned or otherwise limited to
	 * a specific set of CPUs, queue the thread to a per-CPU run queue.
	 * Otherwise, queue the thread to the global run queue.
	 *
	 * If SMP has not yet been started we must use the global run queue
	 * as per-CPU state may not be initialized yet and we may crash if we
	 * try to access the per-CPU run queues.
	 */
	if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
	    ts->ts_flags & TSF_AFFINITY)) {
		if (td->td_pinned != 0)
			cpu = td->td_lastcpu;
		else if (td->td_flags & TDF_BOUND) {
			/* Find CPU from bound runq. */
			KASSERT(SKE_RUNQ_PCPU(ts),
			    ("sched_add: bound td_sched not on cpu runq"));
			cpu = ts->ts_runq - &runq_pcpu[0];
		} else
			/* Find a valid CPU for our cpuset */
			cpu = sched_pickcpu(td);
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
		    td);
		cpu = NOCPU;
		ts->ts_runq = &runq;
	}

	cpuid = PCPU_GET(cpuid);
	if (single_cpu && cpu != cpuid) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			tidlemsk = idle_cpus_mask;
			CPU_NAND(&tidlemsk, &hlt_cpus_mask);
			CPU_CLR(cpuid, &tidlemsk);

			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
			    ((flags & SRQ_INTR) == 0) &&
			    !CPU_EMPTY(&tidlemsk))
				forwarded = forward_wakeup(cpu);
		}

		if (!forwarded) {
			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
				return;
			else
				maybe_resched(td);
		}
	}

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, td, flags);
	if (cpu != NOCPU)
		runq_length[cpu]++;
}
#else /* SMP */
{
	struct td_sched *ts;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));
	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));

	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);
	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
	ts->ts_runq = &runq;

	/*
	 * If we are yielding (on the way out anyhow) or the thread
	 * being saved is US, then don't try be smart about preemption
	 * or kicking off another CPU as it won't help and may hinder.
	 * In the YIELDING case, we are about to run whoever is being
	 * put in the queue anyhow, and in the OURSELF case, we are
	 * putting ourself on the run queue which also only happens
	 * when we are about to yield.
	 */
	if ((flags & SRQ_YIELDING) == 0) {
		if (maybe_preempt(td))
			return;
	}
	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, td, flags);
	maybe_resched(td);
}
#endif /* SMP */

void
sched_rem(struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_rem: thread swapped out"));
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();
#ifdef SMP
	if (ts->ts_runq != &runq)
		runq_length[ts->ts_runq - runq_pcpu]--;
#endif
	runq_remove(ts->ts_runq, td);
	TD_SET_CAN_RUN(td);
}

/*
 * Select threads to run.  Note that running threads still consume a
 * slot.
 */
struct thread *
sched_choose(void)
{
	struct thread *td;
	struct runq *rq;

	mtx_assert(&sched_lock,  MA_OWNED);
#ifdef SMP
	struct thread *tdcpu;

	rq = &runq;
	td = runq_choose_fuzz(&runq, runq_fuzz);
	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (td == NULL ||
	    (tdcpu != NULL &&
	     tdcpu->td_priority < td->td_priority)) {
		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
		     PCPU_GET(cpuid));
		td = tdcpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
	}

#else
	rq = &runq;
	td = runq_choose(&runq);
#endif

	if (td) {
#ifdef SMP
		if (td == tdcpu)
			runq_length[PCPU_GET(cpuid)]--;
#endif
		runq_remove(rq, td);
		td->td_flags |= TDF_DIDRUN;

		KASSERT(td->td_flags & TDF_INMEM,
		    ("sched_choose: thread swapped out"));
		return (td);
	}
	return (PCPU_GET(idlethread));
}

void
sched_preempt(struct thread *td)
{
	thread_lock(td);
	if (td->td_critnest > 1)
		td->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
	thread_unlock(td);
}

void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		thread_lock(td);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		thread_unlock(td);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));

	ts = td->td_sched;

	td->td_flags |= TDF_BOUND;
#ifdef SMP
	ts->ts_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread* td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
	td->td_flags &= ~TDF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	return (td->td_flags & TDF_BOUND);
}

void
sched_relinquish(struct thread *td)
{
	thread_lock(td);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
}

int
sched_load(void)
{
	return (sched_tdcnt);
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	return (ts->ts_pctcpu);
}

void
sched_tick(int cnt)
{
}
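/*
 * Consumer sketch for sched_pctcpu(): the returned fixpt_t is scaled by
 * FSCALE (FSCALE == 1 << FSHIFT, from <sys/param.h>), so a caller that
 * wants a plain integer percentage would, for instance, convert it as:
 *
 *	fixpt_t pct = sched_pctcpu(td);
 *	int whole = (pct * 100 + FSCALE / 2) >> FSHIFT;	-- rounded percent
 *
 * Utilities such as ps(1) and top(1) derive their %CPU column from this
 * same value by dividing by FSCALE.
 */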
/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{
	struct pcpuidlestat *stat;

	stat = DPCPU_PTR(idlestat);
	for (;;) {
		mtx_assert(&Giant, MA_NOTOWNED);

		while (sched_runnable() == 0) {
			cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
			stat->idlecalls++;
		}

		mtx_lock_spin(&sched_lock);
		mi_switch(SW_VOL | SWT_IDLE, NULL);
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * A CPU is entering for the first time or a thread is exiting.
 */
void
sched_throw(struct thread *td)
{
	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	if (td == NULL) {
		mtx_lock_spin(&sched_lock);
		spinlock_exit();
		PCPU_SET(switchtime, cpu_ticks());
		PCPU_SET(switchticks, ticks);
	} else {
		lock_profile_release_lock(&sched_lock.lock_object);
		MPASS(td->td_lock == &sched_lock);
	}
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
	cpu_throw(td, choosethread());	/* doesn't return */
}
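/*
 * Call-site sketch for sched_throw() (illustrative, not compiled here):
 *
 *	sched_throw(NULL);	-- a CPU entering the scheduler for the
 *				   first time (e.g. an AP finishing its
 *				   bootstrap); there is no previous thread,
 *				   so the NULL branch above must set up
 *				   switchtime/switchticks itself.
 *	sched_throw(td);	-- the thread-exit path; td's lock is
 *				   already sched_lock, as the MPASS above
 *				   asserts, and cpu_throw() never returns.
 */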
void
sched_fork_exit(struct thread *td)
{

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td->td_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_lock = (uintptr_t)td;
	lock_profile_obtain_lock_success(&sched_lock.lock_object,
	    0, 0, __FILE__, __LINE__);
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}

char *
sched_tdname(struct thread *td)
{
#ifdef KTR
	struct td_sched *ts;

	ts = td->td_sched;
	if (ts->ts_name[0] == '\0')
		snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "%s tid %d", td->td_name, td->td_tid);
	return (ts->ts_name);
#else
	return (td->td_name);
#endif
}

#ifdef KTR
void
sched_clear_tdname(struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	ts->ts_name[0] = '\0';
}
#endif

void
sched_affinity(struct thread *td)
{
#ifdef SMP
	struct td_sched *ts;
	int cpu;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Set the TSF_AFFINITY flag if there is at least one CPU this
	 * thread can't run on.
	 */
	ts = td->td_sched;
	ts->ts_flags &= ~TSF_AFFINITY;
	CPU_FOREACH(cpu) {
		if (!THREAD_CAN_SCHED(td, cpu)) {
			ts->ts_flags |= TSF_AFFINITY;
			break;
		}
	}

	/*
	 * If this thread can run on all CPUs, nothing else to do.
	 */
	if (!(ts->ts_flags & TSF_AFFINITY))
		return;

	/* Pinned threads and bound threads should be left alone. */
	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
		return;

	switch (td->td_state) {
	case TDS_RUNQ:
		/*
		 * If we are on a per-CPU runqueue that is in the set,
		 * then nothing needs to be done.
		 */
		if (ts->ts_runq != &runq &&
		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
			return;

		/* Put this thread on a valid per-CPU runqueue. */
		sched_rem(td);
		sched_add(td, SRQ_BORING);
		break;
	case TDS_RUNNING:
		/*
		 * See if our current CPU is in the set.  If not, force a
		 * context switch.
		 */
		if (THREAD_CAN_SCHED(td, td->td_oncpu))
			return;

		td->td_flags |= TDF_NEEDRESCHED;
		if (td != curthread)
			ipi_cpu(cpu, IPI_AST);
		break;
	default:
		break;
	}
#endif
}
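/*
 * Affinity example for the code above: a thread restricted to CPUs
 * {0, 2} that is found on a per-CPU run queue for CPU 1 fails the
 * THREAD_CAN_SCHED() check and is requeued via sched_rem()/sched_add();
 * if it is instead RUNNING on CPU 1, it is marked TDF_NEEDRESCHED (and
 * an IPI_AST is sent when it is not curthread) so that it migrates to a
 * permitted CPU at its next context switch.
 */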