/* sched_4bsd.c revision 170293 */
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 170293 2007-06-04 23:50:30Z jeff $");

#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define NICE_WEIGHT		1	/* Priorities per nice level. */
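/*
 * Editorial worked example (not from the original source; values assumed
 * from the stock headers of this era: PRIO_MIN == -20, PRIO_MAX == 20,
 * RQ_PPQ == 4): on UP, ESTCPULIM clamps td_estcpu to
 *	8 * (1 * 40 - 4) + 8 - 1 = 295,
 * i.e. just below the point where the estcpu term in resetpriority()
 * would walk a timesharing priority out of its band.
 */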
/*
 * The schedulable entity that runs a context.
 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
	struct thread	*ts_thread;	/* (*) Active associated thread. */
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ts_rqindex;	/* (j) Run queue index. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	struct runq	*ts_runq;	/* runq the thread is currently on */
};

/* flags kept in td_flags */
#define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define TDF_EXIT	TDF_SCHED1	/* thread is being killed. */
#define TDF_BOUND	TDF_SCHED2

#define ts_flags	ts_thread->td_flags
#define TSF_DIDRUN	TDF_DIDRUN	/* thread actually ran. */
#define TSF_EXIT	TDF_EXIT	/* thread is being killed. */
#define TSF_BOUND	TDF_BOUND	/* stuck to one CPU */

#define SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

static struct td_sched td_sched0;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout roundrobin_callout;

static void	setup_runqs(void);
static void	roundrobin(void *arg);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	forward_wakeup(int cpunum);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");
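/*
 * Editorial example of the unit conversion above (assuming hz = 1000, so
 * tick == 1000 microseconds): the default sched_quantum is hz / 10 == 100
 * ticks, reported via kern.sched.quantum as 100000 microseconds; writing
 * 50000 stores 50000 / tick == 50 ticks and sets hogticks to 100 ticks.
 */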
#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
    &forward_wakeup_enabled, 0,
    "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
    &forward_wakeups_requested, 0,
    "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
    &forward_wakeups_delivered, 0,
    "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
    &forward_wakeup_use_mask, 0,
    "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
    &forward_wakeup_use_loop, 0,
    "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
    &forward_wakeup_use_single, 0,
    "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
    &forward_wakeup_use_htt, 0,
    "account for htt");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
    &sched_followon, 0,
    "allow threads to share a quantum");
#endif
"account for htt"); 221135051Sjulian 222134693Sjulian#endif 223164936Sjulian#if 0 224135051Sjulianstatic int sched_followon = 0; 225135051SjulianSYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW, 226135051Sjulian &sched_followon, 0, 227135051Sjulian "allow threads to share a quantum"); 228163709Sjb#endif 229135051Sjulian 230139317Sjeffstatic __inline void 231139317Sjeffsched_load_add(void) 232139317Sjeff{ 233139317Sjeff sched_tdcnt++; 234139317Sjeff CTR1(KTR_SCHED, "global load: %d", sched_tdcnt); 235139317Sjeff} 236139317Sjeff 237139317Sjeffstatic __inline void 238139317Sjeffsched_load_rem(void) 239139317Sjeff{ 240139317Sjeff sched_tdcnt--; 241139317Sjeff CTR1(KTR_SCHED, "global load: %d", sched_tdcnt); 242139317Sjeff} 243104964Sjeff/* 244104964Sjeff * Arrange to reschedule if necessary, taking the priorities and 245104964Sjeff * schedulers into account. 246104964Sjeff */ 247104964Sjeffstatic void 248104964Sjeffmaybe_resched(struct thread *td) 249104964Sjeff{ 250104964Sjeff 251170293Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 252134791Sjulian if (td->td_priority < curthread->td_priority) 253111032Sjulian curthread->td_flags |= TDF_NEEDRESCHED; 254104964Sjeff} 255104964Sjeff 256104964Sjeff/* 257104964Sjeff * Force switch among equal priority processes every 100ms. 258104964Sjeff * We don't actually need to force a context switch of the current process. 259104964Sjeff * The act of firing the event triggers a context switch to softclock() and 260104964Sjeff * then switching back out again which is equivalent to a preemption, thus 261104964Sjeff * no further work is needed on the local CPU. 262104964Sjeff */ 263104964Sjeff/* ARGSUSED */ 264104964Sjeffstatic void 265104964Sjeffroundrobin(void *arg) 266104964Sjeff{ 267104964Sjeff 268104964Sjeff#ifdef SMP 269104964Sjeff mtx_lock_spin(&sched_lock); 270104964Sjeff forward_roundrobin(); 271104964Sjeff mtx_unlock_spin(&sched_lock); 272104964Sjeff#endif 273104964Sjeff 274104964Sjeff callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL); 275104964Sjeff} 276104964Sjeff 277104964Sjeff/* 278104964Sjeff * Constants for digital decay and forget: 279163709Sjb * 90% of (td_estcpu) usage in 5 * loadav time 280164936Sjulian * 95% of (ts_pctcpu) usage in 60 seconds (load insensitive) 281104964Sjeff * Note that, as ps(1) mentions, this can let percentages 282104964Sjeff * total over 100% (I've seen 137.9% for 3 processes). 283104964Sjeff * 284163709Sjb * Note that schedclock() updates td_estcpu and p_cpticks asynchronously. 285104964Sjeff * 286163709Sjb * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds. 
/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		td_estcpu *= decay;
 * will compute
 *	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	  solving for factor,
 *	  ln(factor) =~ (-2.30/5*loadav), or
 *	  factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	      exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	  solving for power,
 *	  power*ln(b/(b+1)) =~ -2.30, or
 *	  power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
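/*
 * Editorial sketch (kept under #if 0, not part of the kernel build): a
 * standalone userland check of the decay table above.  With a load average
 * of 1.0 (ldavg == FSCALE) the per-second factor is 4096/6144 == 2/3, and
 * five to six iterations bring estcpu near 10% of its start, matching the
 * quoted power of 5.68.  The constants mirror the stock sys/param.h.
 */
#if 0
#include <stdio.h>

#define	FSHIFT	11
#define	FSCALE	(1 << FSHIFT)
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	long loadfac = loadfactor(1L * FSCALE);	/* load average 1.0 */
	long estcpu = 255;			/* maximum estcpu in 4.4BSD */
	int sec;

	/* 255 -> 170 -> 113 -> 75 -> 50 -> 33 (~13% after 5 seconds). */
	for (sec = 0; sec <= 6; sec++) {
		printf("after %d sec: estcpu = %ld\n", sec, estcpu);
		estcpu = decay_cpu(loadfac, estcpu);
	}
	return (0);
}
#endif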
/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
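/*
 * Editorial worked example (assuming the stock FSHIFT of 11, so FSCALE ==
 * 2048): ccpu is about 0.9512 * 2048 == 1948, so an idle thread's
 * ts_pctcpu is multiplied by 1948/2048 each second in schedcpu().  After
 * 60 seconds that is 0.9512**60 =~ 0.05, i.e. 95% of the old %cpu has
 * been forgotten, as promised above.
 */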
/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_SLOCK(p);
		/*
		 * Increment time in/out of memory.  We ignore overflow;
		 * with 16-bit int's (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory?  XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				ts->ts_flags &= ~TSF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TSF_DIDRUN */
			} else if (ts->ts_flags & TSF_DIDRUN) {
				awake = 1;
				ts->ts_flags &= ~TSF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 * Do it per td_sched, and add them up at the end?
			 * XXXKSE
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (td->td_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				td->td_slptime = 0;
			} else
				td->td_slptime++;
			if (td->td_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		} /* end of thread loop */
		PROC_SUNLOCK(p);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	register fixpt_t loadfac;
	register unsigned int newcpu;

	loadfac = loadfactor(averunnable.ldavg[0]);
	if (td->td_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		td->td_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --td->td_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}
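/*
 * Editorial worked example: a thread with td_estcpu == 243 sleeps and
 * wakes with td_slptime == 5 while the load average is 1.0 (per-second
 * decay factor 2/3).  After the decrement that cancels schedcpu()'s
 * increment, the loop above decays three times: 243 -> 162 -> 108 -> 72.
 */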
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}
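/*
 * Editorial sketch (kept under #if 0, not compiled): a userland
 * recomputation of the timeshare formula above, under assumed stock
 * constants of this era (PUSER == PRI_MIN_TIMESHARE == 160,
 * PRI_MAX_TIMESHARE == 223, PRIO_MIN == -20, UP estcpu weight 8).
 * A nice-0 thread with estcpu 80 maps to 160 + 10 + 20 == 190.
 */
#if 0
#include <stdio.h>

#define	PUSER			160
#define	PRI_MIN_TIMESHARE	160
#define	PRI_MAX_TIMESHARE	223
#define	PRIO_MIN		(-20)
#define	INV_ESTCPU_WEIGHT	8	/* UP INVERSE_ESTCPU_WEIGHT */
#define	NICE_WEIGHT		1

static unsigned int
user_prio(unsigned int estcpu, int nice)
{
	unsigned int p;

	p = PUSER + estcpu / INV_ESTCPU_WEIGHT +
	    NICE_WEIGHT * (nice - PRIO_MIN);
	if (p < PRI_MIN_TIMESHARE)		/* clamp into the band */
		p = PRI_MIN_TIMESHARE;
	if (p > PRI_MAX_TIMESHARE)
		p = PRI_MAX_TIMESHARE;
	return (p);
}

int
main(void)
{
	printf("estcpu 80,  nice 0  -> %u\n", user_prio(80, 0));   /* 190 */
	printf("estcpu 295, nice 20 -> %u\n", user_prio(295, 20)); /* 223 */
	return (0);
}
#endif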
/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&roundrobin_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	td_sched0.ts_thread = &thread0;
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}
}

/*
 * Charge child's scheduling CPU usage to parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    td, td->td_proc->p_comm, td->td_priority);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	mtx_lock_spin(&sched_lock);
	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	mtx_unlock_spin(&sched_lock);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	sched_newthread(childtd);
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}
/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{
	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) &&
	    td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}
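/*
 * Editorial example (not from the original source): if a timesharing
 * thread whose td_base_pri is 190 holds a mutex and a priority-80 thread
 * blocks on it, the turnstile code calls sched_lend_prio(td, 80), setting
 * TDF_BORROWING and raising td_priority to 80.  On release,
 * sched_unlend_prio() is called with the best priority still demanded by
 * any remaining waiter; if nothing more urgent than 190 remains, the
 * boost is dropped and the thread reverts to its base priority.
 */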
void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_flags |= TDF_UBORROWING;

	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else
		sched_lend_user_prio(td, prio);
}

void
sched_sleep(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptime = 0;
}
void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct td_sched *ts;
	struct proc *p;

	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_unlock(td);
	}

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_load_rem();

	if (newtd)
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		idle_cpus_mask &= ~PCPU_GET(cpumask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
		    ("trying to run inhibited thread"));
		newtd->td_sched->ts_flags |= TSF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
	}
	MPASS(newtd->td_lock == &sched_lock);

	if (td != newtd) {
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif

		/* I feel sleepy */
		cpu_switch(td, newtd, td->td_lock);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed.  All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		idle_cpus_mask |= PCPU_GET(cpumask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptime = 0;
	sched_add(td, SRQ_BORING);
}
#ifdef SMP
/* Enable HTT_2 if you have a 2-way HTT cpu. */
static int
forward_wakeup(int cpunum)
{
	cpumask_t map, me, dontuse;
	cpumask_t map2;
	struct pcpu *pc;
	cpumask_t id, map3;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	    (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);
	/*
	 * Don't bother if we should be doing it ourselves.
	 */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n",
				    map, map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}
	/* If we only allow a specific CPU, then mask off all the others */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),
		    ("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try to choose an idle die. */
		if (forward_wakeup_use_htt) {
			map2 = (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* Set only one bit. */
		if (forward_wakeup_use_single) {
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}
#endif
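/*
 * Editorial note on the two mask tricks in forward_wakeup():
 * "map & ((~map) + 1)" isolates the lowest set bit (map == 0x6 yields
 * 0x2), so exactly one idle CPU is signalled; "(map & (map >> 1)) &
 * 0x5555" keeps a bit only where both logical CPUs of a 2-way HTT pair
 * are idle (map == 0xc yields 0x4), preferring a wholly idle package.
 */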
#ifdef SMP
static void kick_other_cpu(int pri, int cpuid);

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu = pcpu_find(cpuid);
	int cpri = pcpu->pc_curthread->td_priority;

	if (idle_cpus_mask & pcpu->pc_cpumask) {
		forward_wakeups_delivered++;
		ipi_selected(pcpu->pc_cpumask, IPI_AST);
		return;
	}

	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_selected(pcpu->pc_cpumask, IPI_AST);
	return;
}
#endif /* SMP */
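/*
 * Editorial summary of kick_other_cpu()'s escalation, in decision order:
 * an idle target CPU is simply sent IPI_AST; if the queued thread is more
 * urgent than what the target is running (pri < cpri) and kernel
 * preemption is compiled in, IPI_PREEMPT is sent instead (limited to
 * pri <= PRI_MAX_ITHD unless FULL_PREEMPTION); failing that, the target's
 * current thread gets TDF_NEEDRESCHED plus an IPI_AST so it reschedules
 * at its next AST check.
 */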
void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
	struct td_sched *ts;
	int forwarded = 0;
	int cpu;
	int single_cpu = 0;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq",
		    ts, td, cpu);
	} else if ((ts)->ts_flags & TSF_BOUND) {
		/* Find CPU from bound runq */
		KASSERT(SKE_RUNQ_PCPU(ts),
		    ("sched_add: bound td_sched not on cpu runq"));
		cpu = ts->ts_runq - &runq_pcpu[0];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq",
		    ts, td, cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding td_sched:%p (td:%p) to gbl runq",
		    ts, td);
		cpu = NOCPU;
		ts->ts_runq = &runq;
	}

	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			cpumask_t me = PCPU_GET(cpumask);
			int idle = idle_cpus_mask & me;

			if (!idle && ((flags & SRQ_INTR) == 0) &&
			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
				forwarded = forward_wakeup(cpu);
		}

		if (!forwarded) {
			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
				return;
			else
				maybe_resched(td);
		}
	}

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, ts, flags);
}
#else /* SMP */
{
	struct td_sched *ts;
	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);
	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq",
	    ts, td);
	ts->ts_runq = &runq;

	/*
	 * If we are yielding (on the way out anyhow)
	 * or the thread being saved is US,
	 * then don't try to be smart about preemption
	 * or kicking off another CPU
	 * as it won't help and may hinder.
	 * In the YIELDING case, we are about to run whoever is
	 * being put in the queue anyhow, and in the
	 * OURSELF case, we are putting ourselves on the run queue
	 * which also only happens when we are about to yield.
	 */
	if ((flags & SRQ_YIELDING) == 0) {
		if (maybe_preempt(td))
			return;
	}
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, ts, flags);
	maybe_resched(td);
}
#endif /* SMP */
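/*
 * Editorial note: the two single-CPU cases in sched_add() are distinct
 * mechanisms.  td_pinned is the sched_pin() nesting count and keeps a
 * thread temporarily on td_lastcpu's queue, while TSF_BOUND is set by
 * sched_bind() and pins ts_runq to one per-CPU queue until
 * sched_unbind() is called.
 */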
void
sched_rem(struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_rem: process swapped out"));
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	runq_remove(ts->ts_runq, ts);
	TD_SET_CAN_RUN(td);
}

/*
 * Select threads to run.
 * Notice that the running threads still consume a slot.
 */
struct thread *
sched_choose(void)
{
	struct td_sched *ts;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
	struct td_sched *kecpu;

	rq = &runq;
	ts = runq_choose(&runq);
	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (ts == NULL ||
	    (kecpu != NULL &&
	     kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
		CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
		    PCPU_GET(cpuid));
		ts = kecpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", ts);
	}

#else
	rq = &runq;
	ts = runq_choose(&runq);
#endif

	if (ts) {
		runq_remove(rq, ts);
		ts->ts_flags |= TSF_DIDRUN;

		KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
		    ("sched_choose: process swapped out"));
		return (ts->ts_thread);
	}
	return (PCPU_GET(idlethread));
}
void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		thread_lock(td);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		thread_unlock(td);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("sched_bind: cannot bind non-running thread"));

	ts = td->td_sched;

	ts->ts_flags |= TSF_BOUND;
#ifdef SMP
	ts->ts_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_sched->ts_flags &= ~TSF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	return (td->td_sched->ts_flags & TSF_BOUND);
}

void
sched_relinquish(struct thread *td)
{
	thread_lock(td);
	if (td->td_pri_class == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	SCHED_STAT_INC(switch_relinquish);
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
}

int
sched_load(void)
{
	return (sched_tdcnt);
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	return (ts->ts_pctcpu);
}
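/*
 * Editorial example (assuming the stock FSHIFT of 11, FSCALE == 2048):
 * ts_pctcpu is an FSCALE-scaled fraction of one CPU, so a value of 1024
 * means 50%; consumers such as ps(1) render it roughly as
 * pctcpu * 100 / FSCALE.
 */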
void
sched_tick(void)
{
}

/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	for (;;) {
		mtx_assert(&Giant, MA_NOTOWNED);

		while (sched_runnable() == 0)
			cpu_idle();

		mtx_lock_spin(&sched_lock);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * A CPU is entering for the first time or a thread is exiting.
 */
void
sched_throw(struct thread *td)
{
	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	if (td == NULL) {
		mtx_lock_spin(&sched_lock);
		spinlock_exit();
	} else {
		MPASS(td->td_lock == &sched_lock);
	}
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
	PCPU_SET(switchtime, cpu_ticks());
	PCPU_SET(switchticks, ticks);
	cpu_throw(td, choosethread());	/* doesn't return */
}

void
sched_fork_exit(struct thread *ctd)
{
	struct thread *td;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	ctd->td_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_lock = (uintptr_t)ctd;
	THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	thread_unlock(ctd);
}

#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"