sched_4bsd.c revision 132372
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 132372 2004-07-18 23:36:13Z julian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

#define	KTR_4BSD	0x0

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT	1	/* Priorities per nice level. */
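/*
 * Worked example of the clamp above: on UP, and assuming the stock
 * header values of this era (PRIO_MIN == -20, PRIO_MAX == 20,
 * RQ_PPQ == 4), ESTCPULIM(e) evaluates to
 *	min(e, 8 * (1 * 40 - 4) + 8 - 1) == min(e, 295),
 * so kg_estcpu / INVERSE_ESTCPU_WEIGHT contributes at most
 * 295 / 8 == 36 priority steps in resetpriority() below, which keeps
 * the computed user priority within the timeshare range.
 */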
struct ke_sched {
	int		ske_cpticks;	/* (j) Ticks of cpu time. */
	struct runq	*ske_runq;	/* runq the kse is currently on */
};
#define	ke_runq		ke_sched->ske_runq
#define	KEF_BOUND	KEF_SCHED1

#define	SKE_RUNQ_PCPU(ke)						\
    ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)

/*
 * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
 * cpus.
 */
#define	KSE_CAN_MIGRATE(ke)						\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
static struct ke_sched ke_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout roundrobin_callout;

static void	setup_runqs(void);
static void	roundrobin(void *arg);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;
#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "SCHED");

#define	SCHD_NAME	"4bsd"
#define	SCHD_NAME_LEN	4
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, SCHD_NAME,
    SCHD_NAME_LEN, "System is using the 4BSD scheduler");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority && curthread->td_kse)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}
/*
 * Constants for digital decay and forget:
 *	90% of (kg_estcpu) usage in 5 * loadav time
 *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		kg_estcpu *= decay;
 * will compute
 *	kg_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
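/*
 * Standalone sanity check of the power table above (a userland sketch,
 * not part of the kernel build; compile separately with -lm): solve
 * power * ln(b/(b+1)) = ln(.1) for each load average.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double b, power;
	int loadav;

	for (loadav = 1; loadav <= 4; loadav++) {
		b = 2.0 * loadav;	/* b = 2 * loadavg, as above */
		power = log(0.1) / log(b / (b + 1.0));
		printf("loadav %d: power %.2f\n", loadav, power);
	}
	/* Prints 5.68, 10.32, 14.94 and 19.55, matching the table. */
	return (0);
}
#endif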
/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Prevent state changes and protect run queue.
		 */
		mtx_lock_spin(&sched_lock);
		/*
		 * Increment time in/out of memory.  We ignore overflow; with
		 * 16-bit int's (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment sleep time (if sleeping).  We
				 * ignore overflow, as above.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(ke->ke_thread))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * ke_pctcpu is only for ps and ttyinfo().
				 * Do it per kse, and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
				    FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_sched->ske_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_sched->ske_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ke->ke_sched->ske_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else
				kg->kg_slptime++;
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (td->td_priority >= PUSER) {
					sched_prio(td, kg->kg_user_pri);
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}
/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{
	int nowake;

	for (;;) {
		schedcpu();
		tsleep(&nowake, curthread->td_priority, "-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay kg_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register fixpt_t loadfac;
	register unsigned int newcpu;

	loadfac = loadfactor(averunnable.ldavg[0]);
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		newcpu = kg->kg_estcpu;
		kg->kg_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(kg);
}
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);			/* XXXKSE silly */
	}
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&roundrobin_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);

	/* Account for thread0. */
	sched_tdcnt++;
}

/* External interfaces start here */
int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}
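/*
 * Worked example: with the default SCHED_QUANTUM of hz / 10 and
 * hz == 100, sched_quantum is 10 ticks (100ms) and hogticks is 20
 * ticks.  The kern.sched.quantum sysctl above reports this value as
 * sched_quantum * tick == 100000 microseconds; new settings are
 * rounded down to a whole number of ticks, and anything below one
 * tick is rejected with EINVAL.
 */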
/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (kg_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time kg_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	ke = td->td_kse;

	ke->ke_sched->ske_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}
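/*
 * Worked example: sched_clock() runs once per statclock tick, so a
 * CPU-bound thread gains one kg_estcpu per tick and, on UP
 * (INVERSE_ESTCPU_WEIGHT == 8), loses one priority step every 8
 * ticks -- every 62.5ms at stathz == 128 -- until ESTCPULIM clamps
 * kg_estcpu.  Assuming the priority ranges of this era (PUSER ==
 * PRI_MIN_TIMESHARE == 160, PRI_MAX_TIMESHARE == 223), the min/max
 * clamp in resetpriority() keeps the result inside the timeshare
 * band even at the worst combination of estcpu and nice.
 */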
/*
 * Charge a child's scheduling CPU usage to the parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait since process estcpu is sum of
 * all ksegrps, this is strictly as expected.  Assume that the child process
 * aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct proc *p, struct thread *td)
{
	sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_kse(struct kse *ke, struct thread *child)
{
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
{

	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
		sched_tdcnt--;
}

void
sched_fork(struct thread *td, struct proc *p1)
{
	sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
	sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct thread *td, struct kse *child)
{
	child->ke_sched->ske_cpticks = 0;
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	child->kg_estcpu = td->td_ksegrp->kg_estcpu;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		resetpriority(kg);
	}
}

void
sched_class(struct ksegrp *kg, int class)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_sleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	td->td_ksegrp->kg_slptime = 0;
	td->td_base_pri = td->td_priority;
}
void
sched_switch(struct thread *td, struct thread *newtd)
{
	struct kse *ke;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_THREAD), ("sched_switch: kse state?"));

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_tdcnt--;
	if (newtd != NULL && (newtd->td_proc->p_flag & P_NOLOAD) == 0)
		sched_tdcnt++;
	td->td_lastcpu = td->td_oncpu;
	td->td_last_kse = ke;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_pflags &= ~TDP_OWEPREEMPT;
	td->td_oncpu = NOCPU;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td == PCPU_GET(idlethread))
		TD_SET_CAN_RUN(td);
	else if (TD_IS_RUNNING(td)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (p->p_flag & P_SA) {
		/*
		 * We will not be on the run queue.  So we must be
		 * sleeping or similar.  As it's available,
		 * someone else can use the KSE if they need it.
		 */
		kse_reassign(ke);
	}
	if (newtd == NULL)
		newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
	setrunqueue(td);
}

void
sched_add(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));

#ifdef SMP
	/*
	 * Only try to preempt if the thread is unpinned or pinned to the
	 * current CPU.
	 */
	if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
#endif
	if (maybe_preempt(td))
		return;
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

#ifdef SMP
	if (KSE_CAN_MIGRATE(ke)) {
		CTR1(KTR_4BSD, "adding kse:%p to gbl runq", ke);
		ke->ke_runq = &runq;
	} else {
		CTR1(KTR_4BSD, "adding kse:%p to pcpu runq", ke);
		if (!SKE_RUNQ_PCPU(ke))
			ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
	}
#else
	ke->ke_runq = &runq;
#endif
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_tdcnt++;
	runq_add(ke->ke_runq, ke);
	maybe_resched(td);
}

void
sched_rem(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_rem: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_tdcnt--;
	runq_remove(ke->ke_sched->ske_runq, ke);

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}
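/*
 * sched_choose() below picks the better of two queue heads: on SMP it
 * takes the per-CPU queue's head only when its priority is strictly
 * better (numerically lower) than the global queue's head, or when
 * the global queue is empty; ties go to the global queue.
 */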
struct kse *
sched_choose(void)
{
	struct kse *ke;
	struct runq *rq;

#ifdef SMP
	struct kse *kecpu;

	rq = &runq;
	ke = runq_choose(&runq);
	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (ke == NULL ||
	    (kecpu != NULL &&
	     kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
		CTR2(KTR_4BSD, "choosing kse %p from pcpu runq %d", kecpu,
		     PCPU_GET(cpuid));
		ke = kecpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_4BSD, "choosing kse %p from main runq", ke);
	}

#else
	rq = &runq;
	ke = runq_choose(&runq);
#endif

	if (ke != NULL) {
		runq_remove(rq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT((ke->ke_thread != NULL),
		    ("sched_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("sched_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("sched_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("sched_bind: cannot bind non-running thread"));

	ke = td->td_kse;

	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	ke->ke_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	ke->ke_state = KES_THREAD;

	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_load(void)
{
	return (sched_tdcnt);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}
int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	if (ke == NULL)
		ke = td->td_last_kse;
	if (ke)
		return (ke->ke_pctcpu);

	return (0);
}
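/*
 * Standalone sanity check of the ke_pctcpu decay (a userland sketch,
 * not part of the kernel build; compile separately with -lm): one
 * ccpu multiplication per schedcpu() run, i.e. per second, leaves
 * exp(-60/20) =~ 4.98% of the estimate after a minute -- the "decay
 * 95% of ke_pctcpu in 60 seconds" behaviour noted above.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double pctcpu = 1.0;	/* start from 100% CPU */
	int sec;

	for (sec = 0; sec < 60; sec++)
		pctcpu *= exp(-1.0 / 20.0);	/* ccpu */
	printf("left after 60 seconds: %.2f%%\n", pctcpu * 100.0);
	return (0);
}
#endif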