/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 124957 2004-01-25 08:21:46Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

#define	KTR_4BSD	0x0

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
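
/*
 * Worked example of the clamp above, assuming the stock values
 * PRIO_MIN = -20 and PRIO_MAX = 20 from <sys/resource.h> and RQ_PPQ = 4
 * from <sys/runq.h>, on a UP kernel (INVERSE_ESTCPU_WEIGHT = 8):
 *
 *	8 * (1 * (20 - (-20)) - 4) + 8 - 1 = 8 * 36 + 7 = 295
 *
 * so ESTCPULIM(e) caps kg_estcpu at 295, the largest value that still
 * maps onto the timeshare priority range in resetpriority() below.
 */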

struct ke_sched {
	int		ske_cpticks;	/* (j) Ticks of cpu time. */
	struct runq	*ske_runq;	/* runq the kse is currently on */
};
#define	ke_runq		ke_sched->ske_runq
#define	KEF_BOUND	KEF_SCHED1

#define	SKE_RUNQ_PCPU(ke)						\
    ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)

/*
 * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
 * cpus.  Currently ithread cpu binding is disabled on x86 due to a
 * bug in the Xeon round-robin interrupt delivery that delivers all
 * interrupts to cpu 0.
 */
#ifdef __i386__
#define	KSE_CAN_MIGRATE(ke)						\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#else
#define	KSE_CAN_MIGRATE(ke)						\
    (PRI_BASE((ke)->ke_ksegrp->kg_pri_class) != PRI_ITHD &&		\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0))
#endif
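
/*
 * Illustration of the two predicates above: a kse whose thread is bound
 * with sched_bind() carries KEF_BOUND, and one whose thread is pinned
 * (td_pinned != 0, e.g. via sched_pin()) may not move either; for both,
 * KSE_CAN_MIGRATE() is false, sched_add() places them on a per-cpu run
 * queue, and SKE_RUNQ_PCPU() subsequently reports true.
 */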

static struct ke_sched ke_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout roundrobin_callout;

static void	setup_runqs(void);
static void	roundrobin(void *arg);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority && curthread->td_kse)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}
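
/*
 * Quantum arithmetic, assuming hz = 1000 (tick = 1000 microseconds):
 * SCHED_QUANTUM is hz / 10 = 100 ticks, i.e. the 100ms round-robin
 * interval mentioned above, and reading kern.quantum reports
 * sched_quantum * tick = 100000 microseconds.  sysctl_kern_quantum()
 * rejects any new value smaller than one tick with EINVAL.
 */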

/*
 * Constants for digital decay and forget:
 *	90% of (kg_estcpu) usage in 5 * loadav time
 *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		kg_estcpu *= decay;
 * will compute
 *	kg_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ...
 *	therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	(-1 < x < 1)
 *	therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
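
/*
 * A worked instance of decay_cpu(), assuming FSHIFT = 11 (so FSCALE =
 * 2048) and a load average of exactly 1.0 (ldavg[0] == FSCALE):
 *
 *	loadfac = 2 * FSCALE = 4096
 *	decay_cpu(loadfac, cpu) = (4096 * cpu) / (4096 + 2048) = cpu * 2 / 3
 *
 * Each schedcpu() pass therefore multiplies kg_estcpu by 2/3, and after
 * five passes (5 * loadavg seconds) only (2/3)**5 =~ 0.13 of the original
 * value remains, close to the 10% target derived above.
 */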

/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
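
/*
 * Sanity check of the constant above: schedcpu() multiplies ke_pctcpu by
 * ccpu / FSCALE = exp(-1/20) once per second, so a kse that stops running
 * retains exp(-60/20) = exp(-3) =~ 0.05 of its ke_pctcpu after 60
 * seconds; that is, 95% of the usage has been forgotten, matching the
 * "decay 95% in 60 seconds" comment.
 */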

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Prevent state changes and protect run queue.
		 */
		mtx_lock_spin(&sched_lock);
		/*
		 * Increment time in/out of memory.  We ignore overflow; with
		 * 16-bit int's (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment sleep time (if sleeping).  We
				 * ignore overflow, as above.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(ke->ke_thread))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * ke_pctcpu is only for ps and ttyinfo().
				 * Do it per kse, and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
				    FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_sched->ske_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_sched->ske_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ke->ke_sched->ske_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else
				kg->kg_slptime++;
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (td->td_priority >= PUSER) {
					sched_prio(td, kg->kg_user_pri);
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{
	int nowake;

	for (;;) {
		schedcpu();
		tsleep(&nowake, curthread->td_priority, "-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay kg_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register fixpt_t loadfac;
	register unsigned int newcpu;

	loadfac = loadfactor(averunnable.ldavg[0]);
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		newcpu = kg->kg_estcpu;
		kg->kg_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(kg);
}
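
/*
 * Example of the decay performed by updatepri(), assuming a load average
 * of 1.0 (so each decay_cpu() step multiplies by 2/3, as worked out
 * above): a ksegrp with kg_estcpu = 255 whose kg_slptime reached 5 gets
 * three decay steps, 255 * (2/3)**3 =~ 75, while one with kg_slptime
 * beyond 5 * loadfac has kg_estcpu zeroed outright.
 */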

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);		/* XXXKSE silly */
	}
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&roundrobin_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
}

/* External interfaces start here */
int
sched_runnable(void)
{
#ifdef SMP
	return (runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]));
#else
	return (runq_check(&runq));
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of a
 * process gets worse as it accumulates CPU time.  The cpu usage estimator
 * (kg_estcpu) is increased here.  resetpriority() will compute a different
 * priority each time kg_estcpu increases by INVERSE_ESTCPU_WEIGHT (until
 * MAXPRI is reached).  The cpu usage estimator ramps up quite quickly when
 * the process is running (linearly), and decays away exponentially, at a
 * rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among
 * other processes.
 */
void
sched_clock(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	ke = td->td_kse;

	ke->ke_sched->ske_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}
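
/*
 * Putting sched_clock() and resetpriority() together, with the assumed
 * stock constants PUSER = 160, PRIO_MIN = -20 and a UP kernel
 * (INVERSE_ESTCPU_WEIGHT = 8): kg_estcpu rises by one per statclock tick
 * while the process runs, and every 8 ticks the user priority is
 * re-derived; e.g. kg_estcpu = 80 and nice +5 yield
 *
 *	newpriority = 160 + 80 / 8 + 1 * (5 - (-20)) = 195
 *
 * before clamping to [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 */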

/*
 * Charge child's scheduling CPU usage to parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait; since process estcpu is the
 * sum over all ksegrps, this is strictly as expected.  Assume that the
 * child process aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct proc *p, struct proc *p1)
{
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_fork(struct proc *p, struct proc *p1)
{
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
	child->ke_sched->ske_cpticks = 0;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	child->kg_estcpu = kg->kg_estcpu;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_nice(struct ksegrp *kg, int nice)
{

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_nice = nice;
	resetpriority(kg);
}

void
sched_class(struct ksegrp *kg, int class)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_pri_class = class;
}
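
/*
 * Illustration of the estcpu bookkeeping above: sched_fork_ksegrp()
 * starts a child at its parent's kg_estcpu, so fork() cannot be used to
 * shed accumulated CPU usage, and sched_exit_ksegrp() folds a dying
 * child's kg_estcpu back into the parent, clamped by ESTCPULIM().  E.g.
 * a parent at kg_estcpu = 200 reaping a child at 150 ends up at
 * ESTCPULIM(350) = 295 under the stock parameters worked out earlier.
 */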

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_sleep(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = prio;
}
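
/*
 * Timeline sketch of the kg_slptime bookkeeping (schedcpu() ticks once
 * per second):
 *
 *	t = 0	 sched_sleep()	 kg_slptime = 0
 *	t = 1s	 schedcpu()	 kg_slptime = 1	 (estcpu still decayed)
 *	t = 2s	 schedcpu()	 kg_slptime = 2	 (group now skipped)
 *	t = 2.5s sched_wakeup()	 kg_slptime > 1, so updatepri() runs
 */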

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	struct kse *ke;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_THREAD), ("sched_switch: kse state?"));

	td->td_lastcpu = td->td_oncpu;
	td->td_last_kse = ke;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.
	 */
	if (TD_IS_RUNNING(td)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (p->p_flag & P_SA) {
		/*
		 * We will not be on the run queue.  So we must be
		 * sleeping or similar.  As it's available,
		 * someone else can use the KSE if they need it.
		 */
		kse_reassign(ke);
	}
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
	setrunqueue(td);
	maybe_resched(td);
}

void
sched_add(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

#ifdef SMP
	if (KSE_CAN_MIGRATE(ke)) {
		CTR1(KTR_4BSD, "adding kse:%p to gbl runq", ke);
		ke->ke_runq = &runq;
	} else {
		CTR1(KTR_4BSD, "adding kse:%p to pcpu runq", ke);
		if (!SKE_RUNQ_PCPU(ke))
			ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
	}
#else
	ke->ke_runq = &runq;
#endif

	runq_add(ke->ke_runq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_rem: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);

	runq_remove(ke->ke_sched->ske_runq, ke);

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}
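
/*
 * Example of the SMP queue selection done by sched_add() above and
 * sched_choose() below: an unbound, unpinned kse goes on the global runq
 * and may be picked up by any cpu, while a kse bound to cpu 2 sits in
 * runq_pcpu[2] and is only returned by sched_choose() on cpu 2, and then
 * only if it beats (has a numerically lower td_priority than) the best
 * kse on the global queue.
 */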

struct kse *
sched_choose(void)
{
	struct kse *ke;
	struct runq *rq;

#ifdef SMP
	struct kse *kecpu;

	rq = &runq;
	ke = runq_choose(&runq);
	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (ke == NULL ||
	    (kecpu != NULL &&
	    kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
		CTR2(KTR_4BSD, "choosing kse %p from pcpu runq %d", kecpu,
		    PCPU_GET(cpuid));
		ke = kecpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_4BSD, "choosing kse %p from main runq", ke);
	}
#else
	rq = &runq;
	ke = runq_choose(&runq);
#endif

	if (ke != NULL) {
		runq_remove(rq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT((ke->ke_thread != NULL),
		    ("sched_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("sched_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("sched_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("sched_bind: cannot bind non-running thread"));

	ke = td->td_kse;

	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	ke->ke_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	ke->ke_state = KES_THREAD;

	mi_switch(SW_VOL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	if (ke == NULL)
		ke = td->td_last_kse;
	if (ke != NULL)
		return (ke->ke_pctcpu);

	return (0);
}
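
/*
 * Note on consuming sched_pctcpu(): the result is a fixpt_t scaled by
 * FSCALE, so a userland tool would convert it to a percentage as roughly
 * 100 * pctcpu / FSCALE (this is assumed to match what ps(1) does via
 * its fixed-point helpers); per-kse values can total over 100%, as the
 * decay comment above notes.
 */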