sched_4bsd.c revision 107126
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/sched_4bsd.c 107126 2002-11-21 01:22:38Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

struct ke_sched *kse0_sched = NULL;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void	roundrobin(void *arg);
static void	schedcpu(void *arg);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);

SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

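/*
 * Worked example of the quantum plumbing above, assuming the common
 * hz = 100: "tick" is 1000000 / hz = 10000 microseconds per clock tick,
 * so the default quantum of SCHED_QUANTUM = hz / 10 = 10 ticks reads
 * back through the kern.quantum sysctl as 10 * 10000 = 100000 us, i.e.
 * the 100ms roundrobin interval described above.
 */
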
/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

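/*
 * Worked example of the table above: with loadav = 2 the decay factor is
 * decay = (2 * 2) / (2 * 2 + 1) = 4/5 = 0.8, and 0.8 ** 10.32 =~ .1, so
 * roughly ten once-a-second decay_cpu() passes forget 90% of accumulated
 * estcpu.  Note that loadfactor() and decay_cpu() do this arithmetic in
 * fixed point: the ldavg[] entries are scaled by FSCALE, so the factor
 * above actually appears as (4 * FSCALE) / (4 * FSCALE + FSCALE).
 */
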
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

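/*
 * Worked check of the "95% in 60 seconds" claim: schedcpu() below scales
 * each ke_pctcpu by ccpu once per second, so a kse that stays idle for a
 * minute retains ccpu ** 60 = exp(-60/20) = exp(-3) =~ 0.05 of its %cpu
 * estimate, i.e. 95% of it has decayed away.
 */
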
/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int realstathz;
	int awake;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment time in/out of memory and sleep
				 * time (if sleeping).  We ignore overflow;
				 * with 16-bit int's (remember them?)
				 * overflow takes 45 days.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(ke->ke_thread))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * pctcpu is only for ps?
				 * Do it per kse.. and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu
				    = (ke->ke_pctcpu * ccpu) >> FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100) ?
				    ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks * FSCALE / realstathz)) >>
				    FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else {
				kg->kg_slptime++;
			}
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (td->td_priority >= PUSER) {
					sched_prio(td, kg->kg_user_pri);
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
	wakeup(&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register unsigned int newcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	newcpu = kg->kg_estcpu;
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		kg->kg_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(kg);
}

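/*
 * Worked example of the decay loop above, assuming a steady loadavg of
 * 1.0 (so loadfac == 2 * FSCALE): each iteration computes
 *	newcpu = (2 * FSCALE * newcpu) / (3 * FSCALE),
 * i.e. multiplies newcpu by 2/3 with integer truncation, so a maximal
 * kg_estcpu of 255 decays 255 -> 170 -> 113 -> ... -> 1 -> 0 after 13
 * decay steps, roughly 13 slept seconds.
 */
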
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);			/* XXXKSE silly */
	}
	mtx_unlock_spin(&sched_lock);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}

/* External interfaces start here */
int
sched_runnable(void)
{
	return runq_check(&runq);
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;

	KASSERT((td != NULL), ("schedclock: null thread pointer"));
	ke = td->td_kse;
	kg = td->td_ksegrp;
	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}

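/*
 * Worked example tying sched_clock() to resetpriority(): a continuously
 * running timeshare thread gains one kg_estcpu per stat clock tick, so
 * after every INVERSE_ESTCPU_WEIGHT ticks its computed user priority,
 *	PUSER + kg_estcpu / INVERSE_ESTCPU_WEIGHT +
 *	    NICE_WEIGHT * (kg_nice - PRIO_MIN),
 * worsens by one step, until ESTCPULIM caps kg_estcpu or the result is
 * clamped at PRI_MAX_TIMESHARE.
 */
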
/*
 * Charge the child's scheduling cpu usage to the parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait; since process estcpu is the
 * sum over all ksegrps, this is strictly as expected.  Assume that the
 * child process aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct ksegrp *kg, struct ksegrp *child)
{
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_fork(struct ksegrp *kg, struct ksegrp *child)
{
	/*
	 * set priority of child to be that of parent.
	 * XXXKSE this needs redefining..
	 */
	child->kg_estcpu = kg->kg_estcpu;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	kg->kg_nice = nice;
	resetpriority(kg);
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_sleep(struct thread *td, u_char prio)
{
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = prio;
}

void
sched_switchin(struct thread *td)
{
	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));

	td->td_lastcpu = ke->ke_oncpu;
	td->td_last_kse = ke;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags &= ~KEF_NEEDRESCHED;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.
	 */
	if (TD_IS_RUNNING(td)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (p->p_flag & P_KSES) {
		/*
		 * We will not be on the run queue.  So we must be
		 * sleeping or similar.  As it's available,
		 * someone else can use the KSE if they need it.
		 * (If bound LOANING can still occur).
		 */
		kse_reassign(ke);
	}
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
	setrunqueue(td);
	maybe_resched(td);
}

void
sched_add(struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("runq_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("runq_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_add: process swapped out"));
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(&runq, ke);
}

void
sched_rem(struct kse *ke)
{
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);

	runq_remove(&runq, ke);
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}

struct kse *
sched_choose(void)
{
	struct kse *ke;

	ke = runq_choose(&runq);

	if (ke != NULL) {
		runq_remove(&runq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT((ke->ke_thread != NULL),
		    ("runq_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("runq_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("runq_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread));
}