sched_4bsd.c revision 107126
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/sched_4bsd.c 107126 2002-11-21 01:22:38Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

struct ke_sched *kse0_sched = NULL;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int      sched_quantum;  /* Roundrobin scheduling quantum in ticks. */
#define SCHED_QUANTUM   (hz / 10);      /* Default sched quantum */

static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void     roundrobin(void *arg);
static void     schedcpu(void *arg);
static void     sched_setup(void *dummy);
static void     maybe_resched(struct thread *td);
static void     updatepri(struct ksegrp *kg);
static void     resetpriority(struct ksegrp *kg);

SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

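/*
 * Worked example of the quantum arithmetic above (illustrative only; the
 * numbers assume the historical default of hz = 100, so tick = 10000 us):
 *
 *      sched_quantum = SCHED_QUANTUM = hz / 10  = 10 ticks  (~100 ms)
 *      hogticks      = 2 * sched_quantum        = 20 ticks  (~200 ms)
 *      kern.quantum  = sched_quantum * tick     = 100000 us
 *
 * The sysctl handler below converts between ticks and microseconds in the
 * same way and rejects any value smaller than one tick with EINVAL.
 */
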
/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;

        new_val = sched_quantum * tick;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < tick)
                return (EINVAL);
        sched_quantum = new_val / tick;
        hogticks = 2 * sched_quantum;
        return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof sched_quantum, sysctl_kern_quantum, "I",
        "Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

        mtx_assert(&sched_lock, MA_OWNED);
        if (td->td_priority < curthread->td_priority)
                curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
        mtx_lock_spin(&sched_lock);
        forward_roundrobin();
        mtx_unlock_spin(&sched_lock);
#endif

        callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *      90% of (p_estcpu) usage in 5 * loadav time
 *      95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *      for (i = 0; i < (5 * loadavg); i++)
 *              p_estcpu *= decay;
 * will compute
 *      p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *      decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *      decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *      decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *      b = 2 * loadavg
 * then
 *      decay = b / (b + 1)
 *
 * We now need to prove two things:
 *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *      For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *      For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *      ln(.1) =~ -2.30
 *
 * Proof of (1):
 *      Solve (factor)**(power) =~ .1 given power (5*loadav):
 *              solving for factor,
 *              ln(factor) =~ (-2.30/5*loadav), or
 *              factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *                  exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *      Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *              solving for power,
 *              power*ln(b/(b+1)) =~ -2.30, or
 *              power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav)      (2 * (loadav))
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))

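#if 0
/*
 * Illustrative sketch, not compiled into the kernel: iterate decay_cpu()
 * for an assumed load average of 1 and watch roughly 90% of the estimate
 * disappear after about 5-6 steps, matching the "Actual power values"
 * table above (power 5.68 for loadav 1).  All names here are local to
 * the example.
 */
static unsigned int
decay_cpu_example(void)
{
        fixpt_t loadfac = loadfactor(1 * FSCALE);       /* loadav == 1.0 */
        unsigned int estcpu = 255;                      /* worst-case estcpu */
        int i;

        /* decay factor is loadfac / (loadfac + FSCALE) == 2/3 here */
        for (i = 0; i < 6; i++)
                estcpu = decay_cpu(loadfac, estcpu);
        return (estcpu);        /* 255 -> 170 -> 113 -> 75 -> 50 -> 33 -> 22 */
}
#endif
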
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE;        /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT      11

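/*
 * Worked numbers for the two constants above (illustrative, assuming the
 * stock FSHIFT of 11 so that FSCALE == 2048): schedcpu() below runs once
 * per second and scales ke_pctcpu by ccpu = exp(-1/20) ~= 0.9512 each time,
 * so after 60 seconds the old contribution is exp(-60/20) = exp(-3) ~=
 * 0.0498, i.e. about 95% has decayed away.  CCPU_SHIFT == 11 works because
 * (1 - exp(-1/20)) * FSCALE ~= 0.0488 * 2048 ~= 100, which is what lets the
 * realstathz == 100 fast path add cpticks << (FSHIFT - CCPU_SHIFT) in place
 * of the exact (FSCALE - ccpu) form.
 */
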
/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        struct thread *td;
        struct proc *p;
        struct kse *ke;
        struct ksegrp *kg;
        int realstathz;
        int awake;

        realstathz = stathz ? stathz : hz;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                mtx_lock_spin(&sched_lock);
                p->p_swtime++;
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        awake = 0;
                        FOREACH_KSE_IN_GROUP(kg, ke) {
                                /*
                                 * Increment time in/out of memory and sleep
                                 * time (if sleeping).  We ignore overflow;
                                 * with 16-bit int's (remember them?)
                                 * overflow takes 45 days.
                                 */
                                /*
                                 * The kse slptimes are not touched in wakeup
                                 * because the thread may not HAVE a KSE.
                                 */
                                if (ke->ke_state == KES_ONRUNQ) {
                                        awake = 1;
                                        ke->ke_flags &= ~KEF_DIDRUN;
                                } else if ((ke->ke_state == KES_THREAD) &&
                                    (TD_IS_RUNNING(ke->ke_thread))) {
                                        awake = 1;
                                        /* Do not clear KEF_DIDRUN */
                                } else if (ke->ke_flags & KEF_DIDRUN) {
                                        awake = 1;
                                        ke->ke_flags &= ~KEF_DIDRUN;
                                }

                                /*
                                 * pctcpu is only for ps?
                                 * Do it per kse.. and add them up at the end?
                                 * XXXKSE
                                 */
                                ke->ke_pctcpu
                                    = (ke->ke_pctcpu * ccpu) >> FSHIFT;
                                /*
                                 * If the kse has been idle the entire second,
                                 * stop recalculating its priority until
                                 * it wakes up.
                                 */
                                if (ke->ke_cpticks == 0)
                                        continue;
#if     (FSHIFT >= CCPU_SHIFT)
                                ke->ke_pctcpu += (realstathz == 100) ?
                                    ((fixpt_t) ke->ke_cpticks) <<
                                    (FSHIFT - CCPU_SHIFT) :
                                    100 * (((fixpt_t) ke->ke_cpticks) <<
                                    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
                                ke->ke_pctcpu += ((FSCALE - ccpu) *
                                    (ke->ke_cpticks * FSCALE / realstathz)) >>
                                    FSHIFT;
#endif
                                ke->ke_cpticks = 0;
                        } /* end of kse loop */
                        /*
                         * If there are ANY running threads in this KSEGRP,
                         * then don't count it as sleeping.
                         */
                        if (awake) {
                                if (kg->kg_slptime > 1) {
                                        /*
                                         * In an ideal world, this should not
                                         * happen, because whoever woke us
                                         * up from the long sleep should have
                                         * unwound the slptime and reset our
                                         * priority before we run at the stale
                                         * priority.  Should KASSERT at some
                                         * point when all the cases are fixed.
                                         */
                                        updatepri(kg);
                                }
                                kg->kg_slptime = 0;
                        } else {
                                kg->kg_slptime++;
                        }
                        if (kg->kg_slptime > 1)
                                continue;
                        kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
                        resetpriority(kg);
                        FOREACH_THREAD_IN_GROUP(kg, td) {
                                if (td->td_priority >= PUSER) {
                                        sched_prio(td, kg->kg_user_pri);
                                }
                        }
                } /* end of ksegrp loop */
                mtx_unlock_spin(&sched_lock);
        } /* end of process loop */
        sx_sunlock(&allproc_lock);
        wakeup(&lbolt);
        callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
        register unsigned int newcpu;
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

        newcpu = kg->kg_estcpu;
        if (kg->kg_slptime > 5 * loadfac)
                kg->kg_estcpu = 0;
        else {
                kg->kg_slptime--;       /* the first time was done in schedcpu */
                while (newcpu && --kg->kg_slptime)
                        newcpu = decay_cpu(loadfac, newcpu);
                kg->kg_estcpu = newcpu;
        }
        resetpriority(kg);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
        register unsigned int newpriority;
        struct thread *td;

        mtx_lock_spin(&sched_lock);
        if (kg->kg_pri_class == PRI_TIMESHARE) {
                newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
                    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
                newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
                    PRI_MAX_TIMESHARE);
                kg->kg_user_pri = newpriority;
        }
        FOREACH_THREAD_IN_GROUP(kg, td) {
                maybe_resched(td);      /* XXXKSE silly */
        }
        mtx_unlock_spin(&sched_lock);
}

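/*
 * An illustrative reading of the calculation above, not extra behaviour:
 * starting from PUSER, every INVERSE_ESTCPU_WEIGHT points of accumulated
 * kg_estcpu cost one priority step, each nice level above PRIO_MIN costs
 * NICE_WEIGHT steps, and the result is clamped to [PRI_MIN_TIMESHARE,
 * PRI_MAX_TIMESHARE].  Larger numerical priorities are weaker, so a CPU
 * hog or a heavily nice'd ksegrp drifts toward PRI_MAX_TIMESHARE; the
 * concrete values of these macros are defined outside this file.
 */
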
/* ARGSUSED */
static void
sched_setup(void *dummy)
{
        if (sched_quantum == 0)
                sched_quantum = SCHED_QUANTUM;
        hogticks = 2 * sched_quantum;

        callout_init(&schedcpu_callout, 1);
        callout_init(&roundrobin_callout, 0);

        /* Kick off timeout driven events by calling first time. */
        roundrobin(NULL);
        schedcpu(NULL);
}

/* External interfaces start here */
int
sched_runnable(void)
{
        return runq_check(&runq);
}

int
sched_rr_interval(void)
{
        if (sched_quantum == 0)
                sched_quantum = SCHED_QUANTUM;
        return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
sched_clock(struct thread *td)
{
        struct kse *ke;
        struct ksegrp *kg;

        KASSERT((td != NULL), ("schedclock: null thread pointer"));
        ke = td->td_kse;
        kg = td->td_ksegrp;
        ke->ke_cpticks++;
        kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
        if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
                resetpriority(kg);
                if (td->td_priority >= PUSER)
                        td->td_priority = kg->kg_user_pri;
        }
}

/*
 * Charge the child's scheduling cpu usage to the parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait since process estcpu is sum of
 * all ksegrps, this is strictly as expected.  Assume that the child process
 * aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct ksegrp *kg, struct ksegrp *child)
{
        kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_fork(struct ksegrp *kg, struct ksegrp *child)
{
        /*
         * Set the priority of the child to be that of the parent.
         * XXXKSE this needs redefining..
         */
        child->kg_estcpu = kg->kg_estcpu;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
        kg->kg_nice = nice;
        resetpriority(kg);
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

        if (TD_ON_RUNQ(td)) {
                adjustrunqueue(td, prio);
        } else {
                td->td_priority = prio;
        }
}

void
sched_sleep(struct thread *td, u_char prio)
{
        td->td_ksegrp->kg_slptime = 0;
        td->td_priority = prio;
}

void
sched_switchin(struct thread *td)
{
        td->td_kse->ke_oncpu = PCPU_GET(cpuid);
}

void
sched_switchout(struct thread *td)
{
        struct kse *ke;
        struct proc *p;

        ke = td->td_kse;
        p = td->td_proc;

        KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));

        td->td_lastcpu = ke->ke_oncpu;
        td->td_last_kse = ke;
        ke->ke_oncpu = NOCPU;
        ke->ke_flags &= ~KEF_NEEDRESCHED;
        /*
         * At the last moment, if this thread is still marked RUNNING,
         * then put it back on the run queue as it has not been suspended
         * or stopped or anything else similar.
         */
        if (TD_IS_RUNNING(td)) {
                /* Put us back on the run queue (kse and all). */
                setrunqueue(td);
        } else if (p->p_flag & P_KSES) {
                /*
                 * We will not be on the run queue.  So we must be
                 * sleeping or similar.  As it's available,
                 * someone else can use the KSE if they need it.
                 * (If bound LOANING can still occur).
                 */
                kse_reassign(ke);
        }
}

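/*
 * A note on the run queue interfaces below (a summary of the code that
 * follows, nothing additional): sched_add() marks a KSE KES_ONRUNQ and
 * places it on the global runq, while sched_rem() and sched_choose() take
 * it back off and return it to KES_THREAD.  All of them expect the owning
 * process to be in memory (PS_INMEM); sched_add() and sched_rem() also
 * assert that sched_lock is held and keep kg_runq_kses in step.
 */
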
void
sched_wakeup(struct thread *td)
{
        struct ksegrp *kg;

        kg = td->td_ksegrp;
        if (kg->kg_slptime > 1)
                updatepri(kg);
        kg->kg_slptime = 0;
        setrunqueue(td);
        maybe_resched(td);
}

void
sched_add(struct kse *ke)
{
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
        KASSERT((ke->ke_thread->td_kse != NULL),
            ("runq_add: No KSE on thread"));
        KASSERT(ke->ke_state != KES_ONRUNQ,
            ("runq_add: kse %p (%s) already in run queue", ke,
            ke->ke_proc->p_comm));
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("runq_add: process swapped out"));
        ke->ke_ksegrp->kg_runq_kses++;
        ke->ke_state = KES_ONRUNQ;

        runq_add(&runq, ke);
}

void
sched_rem(struct kse *ke)
{
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("runq_remove: process swapped out"));
        KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
        mtx_assert(&sched_lock, MA_OWNED);

        runq_remove(&runq, ke);
        ke->ke_state = KES_THREAD;
        ke->ke_ksegrp->kg_runq_kses--;
}

struct kse *
sched_choose(void)
{
        struct kse *ke;

        ke = runq_choose(&runq);

        if (ke != NULL) {
                runq_remove(&runq, ke);
                ke->ke_state = KES_THREAD;

                KASSERT((ke->ke_thread != NULL),
                    ("runq_choose: No thread on KSE"));
                KASSERT((ke->ke_thread->td_kse != NULL),
                    ("runq_choose: No KSE on thread"));
                KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
                    ("runq_choose: process swapped out"));
        }
        return (ke);
}

void
sched_userret(struct thread *td)
{
        struct ksegrp *kg;
        /*
         * XXX we cheat slightly on the locking here to avoid locking in
         * the usual case.  Setting td_priority here is essentially an
         * incomplete workaround for not setting it properly elsewhere.
         * Now that some interrupt handlers are threads, not setting it
         * properly elsewhere can clobber it in the window between setting
         * it here and returning to user mode, so don't waste time setting
         * it perfectly here.
         */
        kg = td->td_ksegrp;
        if (td->td_priority != kg->kg_user_pri) {
                mtx_lock_spin(&sched_lock);
                td->td_priority = kg->kg_user_pri;
                mtx_unlock_spin(&sched_lock);
        }
}

int
sched_sizeof_kse(void)
{
        return (sizeof(struct kse));
}

int
sched_sizeof_ksegrp(void)
{
        return (sizeof(struct ksegrp));
}

int
sched_sizeof_proc(void)
{
        return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
        return (sizeof(struct thread));
}