kern_synch.c revision 12577
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 * $Id: kern_synch.c,v 1.14 1995/12/02 17:10:35 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <vm/vm.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void rqinit __P((void *));
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

extern void	endtsleep __P((void *));
extern void	updatepri __P((struct proc *p));

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
void
roundrobin(arg)
        void *arg;
{

        need_resched();
        timeout(roundrobin, NULL, hz / 10);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you dont want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

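/*
 * A worked example of the numbers above (illustration only): with
 * loadav == 1, b == 2 and decay == 2/3 ~= 0.667; from the table,
 * decay ** 5.68 ~= .1, so roughly 5-6 passes of decay_cpu() -- one per
 * second of schedcpu() -- forget 90% of p_estcpu.  In fixed point,
 * loadfactor(FSCALE) == 2*FSCALE, hence
 *	decay_cpu(2*FSCALE, cpu) == (2*FSCALE * cpu) / (3*FSCALE)
 *	                         == (2 * cpu) / 3.
 * Likewise ccpu == exp(-1/20) applied once a second decays p_pctcpu by
 * exp(-60/20) == exp(-3) ~= 0.05 over 60 seconds, which is the
 * "95% in 60 seconds" figure quoted above.
 */
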
/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(arg)
        void *arg;
{
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        register struct proc *p;
        register int s;
        register unsigned int newcpu;

        wakeup((caddr_t)&lbolt);
        for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
                /*
                 * Increment time in/out of memory and sleep time
                 * (if sleeping).  We ignore overflow; with 16-bit int's
                 * (remember them?) overflow takes 45 days.
                 */
                p->p_swtime++;
                if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
                        p->p_slptime++;
                p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
                /*
                 * If the process has slept the entire second,
                 * stop recalculating its priority until it wakes up.
                 */
                if (p->p_slptime > 1)
                        continue;
                s = splstatclock();	/* prevent state changes */
                /*
                 * p_pctcpu is only for ps.
                 */
#if	(FSHIFT >= CCPU_SHIFT)
                p->p_pctcpu += (hz == 100)?
                        ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
                        100 * (((fixpt_t) p->p_cpticks)
                                << (FSHIFT - CCPU_SHIFT)) / hz;
#else
                p->p_pctcpu += ((FSCALE - ccpu) *
                        (p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
                p->p_cpticks = 0;
                newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
                p->p_estcpu = min(newcpu, UCHAR_MAX);
                resetpriority(p);
                if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
                        if ((p != curproc) &&
                            p->p_stat == SRUN &&
                            (p->p_flag & P_INMEM) &&
                            (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
                                remrq(p);
                                p->p_priority = p->p_usrpri;
                                setrunqueue(p);
                        } else
                                p->p_priority = p->p_usrpri;
                }
                splx(s);
        }
        vmmeter();
        timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
        register struct proc *p;
{
        register unsigned int newcpu = p->p_estcpu;
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

        if (p->p_slptime > 5 * loadfac)
                p->p_estcpu = 0;
        else {
                p->p_slptime--;	/* the first time was done in schedcpu */
                while (newcpu && --p->p_slptime)
                        newcpu = (int) decay_cpu(loadfac, newcpu);
                p->p_estcpu = min(newcpu, UCHAR_MAX);
        }
        resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((int)(x) >> 8) & (TABLESIZE - 1))
struct slpque {
        struct proc *sq_head;
        struct proc **sq_tailp;
} slpque[TABLESIZE];

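/*
 * For illustration (hypothetical wait channel address): an ident
 * whose address happens to be 0xf0123400 hashes to
 *	LOOKUP(0xf0123400) == (0xf0123400 >> 8) & 127 == 0x34 == 52,
 * so all sleepers on that channel queue up on slpque[52].  Distinct
 * channels may collide in the same bucket; wakeup() below compares
 * p_wchan against the ident to sort that out.
 */
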
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 */
int
tsleep(ident, priority, wmesg, timo)
        void *ident;
        int priority, timo;
        char *wmesg;
{
        register struct proc *p = curproc;
        register struct slpque *qp;
        register s;
        int sig, catch = priority & PCATCH;

#ifdef KTRACE
        if (KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 1, 0);
#endif
        s = splhigh();
        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                splx(safepri);
                splx(s);
                return (0);
        }
#ifdef DIAGNOSTIC
        if (ident == NULL || p->p_stat != SRUN || p->p_back)
                panic("tsleep");
#endif
        p->p_wchan = ident;
        p->p_wmesg = wmesg;
        p->p_slptime = 0;
        p->p_priority = priority & PRIMASK;
        qp = &slpque[LOOKUP(ident)];
        if (qp->sq_head == 0)
                qp->sq_head = p;
        else
                *qp->sq_tailp = p;
        *(qp->sq_tailp = &p->p_forw) = 0;
        if (timo)
                timeout(endtsleep, (void *)p, timo);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling CURSIG, as we could stop there, and a wakeup
         * or a SIGCONT (or both) could occur while we were stopped.
         * A SIGCONT would cause us to be marked as SSLEEP
         * without resuming us, thus we must be ready for sleep
         * when CURSIG is called.  If the wakeup happens while we're
         * stopped, p->p_wchan will be 0 upon return from CURSIG.
         */
        if (catch) {
                p->p_flag |= P_SINTR;
                if ((sig = CURSIG(p))) {
                        if (p->p_wchan)
                                unsleep(p);
                        p->p_stat = SRUN;
                        goto resume;
                }
                if (p->p_wchan == 0) {
                        catch = 0;
                        goto resume;
                }
        } else
                sig = 0;
        p->p_stat = SSLEEP;
        p->p_stats->p_ru.ru_nvcsw++;
        mi_switch();
resume:
        curpriority = p->p_usrpri;
        splx(s);
        p->p_flag &= ~P_SINTR;
        if (p->p_flag & P_TIMEOUT) {
                p->p_flag &= ~P_TIMEOUT;
                if (sig == 0) {
#ifdef KTRACE
                        if (KTRPOINT(p, KTR_CSW))
                                ktrcsw(p->p_tracep, 0, 0);
#endif
                        return (EWOULDBLOCK);
                }
        } else if (timo)
                untimeout(endtsleep, (void *)p);
        if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
                if (KTRPOINT(p, KTR_CSW))
                        ktrcsw(p->p_tracep, 0, 0);
#endif
                if (p->p_sigacts->ps_sigintr & sigmask(sig))
                        return (EINTR);
                return (ERESTART);
        }
#ifdef KTRACE
        if (KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 0, 0);
#endif
        return (0);
}

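/*
 * A minimal usage sketch (hypothetical caller, flag and wmesg, shown
 * only to illustrate the contract above): a driver waiting for a flag
 * that its interrupt handler sets, with a one second timeout and
 * signal checking, would typically loop like
 *
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = tsleep((caddr_t)&sc->sc_flags, PZERO | PCATCH,
 *		    "scready", hz);
 *		if (error)
 *			return (error);
 *	}
 *
 * where a non-zero return is EWOULDBLOCK, EINTR or ERESTART, and the
 * condition is re-tested after every return because wakeup() wakes
 * every process sleeping on the identifier.
 */
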
/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(arg)
        void *arg;
{
        register struct proc *p;
        int s;

        p = (struct proc *)arg;
        s = splhigh();
        if (p->p_wchan) {
                if (p->p_stat == SSLEEP)
                        setrunnable(p);
                else
                        unsleep(p);
                p->p_flag |= P_TIMEOUT;
        }
        splx(s);
}

/*
 * Short-term, non-interruptable sleep.
 */
void
sleep(ident, priority)
        void *ident;
        int priority;
{
        register struct proc *p = curproc;
        register struct slpque *qp;
        register s;

#ifdef DIAGNOSTIC
        if (priority > PZERO) {
                printf("sleep called with priority %d > PZERO, wchan: %p\n",
                    priority, ident);
                panic("old sleep");
        }
#endif
        s = splhigh();
        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                splx(safepri);
                splx(s);
                return;
        }
#ifdef DIAGNOSTIC
        if (ident == NULL || p->p_stat != SRUN || p->p_back)
                panic("sleep");
#endif
        p->p_wchan = ident;
        p->p_wmesg = NULL;
        p->p_slptime = 0;
        p->p_priority = priority;
        qp = &slpque[LOOKUP(ident)];
        if (qp->sq_head == 0)
                qp->sq_head = p;
        else
                *qp->sq_tailp = p;
        *(qp->sq_tailp = &p->p_forw) = 0;
        p->p_stat = SSLEEP;
        p->p_stats->p_ru.ru_nvcsw++;
#ifdef KTRACE
        if (KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 1, 0);
#endif
        mi_switch();
#ifdef KTRACE
        if (KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 0, 0);
#endif
        curpriority = p->p_usrpri;
        splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
        register struct proc *p;
{
        register struct slpque *qp;
        register struct proc **hp;
        int s;

        s = splhigh();
        if (p->p_wchan) {
                hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
                while (*hp != p)
                        hp = &(*hp)->p_forw;
                *hp = p->p_forw;
                if (qp->sq_tailp == &p->p_forw)
                        qp->sq_tailp = hp;
                p->p_wchan = 0;
        }
        splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
        register void *ident;
{
        register struct slpque *qp;
        register struct proc *p, **q;
        int s;

        s = splhigh();
        qp = &slpque[LOOKUP(ident)];
restart:
        for (q = &qp->sq_head; *q; ) {
                p = *q;
#ifdef DIAGNOSTIC
                if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
                        panic("wakeup");
#endif
                if (p->p_wchan == ident) {
                        p->p_wchan = 0;
                        *q = p->p_forw;
                        if (qp->sq_tailp == &p->p_forw)
                                qp->sq_tailp = q;
                        if (p->p_stat == SSLEEP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_flag & P_INMEM)
                                        setrunqueue(p);
                                /*
                                 * Since curpriority is a user priority,
                                 * p->p_priority is always better than
                                 * curpriority.
                                 */
                                if ((p->p_flag & P_INMEM) == 0)
                                        wakeup((caddr_t)&proc0);
                                else
                                        need_resched();
                                /* END INLINE EXPANSION */
                                goto restart;
                        }
                } else
                        q = &p->p_forw;
        }
        splx(s);
}

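/*
 * The producer side of the tsleep() sketch shown earlier (again a
 * hypothetical caller): the interrupt handler records the condition
 * and then wakes every sleeper on the same identifier,
 *
 *	sc->sc_flags |= SC_READY;
 *	wakeup((caddr_t)&sc->sc_flags);
 *
 * The identifier passed to wakeup() must match the one passed to
 * tsleep()/sleep(); the sleepers re-test the condition themselves.
 */
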
/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
        register struct proc *p = curproc;	/* XXX */
        register struct rlimit *rlim;
        register long s, u;
        struct timeval tv;

        /*
         * Compute the amount of time during which the current
         * process was running, and add that to its total so far.
         */
        microtime(&tv);
        u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
        s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
        if (u < 0) {
                u += 1000000;
                s--;
        } else if (u >= 1000000) {
                u -= 1000000;
                s++;
        }
        p->p_rtime.tv_usec = u;
        p->p_rtime.tv_sec = s;

        /*
         * Check if the process exceeds its cpu resource allocation.
         * If over max, kill it.  In any case, if it has run for more
         * than 10 minutes, reduce priority to give others a chance.
         */
        if (p->p_stat != SZOMB) {
                rlim = &p->p_rlimit[RLIMIT_CPU];
                if (s >= rlim->rlim_cur) {
                        if (s >= rlim->rlim_max)
                                psignal(p, SIGKILL);
                        else {
                                psignal(p, SIGXCPU);
                                if (rlim->rlim_cur < rlim->rlim_max)
                                        rlim->rlim_cur += 5;
                        }
                }
                if (s > 10 * 60 && p->p_ucred->cr_uid && p->p_nice == NZERO) {
                        p->p_nice = NZERO + 4;
                        resetpriority(p);
                }
        }

        /*
         * Pick a new current process and record its start time.
         */
        cnt.v_swtch++;
        cpu_switch(p);
        microtime(&runtime);
}

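/*
 * To illustrate the RLIMIT_CPU handling above with made-up limits:
 * given a soft limit of 60 seconds and a hard limit of 120 seconds,
 * the process gets SIGXCPU when its accumulated run time first
 * reaches 60s, and the soft limit then creeps up 5 seconds at a time
 * (60, 65, 70, ...), so SIGXCPU is re-delivered roughly every 5
 * seconds of additional CPU time until the 120s hard limit is hit,
 * at which point the process is sent SIGKILL.
 */
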
/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
/* ARGSUSED*/
static void
rqinit(dummy)
        void *dummy;
{
        register int i;

        for (i = 0; i < NQS; i++) {
                qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
                rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i];
                idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i];
        }
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
        register struct proc *p;
{
        register int s;

        s = splhigh();
        switch (p->p_stat) {
        case 0:
        case SRUN:
        case SZOMB:
        default:
                panic("setrunnable");
        case SSTOP:
        case SSLEEP:
                unsleep(p);		/* e.g. when sending signals */
                break;

        case SIDL:
                break;
        }
        p->p_stat = SRUN;
        if (p->p_flag & P_INMEM)
                setrunqueue(p);
        splx(s);
        if (p->p_slptime > 1)
                updatepri(p);
        p->p_slptime = 0;
        if ((p->p_flag & P_INMEM) == 0)
                wakeup((caddr_t)&proc0);
        else if (p->p_priority < curpriority)
                need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
        register struct proc *p;
{
        register unsigned int newpriority;

        if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
                newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
                newpriority = min(newpriority, MAXPRI);
                p->p_usrpri = newpriority;
                if (newpriority < curpriority)
                        need_resched();
        } else {
                need_resched();
        }
}

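/*
 * For a rough feel of the formula above (assuming the usual 4.4BSD
 * values PUSER == 50 and MAXPRI == 127, and a nice value of 0): a
 * process with p_estcpu == 0 computes back to priority 50, while a
 * fully loaded p_estcpu == UCHAR_MAX (255) yields 50 + 63 == 113,
 * still below the MAXPRI clamp; numerically worse values therefore
 * come only from positive nice values.
 */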