kern_synch.c revision 33108
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $Id: kern_synch.c,v 1.44 1997/12/29 08:54:52 bde Exp $
 */
#include "opt_diagnostic.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/limits.h>	/* for UCHAR_MAX = typeof(p_priority)_MAX */

static void rqinit __P((void *));
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

static void	endtsleep __P((void *));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));
static void	updatepri __P((struct proc *p));

#define MAXIMUM_SCHEDULE_QUANTUM	(1000000)	/* arbitrary limit */
#ifndef DEFAULT_SCHEDULE_QUANTUM
#define DEFAULT_SCHEDULE_QUANTUM	10
#endif
static int quantum = DEFAULT_SCHEDULE_QUANTUM;		/* default value */

static int
sysctl_kern_quantum SYSCTL_HANDLER_ARGS
{
	int error;
	int new_val = quantum;

	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error == 0) {
		if ((new_val > 0) && (new_val < MAXIMUM_SCHEDULE_QUANTUM)) {
			quantum = new_val;
		} else {
			error = EINVAL;
		}
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof quantum, sysctl_kern_quantum, "I", "");

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
static void
roundrobin(arg)
	void *arg;
{

	need_resched();
	timeout(roundrobin, NULL, hz / quantum);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that statclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
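/*
 * Worked example of the formula above: for loadavg = 2, b = 2 * loadavg
 * = 4, so decay = b / (b + 1) = 0.8.  Iterating p_estcpu *= decay for
 * 5 * loadavg = 10 rounds scales p_estcpu by 0.8**10 =~ 0.107, i.e.
 * roughly the desired 90% decay.
 */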
/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
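/*
 * Worked example of the fixed-point decay (assuming the usual FSHIFT
 * of 11, so FSCALE = 2048): ccpu = 0.95122942... * 2048 =~ 1948, and
 * once a second schedcpu() below computes
 *	p_pctcpu = (p_pctcpu * 1948) >> 11;
 * i.e. multiplies p_pctcpu by ~0.951.  After 60 seconds this leaves
 * 0.951**60 =~ 0.05 of the original value, the advertised 95% decay.
 */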
/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	wakeup((caddr_t)&lbolt);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100) ?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
		p->p_estcpu = min(newcpu, UCHAR_MAX);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
#ifdef SMP
			    (u_char)p->p_oncpu == 0xff && /* idle */
#endif
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = min(newcpu, UCHAR_MAX);
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
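/*
 * Worked example of the hash: for a wait channel at, say, address
 * 0xf0123456, LOOKUP yields (0xf0123456 >> 8) & 127 = 0xf01234 & 0x7f
 * = 0x34, so the sleeper goes on slpque[52].  Idents that collide in
 * those 7 bits share a bucket; wakeup() filters the bucket by
 * comparing p_wchan against the exact ident.
 */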
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit()
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	const char *wmesg;
{
	struct proc *p = curproc;
	int s, sig, catch = priority & PCATCH;
	struct callout_handle thandle;

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("tsleep1");
	if (ident == NULL || p->p_stat != SRUN)
		panic("tsleep");
	/* XXX This is not exhaustive, just the most common case */
	if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p))
		panic("sleeping process already on another queue");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
	if (timo)
		thandle = timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p))) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p, thandle);
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
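/*
 * Example usage (a sketch of a typical caller, not taken from this
 * file): wait up to one second for a wakeup on the once-a-second
 * channel, letting signals interrupt the sleep:
 *
 *	int error;
 *
 *	error = tsleep((caddr_t)&lbolt, PPAUSE | PCATCH, "lbolt", hz);
 *
 * error is 0 on a normal wakeup, EWOULDBLOCK if the timeout fired,
 * and EINTR or ERESTART if a caught signal arrived first.
 */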
/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
	register struct proc *p;
{
	int s;

	s = splhigh();
	if (p->p_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq);
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					need_resched();
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	splx(s);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];

	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					need_resched();
					break;
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			}
		}
	}
	splx(s);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	register long s, u;
	int x;
	struct timeval tv;

	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0(). Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();

#ifdef SIMPLELOCK_DEBUG
	if (p->p_simple_locks)
		printf("sleep: holding simple lock");
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
#ifdef SMP
	if (s < 0)
		s = u = 0;
#endif
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 */
	if (p->p_stat != SZOMB) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (s >= rlim->rlim_cur) {
			if (s >= rlim->rlim_max)
				killproc(p, "exceeded maximum CPU limit");
			else {
				psignal(p, SIGXCPU);
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	cpu_switch(p);
	microtime(&runtime);
	splx(x);
}
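/*
 * Worked example of the timeval normalization in mi_switch() above:
 * if the process had accumulated p_rtime = { 2 s, 900000 us } and this
 * run added 300000 us, then u = 1200000 >= 1000000, so u -= 1000000
 * and s++, leaving p_rtime = { 3 s, 200000 us }.  Once s reaches the
 * soft RLIMIT_CPU limit the process gets SIGXCPU and the soft limit is
 * nudged up 5 seconds at a time, until the hard limit kills it.
 */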
/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
/* ARGSUSED */
static void
rqinit(dummy)
	void *dummy;
{
	register int i;

	for (i = 0; i < NQS; i++) {
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
		rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i];
		idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i];
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
	else if (p->p_priority < curpriority)
		need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
		newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
		newpriority = min(newpriority, MAXPRI);
		p->p_usrpri = newpriority;
		if (newpriority < curpriority)
			need_resched();
	} else {
		need_resched();
	}
}

/* ARGSUSED */
static void sched_setup __P((void *dummy));
static void
sched_setup(dummy)
	void *dummy;
{
	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
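/*
 * Worked example for resetpriority() above (illustrative numbers): a
 * nice-0 process with p_estcpu = 40 gets newpriority = PUSER + 40 / 4
 * + 0 = PUSER + 10, while the same process reniced to +10 gets
 * PUSER + 10 + 20 = PUSER + 30.  Accumulated CPU time and positive
 * nice values both push the user-mode priority toward the weaker
 * (numerically larger) end of the range, clipped at MAXPRI.
 */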