kern_synch.c revision 32071
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)kern_synch.c    8.9 (Berkeley) 5/19/95
 * $Id: kern_synch.c,v 1.42 1997/11/25 07:07:44 julian Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/limits.h>    /* for UCHAR_MAX = typeof(p_priority)_MAX */

static void rqinit __P((void *));
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)

u_char curpriority;    /* usrpri of curproc */
int lbolt;             /* once a second sleep address */

static void endtsleep __P((void *));
static void updatepri __P((struct proc *p));
static void roundrobin __P((void *arg));
static void schedcpu __P((void *arg));

#define MAXIMUM_SCHEDULE_QUANTUM    (1000000) /* arbitrary limit */
#ifndef DEFAULT_SCHEDULE_QUANTUM
#define DEFAULT_SCHEDULE_QUANTUM    10
#endif
static int quantum = DEFAULT_SCHEDULE_QUANTUM; /* default value */

static int
sysctl_kern_quantum SYSCTL_HANDLER_ARGS
{
    int error;
    int new_val = quantum;

    new_val = quantum;
    error = sysctl_handle_int(oidp, &new_val, 0, req);
    if (error == 0) {
        if ((new_val > 0) && (new_val < MAXIMUM_SCHEDULE_QUANTUM)) {
            quantum = new_val;
        } else {
            error = EINVAL;
        }
    }
    return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof quantum, sysctl_kern_quantum, "I", "");

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
static void
roundrobin(arg)
    void *arg;
{

    need_resched();
    timeout(roundrobin, NULL, hz / quantum);
}

/*
 * Constants for digital decay and forget:
 *    90% of (p_estcpu) usage in 5 * loadav time
 *    95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *        Note that, as ps(1) mentions, this can let percentages
 *        total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that statclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *     for (i = 0; i < (5 * loadavg); i++)
 *         p_estcpu *= decay;
 * will compute
 *     p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *     decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *     decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *     decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *     b = 2 * loadavg
 * then
 *     decay = b / (b + 1)
 *
 * We now need to prove two things:
 *    1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *    2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *    For x close to zero, exp(x) =~ 1 + x, since
 *        exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *        therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *    For x close to zero, ln(1+x) =~ x, since
 *        ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *        therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *    ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *        solving for factor,
 *        ln(factor) =~ (-2.30/5*loadav), or
 *        factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *            exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *        solving for power,
 *        power*ln(b/(b+1)) =~ -2.30, or
 *        power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *    loadav:  1       2       3       4
 *    power:   5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav)        (2 * (loadav))
#define decay_cpu(loadfac, cpu)   (((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE;    /* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *    1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT    11

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(arg)
    void *arg;
{
    register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
    register struct proc *p;
    register int s;
    register unsigned int newcpu;

    wakeup((caddr_t)&lbolt);
    for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
        /*
         * Increment time in/out of memory and sleep time
         * (if sleeping).  We ignore overflow; with 16-bit int's
         * (remember them?) overflow takes 45 days.
         */
        p->p_swtime++;
        if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
            p->p_slptime++;
        p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
        /*
         * If the process has slept the entire second,
         * stop recalculating its priority until it wakes up.
         */
        if (p->p_slptime > 1)
            continue;
        s = splhigh();    /* prevent state changes and protect run queue */
        /*
         * p_pctcpu is only for ps.
         */
#if (FSHIFT >= CCPU_SHIFT)
        p->p_pctcpu += (hz == 100)?
            ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
            100 * (((fixpt_t) p->p_cpticks)
                << (FSHIFT - CCPU_SHIFT)) / hz;
#else
        p->p_pctcpu += ((FSCALE - ccpu) *
            (p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
        p->p_cpticks = 0;
        newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
        p->p_estcpu = min(newcpu, UCHAR_MAX);
        resetpriority(p);
        if (p->p_priority >= PUSER) {
#define PPQ    (128 / NQS)    /* priorities per queue */
            if ((p != curproc) &&
#ifdef SMP
                (u_char)p->p_oncpu == 0xff && /* idle */
#endif
                p->p_stat == SRUN &&
                (p->p_flag & P_INMEM) &&
                (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
                remrq(p);
                p->p_priority = p->p_usrpri;
                setrunqueue(p);
            } else
                p->p_priority = p->p_usrpri;
        }
        splx(s);
    }
    vmmeter();
    timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(p)
    register struct proc *p;
{
    register unsigned int newcpu = p->p_estcpu;
    register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

    if (p->p_slptime > 5 * loadfac)
        p->p_estcpu = 0;
    else {
        p->p_slptime--;    /* the first time was done in schedcpu */
        while (newcpu && --p->p_slptime)
            newcpu = (int) decay_cpu(loadfac, newcpu);
        p->p_estcpu = min(newcpu, UCHAR_MAX);
    }
    resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE    128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)    (((long)(x) >> 8) & (TABLESIZE - 1))

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit()
{
    int i;

    for (i = 0; i < TABLESIZE; i++)
        TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 */
int
tsleep(ident, priority, wmesg, timo)
    void *ident;
    int priority, timo;
    const char *wmesg;
{
    struct proc *p = curproc;
    int s, sig, catch = priority & PCATCH;
    struct callout_handle thandle;

#ifdef KTRACE
    if (KTRPOINT(p, KTR_CSW))
        ktrcsw(p->p_tracep, 1, 0);
#endif
    s = splhigh();
    if (cold || panicstr) {
        /*
         * After a panic, or during autoconfiguration,
         * just give interrupts a chance, then just return;
         * don't run any other procs or panic below,
         * in case this is the idle process and already asleep.
         */
        splx(safepri);
        splx(s);
        return (0);
    }
#ifdef DIAGNOSTIC
    if (p == NULL)
        panic("tsleep1");
    if (ident == NULL || p->p_stat != SRUN)
        panic("tsleep");
    /* XXX This is not exhaustive, just the most common case */
    if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p))
        panic("sleeping process already on another queue");
#endif
    p->p_wchan = ident;
    p->p_wmesg = wmesg;
    p->p_slptime = 0;
    p->p_priority = priority & PRIMASK;
    TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
    if (timo)
        thandle = timeout(endtsleep, (void *)p, timo);
    /*
     * We put ourselves on the sleep queue and start our timeout
     * before calling CURSIG, as we could stop there, and a wakeup
     * or a SIGCONT (or both) could occur while we were stopped.
     * A SIGCONT would cause us to be marked as SSLEEP
     * without resuming us, thus we must be ready for sleep
     * when CURSIG is called.  If the wakeup happens while we're
     * stopped, p->p_wchan will be 0 upon return from CURSIG.
     */
    if (catch) {
        p->p_flag |= P_SINTR;
        if ((sig = CURSIG(p))) {
            if (p->p_wchan)
                unsleep(p);
            p->p_stat = SRUN;
            goto resume;
        }
        if (p->p_wchan == 0) {
            catch = 0;
            goto resume;
        }
    } else
        sig = 0;
    p->p_stat = SSLEEP;
    p->p_stats->p_ru.ru_nvcsw++;
    mi_switch();
resume:
    curpriority = p->p_usrpri;
    splx(s);
    p->p_flag &= ~P_SINTR;
    if (p->p_flag & P_TIMEOUT) {
        p->p_flag &= ~P_TIMEOUT;
        if (sig == 0) {
#ifdef KTRACE
            if (KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 0, 0);
#endif
            return (EWOULDBLOCK);
        }
    } else if (timo)
        untimeout(endtsleep, (void *)p, thandle);
    if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
        if (KTRPOINT(p, KTR_CSW))
            ktrcsw(p->p_tracep, 0, 0);
#endif
        if (p->p_sigacts->ps_sigintr & sigmask(sig))
            return (EINTR);
        return (ERESTART);
    }
#ifdef KTRACE
    if (KTRPOINT(p, KTR_CSW))
        ktrcsw(p->p_tracep, 0, 0);
#endif
    return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
static void
endtsleep(arg)
    void *arg;
{
    register struct proc *p;
    int s;

    p = (struct proc *)arg;
    s = splhigh();
    if (p->p_wchan) {
        if (p->p_stat == SSLEEP)
            setrunnable(p);
        else
            unsleep(p);
        p->p_flag |= P_TIMEOUT;
    }
    splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
    register struct proc *p;
{
    int s;

    s = splhigh();
    if (p->p_wchan) {
        TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq);
        p->p_wchan = 0;
    }
    splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
    register void *ident;
{
    register struct slpquehead *qp;
    register struct proc *p;
    int s;

    s = splhigh();
    qp = &slpque[LOOKUP(ident)];
restart:
    for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
        if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
            panic("wakeup");
#endif
        if (p->p_wchan == ident) {
            TAILQ_REMOVE(qp, p, p_procq);
            p->p_wchan = 0;
            if (p->p_stat == SSLEEP) {
                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                if (p->p_slptime > 1)
                    updatepri(p);
                p->p_slptime = 0;
                p->p_stat = SRUN;
                if (p->p_flag & P_INMEM) {
                    setrunqueue(p);
                    need_resched();
                } else {
                    p->p_flag |= P_SWAPINREQ;
                    wakeup((caddr_t)&proc0);
                }
                /* END INLINE EXPANSION */
                goto restart;
            }
        }
    }
    splx(s);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
    register void *ident;
{
    register struct slpquehead *qp;
    register struct proc *p;
    int s;

    s = splhigh();
    qp = &slpque[LOOKUP(ident)];

    for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
        if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
            panic("wakeup_one");
#endif
        if (p->p_wchan == ident) {
            TAILQ_REMOVE(qp, p, p_procq);
            p->p_wchan = 0;
            if (p->p_stat == SSLEEP) {
                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                if (p->p_slptime > 1)
                    updatepri(p);
                p->p_slptime = 0;
                p->p_stat = SRUN;
                if (p->p_flag & P_INMEM) {
                    setrunqueue(p);
                    need_resched();
                    break;
                } else {
                    p->p_flag |= P_SWAPINREQ;
                    wakeup((caddr_t)&proc0);
                }
                /* END INLINE EXPANSION */
            }
        }
    }
    splx(s);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
    register struct proc *p = curproc;    /* XXX */
    register struct rlimit *rlim;
    register long s, u;
    int x;
    struct timeval tv;

    /*
     * XXX this spl is almost unnecessary.  It is partly to allow for
     * sloppy callers that don't do it (issignal() via CURSIG() is the
     * main offender).  It is partly to work around a bug in the i386
     * cpu_switch() (the ipl is not preserved).  We ran for years
     * without it.  I think there was only an interrupt latency problem.
     * The main caller, tsleep(), does an splx() a couple of instructions
     * after calling here.  The buggy caller, issignal(), usually calls
     * here at spl0() and sometimes returns at splhigh().  The process
     * then runs for a little too long at splhigh().  The ipl gets fixed
     * when the process returns to user mode (or earlier).
     *
     * It would probably be better to always call here at spl0().  Callers
     * are prepared to give up control to another process, so they must
     * be prepared to be interrupted.  The clock stuff here may not
     * actually need splstatclock().
     */
    x = splstatclock();

#ifdef SIMPLELOCK_DEBUG
    if (p->p_simple_locks)
        printf("sleep: holding simple lock");
#endif
    /*
     * Compute the amount of time during which the current
     * process was running, and add that to its total so far.
     */
    microtime(&tv);
    u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
    s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
    if (u < 0) {
        u += 1000000;
        s--;
    } else if (u >= 1000000) {
        u -= 1000000;
        s++;
    }
#ifdef SMP
    if (s < 0)
        s = u = 0;
#endif
    p->p_rtime.tv_usec = u;
    p->p_rtime.tv_sec = s;

    /*
     * Check if the process exceeds its cpu resource allocation.
     * If over max, kill it.
     */
    if (p->p_stat != SZOMB) {
        rlim = &p->p_rlimit[RLIMIT_CPU];
        if (s >= rlim->rlim_cur) {
            if (s >= rlim->rlim_max)
                killproc(p, "exceeded maximum CPU limit");
            else {
                psignal(p, SIGXCPU);
                if (rlim->rlim_cur < rlim->rlim_max)
                    rlim->rlim_cur += 5;
            }
        }
    }

    /*
     * Pick a new current process and record its start time.
     */
    cnt.v_swtch++;
    cpu_switch(p);
    microtime(&runtime);
    splx(x);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
/* ARGSUSED*/
static void
rqinit(dummy)
    void *dummy;
{
    register int i;

    for (i = 0; i < NQS; i++) {
        qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
        rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i];
        idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i];
    }
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
    register struct proc *p;
{
    register int s;

    s = splhigh();
    switch (p->p_stat) {
    case 0:
    case SRUN:
    case SZOMB:
    default:
        panic("setrunnable");
    case SSTOP:
    case SSLEEP:
        unsleep(p);        /* e.g. when sending signals */
        break;

    case SIDL:
        break;
    }
    p->p_stat = SRUN;
    if (p->p_flag & P_INMEM)
        setrunqueue(p);
    splx(s);
    if (p->p_slptime > 1)
        updatepri(p);
    p->p_slptime = 0;
    if ((p->p_flag & P_INMEM) == 0) {
        p->p_flag |= P_SWAPINREQ;
        wakeup((caddr_t)&proc0);
    }
    else if (p->p_priority < curpriority)
        need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
    register struct proc *p;
{
    register unsigned int newpriority;

    if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
        newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
        newpriority = min(newpriority, MAXPRI);
        p->p_usrpri = newpriority;
        if (newpriority < curpriority)
            need_resched();
    } else {
        need_resched();
    }
}

/* ARGSUSED */
static void sched_setup __P((void *dummy));
static void
sched_setup(dummy)
    void *dummy;
{
    /* Kick off timeout driven events by calling first time. */
    roundrobin(NULL);
    schedcpu(NULL);
}
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
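A minimal standalone sketch, separate from the kern_synch.c listing above, that numerically checks the digital-decay derivation in the file's comment block: with decay = (2 * loadav) / (2 * loadav + 1), roughly 5 * loadav applications of the decay lose 90% of p_estcpu, matching the "Actual power values" table (5.68, 10.32, 14.94, 19.55). It is an ordinary user-space program for illustration only, not kernel code.

/*
 * Illustrative check of decay ** power ~= .1 for decay = b/(b+1), b = 2*loadav.
 * Compile with a C compiler and the math library (e.g. cc decay.c -lm).
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
    double loadav, b, decay, power;

    for (loadav = 1; loadav <= 4; loadav++) {
        b = 2 * loadav;
        decay = b / (b + 1);
        /* power such that decay ** power == .1 */
        power = log(0.1) / log(decay);
        printf("loadav %g: decay %.4f, power %.2f (~ 5 * loadav = %g)\n",
            loadav, decay, power, 5 * loadav);
    }
    return (0);
}

Running it reproduces the table in the comment: power comes out to 5.68, 10.32, 14.94, and 19.55 for load averages 1 through 4, which is close to 5 * loadav as the proof claims.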