/* kern_synch.c revision 34266 */
1/*- 2 * Copyright (c) 1982, 1986, 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 39 * $Id: kern_synch.c,v 1.48 1998/03/04 10:25:55 dufault Exp $ 40 */ 41 42#include "opt_ktrace.h" 43 44#include <sys/param.h> 45#include <sys/systm.h> 46#include <sys/proc.h> 47#include <sys/kernel.h> 48#include <sys/signalvar.h> 49#include <sys/resourcevar.h> 50#include <sys/vmmeter.h> 51#include <sys/sysctl.h> 52#include <vm/vm.h> 53#include <vm/vm_extern.h> 54#ifdef KTRACE 55#include <sys/ktrace.h> 56#endif 57 58#include <machine/cpu.h> 59#include <machine/limits.h> /* for UCHAR_MAX = typeof(p_priority)_MAX */ 60 61static void rqinit __P((void *)); 62SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL) 63 64u_char curpriority; /* usrpri of curproc */ 65int lbolt; /* once a second sleep address */ 66 67static void endtsleep __P((void *)); 68static void roundrobin __P((void *arg)); 69static void schedcpu __P((void *arg)); 70static void updatepri __P((struct proc *p)); 71 72#define MAXIMUM_SCHEDULE_QUANTUM (1000000) /* arbitrary limit */ 73#ifndef DEFAULT_SCHEDULE_QUANTUM 74#define DEFAULT_SCHEDULE_QUANTUM 10 75#endif 76static int quantum = DEFAULT_SCHEDULE_QUANTUM; /* default value */ 77 78static int 79sysctl_kern_quantum SYSCTL_HANDLER_ARGS 80{ 81 int error; 82 int new_val = quantum; 83 84 new_val = quantum; 85 error = sysctl_handle_int(oidp, &new_val, 0, req); 86 if (error == 0) { 87 if ((new_val > 0) && (new_val < MAXIMUM_SCHEDULE_QUANTUM)) { 88 quantum = new_val; 89 
} else { 90 error = EINVAL; 91 } 92 } 93 return (error); 94} 95 96SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW, 97 0, sizeof quantum, sysctl_kern_quantum, "I", ""); 98 99/* maybe_resched: Decide if you need to reschedule or not 100 * taking the priorities and schedulers into account. 101 */ 102static void maybe_resched(struct proc *chk) 103{ 104 struct proc *p = curproc; /* XXX */ 105 106 if (p == 0 || 107 ((chk->p_priority < curpriority) && 108 ((RTP_PRIO_BASE(chk->p_rtprio.type) == 109 RTP_PRIO_BASE(p->p_rtprio.type))))) 110 need_resched(); 111} 112 113#define ROUNDROBIN_INTERVAL (hz / quantum) 114int roundrobin_interval(void) 115{ 116 return ROUNDROBIN_INTERVAL; 117} 118 119/* 120 * Force switch among equal priority processes every 100ms. 121 */ 122/* ARGSUSED */ 123static void 124roundrobin(arg) 125 void *arg; 126{ 127 struct proc *p = curproc; /* XXX */ 128 129 if (p == 0 || RTP_PRIO_NEED_RR(p->p_rtprio.type)) 130 need_resched(); 131 132 timeout(roundrobin, NULL, ROUNDROBIN_INTERVAL); 133} 134 135/* 136 * Constants for digital decay and forget: 137 * 90% of (p_estcpu) usage in 5 * loadav time 138 * 95% of (p_pctcpu) usage in 60 seconds (load insensitive) 139 * Note that, as ps(1) mentions, this can let percentages 140 * total over 100% (I've seen 137.9% for 3 processes). 141 * 142 * Note that statclock() updates p_estcpu and p_cpticks asynchronously. 143 * 144 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds. 
145 * That is, the system wants to compute a value of decay such 146 * that the following for loop: 147 * for (i = 0; i < (5 * loadavg); i++) 148 * p_estcpu *= decay; 149 * will compute 150 * p_estcpu *= 0.1; 151 * for all values of loadavg: 152 * 153 * Mathematically this loop can be expressed by saying: 154 * decay ** (5 * loadavg) ~= .1 155 * 156 * The system computes decay as: 157 * decay = (2 * loadavg) / (2 * loadavg + 1) 158 * 159 * We wish to prove that the system's computation of decay 160 * will always fulfill the equation: 161 * decay ** (5 * loadavg) ~= .1 162 * 163 * If we compute b as: 164 * b = 2 * loadavg 165 * then 166 * decay = b / (b + 1) 167 * 168 * We now need to prove two things: 169 * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1) 170 * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg) 171 * 172 * Facts: 173 * For x close to zero, exp(x) =~ 1 + x, since 174 * exp(x) = 0! + x**1/1! + x**2/2! + ... . 175 * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b. 176 * For x close to zero, ln(1+x) =~ x, since 177 * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1 178 * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1). 179 * ln(.1) =~ -2.30 180 * 181 * Proof of (1): 182 * Solve (factor)**(power) =~ .1 given power (5*loadav): 183 * solving for factor, 184 * ln(factor) =~ (-2.30/5*loadav), or 185 * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) = 186 * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED 187 * 188 * Proof of (2): 189 * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)): 190 * solving for power, 191 * power*ln(b/(b+1)) =~ -2.30, or 192 * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. 
QED 193 * 194 * Actual power values for the implemented algorithm are as follows: 195 * loadav: 1 2 3 4 196 * power: 5.68 10.32 14.94 19.55 197 */ 198 199/* calculations for digital decay to forget 90% of usage in 5*loadav sec */ 200#define loadfactor(loadav) (2 * (loadav)) 201#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE)) 202 203/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ 204static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 205 206/* 207 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the 208 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below 209 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). 210 * 211 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: 212 * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits). 213 * 214 * If you don't want to bother with the faster/more-accurate formula, you 215 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate 216 * (more general) method of calculating the %age of CPU used by a process. 217 */ 218#define CCPU_SHIFT 11 219 220/* 221 * Recompute process priorities, every hz ticks. 222 */ 223/* ARGSUSED */ 224static void 225schedcpu(arg) 226 void *arg; 227{ 228 register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); 229 register struct proc *p; 230 register int s; 231 register unsigned int newcpu; 232 233 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { 234 /* 235 * Increment time in/out of memory and sleep time 236 * (if sleeping). We ignore overflow; with 16-bit int's 237 * (remember them?) overflow takes 45 days. 238 */ 239 p->p_swtime++; 240 if (p->p_stat == SSLEEP || p->p_stat == SSTOP) 241 p->p_slptime++; 242 p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT; 243 /* 244 * If the process has slept the entire second, 245 * stop recalculating its priority until it wakes up. 
246 */ 247 if (p->p_slptime > 1) 248 continue; 249 s = splhigh(); /* prevent state changes and protect run queue */ 250 /* 251 * p_pctcpu is only for ps. 252 */ 253#if (FSHIFT >= CCPU_SHIFT) 254 p->p_pctcpu += (hz == 100)? 255 ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT): 256 100 * (((fixpt_t) p->p_cpticks) 257 << (FSHIFT - CCPU_SHIFT)) / hz; 258#else 259 p->p_pctcpu += ((FSCALE - ccpu) * 260 (p->p_cpticks * FSCALE / hz)) >> FSHIFT; 261#endif 262 p->p_cpticks = 0; 263 newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice; 264 p->p_estcpu = min(newcpu, UCHAR_MAX); 265 resetpriority(p); 266 if (p->p_priority >= PUSER) { 267#define PPQ (128 / NQS) /* priorities per queue */ 268 if ((p != curproc) && 269#ifdef SMP 270 (u_char)p->p_oncpu == 0xff && /* idle */ 271#endif 272 p->p_stat == SRUN && 273 (p->p_flag & P_INMEM) && 274 (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) { 275 remrq(p); 276 p->p_priority = p->p_usrpri; 277 setrunqueue(p); 278 } else 279 p->p_priority = p->p_usrpri; 280 } 281 splx(s); 282 } 283 vmmeter(); 284 wakeup((caddr_t)&lbolt); 285 timeout(schedcpu, (void *)0, hz); 286} 287 288/* 289 * Recalculate the priority of a process after it has slept for a while. 290 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at 291 * least six times the loadfactor will decay p_estcpu to zero. 292 */ 293static void 294updatepri(p) 295 register struct proc *p; 296{ 297 register unsigned int newcpu = p->p_estcpu; 298 register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); 299 300 if (p->p_slptime > 5 * loadfac) 301 p->p_estcpu = 0; 302 else { 303 p->p_slptime--; /* the first time was done in schedcpu */ 304 while (newcpu && --p->p_slptime) 305 newcpu = (int) decay_cpu(loadfac, newcpu); 306 p->p_estcpu = min(newcpu, UCHAR_MAX); 307 } 308 resetpriority(p); 309} 310 311/* 312 * We're only looking at 7 bits of the address; everything is 313 * aligned to 4, lots of things are aligned to greater powers 314 * of 2. Shift right by 8, i.e. 
drop the bottom 256 worth. 315 */ 316#define TABLESIZE 128 317static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE]; 318#define LOOKUP(x) (((long)(x) >> 8) & (TABLESIZE - 1)) 319 320/* 321 * During autoconfiguration or after a panic, a sleep will simply 322 * lower the priority briefly to allow interrupts, then return. 323 * The priority to be used (safepri) is machine-dependent, thus this 324 * value is initialized and maintained in the machine-dependent layers. 325 * This priority will typically be 0, or the lowest priority 326 * that is safe for use on the interrupt stack; it can be made 327 * higher to block network software interrupts after panics. 328 */ 329int safepri; 330 331void 332sleepinit() 333{ 334 int i; 335 336 for (i = 0; i < TABLESIZE; i++) 337 TAILQ_INIT(&slpque[i]); 338} 339 340/* 341 * General sleep call. Suspends the current process until a wakeup is 342 * performed on the specified identifier. The process will then be made 343 * runnable with the specified priority. Sleeps at most timo/hz seconds 344 * (0 means no timeout). If pri includes PCATCH flag, signals are checked 345 * before and after sleeping, else signals are not checked. Returns 0 if 346 * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a 347 * signal needs to be delivered, ERESTART is returned if the current system 348 * call should be restarted if possible, and EINTR is returned if the system 349 * call should be interrupted by the signal (return EINTR). 
350 */ 351int 352tsleep(ident, priority, wmesg, timo) 353 void *ident; 354 int priority, timo; 355 const char *wmesg; 356{ 357 struct proc *p = curproc; 358 int s, sig, catch = priority & PCATCH; 359 struct callout_handle thandle; 360 361#ifdef KTRACE 362 if (KTRPOINT(p, KTR_CSW)) 363 ktrcsw(p->p_tracep, 1, 0); 364#endif 365 s = splhigh(); 366 if (cold || panicstr) { 367 /* 368 * After a panic, or during autoconfiguration, 369 * just give interrupts a chance, then just return; 370 * don't run any other procs or panic below, 371 * in case this is the idle process and already asleep. 372 */ 373 splx(safepri); 374 splx(s); 375 return (0); 376 } 377#ifdef DIAGNOSTIC 378 if(p == NULL) 379 panic("tsleep1"); 380 if (ident == NULL || p->p_stat != SRUN) 381 panic("tsleep"); 382 /* XXX This is not exhaustive, just the most common case */ 383 if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p)) 384 panic("sleeping process already on another queue"); 385#endif 386 p->p_wchan = ident; 387 p->p_wmesg = wmesg; 388 p->p_slptime = 0; 389 p->p_priority = priority & PRIMASK; 390 TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq); 391 if (timo) 392 thandle = timeout(endtsleep, (void *)p, timo); 393 /* 394 * We put ourselves on the sleep queue and start our timeout 395 * before calling CURSIG, as we could stop there, and a wakeup 396 * or a SIGCONT (or both) could occur while we were stopped. 397 * A SIGCONT would cause us to be marked as SSLEEP 398 * without resuming us, thus we must be ready for sleep 399 * when CURSIG is called. If the wakeup happens while we're 400 * stopped, p->p_wchan will be 0 upon return from CURSIG. 
401 */ 402 if (catch) { 403 p->p_flag |= P_SINTR; 404 if ((sig = CURSIG(p))) { 405 if (p->p_wchan) 406 unsleep(p); 407 p->p_stat = SRUN; 408 goto resume; 409 } 410 if (p->p_wchan == 0) { 411 catch = 0; 412 goto resume; 413 } 414 } else 415 sig = 0; 416 p->p_stat = SSLEEP; 417 p->p_stats->p_ru.ru_nvcsw++; 418 mi_switch(); 419resume: 420 curpriority = p->p_usrpri; 421 splx(s); 422 p->p_flag &= ~P_SINTR; 423 if (p->p_flag & P_TIMEOUT) { 424 p->p_flag &= ~P_TIMEOUT; 425 if (sig == 0) { 426#ifdef KTRACE 427 if (KTRPOINT(p, KTR_CSW)) 428 ktrcsw(p->p_tracep, 0, 0); 429#endif 430 return (EWOULDBLOCK); 431 } 432 } else if (timo) 433 untimeout(endtsleep, (void *)p, thandle); 434 if (catch && (sig != 0 || (sig = CURSIG(p)))) { 435#ifdef KTRACE 436 if (KTRPOINT(p, KTR_CSW)) 437 ktrcsw(p->p_tracep, 0, 0); 438#endif 439 if (p->p_sigacts->ps_sigintr & sigmask(sig)) 440 return (EINTR); 441 return (ERESTART); 442 } 443#ifdef KTRACE 444 if (KTRPOINT(p, KTR_CSW)) 445 ktrcsw(p->p_tracep, 0, 0); 446#endif 447 return (0); 448} 449 450/* 451 * Implement timeout for tsleep. 452 * If process hasn't been awakened (wchan non-zero), 453 * set timeout flag and undo the sleep. If proc 454 * is stopped, just unsleep so it will remain stopped. 455 */ 456static void 457endtsleep(arg) 458 void *arg; 459{ 460 register struct proc *p; 461 int s; 462 463 p = (struct proc *)arg; 464 s = splhigh(); 465 if (p->p_wchan) { 466 if (p->p_stat == SSLEEP) 467 setrunnable(p); 468 else 469 unsleep(p); 470 p->p_flag |= P_TIMEOUT; 471 } 472 splx(s); 473} 474 475/* 476 * Remove a process from its wait queue 477 */ 478void 479unsleep(p) 480 register struct proc *p; 481{ 482 int s; 483 484 s = splhigh(); 485 if (p->p_wchan) { 486 TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq); 487 p->p_wchan = 0; 488 } 489 splx(s); 490} 491 492/* 493 * Make all processes sleeping on the specified identifier runnable. 
494 */ 495void 496wakeup(ident) 497 register void *ident; 498{ 499 register struct slpquehead *qp; 500 register struct proc *p; 501 int s; 502 503 s = splhigh(); 504 qp = &slpque[LOOKUP(ident)]; 505restart: 506 for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) { 507#ifdef DIAGNOSTIC 508 if (p->p_stat != SSLEEP && p->p_stat != SSTOP) 509 panic("wakeup"); 510#endif 511 if (p->p_wchan == ident) { 512 TAILQ_REMOVE(qp, p, p_procq); 513 p->p_wchan = 0; 514 if (p->p_stat == SSLEEP) { 515 /* OPTIMIZED EXPANSION OF setrunnable(p); */ 516 if (p->p_slptime > 1) 517 updatepri(p); 518 p->p_slptime = 0; 519 p->p_stat = SRUN; 520 if (p->p_flag & P_INMEM) { 521 setrunqueue(p); 522 maybe_resched(p); 523 } else { 524 p->p_flag |= P_SWAPINREQ; 525 wakeup((caddr_t)&proc0); 526 } 527 /* END INLINE EXPANSION */ 528 goto restart; 529 } 530 } 531 } 532 splx(s); 533} 534 535/* 536 * Make a process sleeping on the specified identifier runnable. 537 * May wake more than one process if a target prcoess is currently 538 * swapped out. 539 */ 540void 541wakeup_one(ident) 542 register void *ident; 543{ 544 register struct slpquehead *qp; 545 register struct proc *p; 546 int s; 547 548 s = splhigh(); 549 qp = &slpque[LOOKUP(ident)]; 550 551 for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) { 552#ifdef DIAGNOSTIC 553 if (p->p_stat != SSLEEP && p->p_stat != SSTOP) 554 panic("wakeup_one"); 555#endif 556 if (p->p_wchan == ident) { 557 TAILQ_REMOVE(qp, p, p_procq); 558 p->p_wchan = 0; 559 if (p->p_stat == SSLEEP) { 560 /* OPTIMIZED EXPANSION OF setrunnable(p); */ 561 if (p->p_slptime > 1) 562 updatepri(p); 563 p->p_slptime = 0; 564 p->p_stat = SRUN; 565 if (p->p_flag & P_INMEM) { 566 setrunqueue(p); 567 maybe_resched(p); 568 break; 569 } else { 570 p->p_flag |= P_SWAPINREQ; 571 wakeup((caddr_t)&proc0); 572 } 573 /* END INLINE EXPANSION */ 574 } 575 } 576 } 577 splx(s); 578} 579 580/* 581 * The machine independent parts of mi_switch(). 
582 * Must be called at splstatclock() or higher. 583 */ 584void 585mi_switch() 586{ 587 register struct proc *p = curproc; /* XXX */ 588 register struct rlimit *rlim; 589 register long s, u; 590 int x; 591 struct timeval tv; 592 593 /* 594 * XXX this spl is almost unnecessary. It is partly to allow for 595 * sloppy callers that don't do it (issignal() via CURSIG() is the 596 * main offender). It is partly to work around a bug in the i386 597 * cpu_switch() (the ipl is not preserved). We ran for years 598 * without it. I think there was only a interrupt latency problem. 599 * The main caller, tsleep(), does an splx() a couple of instructions 600 * after calling here. The buggy caller, issignal(), usually calls 601 * here at spl0() and sometimes returns at splhigh(). The process 602 * then runs for a little too long at splhigh(). The ipl gets fixed 603 * when the process returns to user mode (or earlier). 604 * 605 * It would probably be better to always call here at spl0(). Callers 606 * are prepared to give up control to another process, so they must 607 * be prepared to be interrupted. The clock stuff here may not 608 * actually need splstatclock(). 609 */ 610 x = splstatclock(); 611 612#ifdef SIMPLELOCK_DEBUG 613 if (p->p_simple_locks) 614 printf("sleep: holding simple lock\n"); 615#endif 616 /* 617 * Compute the amount of time during which the current 618 * process was running, and add that to its total so far. 619 */ 620 microtime(&tv); 621 u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec); 622 s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec); 623 if (u < 0) { 624 u += 1000000; 625 s--; 626 } else if (u >= 1000000) { 627 u -= 1000000; 628 s++; 629 } 630#ifdef SMP 631 if (s < 0) 632 s = u = 0; 633#endif 634 p->p_rtime.tv_usec = u; 635 p->p_rtime.tv_sec = s; 636 637 /* 638 * Check if the process exceeds its cpu resource allocation. 639 * If over max, kill it. 
640 */ 641 if (p->p_stat != SZOMB) { 642 rlim = &p->p_rlimit[RLIMIT_CPU]; 643 if (s >= rlim->rlim_cur) { 644 if (s >= rlim->rlim_max) 645 killproc(p, "exceeded maximum CPU limit"); 646 else { 647 psignal(p, SIGXCPU); 648 if (rlim->rlim_cur < rlim->rlim_max) 649 rlim->rlim_cur += 5; 650 } 651 } 652 } 653 654 /* 655 * Pick a new current process and record its start time. 656 */ 657 cnt.v_swtch++; 658 cpu_switch(p); 659 microtime(&runtime); 660 splx(x); 661} 662 663/* 664 * Initialize the (doubly-linked) run queues 665 * to be empty. 666 */ 667/* ARGSUSED*/ 668static void 669rqinit(dummy) 670 void *dummy; 671{ 672 register int i; 673 674 for (i = 0; i < NQS; i++) { 675 qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i]; 676 rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i]; 677 idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i]; 678 } 679} 680 681/* 682 * Change process state to be runnable, 683 * placing it on the run queue if it is in memory, 684 * and awakening the swapper if it isn't in memory. 685 */ 686void 687setrunnable(p) 688 register struct proc *p; 689{ 690 register int s; 691 692 s = splhigh(); 693 switch (p->p_stat) { 694 case 0: 695 case SRUN: 696 case SZOMB: 697 default: 698 panic("setrunnable"); 699 case SSTOP: 700 case SSLEEP: 701 unsleep(p); /* e.g. when sending signals */ 702 break; 703 704 case SIDL: 705 break; 706 } 707 p->p_stat = SRUN; 708 if (p->p_flag & P_INMEM) 709 setrunqueue(p); 710 splx(s); 711 if (p->p_slptime > 1) 712 updatepri(p); 713 p->p_slptime = 0; 714 if ((p->p_flag & P_INMEM) == 0) { 715 p->p_flag |= P_SWAPINREQ; 716 wakeup((caddr_t)&proc0); 717 } 718 else 719 maybe_resched(p); 720} 721 722/* 723 * Compute the priority of a process when running in user mode. 724 * Arrange to reschedule if the resulting priority is better 725 * than that of the current process. 
726 */ 727void 728resetpriority(p) 729 register struct proc *p; 730{ 731 register unsigned int newpriority; 732 733 if (p->p_rtprio.type == RTP_PRIO_NORMAL) { 734 newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice; 735 newpriority = min(newpriority, MAXPRI); 736 p->p_usrpri = newpriority; 737 } 738 maybe_resched(p); 739} 740 741/* ARGSUSED */ 742static void sched_setup __P((void *dummy)); 743static void 744sched_setup(dummy) 745 void *dummy; 746{ 747 /* Kick off timeout driven events by calling first time. */ 748 roundrobin(NULL); 749 schedcpu(NULL); 750} 751SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL) 752 753