kern_synch.c revision 37649
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 *	$Id: kern_synch.c,v 1.60 1998/07/11 13:06:41 bde Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/limits.h>	/* for UCHAR_MAX = typeof(p_priority)_MAX */

static void rqinit __P((void *));
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

static void	endtsleep __P((void *));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));
static void	updatepri __P((struct proc *p));

#define MAXIMUM_SCHEDULE_QUANTUM	(1000000) /* arbitrary limit */
#ifndef DEFAULT_SCHEDULE_QUANTUM
#define DEFAULT_SCHEDULE_QUANTUM	10
#endif
static int quantum = DEFAULT_SCHEDULE_QUANTUM; /* default value */

static int
sysctl_kern_quantum SYSCTL_HANDLER_ARGS
{
	int error;
	int new_val = quantum;

	new_val = quantum;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error == 0) {
		if ((new_val > 0) && (new_val < MAXIMUM_SCHEDULE_QUANTUM)) {
			quantum = new_val;
		} else {
			error = EINVAL;
		}
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof quantum, sysctl_kern_quantum, "I", "");

/* maybe_resched: Decide if you need to reschedule or not
 * taking the priorities and schedulers into account.
 */
static void maybe_resched(struct proc *chk)
{
	struct proc *p = curproc; /* XXX */

	/* If the current scheduler is the idle scheduler or
	 * the priority of the new one is higher then reschedule.
	 */
	if (p == 0 ||
		RTP_PRIO_BASE(p->p_rtprio.type) == RTP_PRIO_IDLE ||
		(chk->p_priority < curpriority &&
		 RTP_PRIO_BASE(p->p_rtprio.type) == RTP_PRIO_BASE(chk->p_rtprio.type)) )
		need_resched();
}

#define ROUNDROBIN_INTERVAL (hz / quantum)
int roundrobin_interval(void)
{
	return ROUNDROBIN_INTERVAL;
}

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
static void
roundrobin(arg)
	void *arg;
{
	struct proc *p = curproc; /* XXX */

#ifdef SMP
	need_resched();
	forward_roundrobin();
#else
	if (p == 0 || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
#endif

	timeout(roundrobin, NULL, ROUNDROBIN_INTERVAL);
}
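/*
 * Illustration only (not part of kern_synch.c): a standalone userland sketch
 * of the quantum arithmetic above.  ROUNDROBIN_INTERVAL is hz / quantum, so
 * with the default quantum of 10 and an assumed clock rate of hz == 100 the
 * roundrobin() timeout fires every 10 ticks, i.e. every 100 ms, matching the
 * comment above roundrobin().  The hz value is an assumption for the example,
 * not something this file defines.
 */
#include <stdio.h>

int
main(void)
{
	int hz = 100;			/* assumed ticks per second */
	int quantum = 10;		/* DEFAULT_SCHEDULE_QUANTUM */
	int ticks = hz / quantum;	/* ROUNDROBIN_INTERVAL */

	printf("roundrobin every %d ticks = %d ms\n", ticks, ticks * 1000 / hz);
	return (0);
}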
/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that statclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', user uses `fscale' */
static int	fscale = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
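/*
 * Illustration only (not part of kern_synch.c): a standalone userland check
 * of the decay math above.  It solves decay**power == 0.1 for power with
 * decay = (2*loadav)/(2*loadav + 1), reproducing the loadav 1..4 table, and
 * confirms that ccpu == exp(-1/20), applied once per second, forgets about
 * 95% of p_pctcpu over 60 seconds.  Build with the math library (-lm).
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double loadav, decay, power, ccpu;

	for (loadav = 1; loadav <= 4; loadav++) {
		decay = (2 * loadav) / (2 * loadav + 1);
		power = log(0.1) / log(decay);	/* decay**power == 0.1 */
		printf("loadav %.0f: power %.2f\n", loadav, power);
	}
	ccpu = exp(-1.0 / 20.0);		/* 0.95122942... */
	printf("p_pctcpu retained after 60s: %.1f%%\n", 100.0 * pow(ccpu, 60));
	return (0);
}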
/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if (FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
		p->p_estcpu = min(newcpu, UCHAR_MAX);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
#ifdef SMP
			    (u_char)p->p_oncpu == 0xff && /* idle */
#endif
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = min(newcpu, UCHAR_MAX);
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit()
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
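/*
 * Illustration only (not part of kern_synch.c): the LOOKUP() macro above
 * hashes a wait-channel address into one of the TABLESIZE (128) sleep queues
 * by discarding the low 8 bits and keeping the next 7.  The sample addresses
 * below are invented; real wait channels are kernel object addresses.
 */
#include <stdint.h>
#include <stdio.h>

#define TABLESIZE	128
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

int
main(void)
{
	intptr_t a = 0x00403000;	/* made-up address */

	printf("%#lx -> bucket %ld\n", (long)a, (long)LOOKUP(a));
	/* Same 256-byte block, same bucket: */
	printf("%#lx -> bucket %ld\n", (long)(a + 0xff), (long)LOOKUP(a + 0xff));
	/* Next block, next bucket: */
	printf("%#lx -> bucket %ld\n", (long)(a + 0x100), (long)LOOKUP(a + 0x100));
	return (0);
}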
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	const char *wmesg;
{
	struct proc *p = curproc;
	int s, sig, catch = priority & PCATCH;
	struct callout_handle thandle;

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("tsleep1");
	if (ident == NULL || p->p_stat != SRUN)
		panic("tsleep");
	/* XXX This is not exhaustive, just the most common case */
	if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p))
		panic("sleeping process already on another queue");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
	if (timo)
		thandle = timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p))) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p, thandle);
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
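/*
 * Illustration only (not part of kern_synch.c): a hypothetical driver
 * fragment showing the tsleep() calling convention documented above: sleep
 * on an address, bounded by a timeout, interruptible by signals.  The
 * mydev_softc structure, its sc_ready field and the "mydrdy" wmesg are
 * invented for this sketch; only the tsleep()/wakeup() usage follows this
 * file, and the fragment assumes kernel context.
 */
struct mydev_softc {
	int	sc_ready;		/* set by the interrupt handler */
};

static int
mydev_wait_ready(struct mydev_softc *sc)
{
	int error, s;

	s = splhigh();
	while (!sc->sc_ready) {
		/* Wait channel is &sc->sc_ready; give up after 5 seconds. */
		error = tsleep(&sc->sc_ready, PZERO | PCATCH, "mydrdy", 5 * hz);
		if (error) {
			/* EWOULDBLOCK on timeout, EINTR/ERESTART on a signal. */
			splx(s);
			return (error);
		}
	}
	splx(s);
	return (0);
}

/* The interrupt side would set sc->sc_ready and call wakeup(&sc->sc_ready). */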
/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
	register struct proc *p;
{
	int s;

	s = splhigh();
	if (p->p_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq);
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					maybe_resched(p);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	splx(s);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];

	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					maybe_resched(p);
					break;
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			}
		}
	}
	splx(s);
}
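/*
 * Illustration only (not part of kern_synch.c): a hypothetical
 * producer/consumer fragment contrasting wakeup_one() and wakeup().  The
 * mybuf structures and fields are invented for the sketch; only the wakeup
 * calls follow the interfaces defined above, and kernel context is assumed.
 */
struct mybuf {
	TAILQ_ENTRY(mybuf) mb_link;
	/* ... payload ... */
};

struct mybuf_softc {
	TAILQ_HEAD(, mybuf) sc_bufq;	/* completed buffers */
	int	sc_dead;		/* device is going away */
};

static void
mybuf_done(struct mybuf_softc *sc, struct mybuf *bp)
{
	int s;

	s = splhigh();
	TAILQ_INSERT_TAIL(&sc->sc_bufq, bp, mb_link);
	splx(s);
	wakeup_one(&sc->sc_bufq);	/* one buffer: waking one sleeper is enough */
}

static void
mybuf_detach(struct mybuf_softc *sc)
{
	sc->sc_dead = 1;
	wakeup(&sc->sc_bufq);		/* every sleeper must notice and bail out */
}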
/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	int x;

	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0().  Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();

#ifdef SIMPLELOCK_DEBUG
	if (p->p_simple_locks)
		printf("sleep: holding simple lock\n");
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microuptime(&switchtime);
	p->p_runtime += (switchtime.tv_usec - p->p_switchtime.tv_usec) +
	    (switchtime.tv_sec - p->p_switchtime.tv_sec) * (int64_t)1000000;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 */
	if (p->p_stat != SZOMB && p->p_runtime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	cpu_switch(p);
	if (switchtime.tv_sec)
		p->p_switchtime = switchtime;
	else
		microuptime(&p->p_switchtime);
	splx(x);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
/* ARGSUSED*/
static void
rqinit(dummy)
	void *dummy;
{
	register int i;

	for (i = 0; i < NQS; i++) {
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
		rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i];
		idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i];
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
	else
		maybe_resched(p);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
		newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
		newpriority = min(newpriority, MAXPRI);
		p->p_usrpri = newpriority;
	}
	maybe_resched(p);
}

/* ARGSUSED */
static void sched_setup __P((void *dummy));
static void
sched_setup(dummy)
	void *dummy;
{
	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
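/*
 * Illustration only (not part of kern_synch.c): the user-mode priority
 * formula from resetpriority() above, evaluated standalone.  The PUSER and
 * MAXPRI values are assumed to be the era's <sys/param.h> definitions (50
 * and 127); they are not defined in this file.
 */
#include <stdio.h>

#define PUSER	50		/* assumed base user priority */
#define MAXPRI	127		/* assumed weakest priority */

static unsigned int
user_priority(unsigned int estcpu, int nice)
{
	unsigned int newpriority = PUSER + estcpu / 4 + 2 * nice;

	return (newpriority < MAXPRI ? newpriority : MAXPRI);
}

int
main(void)
{
	printf("estcpu   0, nice   0 -> %u\n", user_priority(0, 0));	/* 50 */
	printf("estcpu 255, nice   0 -> %u\n", user_priority(255, 0));	/* 113 */
	printf("estcpu 255, nice +20 -> %u\n", user_priority(255, 20));	/* clamped to 127 */
	return (0);
}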