/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: head/sys/kern/kern_synch.c 81493 2001-08-10 22:53:32Z jhb $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup __P((void *dummy));
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void	endtsleep __P((void *));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
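/*
 * Worked illustration of the kern.quantum round trip above: the sysctl
 * exports the quantum in microseconds, converting through the global
 * `tick' (microseconds per hz tick).  A minimal userland sketch, assuming
 * hz = 100 and the default quantum of hz / 10 ticks set by sleepinit():
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int hz = 100;			/* assumed clock rate */
	int tick = 1000000 / hz;	/* 10000 microseconds per tick */
	int sched_quantum = hz / 10;	/* sleepinit() default: 10 ticks */

	/* sysctl_kern_quantum() exports microseconds... */
	int exported = sched_quantum * tick;	/* 100000us == 100ms */

	/* ...and converts a new setting back to ticks. */
	int new_val = 50000;			/* request 50ms */
	int new_quantum = new_val / tick;	/* 5 ticks */
	int new_hogticks = 2 * new_quantum;	/* 10 ticks */

	printf("exported %dus, new quantum %d ticks, hogticks %d\n",
	    exported, new_quantum, new_hogticks);
	return (0);
}
#endif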
/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
void
maybe_resched(p)
	struct proc *p;
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (p->p_pri.pri_level < curproc->p_pri.pri_level)
		curproc->p_sflag |= PS_NEEDRESCHED;
}

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(arg)
	void *arg;
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *	    exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *	    therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *	    ln(1+x) = x - x**2/2 + x**3/3 - ...	    -1 < x < 1
 *	    therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
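/*
 * A quick numerical check of the power table above (an illustrative
 * userland sketch, not kernel code): for decay = b/(b+1) with
 * b = 2*loadav, the power satisfying decay**power = .1 is
 * ln(.1)/ln(b/(b+1)), which reproduces 5.68, 10.32, 14.94, 19.55.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	int loadav;

	for (loadav = 1; loadav <= 4; loadav++) {
		double b = 2.0 * loadav;
		double decay = b / (b + 1.0);
		double power = log(0.1) / log(decay);

		/* Prints 5.68, 10.32, 14.94, 19.55 for loadav 1..4. */
		printf("loadav %d: power %.2f\n", loadav, power);
	}
	return (0);
}
#endif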
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (realstathz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		resetpriority(p);
		if (p->p_pri.pri_level >= PUSER) {
			if (p->p_oncpu == NOCPU &&	/* idle */
			    p->p_stat == SRUN &&
			    (p->p_sflag & PS_INMEM) &&
			    (p->p_pri.pri_level / RQ_PPQ) !=
			    (p->p_pri.pri_user / RQ_PPQ)) {
				remrunqueue(p);
				p->p_pri.pri_level = p->p_pri.pri_user;
				setrunqueue(p);
			} else
				p->p_pri.pri_level = p->p_pri.pri_user;
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);
	vmmeter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
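/*
 * Illustrative sketch of the LOOKUP() hash (userland, hypothetical
 * addresses): dropping the low 8 bits before masking keeps wait channels
 * that share an alignment granule from all landing in the same bucket.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TABLESIZE	128
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

int
main(void)
{
	/* Two hypothetical wait channels 0x300 bytes apart. */
	void *chan1 = (void *)(intptr_t)0xc0401200;
	void *chan2 = (void *)(intptr_t)0xc0401500;

	/* Prints buckets 18 and 21. */
	printf("bucket %ld, bucket %ld\n",
	    (long)LOOKUP(chan1), (long)LOOKUP(chan2));
	return (0);
}
#endif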
360 */ 361int 362msleep(ident, mtx, priority, wmesg, timo) 363 void *ident; 364 struct mtx *mtx; 365 int priority, timo; 366 const char *wmesg; 367{ 368 struct proc *p = curproc; 369 int sig, catch = priority & PCATCH; 370 int rval = 0; 371 WITNESS_SAVE_DECL(mtx); 372 373#ifdef KTRACE 374 if (p && KTRPOINT(p, KTR_CSW)) 375 ktrcsw(p->p_tracep, 1, 0); 376#endif 377 WITNESS_SLEEP(0, &mtx->mtx_object); 378 KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL, 379 ("sleeping without a mutex")); 380 mtx_lock_spin(&sched_lock); 381 if (cold || panicstr) { 382 /* 383 * After a panic, or during autoconfiguration, 384 * just give interrupts a chance, then just return; 385 * don't run any other procs or panic below, 386 * in case this is the idle process and already asleep. 387 */ 388 if (mtx != NULL && priority & PDROP) 389 mtx_unlock_flags(mtx, MTX_NOSWITCH); 390 mtx_unlock_spin(&sched_lock); 391 return (0); 392 } 393 394 DROP_GIANT_NOSWITCH(); 395 396 if (mtx != NULL) { 397 mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED); 398 WITNESS_SAVE(&mtx->mtx_object, mtx); 399 mtx_unlock_flags(mtx, MTX_NOSWITCH); 400 if (priority & PDROP) 401 mtx = NULL; 402 } 403 404 KASSERT(p != NULL, ("msleep1")); 405 KASSERT(ident != NULL && p->p_stat == SRUN, ("msleep")); 406 407 p->p_wchan = ident; 408 p->p_wmesg = wmesg; 409 p->p_slptime = 0; 410 p->p_pri.pri_level = priority & PRIMASK; 411 CTR5(KTR_PROC, "msleep: proc %p (pid %d, %s) on %s (%p)", p, p->p_pid, 412 p->p_comm, wmesg, ident); 413 TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq); 414 if (timo) 415 callout_reset(&p->p_slpcallout, timo, endtsleep, p); 416 /* 417 * We put ourselves on the sleep queue and start our timeout 418 * before calling CURSIG, as we could stop there, and a wakeup 419 * or a SIGCONT (or both) could occur while we were stopped. 420 * A SIGCONT would cause us to be marked as SSLEEP 421 * without resuming us, thus we must be ready for sleep 422 * when CURSIG is called. If the wakeup happens while we're 423 * stopped, p->p_wchan will be 0 upon return from CURSIG. 424 */ 425 if (catch) { 426 CTR3(KTR_PROC, "msleep caught: proc %p (pid %d, %s)", p, 427 p->p_pid, p->p_comm); 428 p->p_sflag |= PS_SINTR; 429 mtx_unlock_spin(&sched_lock); 430 PROC_LOCK(p); 431 sig = CURSIG(p); 432 mtx_lock_spin(&sched_lock); 433 PROC_UNLOCK_NOSWITCH(p); 434 if (sig != 0) { 435 if (p->p_wchan != NULL) 436 unsleep(p); 437 } else if (p->p_wchan == NULL) 438 catch = 0; 439 } else 440 sig = 0; 441 if (p->p_wchan != NULL) { 442 p->p_stat = SSLEEP; 443 p->p_stats->p_ru.ru_nvcsw++; 444 mi_switch(); 445 } 446 CTR3(KTR_PROC, "msleep resume: proc %p (pid %d, %s)", p, p->p_pid, 447 p->p_comm); 448 KASSERT(p->p_stat == SRUN, ("running but not SRUN")); 449 p->p_sflag &= ~PS_SINTR; 450 if (p->p_sflag & PS_TIMEOUT) { 451 p->p_sflag &= ~PS_TIMEOUT; 452 if (sig == 0) 453 rval = EWOULDBLOCK; 454 } else if (timo && callout_stop(&p->p_slpcallout) == 0) { 455 /* 456 * This isn't supposed to be pretty. If we are here, then 457 * the endtsleep() callout is currently executing on another 458 * CPU and is either spinning on the sched_lock or will be 459 * soon. If we don't synchronize here, there is a chance 460 * that this process may msleep() again before the callout 461 * has a chance to run and the callout may end up waking up 462 * the wrong msleep(). Yuck. 
463 */ 464 p->p_sflag |= PS_TIMEOUT; 465 p->p_stats->p_ru.ru_nivcsw++; 466 mi_switch(); 467 } 468 mtx_unlock_spin(&sched_lock); 469 470 if (rval == 0 && catch) { 471 PROC_LOCK(p); 472 /* XXX: shouldn't we always be calling CURSIG() */ 473 if (sig != 0 || (sig = CURSIG(p))) { 474 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig)) 475 rval = EINTR; 476 else 477 rval = ERESTART; 478 } 479 PROC_UNLOCK(p); 480 } 481 PICKUP_GIANT(); 482#ifdef KTRACE 483 mtx_lock(&Giant); 484 if (KTRPOINT(p, KTR_CSW)) 485 ktrcsw(p->p_tracep, 0, 0); 486 mtx_unlock(&Giant); 487#endif 488 if (mtx != NULL) { 489 mtx_lock(mtx); 490 WITNESS_RESTORE(&mtx->mtx_object, mtx); 491 } 492 return (rval); 493} 494 495/* 496 * Implement timeout for msleep() 497 * 498 * If process hasn't been awakened (wchan non-zero), 499 * set timeout flag and undo the sleep. If proc 500 * is stopped, just unsleep so it will remain stopped. 501 * MP-safe, called without the Giant mutex. 502 */ 503static void 504endtsleep(arg) 505 void *arg; 506{ 507 register struct proc *p; 508 509 p = (struct proc *)arg; 510 CTR3(KTR_PROC, "endtsleep: proc %p (pid %d, %s)", p, p->p_pid, 511 p->p_comm); 512 mtx_lock_spin(&sched_lock); 513 /* 514 * This is the other half of the synchronization with msleep() 515 * described above. If the PS_TIMEOUT flag is set, we lost the 516 * race and just need to put the process back on the runqueue. 517 */ 518 if ((p->p_sflag & PS_TIMEOUT) != 0) { 519 p->p_sflag &= ~PS_TIMEOUT; 520 setrunqueue(p); 521 } else if (p->p_wchan != NULL) { 522 if (p->p_stat == SSLEEP) 523 setrunnable(p); 524 else 525 unsleep(p); 526 p->p_sflag |= PS_TIMEOUT; 527 } 528 mtx_unlock_spin(&sched_lock); 529} 530 531/* 532 * Remove a process from its wait queue 533 */ 534void 535unsleep(p) 536 register struct proc *p; 537{ 538 539 mtx_lock_spin(&sched_lock); 540 if (p->p_wchan != NULL) { 541 TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq); 542 p->p_wchan = NULL; 543 } 544 mtx_unlock_spin(&sched_lock); 545} 546 547/* 548 * Make all processes sleeping on the specified identifier runnable. 549 */ 550void 551wakeup(ident) 552 register void *ident; 553{ 554 register struct slpquehead *qp; 555 register struct proc *p; 556 557 mtx_lock_spin(&sched_lock); 558 qp = &slpque[LOOKUP(ident)]; 559restart: 560 TAILQ_FOREACH(p, qp, p_slpq) { 561 if (p->p_wchan == ident) { 562 TAILQ_REMOVE(qp, p, p_slpq); 563 p->p_wchan = NULL; 564 if (p->p_stat == SSLEEP) { 565 /* OPTIMIZED EXPANSION OF setrunnable(p); */ 566 CTR3(KTR_PROC, "wakeup: proc %p (pid %d, %s)", 567 p, p->p_pid, p->p_comm); 568 if (p->p_slptime > 1) 569 updatepri(p); 570 p->p_slptime = 0; 571 p->p_stat = SRUN; 572 if (p->p_sflag & PS_INMEM) { 573 setrunqueue(p); 574 maybe_resched(p); 575 } else { 576 p->p_sflag |= PS_SWAPINREQ; 577 wakeup((caddr_t)&proc0); 578 } 579 /* END INLINE EXPANSION */ 580 goto restart; 581 } 582 } 583 } 584 mtx_unlock_spin(&sched_lock); 585} 586 587/* 588 * Make a process sleeping on the specified identifier runnable. 589 * May wake more than one process if a target process is currently 590 * swapped out. 
591 */ 592void 593wakeup_one(ident) 594 register void *ident; 595{ 596 register struct slpquehead *qp; 597 register struct proc *p; 598 599 mtx_lock_spin(&sched_lock); 600 qp = &slpque[LOOKUP(ident)]; 601 602 TAILQ_FOREACH(p, qp, p_slpq) { 603 if (p->p_wchan == ident) { 604 TAILQ_REMOVE(qp, p, p_slpq); 605 p->p_wchan = NULL; 606 if (p->p_stat == SSLEEP) { 607 /* OPTIMIZED EXPANSION OF setrunnable(p); */ 608 CTR3(KTR_PROC, "wakeup1: proc %p (pid %d, %s)", 609 p, p->p_pid, p->p_comm); 610 if (p->p_slptime > 1) 611 updatepri(p); 612 p->p_slptime = 0; 613 p->p_stat = SRUN; 614 if (p->p_sflag & PS_INMEM) { 615 setrunqueue(p); 616 maybe_resched(p); 617 break; 618 } else { 619 p->p_sflag |= PS_SWAPINREQ; 620 wakeup((caddr_t)&proc0); 621 } 622 /* END INLINE EXPANSION */ 623 } 624 } 625 } 626 mtx_unlock_spin(&sched_lock); 627} 628 629/* 630 * The machine independent parts of mi_switch(). 631 */ 632void 633mi_switch() 634{ 635 struct timeval new_switchtime; 636 register struct proc *p = curproc; /* XXX */ 637#if 0 638 register struct rlimit *rlim; 639#endif 640 critical_t sched_crit; 641 u_int sched_nest; 642 643 mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); 644 645 /* 646 * Compute the amount of time during which the current 647 * process was running, and add that to its total so far. 648 */ 649 microuptime(&new_switchtime); 650 if (timevalcmp(&new_switchtime, PCPU_PTR(switchtime), <)) { 651#if 0 652 /* XXX: This doesn't play well with sched_lock right now. */ 653 printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n", 654 PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec), 655 new_switchtime.tv_sec, new_switchtime.tv_usec); 656#endif 657 new_switchtime = PCPU_GET(switchtime); 658 } else { 659 p->p_runtime += (new_switchtime.tv_usec - PCPU_GET(switchtime.tv_usec)) + 660 (new_switchtime.tv_sec - PCPU_GET(switchtime.tv_sec)) * 661 (int64_t)1000000; 662 } 663 664#if 0 665 /* 666 * Check if the process exceeds its cpu resource allocation. 667 * If over max, kill it. 668 * 669 * XXX drop sched_lock, pickup Giant 670 */ 671 if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY && 672 p->p_runtime > p->p_limit->p_cpulimit) { 673 rlim = &p->p_rlimit[RLIMIT_CPU]; 674 if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) { 675 mtx_unlock_spin(&sched_lock); 676 PROC_LOCK(p); 677 killproc(p, "exceeded maximum CPU limit"); 678 mtx_lock_spin(&sched_lock); 679 PROC_UNLOCK_NOSWITCH(p); 680 } else { 681 mtx_unlock_spin(&sched_lock); 682 PROC_LOCK(p); 683 psignal(p, SIGXCPU); 684 mtx_lock_spin(&sched_lock); 685 PROC_UNLOCK_NOSWITCH(p); 686 if (rlim->rlim_cur < rlim->rlim_max) { 687 /* XXX: we should make a private copy */ 688 rlim->rlim_cur += 5; 689 } 690 } 691 } 692#endif 693 694 /* 695 * Pick a new current process and record its start time. 
696 */ 697 cnt.v_swtch++; 698 PCPU_SET(switchtime, new_switchtime); 699 CTR3(KTR_PROC, "mi_switch: old proc %p (pid %d, %s)", p, p->p_pid, 700 p->p_comm); 701 sched_crit = sched_lock.mtx_savecrit; 702 sched_nest = sched_lock.mtx_recurse; 703 p->p_lastcpu = p->p_oncpu; 704 p->p_oncpu = NOCPU; 705 p->p_sflag &= ~PS_NEEDRESCHED; 706 cpu_switch(); 707 p->p_oncpu = PCPU_GET(cpuid); 708 sched_lock.mtx_savecrit = sched_crit; 709 sched_lock.mtx_recurse = sched_nest; 710 sched_lock.mtx_lock = (uintptr_t)p; 711 CTR3(KTR_PROC, "mi_switch: new proc %p (pid %d, %s)", p, p->p_pid, 712 p->p_comm); 713 if (PCPU_GET(switchtime.tv_sec) == 0) 714 microuptime(PCPU_PTR(switchtime)); 715 PCPU_SET(switchticks, ticks); 716} 717 718/* 719 * Change process state to be runnable, 720 * placing it on the run queue if it is in memory, 721 * and awakening the swapper if it isn't in memory. 722 */ 723void 724setrunnable(p) 725 register struct proc *p; 726{ 727 728 mtx_lock_spin(&sched_lock); 729 switch (p->p_stat) { 730 case 0: 731 case SRUN: 732 case SZOMB: 733 case SWAIT: 734 default: 735 panic("setrunnable"); 736 case SSTOP: 737 case SSLEEP: /* e.g. when sending signals */ 738 if (p->p_sflag & PS_CVWAITQ) 739 cv_waitq_remove(p); 740 else 741 unsleep(p); 742 break; 743 744 case SIDL: 745 break; 746 } 747 p->p_stat = SRUN; 748 if (p->p_slptime > 1) 749 updatepri(p); 750 p->p_slptime = 0; 751 if ((p->p_sflag & PS_INMEM) == 0) { 752 p->p_sflag |= PS_SWAPINREQ; 753 wakeup((caddr_t)&proc0); 754 } else { 755 setrunqueue(p); 756 maybe_resched(p); 757 } 758 mtx_unlock_spin(&sched_lock); 759} 760 761/* 762 * Compute the priority of a process when running in user mode. 763 * Arrange to reschedule if the resulting priority is better 764 * than that of the current process. 765 */ 766void 767resetpriority(p) 768 register struct proc *p; 769{ 770 register unsigned int newpriority; 771 772 mtx_lock_spin(&sched_lock); 773 if (p->p_pri.pri_class == PRI_TIMESHARE) { 774 newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT + 775 NICE_WEIGHT * (p->p_nice - PRIO_MIN); 776 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), 777 PRI_MAX_TIMESHARE); 778 p->p_pri.pri_user = newpriority; 779 } 780 maybe_resched(p); 781 mtx_unlock_spin(&sched_lock); 782} 783 784/* ARGSUSED */ 785static void 786sched_setup(dummy) 787 void *dummy; 788{ 789 790 callout_init(&schedcpu_callout, 1); 791 callout_init(&roundrobin_callout, 0); 792 793 /* Kick off timeout driven events by calling first time. */ 794 roundrobin(NULL); 795 schedcpu(NULL); 796} 797 798/* 799 * We adjust the priority of the current process. The priority of 800 * a process gets worse as it accumulates CPU time. The cpu usage 801 * estimator (p_estcpu) is increased here. resetpriority() will 802 * compute a different priority each time p_estcpu increases by 803 * INVERSE_ESTCPU_WEIGHT 804 * (until MAXPRI is reached). The cpu usage estimator ramps up 805 * quite quickly when the process is running (linearly), and decays 806 * away exponentially, at a rate which is proportionally slower when 807 * the system is busy. The basic principle is that the system will 808 * 90% forget that the process used a lot of CPU time in 5 * loadav 809 * seconds. This causes the system to favor processes which haven't 810 * run much recently, and to round-robin among other processes. 
811 */ 812void 813schedclock(p) 814 struct proc *p; 815{ 816 817 p->p_cpticks++; 818 p->p_estcpu = ESTCPULIM(p->p_estcpu + 1); 819 if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { 820 resetpriority(p); 821 if (p->p_pri.pri_level >= PUSER) 822 p->p_pri.pri_level = p->p_pri.pri_user; 823 } 824} 825 826/* 827 * General purpose yield system call 828 */ 829int 830yield(struct proc *p, struct yield_args *uap) 831{ 832 833 p->p_retval[0] = 0; 834 835 mtx_lock_spin(&sched_lock); 836 DROP_GIANT_NOSWITCH(); 837 p->p_pri.pri_level = PRI_MAX_TIMESHARE; 838 setrunqueue(p); 839 p->p_stats->p_ru.ru_nvcsw++; 840 mi_switch(); 841 mtx_unlock_spin(&sched_lock); 842 PICKUP_GIANT(); 843 844 return (0); 845} 846