/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: head/sys/kern/kern_synch.c 68862 2000-11-17 18:09:18Z jake $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ipl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

static void sched_setup __P((void *dummy));
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

u_char	curpriority;
int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */

static int	curpriority_cmp __P((struct proc *p));
static void	endtsleep __P((void *));
static void	maybe_resched __P((struct proc *chk));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));
static void	updatepri __P((struct proc *p));

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;

        new_val = sched_quantum * tick;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < tick)
                return (EINVAL);
        sched_quantum = new_val / tick;
        hogticks = 2 * sched_quantum;
        return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
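
/*
 * Example (illustrative; assumes hz == 100, so tick == 10000 us):
 * "sysctl -w kern.quantum=20000" reaches sysctl_kern_quantum() with
 * new_val == 20000, giving sched_quantum = 20000 / 10000 = 2 ticks
 * and hogticks = 4 ticks; any value below one tick is rejected with
 * EINVAL.
 */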

/*-
 * Compare priorities.  Return:
 *   <0: priority of p < current priority
 *    0: priority of p == current priority
 *   >0: priority of p > current priority
 * The priorities are the normal priorities or the normal realtime priorities
 * if p is on the same scheduler as curproc.  Otherwise the process on the
 * more realtimeish scheduler has lowest priority.  As usual, a higher
 * priority really means a lower priority.
 */
static int
curpriority_cmp(p)
        struct proc *p;
{
        int c_class, p_class;

        c_class = RTP_PRIO_BASE(curproc->p_rtprio.type);
        p_class = RTP_PRIO_BASE(p->p_rtprio.type);
        if (p_class != c_class)
                return (p_class - c_class);
        if (p_class == RTP_PRIO_NORMAL)
                return (((int)p->p_priority - (int)curpriority) / PPQ);
        return ((int)p->p_rtprio.prio - (int)curproc->p_rtprio.prio);
}
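
/*
 * Worked example (illustrative; PPQ assumed to be 4, i.e. one run
 * queue per 4 priority levels): with both processes on the normal
 * scheduler, p->p_priority == 60 against curpriority == 70 gives
 * (60 - 70) / 4 == -2, so p is reported as having better
 * (numerically lower) priority than the current process.
 */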

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(chk)
        struct proc *chk;
{
        struct proc *p = curproc; /* XXX */

        /*
         * XXX idle scheduler still broken because the process stays on the
         * idle scheduler during waits (such as when getting FS locks).  If a
         * standard process becomes runaway cpu-bound, the system can lock up
         * due to idle-scheduler processes in wakeup never getting any cpu.
         */
        if (p == idleproc) {
#if 0
                need_resched();
#endif
        } else if (chk == p) {
                /* We may need to yield if our priority has been raised. */
                if (curpriority_cmp(chk) > 0)
                        need_resched();
        } else if (curpriority_cmp(chk) < 0)
                need_resched();
}

int
roundrobin_interval(void)
{
        return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
static void
roundrobin(arg)
        void *arg;
{
#ifndef SMP
        struct proc *p = curproc; /* XXX */
#endif

#ifdef SMP
        need_resched();
        forward_roundrobin();
#else
        if (p == idleproc || RTP_PRIO_NEED_RR(p->p_rtprio.type))
                need_resched();
#endif

        timeout(roundrobin, NULL, sched_quantum);
}
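
/*
 * Illustrative timing: sleepinit() below sets sched_quantum = hz / 10,
 * so with hz == 100 the self-rearming timeout above refires every 10
 * ticks, i.e. the 100ms mentioned in the comment; kern.quantum can
 * retune this at run time.
 */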

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *                 exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *         therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *                 ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *         therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
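
/*
 * Worked example (illustrative): with loadav == 1, b == 2 and
 * decay == 2/3.  The nominal 5 once-a-second iterations leave
 * (2/3)**5 =~ 0.13 of p_estcpu, and the exact power needed to reach
 * 0.1 is ln(.1)/ln(2/3) =~ 5.68, the first entry in the table above.
 */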

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
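
/*
 * Illustrative arithmetic for the shortcut: with realstathz == 100,
 * (1 - exp(-1/20)) / 100 =~ 0.0488 / 100 =~ 1/2048 == 2**-11, so each
 * second's worth of statclock ticks contributes
 * p_cpticks << (FSHIFT - CCPU_SHIFT) to p_pctcpu in schedcpu() below.
 * Decaying by ccpu once a second for 60 seconds leaves
 * exp(-60/20) =~ 5% of the old value, which is the "95% in 60 seconds"
 * rule above.
 */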

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(arg)
        void *arg;
{
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        register struct proc *p;
        register int realstathz, s;

        realstathz = stathz ? stathz : hz;
        LIST_FOREACH(p, &allproc, p_list) {
                /*
                 * Increment time in/out of memory and sleep time
                 * (if sleeping).  We ignore overflow; with 16-bit int's
                 * (remember them?) overflow takes 45 days.
                if (p->p_stat == SWAIT)
                        continue;
                 */
                mtx_enter(&sched_lock, MTX_SPIN);
                p->p_swtime++;
                if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
                        p->p_slptime++;
                p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
                /*
                 * If the process has slept the entire second,
                 * stop recalculating its priority until it wakes up.
                 */
                if (p->p_slptime > 1) {
                        mtx_exit(&sched_lock, MTX_SPIN);
                        continue;
                }

                /*
                 * prevent state changes and protect run queue
                 */
                s = splhigh();

                /*
                 * p_pctcpu is only for ps.
                 */
#if	(FSHIFT >= CCPU_SHIFT)
                p->p_pctcpu += (realstathz == 100)?
                        ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
                        100 * (((fixpt_t) p->p_cpticks)
                                << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
                p->p_pctcpu += ((FSCALE - ccpu) *
                        (p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
#endif
                p->p_cpticks = 0;
                p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
                resetpriority(p);
                if (p->p_priority >= PUSER) {
                        if ((p != curproc) &&
#ifdef SMP
                            p->p_oncpu == 0xff &&	/* idle */
#endif
                            p->p_stat == SRUN &&
                            (p->p_flag & P_INMEM) &&
                            (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
                                remrunqueue(p);
                                p->p_priority = p->p_usrpri;
                                setrunqueue(p);
                        } else
                                p->p_priority = p->p_usrpri;
                }
                mtx_exit(&sched_lock, MTX_SPIN);
                splx(s);
        }
        vmmeter();
        wakeup((caddr_t)&lbolt);
        timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(p)
        register struct proc *p;
{
        register unsigned int newcpu = p->p_estcpu;
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

        if (p->p_slptime > 5 * loadfac)
                p->p_estcpu = 0;
        else {
                p->p_slptime--;	/* the first time was done in schedcpu */
                while (newcpu && --p->p_slptime)
                        newcpu = decay_cpu(loadfac, newcpu);
                p->p_estcpu = newcpu;
        }
        resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
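
/*
 * Example (illustrative): for ident == (void *)0xc0123456,
 * LOOKUP(ident) == (0xc0123456 >> 8) & 127 == 0xc01234 & 0x7f == 0x34,
 * so sleepers on that channel hash to slpque[52].
 */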

void
sleepinit(void)
{
        int i;

        sched_quantum = hz/10;
        hogticks = 2 * sched_quantum;
        for (i = 0; i < TABLESIZE; i++)
                TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */
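
/*
 * Minimal usage sketch (illustrative only; "sc" and its fields are
 * hypothetical, not part of this file).  A consumer sleeps on an
 * address while its mutex is released, and a producer wakes it:
 *
 *	mtx_enter(&sc->sc_mtx, MTX_DEF);
 *	while (sc->sc_avail == 0)
 *		(void) msleep(&sc->sc_avail, &sc->sc_mtx, PZERO, "scavl", 0);
 *	sc->sc_avail--;
 *	mtx_exit(&sc->sc_mtx, MTX_DEF);
 *
 * Producer side:	sc->sc_avail++; wakeup(&sc->sc_avail);
 */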
int
msleep(ident, mtx, priority, wmesg, timo)
        void *ident;
        struct mtx *mtx;
        int priority, timo;
        const char *wmesg;
{
        struct proc *p = curproc;
        int s, sig, catch = priority & PCATCH;
        struct callout_handle thandle;
        int rval = 0;
        WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
        if (p && KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 1, 0);
#endif
        WITNESS_SLEEP(0, mtx);
        mtx_enter(&sched_lock, MTX_SPIN);
        DROP_GIANT_NOSWITCH();

        if (mtx != NULL) {
                mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
                WITNESS_SAVE(mtx, mtx);
                mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
                if (priority & PDROP)
                        mtx = NULL;
        }

        s = splhigh();
        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                mtx_exit(&sched_lock, MTX_SPIN);
                splx(s);
                return (0);
        }
447
448 KASSERT(p != NULL, ("msleep1"));
449 KASSERT(ident != NULL && p->p_stat == SRUN, ("msleep"));
450 /*
451 * Process may be sitting on a slpque if asleep() was called, remove
452 * it before re-adding.
453 */
454 if (p->p_wchan != NULL)
455 unsleep(p);
456
457 p->p_wchan = ident;
458 p->p_wmesg = wmesg;
459 p->p_slptime = 0;
460 p->p_priority = priority & PRIMASK;
461 p->p_nativepri = p->p_priority;
462 CTR4(KTR_PROC, "msleep: proc %p (pid %d, %s), schedlock %p",
463 p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
        TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
        if (timo)
                thandle = timeout(endtsleep, (void *)p, timo);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling CURSIG, as we could stop there, and a wakeup
         * or a SIGCONT (or both) could occur while we were stopped.
         * A SIGCONT would cause us to be marked as SSLEEP
         * without resuming us, thus we must be ready for sleep
         * when CURSIG is called.  If the wakeup happens while we're
         * stopped, p->p_wchan will be 0 upon return from CURSIG.
         */
        if (catch) {
                CTR4(KTR_PROC,
                    "msleep caught: proc %p (pid %d, %s), schedlock %p",
                    p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
                p->p_flag |= P_SINTR;
                mtx_exit(&sched_lock, MTX_SPIN);
                if ((sig = CURSIG(p))) {
                        mtx_enter(&sched_lock, MTX_SPIN);
                        if (p->p_wchan)
                                unsleep(p);
                        p->p_stat = SRUN;
                        goto resume;
                }
                mtx_enter(&sched_lock, MTX_SPIN);
                if (p->p_wchan == 0) {
                        catch = 0;
                        goto resume;
                }
        } else
                sig = 0;
        p->p_stat = SSLEEP;
        p->p_stats->p_ru.ru_nvcsw++;
        mi_switch();
        CTR4(KTR_PROC,
            "msleep resume: proc %p (pid %d, %s), schedlock %p",
            p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
resume:
        curpriority = p->p_usrpri;
        splx(s);
        p->p_flag &= ~P_SINTR;
        if (p->p_flag & P_TIMEOUT) {
                p->p_flag &= ~P_TIMEOUT;
                if (sig == 0) {
#ifdef KTRACE
                        if (KTRPOINT(p, KTR_CSW))
                                ktrcsw(p->p_tracep, 0, 0);
#endif
                        rval = EWOULDBLOCK;
                        mtx_exit(&sched_lock, MTX_SPIN);
                        goto out;
                }
        } else if (timo)
                untimeout(endtsleep, (void *)p, thandle);
        mtx_exit(&sched_lock, MTX_SPIN);

        if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
                if (KTRPOINT(p, KTR_CSW))
                        ktrcsw(p->p_tracep, 0, 0);
#endif
                if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                        rval = EINTR;
                else
                        rval = ERESTART;
                goto out;
        }
out:
#ifdef KTRACE
        if (KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 0, 0);
#endif
        PICKUP_GIANT();
        if (mtx != NULL) {
                mtx_enter(mtx, MTX_DEF);
                WITNESS_RESTORE(mtx, mtx);
        }
        return (rval);
}

/*
 * asleep() - async sleep call.  Place process on wait queue and return
 * immediately without blocking.  The process stays runnable until mawait()
 * is called.  If ident is NULL, remove process from wait queue if it is still
 * on one.
 *
 * Only the most recent sleep condition is effective when making successive
 * calls to asleep() or when calling msleep().
 *
 * The timeout, if any, is not initiated until mawait() is called.  The sleep
 * priority, signal, and timeout are specified in the asleep() call but may be
 * overridden in the mawait() call.
 *
 * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
 */

int
asleep(void *ident, int priority, const char *wmesg, int timo)
{
        struct proc *p = curproc;
        int s;

        /*
         * obtain sched_lock while manipulating sleep structures and slpque.
         *
         * Remove preexisting wait condition (if any) and place process
         * on appropriate slpque, but do not put process to sleep.
         */

        s = splhigh();
        mtx_enter(&sched_lock, MTX_SPIN);

        if (p->p_wchan != NULL)
                unsleep(p);

        if (ident) {
                p->p_wchan = ident;
                p->p_wmesg = wmesg;
                p->p_slptime = 0;
                p->p_asleep.as_priority = priority;
                p->p_asleep.as_timo = timo;
                TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
        }

        mtx_exit(&sched_lock, MTX_SPIN);
        splx(s);

        return(0);
}

/*
 * mawait() - wait for async condition to occur.  The process blocks until
 * wakeup() is called on the most recent asleep() address.  If wakeup is called
 * prior to mawait(), mawait() winds up being a NOP.
 *
 * If mawait() is called more than once (without an intervening asleep() call),
 * mawait() is still effectively a NOP but it calls mi_switch() to give other
 * processes some cpu before returning.  The process is left runnable.
 *
 * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
 */
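
/*
 * Illustrative pairing (hypothetical caller; "sc" and start_io() are
 * not part of this file).  Register the wait condition before starting
 * work, then block only if the wakeup has not already arrived:
 *
 *	(void) asleep(&sc->sc_done, PZERO, "scdone", hz);
 *	start_io(sc);
 *	error = mawait(NULL, -1, -1);	(reuse asleep() priority/timo)
 */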

int
mawait(struct mtx *mtx, int priority, int timo)
{
        struct proc *p = curproc;
        int rval = 0;
        int s;
        WITNESS_SAVE_DECL(mtx);

        WITNESS_SLEEP(0, mtx);
        mtx_enter(&sched_lock, MTX_SPIN);
        DROP_GIANT_NOSWITCH();
        if (mtx != NULL) {
                mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
                WITNESS_SAVE(mtx, mtx);
                mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH);
                if (priority & PDROP)
                        mtx = NULL;
        }

        s = splhigh();

        if (p->p_wchan != NULL) {
                struct callout_handle thandle;
                int sig;
                int catch;

                /*
                 * The call to mawait() can override defaults specified in
                 * the original asleep().
                 */
                if (priority < 0)
                        priority = p->p_asleep.as_priority;
                if (timo < 0)
                        timo = p->p_asleep.as_timo;

                /*
                 * Install timeout
                 */

                if (timo)
                        thandle = timeout(endtsleep, (void *)p, timo);

                sig = 0;
                catch = priority & PCATCH;

                if (catch) {
                        p->p_flag |= P_SINTR;
                        mtx_exit(&sched_lock, MTX_SPIN);
                        if ((sig = CURSIG(p))) {
                                mtx_enter(&sched_lock, MTX_SPIN);
                                if (p->p_wchan)
                                        unsleep(p);
                                p->p_stat = SRUN;
                                goto resume;
                        }
                        mtx_enter(&sched_lock, MTX_SPIN);
                        if (p->p_wchan == NULL) {
                                catch = 0;
                                goto resume;
                        }
                }
                p->p_stat = SSLEEP;
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch();
resume:
                curpriority = p->p_usrpri;

                splx(s);
                p->p_flag &= ~P_SINTR;
                if (p->p_flag & P_TIMEOUT) {
                        p->p_flag &= ~P_TIMEOUT;
                        if (sig == 0) {
#ifdef KTRACE
                                if (KTRPOINT(p, KTR_CSW))
                                        ktrcsw(p->p_tracep, 0, 0);
#endif
                                rval = EWOULDBLOCK;
                                mtx_exit(&sched_lock, MTX_SPIN);
                                goto out;
                        }
                } else if (timo)
                        untimeout(endtsleep, (void *)p, thandle);
                mtx_exit(&sched_lock, MTX_SPIN);

                if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
                        if (KTRPOINT(p, KTR_CSW))
                                ktrcsw(p->p_tracep, 0, 0);
#endif
                        if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                                rval = EINTR;
                        else
                                rval = ERESTART;
                        goto out;
                }
#ifdef KTRACE
                if (KTRPOINT(p, KTR_CSW))
                        ktrcsw(p->p_tracep, 0, 0);
#endif
        } else {
                /*
                 * If as_priority is 0, mawait() has been called without an
                 * intervening asleep().  We are still effectively a NOP,
                 * but we call mi_switch() for safety.
                 */

                if (p->p_asleep.as_priority == 0) {
                        p->p_stats->p_ru.ru_nvcsw++;
                        mi_switch();
                }
                mtx_exit(&sched_lock, MTX_SPIN);
                splx(s);
        }

        /*
         * clear p_asleep.as_priority as an indication that mawait() has been
         * called.  If mawait() is called again without an intervening asleep(),
         * mawait() is still effectively a NOP but the above mi_switch() code
         * is triggered as a safety.
         */
        p->p_asleep.as_priority = 0;

out:
        PICKUP_GIANT();
        if (mtx != NULL) {
                mtx_enter(mtx, MTX_DEF);
                WITNESS_RESTORE(mtx, mtx);
        }
        return (rval);
}

/*
 * Implement timeout for msleep or asleep()/mawait()
 *
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
static void
endtsleep(arg)
        void *arg;
{
        register struct proc *p;
        int s;

        p = (struct proc *)arg;
        CTR4(KTR_PROC,
            "endtsleep: proc %p (pid %d, %s), schedlock %p",
            p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
        s = splhigh();
        mtx_enter(&sched_lock, MTX_SPIN);
        if (p->p_wchan) {
                if (p->p_stat == SSLEEP)
                        setrunnable(p);
                else
                        unsleep(p);
                p->p_flag |= P_TIMEOUT;
        }
        mtx_exit(&sched_lock, MTX_SPIN);
        splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
        register struct proc *p;
{
        int s;

        s = splhigh();
        mtx_enter(&sched_lock, MTX_SPIN);
        if (p->p_wchan) {
                TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq);
                p->p_wchan = 0;
        }
        mtx_exit(&sched_lock, MTX_SPIN);
        splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
        register void *ident;
{
        register struct slpquehead *qp;
        register struct proc *p;
        int s;

        s = splhigh();
        mtx_enter(&sched_lock, MTX_SPIN);
        qp = &slpque[LOOKUP(ident)];
restart:
        TAILQ_FOREACH(p, qp, p_slpq) {
                if (p->p_wchan == ident) {
                        TAILQ_REMOVE(qp, p, p_slpq);
                        p->p_wchan = 0;
                        if (p->p_stat == SSLEEP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                CTR4(KTR_PROC,
                                    "wakeup: proc %p (pid %d, %s), schedlock %p",
                                    p, p->p_pid, p->p_comm,
                                    (void *) sched_lock.mtx_lock);
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_flag & P_INMEM) {
                                        setrunqueue(p);
                                        maybe_resched(p);
                                } else {
                                        p->p_flag |= P_SWAPINREQ;
                                        wakeup((caddr_t)&proc0);
                                }
                                /* END INLINE EXPANSION */
                                goto restart;
                        }
                }
        }
        mtx_exit(&sched_lock, MTX_SPIN);
        splx(s);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
        register void *ident;
{
        register struct slpquehead *qp;
        register struct proc *p;
        int s;

        s = splhigh();
        mtx_enter(&sched_lock, MTX_SPIN);
        qp = &slpque[LOOKUP(ident)];

        TAILQ_FOREACH(p, qp, p_slpq) {
                if (p->p_wchan == ident) {
                        TAILQ_REMOVE(qp, p, p_slpq);
                        p->p_wchan = 0;
                        if (p->p_stat == SSLEEP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                CTR4(KTR_PROC,
                                    "wakeup1: proc %p (pid %d, %s), schedlock %p",
                                    p, p->p_pid, p->p_comm,
                                    (void *) sched_lock.mtx_lock);
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_flag & P_INMEM) {
                                        setrunqueue(p);
                                        maybe_resched(p);
                                        break;
                                } else {
                                        p->p_flag |= P_SWAPINREQ;
                                        wakeup((caddr_t)&proc0);
                                }
                                /* END INLINE EXPANSION */
                        }
                }
        }
        mtx_exit(&sched_lock, MTX_SPIN);
        splx(s);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
        struct timeval new_switchtime;
        register struct proc *p = curproc;	/* XXX */
        register struct rlimit *rlim;
        int x;

        /*
         * XXX this spl is almost unnecessary.  It is partly to allow for
         * sloppy callers that don't do it (issignal() via CURSIG() is the
         * main offender).  It is partly to work around a bug in the i386
         * cpu_switch() (the ipl is not preserved).  We ran for years
         * without it.  I think there was only an interrupt latency problem.
         * The main caller, msleep(), does an splx() a couple of instructions
         * after calling here.  The buggy caller, issignal(), usually calls
         * here at spl0() and sometimes returns at splhigh().  The process
         * then runs for a little too long at splhigh().  The ipl gets fixed
         * when the process returns to user mode (or earlier).
         *
         * It would probably be better to always call here at spl0().  Callers
         * are prepared to give up control to another process, so they must
         * be prepared to be interrupted.  The clock stuff here may not
         * actually need splstatclock().
         */
        x = splstatclock();

        mtx_assert(&sched_lock, MA_OWNED);

#ifdef SIMPLELOCK_DEBUG
        if (p->p_simple_locks)
                printf("sleep: holding simple lock\n");
#endif
        /*
         * Compute the amount of time during which the current
         * process was running, and add that to its total so far.
         */
        microuptime(&new_switchtime);
        if (timevalcmp(&new_switchtime, &switchtime, <)) {
                printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
                    switchtime.tv_sec, switchtime.tv_usec,
                    new_switchtime.tv_sec, new_switchtime.tv_usec);
                new_switchtime = switchtime;
        } else {
                p->p_runtime += (new_switchtime.tv_usec - switchtime.tv_usec) +
                    (new_switchtime.tv_sec - switchtime.tv_sec) * (int64_t)1000000;
        }

        /*
         * Check if the process exceeds its cpu resource allocation.
         * If over max, kill it.
         *
         * XXX drop sched_lock, pickup Giant
         */
        if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
            p->p_runtime > p->p_limit->p_cpulimit) {
                rlim = &p->p_rlimit[RLIMIT_CPU];
                if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
                        killproc(p, "exceeded maximum CPU limit");
                } else {
                        psignal(p, SIGXCPU);
                        if (rlim->rlim_cur < rlim->rlim_max) {
                                /* XXX: we should make a private copy */
                                rlim->rlim_cur += 5;
                        }
                }
        }

        /*
         * Pick a new current process and record its start time.
         */
        cnt.v_swtch++;
        switchtime = new_switchtime;
        CTR4(KTR_PROC, "mi_switch: old proc %p (pid %d, %s), schedlock %p",
            p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
        cpu_switch();
        CTR4(KTR_PROC, "mi_switch: new proc %p (pid %d, %s), schedlock %p",
            p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
        if (switchtime.tv_sec == 0)
                microuptime(&switchtime);
        switchticks = ticks;
        splx(x);
}
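
/*
 * Illustrative check of the limit test above: p_runtime accumulates
 * microseconds, so after 2.5 seconds of CPU time p_runtime is
 * 2500000; with an RLIMIT_CPU hard limit (rlim_max) of 2 seconds,
 * 2500000 / 1000000 == 2 >= 2 and the process is killed.  Below the
 * hard limit the process only receives SIGXCPU, with rlim_cur nudged
 * up 5 seconds to pace further warnings.
 */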

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
        register struct proc *p;
{
        register int s;

        s = splhigh();
        mtx_enter(&sched_lock, MTX_SPIN);
        switch (p->p_stat) {
        case 0:
        case SRUN:
        case SZOMB:
        case SWAIT:
        default:
                panic("setrunnable");
        case SSTOP:
        case SSLEEP:
                unsleep(p);		/* e.g. when sending signals */
                break;

        case SIDL:
                break;
        }
        p->p_stat = SRUN;
        if (p->p_flag & P_INMEM)
                setrunqueue(p);
        splx(s);
        if (p->p_slptime > 1)
                updatepri(p);
        p->p_slptime = 0;
        if ((p->p_flag & P_INMEM) == 0) {
                p->p_flag |= P_SWAPINREQ;
                wakeup((caddr_t)&proc0);
        }
        else
                maybe_resched(p);
        mtx_exit(&sched_lock, MTX_SPIN);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
        register struct proc *p;
{
        register unsigned int newpriority;

        mtx_enter(&sched_lock, MTX_SPIN);
        if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
                newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
                    NICE_WEIGHT * (p->p_nice - PRIO_MIN);
                newpriority = min(newpriority, MAXPRI);
                p->p_usrpri = newpriority;
        }
        maybe_resched(p);
        mtx_exit(&sched_lock, MTX_SPIN);
}
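
/*
 * Worked example (illustrative; values assumed from this era's
 * headers: PUSER == 50, NICE_WEIGHT == 2, PRIO_MIN == -20,
 * INVERSE_ESTCPU_WEIGHT == 8): a nice-0 process with p_estcpu == 48
 * gets newpriority = 50 + 48/8 + 2 * (0 - (-20)) = 50 + 6 + 40 = 96,
 * well under the MAXPRI clamp.
 */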

/* ARGSUSED */
static void
sched_setup(dummy)
        void *dummy;
{
        /* Kick off timeout driven events by calling first time. */
        roundrobin(NULL);
        schedcpu(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
schedclock(p)
        struct proc *p;
{

        p->p_cpticks++;
        p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
        if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
                resetpriority(p);
                if (p->p_priority >= PUSER)
                        p->p_priority = p->p_usrpri;
        }
}
1068}