/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $Id: kern_synch.c,v 1.55 1998/05/17 11:52:45 phk Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/ipl.h>
#include <machine/limits.h>	/* for UCHAR_MAX = typeof(p_priority)_MAX */

static void rqinit __P((void *));
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

static void	endtsleep __P((void *));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));
static void	updatepri __P((struct proc *p));

#define MAXIMUM_SCHEDULE_QUANTUM	(1000000) /* arbitrary limit */
#ifndef DEFAULT_SCHEDULE_QUANTUM
#define DEFAULT_SCHEDULE_QUANTUM 10
#endif
static int quantum = DEFAULT_SCHEDULE_QUANTUM; /* default value */

static int
sysctl_kern_quantum SYSCTL_HANDLER_ARGS
{
	int error;
	int new_val = quantum;

	new_val = quantum;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error == 0) {
		if ((new_val > 0) && (new_val < MAXIMUM_SCHEDULE_QUANTUM)) {
			quantum = new_val;
		} else {
			error = EINVAL;
		}
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof quantum, sysctl_kern_quantum, "I", "");
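
/*
 * Illustrative usage (not part of the original source): the quantum can be
 * inspected and tuned at run time through the sysctl defined above, e.g.
 * "sysctl kern.quantum" to read it or "sysctl -w kern.quantum=20" to change
 * it.  Note that a larger quantum *shortens* ROUNDROBIN_INTERVAL
 * (hz / quantum), i.e. it forces switches among equal priority processes
 * more often.
 */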

/* maybe_resched: Decide whether or not to reschedule,
 * taking the priorities and schedulers into account.
 */
static void maybe_resched(struct proc *chk)
{
	struct proc *p = curproc; /* XXX */

	/* If the current scheduler is the idle scheduler or
	 * the priority of the new one is higher, then reschedule.
	 */
	if (p == 0 ||
	RTP_PRIO_BASE(p->p_rtprio.type) == RTP_PRIO_IDLE ||
	(chk->p_priority < curpriority &&
	RTP_PRIO_BASE(p->p_rtprio.type) == RTP_PRIO_BASE(chk->p_rtprio.type)) )
		need_resched();
}

#define ROUNDROBIN_INTERVAL (hz / quantum)
int roundrobin_interval(void)
{
	return ROUNDROBIN_INTERVAL;
}
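
/*
 * For illustration: with the usual hz of 100 and the default quantum of 10,
 * ROUNDROBIN_INTERVAL works out to 100 / 10 = 10 ticks, i.e. the 100ms
 * mentioned below.
 */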

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
static void
roundrobin(arg)
	void *arg;
{
	struct proc *p = curproc; /* XXX */

#ifdef SMP
	need_resched();
	forward_roundrobin();
#else
	if (p == 0 || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_resched();
#endif

	timeout(roundrobin, NULL, ROUNDROBIN_INTERVAL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that statclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
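
/*
 * A quick numerical check of the above (added for illustration): with
 * loadavg == 2 we get b == 4, decay == 4/5 == 0.8 and 5 * loadavg == 10,
 * and 0.8 ** 10 =~ 0.107, i.e. close to the desired factor of .1.
 */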

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
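
/*
 * Illustration of the fixed point form (assuming the stock FSHIFT of 11,
 * so FSCALE == 2048): a load average of 2.00 is reported as
 * ldavg[0] == 4096, giving loadfac == 8192 and a per-second scaling of
 * 8192 / (8192 + 2048) == 0.8, the same decay derived above.
 */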

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
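
/*
 * Sanity check on the constant above (illustrative): ccpu/FSCALE is
 * exp(-1/20), so after 60 one-second decays a process that stops running
 * retains exp(-60/20) = exp(-3) =~ 5% of its p_pctcpu, i.e. 95% is
 * forgotten in 60 seconds, as advertised.
 */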

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
static void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
		p->p_estcpu = min(newcpu, UCHAR_MAX);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
#ifdef SMP
			    (u_char)p->p_oncpu == 0xff && 	/* idle */
#endif
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = min(newcpu, UCHAR_MAX);
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
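
/*
 * For example (illustrative only): a wait channel address of 0xf0123456
 * hashes to ((0xf0123456 >> 8) & 127) == 0x34, i.e. slpque bucket 52.
 */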

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit()
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	const char *wmesg;
{
	struct proc *p = curproc;
	int s, sig, catch = priority & PCATCH;
	struct callout_handle thandle;

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("tsleep1");
	if (ident == NULL || p->p_stat != SRUN)
		panic("tsleep");
	/* XXX This is not exhaustive, just the most common case */
	if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p))
		panic("sleeping process already on another queue");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
	if (timo)
		thandle = timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p))) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p, thandle);
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
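
/*
 * Illustrative use of the interface above (a sketch, not part of this
 * file; "sc" and its "ready" flag are hypothetical driver state): code
 * waiting for an interrupt handler to post an event might do
 *
 *	while (!sc->ready)
 *		if ((error = tsleep(&sc->ready, PZERO | PCATCH, "xxwait", hz)) &&
 *		    error != EWOULDBLOCK)
 *			return (error);
 *
 * treating EWOULDBLOCK as "timed out, re-test the condition" and passing
 * EINTR/ERESTART back up, while the interrupt handler would perform
 * "sc->ready = 1; wakeup(&sc->ready);" to end the sleep.
 */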

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
	register struct proc *p;
{
	int s;

	s = splhigh();
	if (p->p_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq);
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					maybe_resched(p);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	splx(s);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];

	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					maybe_resched(p);
					break;
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			}
		}
	}
	splx(s);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	register long s, u;
	int x;
	struct timeval tv;

	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0(). Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();

#ifdef SIMPLELOCK_DEBUG
	if (p->p_simple_locks)
		printf("sleep: holding simple lock\n");
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microuptime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - p->p_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - p->p_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
#ifdef SMP
	if (s < 0)
		s = u = 0;
#endif
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 */
	if (p->p_stat != SZOMB) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (s >= rlim->rlim_cur) {
			if (s >= rlim->rlim_max)
				killproc(p, "exceeded maximum CPU limit");
			else {
				psignal(p, SIGXCPU);
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	cpu_switch(p);
	microuptime(&p->p_runtime);
	splx(x);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
/* ARGSUSED*/
static void
rqinit(dummy)
	void *dummy;
{
	register int i;

	for (i = 0; i < NQS; i++) {
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
		rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i];
		idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i];
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
	else
		maybe_resched(p);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
		newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
		newpriority = min(newpriority, MAXPRI);
		p->p_usrpri = newpriority;
	}
	maybe_resched(p);
}
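
/*
 * Worked example for the formula above (illustrative; assumes the stock
 * PUSER of 50): a nice-0 process that has accumulated a p_estcpu of 40
 * gets p_usrpri = 50 + 40/4 + 0 = 60, while the same history at nice +10
 * yields 50 + 10 + 20 = 80, i.e. a weaker claim on the CPU.
 */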

/* ARGSUSED */
static void sched_setup __P((void *dummy));
static void
sched_setup(dummy)
	void *dummy;
{
	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)