/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $Id: kern_synch.c,v 1.40 1997/11/21 11:36:56 bde Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/limits.h>	/* for UCHAR_MAX = typeof(p_priority)_MAX */

static void rqinit __P((void *));
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

static void	endtsleep __P((void *));
static void	updatepri __P((struct proc *p));

#define MAXIMUM_SCHEDULE_QUANTUM	(1000000) /* arbitrary limit */
#ifndef DEFAULT_SCHEDULE_QUANTUM
#define DEFAULT_SCHEDULE_QUANTUM 10
#endif
static int quantum = DEFAULT_SCHEDULE_QUANTUM; /* default value */

static int
sysctl_kern_quantum SYSCTL_HANDLER_ARGS
{
	int error;
	int new_val = quantum;

	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error == 0) {
		if ((new_val > 0) && (new_val < MAXIMUM_SCHEDULE_QUANTUM)) {
			quantum = new_val;
		} else {
			error = EINVAL;
		}
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof quantum, sysctl_kern_quantum, "I", "");
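
/*
 * A minimal userland sketch (not part of this file) of driving the
 * kern.quantum knob declared above through sysctlbyname(3); the new
 * value of 20 is an arbitrary example, and setting it requires
 * appropriate privilege.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int q, newq = 20;
	size_t len = sizeof(q);

	/* read the current quantum (forced switches per second) */
	if (sysctlbyname("kern.quantum", &q, &len, NULL, 0) == 0)
		printf("kern.quantum = %d\n", q);
	/* raise it to 20, i.e. round-robin every 1/20 of a second */
	if (sysctlbyname("kern.quantum", NULL, NULL, &newq, sizeof(newq)) == -1)
		perror("kern.quantum");
	return (0);
}
#endif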

/*
 * Force switch among equal priority processes every 1/quantum seconds
 * (100ms with the default quantum of 10).
 */
/* ARGSUSED */
void
roundrobin(arg)
	void *arg;
{

	need_resched();
	timeout(roundrobin, NULL, hz / quantum);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that statclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
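
/*
 * A small userland sketch (not part of the kernel build) that replays
 * the decay_cpu() fixed-point arithmetic above and counts how many
 * one-second steps it takes to shed 90% of the estimate; the counts
 * should line up with the power table in the comment (6, 11, 15 and 20
 * after rounding up).  FSHIFT == 11 is assumed, as in the contemporary
 * <sys/param.h>, and the starting value of 100000 is arbitrary.
 */
#if 0
#include <stdio.h>

#define FSHIFT	11		/* assumption; see <sys/param.h> */
#define FSCALE	(1 << FSHIFT)
typedef long fixpt_t;

#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	int loadav, n;
	fixpt_t loadfac, cpu;

	for (loadav = 1; loadav <= 4; loadav++) {
		/* the load average is a fixed-point quantity in the kernel */
		loadfac = loadfactor((fixpt_t)loadav * FSCALE);
		cpu = 100000;
		for (n = 0; cpu > 10000; n++)	/* stop at 10% of start */
			cpu = decay_cpu(loadfac, cpu);
		printf("loadav %d: %d steps to decay 90%%\n", loadav, n);
	}
	return (0);
}
#endif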

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
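
/*
 * A tiny userland sketch (illustrative only) of the estimate behind
 * CCPU_SHIFT: with hz == 100, the per-tick weight (1 - exp(-1/20)) is
 * almost exactly hz / 2^11, which is why the fast path in schedcpu()
 * below can fold the multiply into a shift of (FSHIFT - CCPU_SHIFT)
 * bits.  Compile with -lm.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	printf("1 - exp(-1/20) = %.6f\n", 1.0 - exp(-1.0 / 20.0));
	printf("100 / 2^11     = %.6f\n", 100.0 / 2048.0);
	return (0);
}
#endif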

/*
 * Recompute process priorities, once per second (every hz ticks).
 */
/* ARGSUSED */
void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	wakeup((caddr_t)&lbolt);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splhigh();	/* prevent state changes and protect run queue */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
		p->p_estcpu = min(newcpu, UCHAR_MAX);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
#ifdef SMP
			    (u_char)p->p_oncpu == 0xff &&	/* idle */
#endif
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrq(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = min(newcpu, UCHAR_MAX);
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
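
/*
 * An illustrative userland sketch of how LOOKUP() buckets wait
 * channels; the two addresses are made up, chosen 0x100 bytes apart
 * so they land in adjacent slots of the 128-entry table.
 */
#if 0
#include <stdio.h>

#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))

int
main(void)
{
	/* drop the low 8 bits, keep the next 7 as the bucket index */
	printf("%ld\n", LOOKUP(0x0f123400));	/* bucket 52 */
	printf("%ld\n", LOOKUP(0x0f123500));	/* bucket 53 */
	return (0);
}
#endif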

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit()
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the
 * system call should be interrupted by the signal.
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	const char *wmesg;
{
	struct proc *p = curproc;
	int s, sig, catch = priority & PCATCH;
	struct callout_handle thandle;

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("tsleep1");
	if (ident == NULL || p->p_stat != SRUN)
		panic("tsleep");
	/* XXX This is not exhaustive, just the most common case */
	if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p))
		panic("sleeping process already on another queue");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
	if (timo)
		thandle = timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p))) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p, thandle);
	if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
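
/*
 * An illustrative sketch (not part of this file) of the canonical
 * tsleep()/wakeup() handshake described above.  The "xx_softc"
 * structure, its sc_ready field and the xxintr() routine are
 * hypothetical names invented for the example.
 */
#if 0
struct xx_softc {
	int	sc_ready;	/* set by the interrupt side */
};

static int
xx_wait(sc)
	struct xx_softc *sc;
{
	int error;

	while (sc->sc_ready == 0) {
		/*
		 * Sleep interruptibly on the address of sc_ready for at
		 * most one second; 0 means a wakeup arrived, otherwise
		 * EWOULDBLOCK, EINTR or ERESTART per the comment above.
		 */
		error = tsleep(&sc->sc_ready, PZERO | PCATCH, "xxwait", hz);
		if (error)
			return (error);
	}
	sc->sc_ready = 0;
	return (0);
}

static void
xxintr(sc)
	struct xx_softc *sc;
{
	sc->sc_ready = 1;
	wakeup(&sc->sc_ready);	/* make all sleepers on this channel runnable */
}
#endif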

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
	register struct proc *p;
{
	int s;

	s = splhigh();
	if (p->p_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq);
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					need_resched();
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	splx(s);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];

	for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_procq);
			p->p_wchan = 0;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					setrunqueue(p);
					need_resched();
					break;
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			}
		}
	}
	splx(s);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	register long s, u;
	int x;
	struct timeval tv;

	/*
	 * XXX this spl is almost unnecessary.  It is partly to allow for
	 * sloppy callers that don't do it (issignal() via CURSIG() is the
	 * main offender).  It is partly to work around a bug in the i386
	 * cpu_switch() (the ipl is not preserved).  We ran for years
	 * without it.  I think there was only an interrupt latency problem.
	 * The main caller, tsleep(), does an splx() a couple of instructions
	 * after calling here.  The buggy caller, issignal(), usually calls
	 * here at spl0() and sometimes returns at splhigh().  The process
	 * then runs for a little too long at splhigh().  The ipl gets fixed
	 * when the process returns to user mode (or earlier).
	 *
	 * It would probably be better to always call here at spl0(). Callers
	 * are prepared to give up control to another process, so they must
	 * be prepared to be interrupted.  The clock stuff here may not
	 * actually need splstatclock().
	 */
	x = splstatclock();

#ifdef SIMPLELOCK_DEBUG
	if (p->p_simple_locks)
		printf("sleep: holding simple lock");
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
#ifdef SMP
	if (s < 0)
		s = u = 0;
#endif
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 */
	if (p->p_stat != SZOMB) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (s >= rlim->rlim_cur) {
			if (s >= rlim->rlim_max)
				killproc(p, "exceeded maximum CPU limit");
			else {
				psignal(p, SIGXCPU);
				if (rlim->rlim_cur < rlim->rlim_max)
					/* raise the soft limit so SIGXCPU recurs */
					rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	cpu_switch(p);
	microtime(&runtime);
	splx(x);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
/* ARGSUSED*/
static void
rqinit(dummy)
	void *dummy;
{
	register int i;

	for (i = 0; i < NQS; i++) {
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
		rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i];
		idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i];
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
	else if (p->p_priority < curpriority)
		need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
		newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
		newpriority = min(newpriority, MAXPRI);
		p->p_usrpri = newpriority;
		if (newpriority < curpriority)
			need_resched();
	} else {
		need_resched();
	}
}
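
/*
 * A worked example (illustrative values only) of the user priority
 * formula above; PUSER is assumed to be 50, as in the contemporary
 * <sys/param.h>, and the p_estcpu/p_nice values are made up.
 */
#if 0
#include <stdio.h>

#define PUSER	50	/* assumption; see <sys/param.h> */

int
main(void)
{
	int p_estcpu = 80, p_nice = 0;

	/*
	 * A CPU-bound process with a decayed estimate of 80 ticks and
	 * nice 0 drifts to priority 50 + 80/4 + 2*0 = 70, numerically
	 * larger and therefore weaker than a freshly woken process
	 * sitting at PUSER.
	 */
	printf("%d\n", PUSER + p_estcpu / 4 + 2 * p_nice);
	return (0);
}
#endif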