/*	$OpenBSD: kern_synch.c,v 1.12 1998/02/03 19:06:25 deraadt Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

void roundrobin __P((void *));
void schedcpu __P((void *));
void updatepri __P((struct proc *));
void endtsleep __P((void *));

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
void
roundrobin(arg)
	void *arg;
{

	need_resched();
	timeout(roundrobin, NULL, hz / 10);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
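
/*
 * Worked example (added commentary, not part of the original file): with a
 * load average of 1.0, loadfactor() yields 2 * FSCALE, so each application
 * of decay_cpu() scales its argument by roughly 2/3; after the five
 * iterations discussed above, (2/3)^5 =~ 0.13 of the original p_estcpu
 * remains, close to the 10% target.  The sketch below is illustrative only
 * and is compiled out.
 */
#if 0
static unsigned int
decay_example(estcpu)
	unsigned int estcpu;
{
	fixpt_t loadfac = loadfactor(FSCALE);	/* loadav == 1.0 in fixpt */
	int i;

	for (i = 0; i < 5; i++)			/* 5 * loadav iterations */
		estcpu = decay_cpu(loadfac, estcpu);
	return (estcpu);			/* roughly 13% of the input */
}
#endif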

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
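
/*
 * Added note (not part of the original file): with hz == 100 the shift of
 * 11 falls out of the general formula, which adds
 * (1 - exp(-1/20)) * p_cpticks * FSCALE / hz each second.  Since
 * 0.0488 / 100 =~ 0.000488 =~ 1/2048 == 2**-11, the same quantity can be
 * obtained by shifting p_cpticks left by (FSHIFT - 11), which is what the
 * (FSHIFT >= CCPU_SHIFT) branch of schedcpu() below does.
 */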

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
		p->p_estcpu = min(newcpu, UCHAR_MAX);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
	vmmeter();
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}
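
/*
 * Added note (not part of the original file): p_pctcpu above is an
 * exponentially weighted average.  Each second the old value is scaled by
 * ccpu (exp(-1/20)) and roughly (1 - exp(-1/20)) * p_cpticks / hz, in
 * FSCALE units, is added, so a process that monopolizes the CPU converges
 * on FSCALE (100%) with a time constant of about 20 seconds.  This value
 * feeds ps(1) only; scheduling decisions use p_estcpu.
 */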

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = min(newcpu, UCHAR_MAX);
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[TABLESIZE];
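
/*
 * Added note (not part of the original file): LOOKUP() spreads wait
 * channels over the TABLESIZE sleep queues by discarding the low 8 bits
 * of the address and keeping the next 7.  For example,
 * LOOKUP(0x12345678) == (0x123456 & 0x7f) == 0x56, so such a sleeper is
 * queued on slpque[0x56].  The hash only narrows the search; wakeup()
 * still compares p_wchan against the exact identifier.
 */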

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	char *wmesg;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	int sig, catch = priority & PCATCH;
	extern int cold;
	void endtsleep __P((void *));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("tsleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	if (timo)
		timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep ; bpendtsleep:");
#endif
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}
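
/*
 * Usage sketch (added commentary, not part of the original file): callers
 * normally re-test their condition in a loop around tsleep(), since a
 * wakeup() on the channel does not guarantee the awaited event still
 * holds when the sleeper finally runs.  The names below (example_flag,
 * example_wait, example_intr) are hypothetical and the block is compiled
 * out.
 */
#if 0
int example_flag;

int
example_wait()
{
	int s, error = 0;

	s = splhigh();			/* keep example_intr() out */
	while (example_flag == 0) {
		/* interruptible sleep with a five second timeout */
		error = tsleep((caddr_t)&example_flag, PZERO | PCATCH,
		    "example", 5 * hz);
		if (error)		/* EWOULDBLOCK, EINTR or ERESTART */
			break;
	}
	example_flag = 0;
	splx(s);
	return (error);
}

void
example_intr()
{

	example_flag = 1;
	wakeup((caddr_t)&example_flag);
}
#endif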

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}
/*
 * Short-term, non-interruptible sleep.
 */
void
sleep(ident, priority)
	void *ident;
	int priority;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	extern int cold;

#ifdef DIAGNOSTIC
	if (priority > PZERO) {
		printf("sleep called with priority %d > PZERO, wchan: %p\n",
		    priority, ident);
		panic("old sleep");
	}
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return;
	}
#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("sleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = NULL;
	p->p_slptime = 0;
	p->p_priority = priority;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendsleep ; bpendsleep:");
#endif
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	curpriority = p->p_usrpri;
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
	register struct proc *p;
{
	register struct slpque *qp;
	register struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM)
					setrunqueue(p);
				/*
				 * Since curpriority is a user priority,
				 * p->p_priority is always better than
				 * curpriority.
				 */
				if ((p->p_flag & P_INMEM) == 0)
					wakeup((caddr_t)&proc0);
				else
					need_resched();
				/* END INLINE EXPANSION */
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
	splx(s);
}
/*
 * The machine independent parts of a context switch.
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	register long s, u;
	struct timeval tv;

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		if (s >= rlim->rlim_max)
			psignal(p, SIGKILL);
		else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (s > 10 * 60 && p->p_ucred->cr_uid && p->p_nice == NZERO) {
		p->p_nice = NZERO + 4;
		resetpriority(p);
	}

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	cpu_switch(p);
	microtime(&runtime);
}
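
/*
 * Added note (not part of the original file): the arithmetic above keeps
 * p_rtime normalized.  For instance, if p_rtime held { 2s, 900000us } and
 * 250000us elapsed since this process was last switched in, u becomes
 * 1150000 and the carry folds it into { 3s, 150000us }.  Once the soft
 * RLIMIT_CPU limit is reached the process gets SIGXCPU and the soft limit
 * is raised 5 seconds at a time, until the hard limit delivers SIGKILL.
 */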

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	register int i;

	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0)
			p->p_siglist |= sigmask(p->p_xstat);
		/* FALLTHROUGH */
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_priority < curpriority)
		need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curpriority)
		need_resched();
}
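
/*
 * Added note (not part of the original file): in the formula above, four
 * units of p_estcpu cost one step of user priority while one unit of nice
 * costs two, so a single point of nice is worth about eight points of
 * accumulated CPU usage; the min() clamps the result so it never exceeds
 * MAXPRI (larger numbers mean weaker priority).
 */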

#ifdef DDB
#include <machine/db_machdep.h>

#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
db_show_all_procs(addr, haddr, count, modif)
	db_expr_t addr;
	int haddr;
	db_expr_t count;
	char *modif;
{
	char *mode;
	int doingzomb = 0;
	struct proc *p, *pp;

	if (modif[0] == 0)
		modif[0] = 'n';			/* default == normal mode */

	mode = "mawn";
	while (*mode && *mode != modif[0])
		mode++;
	if (*mode == 0 || *mode == 'm') {
		db_printf("usage: show all procs [/a] [/n] [/w]\n");
		db_printf("\t/a == show process address info\n");
		db_printf("\t/n == show normal process info [default]\n");
		db_printf("\t/w == show process wait/emul info\n");
		return;
	}

	p = allproc.lh_first;

	switch (*mode) {

	case 'a':
		db_printf("PID        %10s %18s %18s %18s\n",
		    "COMMAND", "STRUCT PROC *", "UAREA *", "VMSPACE/VM_MAP");
		break;
	case 'n':
		db_printf("PID        %10s %10s %10s S %7s %16s %7s\n",
		    "PPID", "PGRP", "UID", "FLAGS", "COMMAND", "WAIT");
		break;
	case 'w':
		db_printf("PID        %16s %8s %18s %s\n",
		    "COMMAND", "EMUL", "WAIT-CHANNEL", "WAIT-MSG");
		break;
	}

	while (p != 0) {
		pp = p->p_pptr;
		if (p->p_stat) {

			db_printf("%-10d ", p->p_pid);

			switch (*mode) {

			case 'a':
				db_printf("%10.10s %18p %18p %18p\n",
				    p->p_comm, p, p->p_addr, p->p_vmspace);
				break;

			case 'n':
				db_printf("%10d %10d %10d %d %#7x %16s %7.7s\n",
				    pp ? pp->p_pid : -1, p->p_pgrp->pg_id,
				    p->p_cred->p_ruid, p->p_stat, p->p_flag,
				    p->p_comm, (p->p_wchan && p->p_wmesg) ?
					p->p_wmesg : "");
				break;

			case 'w':
				db_printf("%16s %8s %18p %s\n", p->p_comm,
				    p->p_emul->e_name, p->p_wchan,
				    (p->p_wchan && p->p_wmesg) ?
					p->p_wmesg : "");
				break;

			}
		}
		p = p->p_list.le_next;
		if (p == 0 && doingzomb == 0) {
			doingzomb = 1;
			p = zombproc.lh_first;
		}
	}
}
#endif