/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.3 1994/08/02 07:41:54 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/* Does anybody else really care about these? */
struct callout *callfree, *callout, calltodo;
int ncallout;

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];
long dk_seek[DK_NDRIVE];
long dk_time[DK_NDRIVE];
long dk_wds[DK_NDRIVE];
long dk_wpms[DK_NDRIVE];
long dk_xfer[DK_NDRIVE];

int dk_busy;
int dk_ndrive = DK_NDRIVE;

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
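
/*
 * Illustrative example (these values are not a statement about any
 * particular machine): with hz = 100, stathz = 128 and profhz = 1024,
 * initclocks() below computes psratio = profhz / stathz = 8, so while
 * profiling only every 8th statclock() tick is charged to statistics.
 */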

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
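
/*
 * Example of the arithmetic (assuming hz = 100, so tick = 10000 usec):
 * bumping { 5, 995000 } by one tick gives us = 1005000, which rolls over
 * to { 6, 5000 }.  The macro handles at most one rollover, hence the
 * "small number of usec's" restriction above.
 */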

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
static int psdiv, pscnt;	/* prof => stat divider */
int	psratio;		/* ratio: prof / stat */

volatile struct	timeval time;
volatile struct	timeval mono_time;

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks()
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int delta, needsoft;
	extern int tickdelta;
	extern long timedelta;

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
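	/*
	 * Illustrative example: c_time values of { -2, 0, 3, 1 } describe
	 * one event 2 ticks overdue, one just due, one due 3 ticks from now
	 * and one due 4 ticks from now; each tick this loop decrements the
	 * non-positive entries and the first positive entry only.
	 */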
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.  The increment is just ``tick'' unless
	 * we are still adjusting the clock; see adjtime().
	 */
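	/*
	 * For example, if adjtime() left timedelta = 1000 (usec) with
	 * tickdelta = 5, each tick below advances the clock by tick + 5
	 * usec until the remaining 1000 usec have been consumed (200 ticks,
	 * i.e. 2 seconds at hz = 100).
	 */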
	ticks++;
	if (timedelta == 0)
		delta = tick;
	else {
		delta = tick + tickdelta;
		timedelta -= tickdelta;
	}
	BUMPTIME(&time, delta);
	BUMPTIME(&mono_time, delta);

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
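		/*
		 * Run the handler at the priority softclock() was entered
		 * with; only the callout queue itself needs splhigh().
		 */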
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout, rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
 */
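
/*
 * Hypothetical usage sketch (xx_watchdog and sc are not real symbols):
 *
 *	timeout(xx_watchdog, sc, hz);	call xx_watchdog(sc) in ~1 second
 *	untimeout(xx_watchdog, sc);	cancel it; matched by (ftn, arg)
 */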
void
timeout(ftn, arg, ticks)
	timeout_t ftn;
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
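	/*
	 * Worked example: inserting an event 7 ticks out into a queue whose
	 * positive deltas are { 3, 5 } consumes 3 ticks at the first entry,
	 * stops before the second (7 - 3 = 4 < 5), stores c_time = 4, and
	 * shrinks the following entry's delta from 5 to 1.
	 */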
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	timeout_t ftn;
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register long ticks, sec;
	int s;

	/*
	 * If number of milliseconds will fit in 32 bit arithmetic,
	 * then compute number of milliseconds to time and scale to
	 * ticks.  Otherwise just compute number of hz in time, rounding
	 * times greater than representable to maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is 250 days.
	 */
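	/*
	 * For example, with hz = 100 (tick = 10000) a target 2.5 seconds
	 * in the future works out to 2500 ms / 10 ms = 250 ticks.
	 */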
	s = splhigh();
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000 +
			(tv->tv_usec - time.tv_usec) / 1000) / (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
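/*
 * While any process is being profiled the clock runs at profhz and pscnt
 * counts down from psdiv (== psratio), so only every psratio'th tick falls
 * through to the statistics code below; the other ticks are used purely
 * for profile samples.
 */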
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
#endif
	register struct proc *p;
	register int i;

	if (CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 *
	 * XXX	should either run linked list of drives, or (better)
	 *	grab timestamps in the start & done code.
	 */
	for (i = 0; i < DK_NDRIVE; i++)
		if (dk_busy & (1 << i))
			dk_time[i]++;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
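	/*
	 * Note that p_estcpu saturates at its maximum value rather than
	 * wrapping to 0, and resetpriority() runs only on every 4th
	 * increment (when the low two bits clear).
	 */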
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}
	}
}

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}