/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.18 1995/11/08 08:45:58 phk Exp $
 */

/* Portions of this software are covered by the following: */
/******************************************************************************
 *                                                                            *
 * Copyright (c) David L. Mills 1993, 1994                                    *
 *                                                                            *
 * Permission to use, copy, modify, and distribute this software and its      *
 * documentation for any purpose and without fee is hereby granted, provided  *
 * that the above copyright notice appears in all copies and that both the    *
 * copyright notice and this permission notice appear in supporting           *
 * documentation, and that the name University of Delaware not be used in     *
 * advertising or publicity pertaining to distribution of the software        *
 * without specific, written prior permission.  The University of Delaware    *
 * makes no representations about the suitability this software for any       *
 * purpose.  It is provided "as is" without express or implied warranty.      *
 *                                                                            *
 *****************************************************************************/

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <vm/vm.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/*
 * System initialization
 */

static void initclocks __P((void *udata));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)


/* Does anybody else really care about these? */
struct callout *callfree, *callout, calltodo;

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];
long dk_seek[DK_NDRIVE];
long dk_time[DK_NDRIVE];
long dk_wds[DK_NDRIVE];
long dk_wpms[DK_NDRIVE];
long dk_xfer[DK_NDRIVE];

int dk_busy;
int dk_ndrive = 0;
char dk_names[DK_NDRIVE][DK_NAMELEN];

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
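/*
 * Example (illustrative only): if tp->tv_usec is 999900 and BUMPTIME is
 * applied with usec = 250, the intermediate sum is 1000150, so tv_usec
 * becomes 150 and tv_sec is incremented.  The macro assumes the bump is
 * small enough that at most one second of carry is ever needed.
 */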

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
static int psdiv, pscnt;	/* prof => stat divider */
int	psratio;		/* ratio: prof / stat */

volatile struct	timeval time;
volatile struct	timeval mono_time;

/*
 * Phase-lock loop (PLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error
 * bound growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_status = STA_UNSYNC;	/* clock status bits */
int time_state = TIME_OK;	/* clock state */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable at each tick of
 * the clock.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed at each timer interrupt.
 *
 * time_reftime is the second's portion of the system time on the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
long time_reftime = 0;		/* time at last adjustment (s) */
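/*
 * Illustrative note (not part of the original comments): hardclock()
 * below amortizes these offsets by adding time_adj to time_phase on each
 * tick; whenever the accumulated phase reaches one scaled microsecond
 * (FINEUSEC), the whole-microsecond part is folded into that tick's
 * time_update, so a correction is spread smoothly across the second
 * rather than applied as a step.
 */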

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS
 * discipline code is configured (PPS_SYNC). The scale factors are
 * defined in the timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion measured by this
 * filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion measured by
 * this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_count counts the seconds of the calibration interval, the
 * duration of which is pps_shift in powers of two.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* pps time dispersion (jitter) (us) */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_ff[] = {0, 0, 0};	/* frequency offset median filter */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

/* XXX none of this stuff works under FreeBSD */
#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock (HIGHBALL or TPRO) is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset; /* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. This is used to implement an adaptive-parameter,
 * first-order, type-II phase-lock loop. The code computes new time and
 * frequency offsets each time it is called. The hardclock() routine
 * amortizes these offsets at each tick interrupt. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For default SHIFT_UPDATE = 12, the offset is limited to +-512 ms, the
 * maximum interval between updates is 4096 s and the maximum frequency
 * offset is +-31.25 ms/s.
 *
 * Note: splclock() is in effect.
 */
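/*
 * Hedged example (values assume the default timex.h constants described
 * above): with SHIFT_UPDATE = 12, a caller passing offset = 700000 us
 * exceeds MAXPHASE and time_offset is clamped to MAXPHASE << SHIFT_UPDATE,
 * the +-512 ms limit noted above, while offset = 10000 us is simply
 * stored as 10000 << SHIFT_UPDATE in the scaled representation.
 */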
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (mtemp > MAXSEC)
		mtemp = 0;

	/* ugly multiply should be replaced */
	if (ltemp < 0)
		time_freq -= (-ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	else
		time_freq += (ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}



/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(udata)
	void *udata;		/* not used*/
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int needsoft;

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
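	/*
	 * Illustrative example (added here, not in the original comment):
	 * events due in 3, 5, 5 and 9 ticks are queued with c_time deltas
	 * 3, 2, 0 and 4.  Decrementing only the head entry each tick ages
	 * every entry at once, and a zero or negative c_time at the head
	 * marks that entry as due.
	 */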
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.
	 */
	ticks++;
	{
		int time_update;
		struct timeval newtime = time;
		long ltemp;

		if (timedelta == 0) {
			time_update = CPU_THISTICKLEN(tick);
		} else {
			time_update = CPU_THISTICKLEN(tick) + tickdelta;
			timedelta -= tickdelta;
		}
		BUMPTIME(&mono_time, time_update);

		/*
		 * Compute the phase adjustment. If the low-order bits
		 * (time_phase) of the update overflow, bump the high-order bits
		 * (time_update).
		 */
		time_phase += time_adj;
		if (time_phase <= -FINEUSEC) {
		  ltemp = -time_phase >> SHIFT_SCALE;
		  time_phase += ltemp << SHIFT_SCALE;
		  time_update -= ltemp;
		}
		else if (time_phase >= FINEUSEC) {
		  ltemp = time_phase >> SHIFT_SCALE;
		  time_phase -= ltemp << SHIFT_SCALE;
		  time_update += ltemp;
		}

		newtime.tv_usec += time_update;
		/*
		 * On rollover of the second the phase adjustment to be used for
		 * the next second is calculated. Also, the maximum error is
		 * increased by the tolerance. If the PPS frequency discipline
		 * code is present, the phase is increased to compensate for the
		 * CPU clock oscillator frequency error.
		 *
		 * With SHIFT_SCALE = 23, the maximum frequency adjustment is
		 * +-256 us per tick, or 25.6 ms/s at a clock frequency of 100
		 * Hz. The time contribution is shifted right a minimum of two
		 * bits, while the frequency contribution is a right shift.
		 * Thus, overflow is prevented if the frequency contribution is
		 * limited to half the maximum or 15.625 ms/s.
		 */
		if (newtime.tv_usec >= 1000000) {
		  newtime.tv_usec -= 1000000;
		  newtime.tv_sec++;
		  time_maxerror += time_tolerance >> SHIFT_USEC;
		  if (time_offset < 0) {
		    ltemp = -time_offset >>
		      (SHIFT_KG + time_constant);
		    time_offset += ltemp;
		    time_adj = -ltemp <<
		      (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		  } else {
		    ltemp = time_offset >>
		      (SHIFT_KG + time_constant);
		    time_offset -= ltemp;
		    time_adj = ltemp <<
		      (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		  }
#ifdef PPS_SYNC
		  /*
		   * Gnaw on the watchdog counter and update the frequency
		   * computed by the pll and the PPS signal.
		   */
		  pps_valid++;
		  if (pps_valid == PPS_VALID) {
		    pps_jitter = MAXTIME;
		    pps_stabil = MAXFREQ;
		    time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				     STA_PPSWANDER | STA_PPSERROR);
		  }
		  ltemp = time_freq + pps_freq;
#else
		  ltemp = time_freq;
#endif /* PPS_SYNC */
		  if (ltemp < 0)
		    time_adj -= -ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
		  else
		    time_adj += ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

		  /*
		   * When the CPU clock oscillator frequency is not a
		   * power of two in Hz, the SHIFT_HZ is only an
		   * approximate scale factor. In the SunOS kernel, this
		   * results in a PLL gain factor of 1/1.28 = 0.78 of what it
		   * should be. In the following code the overall gain is
		   * increased by a factor of 1.25, which results in a
		   * residual error less than 3 percent.
		   */
		  /* Same thing applies for FreeBSD --GAW */
		  if (hz == 100) {
		    if (time_adj < 0)
		      time_adj -= -time_adj >> 2;
		    else
		      time_adj += time_adj >> 2;
		  }

		  /* XXX - this is really bogus, but can't be fixed until
		     xntpd's idea of the system clock is fixed to know how
		     the user wants leap seconds handled; in the mean time,
		     we assume that users of NTP are running without proper
		     leap second support (this is now the default anyway) */
		  /*
		   * Leap second processing. If in leap-insert state at
		   * the end of the day, the system clock is set back one
		   * second; if in leap-delete state, the system clock is
		   * set ahead one second. The microtime() routine or
		   * external clock driver will insure that reported time
		   * is always monotonic. The ugly divides should be
		   * replaced.
		   */
		  switch (time_state) {

		  case TIME_OK:
		    if (time_status & STA_INS)
		      time_state = TIME_INS;
		    else if (time_status & STA_DEL)
		      time_state = TIME_DEL;
		    break;

		  case TIME_INS:
		    if (newtime.tv_sec % 86400 == 0) {
		      newtime.tv_sec--;
		      time_state = TIME_OOP;
		    }
		    break;

		  case TIME_DEL:
		    if ((newtime.tv_sec + 1) % 86400 == 0) {
		      newtime.tv_sec++;
		      time_state = TIME_WAIT;
		    }
		    break;

		  case TIME_OOP:
		    time_state = TIME_WAIT;
		    break;

		  case TIME_WAIT:
		    if (!(time_status & (STA_INS | STA_DEL)))
		      time_state = TIME_OK;
		  }
		}
		CPU_CLOCKUPDATE(&time, &newtime);
	}

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout, rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
 */
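/*
 * Typical usage sketch (illustrative only; mydev_poll and sc are
 * hypothetical driver names, not part of this file):
 *
 *	timeout(mydev_poll, (void *)sc, hz);	schedule in ~1 second
 *	...
 *	untimeout(mydev_poll, (void *)sc);	cancel if still pending
 *
 * Since timeout() returns no identifier, untimeout() locates the entry
 * by the same (function, argument) pair that was passed to timeout().
 */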
void
timeout(ftn, arg, ticks)
	timeout_t ftn;
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	timeout_t ftn;
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
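/*
 * Usage sketch (illustrative; "when" is a hypothetical absolute time
 * already filled in by the caller, and mydev_poll/sc are hypothetical
 * driver names):
 *
 *	struct timeval when;
 *	...
 *	timeout(mydev_poll, (void *)sc, hzto(&when));
 *
 * hzto() never returns less than 1 tick, so the callout is not scheduled
 * in the past even if "when" has already gone by.
 */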
int
hzto(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		printf("hzto: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
#endif
	register struct proc *p = curproc;
	register int i;

	if (p) {
		struct pstats *pstats;
		struct rusage *ru;
		struct vmspace *vm;

		/* Charge integral space usage to the process's resource usage. */
		if ((pstats = p->p_stats) && (ru = &pstats->p_ru) && (vm = p->p_vmspace)) {
			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
			if ((vm->vm_pmap.pm_stats.resident_count * PAGE_SIZE / 1024) >
			    ru->ru_maxrss) {
				ru->ru_maxrss =
				    vm->vm_pmap.pm_stats.resident_count * PAGE_SIZE / 1024;
			}
		}
	}

	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 *
	 * XXX	should either run linked list of drives, or (better)
	 *	grab timestamps in the start & done code.
	 */
	for (i = 0; i < DK_NDRIVE; i++)
		if (dk_busy & (1 << i))
			dk_time[i]++;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_OID(_kern, KERN_CLOCKRATE, clockrate,
	CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0, sysctl_kern_clockrate, "");

/*#ifdef PPS_SYNC*/
#if 0
/* This code is completely bogus; if anybody ever wants to use it, get
 * the current version from Dave Mills. */

/*
 * hardpps() - discipline CPU clock oscillator to external pps signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It integrates successive
 * phase differences between the two oscillators and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware
 * counter value at the designated PPS signal transition.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= ntp_pll.ybar;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	ntp_pll.calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> ntp_pll.shift);
	else
		v_usec = v_usec >> ntp_pll.shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > ntp_pll.tolerance || v_usec < -ntp_pll.tolerance) {
		ntp_pll.jitcnt++;
		ntp_pll.shift = NTP_PLL.SHIFT;
		pps_dispinc = PPS_DISPINC;
		ntp_pll.intcnt = 0;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * signal. The median sample becomes the offset estimate; the
	 * difference between the other two samples becomes the
	 * dispersion estimate.
	 */
	pps_mf[2] = pps_mf[1];
	pps_mf[1] = pps_mf[0];
	pps_mf[0] = v_usec;
	if (pps_mf[0] > pps_mf[1]) {
		if (pps_mf[1] > pps_mf[2]) {
			u_usec = pps_mf[1];		/* 0 1 2 */
			v_usec = pps_mf[0] - pps_mf[2];
		} else if (pps_mf[2] > pps_mf[0]) {
			u_usec = pps_mf[0];		/* 2 0 1 */
			v_usec = pps_mf[2] - pps_mf[1];
		} else {
			u_usec = pps_mf[2];		/* 0 2 1 */
			v_usec = pps_mf[0] - pps_mf[1];
		}
	} else {
		if (pps_mf[1] < pps_mf[2]) {
			u_usec = pps_mf[1];		/* 2 1 0 */
			v_usec = pps_mf[2] - pps_mf[0];
		} else  if (pps_mf[2] < pps_mf[0]) {
			u_usec = pps_mf[0];		/* 1 0 2 */
			v_usec = pps_mf[1] - pps_mf[2];
		} else {
			u_usec = pps_mf[2];		/* 1 2 0 */
			v_usec = pps_mf[1] - pps_mf[0];
		}
	}

	/*
	 * Here the dispersion average is updated. If it is less than
	 * the threshold pps_dispmax, the frequency average is updated
	 * as well, but clamped to the tolerance.
	 */
	v_usec = (v_usec >> 1) - ntp_pll.disp;
	if (v_usec < 0)
		ntp_pll.disp -= -v_usec >> PPS_AVG;
	else
		ntp_pll.disp += v_usec >> PPS_AVG;
	if (ntp_pll.disp > pps_dispmax) {
		ntp_pll.discnt++;
		return;
	}
	if (u_usec < 0) {
		ntp_pll.ybar -= -u_usec >> PPS_AVG;
		if (ntp_pll.ybar < -ntp_pll.tolerance)
			ntp_pll.ybar = -ntp_pll.tolerance;
		u_usec = -u_usec;
	} else {
		ntp_pll.ybar += u_usec >> PPS_AVG;
		if (ntp_pll.ybar > ntp_pll.tolerance)
			ntp_pll.ybar = ntp_pll.tolerance;
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick/4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << ntp_pll.shift > bigtick >> 2) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift > NTP_PLL.SHIFT) {
			ntp_pll.shift--;
			pps_dispinc <<= 1;
		}
	} else if (ntp_pll.intcnt >= 4) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift < NTP_PLL.SHIFTMAX) {
			ntp_pll.shift++;
			pps_dispinc >>= 1;
		}
	} else
		ntp_pll.intcnt++;
}
#endif /* PPS_SYNC */
