kern_timeout.c revision 24101
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.32 1997/02/22 09:39:02 peter Exp $
 */

/* Portions of this software are covered by the following: */
/******************************************************************************
 *                                                                            *
 * Copyright (c) David L. Mills 1993, 1994                                    *
 *                                                                            *
 * Permission to use, copy, modify, and distribute this software and its      *
 * documentation for any purpose and without fee is hereby granted, provided  *
 * that the above copyright notice appears in all copies and that both the    *
 * copyright notice and this permission notice appear in supporting           *
 * documentation, and that the name University of Delaware not be used in     *
 * advertising or publicity pertaining to distribution of the software        *
 * without specific, written prior permission.  The University of Delaware    *
 * makes no representations about the suitability this software for any       *
 * purpose.  It is provided "as is" without express or implied warranty.      *
 *                                                                            *
 *****************************************************************************/

#include "opt_cpu.h"		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#define CLOCK_HAIR		/* XXX */
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/* Exported to machdep.c. */
struct callout *callfree, *callout;

static struct callout calltodo;

/* Some of these don't belong here, but it's easiest to concentrate them. */
static long cp_time[CPUSTATES];
long dk_seek[DK_NDRIVE];
static long dk_time[DK_NDRIVE];
long dk_wds[DK_NDRIVE];
long dk_wpms[DK_NDRIVE];
long dk_xfer[DK_NDRIVE];

int dk_busy;
int dk_ndrive = 0;
char dk_names[DK_NDRIVE][DK_NAMELEN];

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
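
/*
 * Worked example (illustrative values, not mandated by this file): with
 * stathz = 128 and profhz = 1024, psratio = 1024 / 128 = 8, so while
 * profiling statclock() runs at profhz and only every eighth tick falls
 * through to the statistics code (see psdiv/pscnt below).
 */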

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
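
/*
 * Example (illustrative): with *t = { 4, 999900 }, BUMPTIME(t, 200)
 * first forms tv_usec = 1000100, then carries into the seconds field,
 * leaving { 5, 100 }.  Only one second of carry is handled, so the
 * usec argument must stay well below 1000000.
 */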

int	stathz;
int	profhz;
static int profprocs;
int	ticks;
static int psdiv, pscnt;	/* prof => stat divider */
int psratio;			/* ratio: prof / stat */

volatile struct	timeval time;
volatile struct	timeval mono_time;

/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error
 * bound growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_status = STA_UNSYNC;	/* clock status bits */
int time_state = TIME_OK;	/* clock state */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable at each tick of
 * the clock.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time on the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
static long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;			/* frequency offset (scaled ppm) */
static long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
static long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC). The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* pps time dispersion (jitter) (us) */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_ff[] = {0, 0, 0};	/* frequency offset median filter */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

/* XXX none of this stuff works under FreeBSD */
#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock (HIGHBALL or TPRO) is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset; /* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: splclock() is in effect.
 */
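/*
 * Sketch of the update below, in the notation of timex.h (a summary of
 * the code that follows, not a normative specification):
 *
 *	time_offset = clamp(offset, +-MAXPHASE) << SHIFT_UPDATE
 *	FLL:	time_freq += ((time_offset / mtemp)
 *			<< (SHIFT_USEC - SHIFT_UPDATE)) >> SHIFT_KH
 *	PLL:	time_freq += (offset * mtemp)
 *			>> (2 * time_constant + SHIFT_KF - SHIFT_USEC)
 *
 * where mtemp is the interval in seconds since the previous update;
 * time_freq is finally clamped to +-time_tolerance.
 */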
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}



/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int needsoft;

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
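	/*
	 * Illustrative example of the encoding described above: events
	 * due at absolute ticks 3, 5, 5 and 10 are queued with c_time
	 * deltas 3, 2, 0 and 5, so decrementing the head entry ages
	 * every pending event at once.
	 */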
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.
	 */
	ticks++;
	{
		int time_update;
		struct timeval newtime = time;
		long ltemp;

		if (timedelta == 0) {
			time_update = CPU_THISTICKLEN(tick);
		} else {
			time_update = CPU_THISTICKLEN(tick) + tickdelta;
			timedelta -= tickdelta;
		}
		BUMPTIME(&mono_time, time_update);

		/*
		 * Compute the phase adjustment. If the low-order bits
		 * (time_phase) of the update overflow, bump the high-order bits
		 * (time_update).
		 */
		time_phase += time_adj;
		if (time_phase <= -FINEUSEC) {
		  ltemp = -time_phase >> SHIFT_SCALE;
		  time_phase += ltemp << SHIFT_SCALE;
		  time_update -= ltemp;
		}
		else if (time_phase >= FINEUSEC) {
		  ltemp = time_phase >> SHIFT_SCALE;
		  time_phase -= ltemp << SHIFT_SCALE;
		  time_update += ltemp;
		}

		newtime.tv_usec += time_update;
		/*
		 * On rollover of the second the phase adjustment to be used for
		 * the next second is calculated. Also, the maximum error is
		 * increased by the tolerance. If the PPS frequency discipline
		 * code is present, the phase is increased to compensate for the
		 * CPU clock oscillator frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms and
		 * maximum frequency offset is a tad less than +-512 ppm. On a
		 * 64-bit machine, you shouldn't need to ask.
		 */
		if (newtime.tv_usec >= 1000000) {
		  newtime.tv_usec -= 1000000;
		  newtime.tv_sec++;
		  time_maxerror += time_tolerance >> SHIFT_USEC;

		  /*
		   * Compute the phase adjustment for the next second. In
		   * PLL mode, the offset is reduced by a fixed factor
		   * times the time constant. In FLL mode the offset is
		   * used directly. In either mode, the maximum phase
		   * adjustment for each second is clamped so as to spread
		   * the adjustment over not more than the number of
		   * seconds between updates.
		   */
		  if (time_offset < 0) {
		    ltemp = -time_offset;
		    if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		    if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
			    SHIFT_UPDATE;
		    time_offset += ltemp;
		    time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
			SHIFT_UPDATE);
		    } else {
		        ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
			    SHIFT_UPDATE);
		    }

		  /*
		   * Compute the frequency estimate and additional phase
		   * adjustment due to frequency error for the next
		   * second. When the PPS signal is engaged, gnaw on the
		   * watchdog counter and update the frequency computed by
		   * the pll and the PPS signal.
		   */
#ifdef PPS_SYNC
		  pps_valid++;
		  if (pps_valid == PPS_VALID) {
		    pps_jitter = MAXTIME;
		    pps_stabil = MAXFREQ;
		    time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				     STA_PPSWANDER | STA_PPSERROR);
		  }
		  ltemp = time_freq + pps_freq;
#else
		  ltemp = time_freq;
#endif /* PPS_SYNC */
		  if (ltemp < 0)
		    time_adj -= -ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
		  else
		    time_adj += ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if SHIFT_HZ == 7
		  /*
		   * When the CPU clock oscillator frequency is not a
		   * power of two in Hz, the SHIFT_HZ is only an
		   * approximate scale factor. In the SunOS kernel, this
		   * results in a PLL gain factor of 1/1.28 = 0.78 of what it
		   * should be. In the following code the overall gain is
		   * increased by a factor of 1.25, which results in a
		   * residual error less than 3 percent.
		   */
		  /* Same thing applies for FreeBSD --GAW */
		  if (hz == 100) {
		    if (time_adj < 0)
		      time_adj -= -time_adj >> 2;
		    else
		      time_adj += time_adj >> 2;
		  }
#endif /* SHIFT_HZ */

		  /* XXX - this is really bogus, but can't be fixed until
		     xntpd's idea of the system clock is fixed to know how
		     the user wants leap seconds handled; in the mean time,
		     we assume that users of NTP are running without proper
		     leap second support (this is now the default anyway) */
		  /*
		   * Leap second processing. If in leap-insert state at
		   * the end of the day, the system clock is set back one
		   * second; if in leap-delete state, the system clock is
		   * set ahead one second. The microtime() routine or
		   * external clock driver will insure that reported time
		   * is always monotonic. The ugly divides should be
		   * replaced.
		   */
		  switch (time_state) {

		  case TIME_OK:
		    if (time_status & STA_INS)
		      time_state = TIME_INS;
		    else if (time_status & STA_DEL)
		      time_state = TIME_DEL;
		    break;

		  case TIME_INS:
		    if (newtime.tv_sec % 86400 == 0) {
		      newtime.tv_sec--;
		      time_state = TIME_OOP;
		    }
		    break;

		  case TIME_DEL:
		    if ((newtime.tv_sec + 1) % 86400 == 0) {
		      newtime.tv_sec++;
		      time_state = TIME_WAIT;
		    }
		    break;

		  case TIME_OOP:
		    time_state = TIME_WAIT;
		    break;

		  case TIME_WAIT:
		    if (!(time_status & (STA_INS | STA_DEL)))
		      time_state = TIME_OK;
		  }
		}
		CPU_CLOCKUPDATE(&time, &newtime);
	}

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout, rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
 */
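/*
 * Illustrative usage (the callback and softc names are hypothetical,
 * not part of this interface):
 *
 *	static void foo_expire __P((void *));
 *	...
 *	timeout(foo_expire, (void *)sc, hz / 2);   -- fire in ~0.5 s
 *	...
 *	untimeout(foo_expire, (void *)sc);	   -- cancel if pending
 *
 * Because no handle is returned, the (function, argument) pair must
 * uniquely identify the entry for untimeout() to find it.
 */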
void
timeout(ftn, arg, ticks)
	timeout_t ftn;
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	timeout_t ftn;
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

void
gettime(struct timeval *tvp)
{
	int s;

	s = splclock();
	/* XXX should use microtime() iff tv_usec is used. */
	*tvp = time;
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
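	/*
	 * Worked example (illustrative): with hz = 100 (tick = 10000 us)
	 * and a target 2.5 s ahead, sec = 2 and usec = 500000, so the
	 * first case applies and
	 * ticks = (2500000 + 9999) / 10000 + 1 = 251.
	 */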
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		printf("hzto: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return (ticks);
}
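
/*
 * hzto() is the natural bridge from absolute times to timeout(); a
 * hypothetical caller holding an absolute struct timeval tv would
 * schedule with:
 *
 *	timeout(foo_expire, (void *)sc, hzto(&tv));
 */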

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
#endif
	register struct proc *p;
	register int i;
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;

	if (CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 *
	 * XXX	should either run linked list of drives, or (better)
	 *	grab timestamps in the start & done code.
	 */
	for (i = 0; i < DK_NDRIVE; i++)
		if (dk_busy & (1 << i))
			dk_time[i]++;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
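	/*
	 * Illustrative rates (assuming stathz = 128 and no profiling): a
	 * process that stays on the CPU gains about 128 p_estcpu per
	 * second, so the (p_estcpu & 3) == 0 test below re-runs
	 * resetpriority() after roughly every 31 ms of accumulated CPU
	 * time.
	 */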
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}

		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
			rss = vm->vm_pmap.pm_stats.resident_count *
			      PAGE_SIZE / 1024;
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");
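
/*
 * Illustrative userland consumer of the node above, via sysctl(2):
 *
 *	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *	struct clockinfo ci;
 *	size_t len = sizeof(ci);
 *
 *	if (sysctl(mib, 2, &ci, &len, NULL, 0) == 0)
 *		printf("hz=%d tick=%d\n", ci.hz, ci.tick);
 */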

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the hardclock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by hardclock() and is atomically cleared in this
 * routine.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
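	/*
	 * Example (illustrative): samples pps_tf[] = { 5, -2, 3 } fall
	 * into the "0 2 1" case below, giving pps_offset = 3 (the
	 * median) and a jitter sample v_usec = 5 - (-2) = 7.
	 */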
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
#endif /* PPS_SYNC */