kern_timeout.c revision 29805
1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
39 * $Id: kern_clock.c,v 1.41 1997/09/21 22:00:07 gibbs Exp $
40 */
41
42/* Portions of this software are covered by the following: */
43/******************************************************************************
44 *                                                                            *
45 * Copyright (c) David L. Mills 1993, 1994                                    *
46 *                                                                            *
47 * Permission to use, copy, modify, and distribute this software and its      *
48 * documentation for any purpose and without fee is hereby granted, provided  *
49 * that the above copyright notice appears in all copies and that both the    *
50 * copyright notice and this permission notice appear in supporting           *
51 * documentation, and that the name University of Delaware not be used in     *
52 * advertising or publicity pertaining to distribution of the software        *
53 * without specific, written prior permission.  The University of Delaware    *
54 * makes no representations about the suitability this software for any       *
55 * purpose.  It is provided "as is" without express or implied warranty.      *
56 *                                                                            *
57 *****************************************************************************/
58
59#include "opt_cpu.h"		/* XXX */
60
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/dkstat.h>
64#include <sys/callout.h>
65#include <sys/kernel.h>
66#include <sys/proc.h>
67#include <sys/resourcevar.h>
68#include <sys/signalvar.h>
69#include <sys/timex.h>
70#include <vm/vm.h>
71#include <sys/lock.h>
72#include <vm/pmap.h>
73#include <vm/vm_map.h>
74#include <sys/sysctl.h>
75
76#include <machine/cpu.h>
77#define CLOCK_HAIR		/* XXX */
78#include <machine/clock.h>
79#include <machine/limits.h>
80
81#ifdef GPROF
82#include <sys/gmon.h>
83#endif
84
85static void initclocks __P((void *dummy));
86SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
87
88/* Exported to machdep.c. */
89struct callout *callout;
90struct callout_list callfree;
91int callwheelsize, callwheelbits, callwheelmask;
92struct callout_tailq *callwheel;
93
94
95/* Some of these don't belong here, but it's easiest to concentrate them. */
96static long cp_time[CPUSTATES];
97long dk_seek[DK_NDRIVE];
98static long dk_time[DK_NDRIVE];	/* time busy (in statclock ticks) */
99long dk_wds[DK_NDRIVE];
100long dk_wpms[DK_NDRIVE];
101long dk_xfer[DK_NDRIVE];
102
103int dk_busy;
104int dk_ndrive = 0;
105char dk_names[DK_NDRIVE][DK_NAMELEN];
106
107long tk_cancc;
108long tk_nin;
109long tk_nout;
110long tk_rawcc;
111
112/*
113 * Clock handling routines.
114 *
115 * This code is written to operate with two timers that run independently of
116 * each other.  The main clock, running hz times per second, is used to keep
117 * track of real time.  The second timer handles kernel and user profiling,
118 * and does resource use estimation.  If the second timer is programmable,
119 * it is randomized to avoid aliasing between the two clocks.  For example,
120 * the randomization prevents an adversary from always giving up the cpu
121 * just before its quantum expires.  Otherwise, it would never accumulate
122 * cpu ticks.  The mean frequency of the second timer is stathz.
123 *
124 * If no second timer exists, stathz will be zero; in this case we drive
125 * profiling and statistics off the main clock.  This WILL NOT be accurate;
126 * do not do it unless absolutely necessary.
127 *
128 * The statistics clock may (or may not) be run at a higher rate while
129 * profiling.  This profile clock runs at profhz.  We require that profhz
130 * be an integral multiple of stathz.
131 *
132 * If the statistics clock is running fast, it must be divided by the ratio
133 * profhz/stathz for statistics.  (For profiling, every tick counts.)
134 */
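/*
 * Illustrative example (not part of the original comment): with
 * hypothetical rates stathz = 128 and profhz = 1024, psratio is 8, so
 * while profiling only every eighth statistics-clock tick is counted
 * for statistics, while every tick still counts for the profile.
 */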
135
136/*
137 * TODO:
138 *	allocate more timeout table slots when table overflows.
139 */
140
141/*
142 * Bump a timeval by a small number of usec's.
143 */
144#define BUMPTIME(t, usec) { \
145	register volatile struct timeval *tp = (t); \
146	register long us; \
147 \
148	tp->tv_usec = us = tp->tv_usec + (usec); \
149	if (us >= 1000000) { \
150		tp->tv_usec = us - 1000000; \
151		tp->tv_sec++; \
152	} \
153}
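/*
 * Illustrative usage (a sketch, not from the original source): the
 * typical call below advances the monotonic clock by one tick; the
 * macro assumes its usec argument is less than 1000000, since only a
 * single rollover of tv_usec is handled.
 *
 *	BUMPTIME(&mono_time, tick);
 */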
154
155int	stathz;
156int	profhz;
157static int profprocs;
158int	ticks;
159static int softticks;			/* Like ticks, but for softclock(). */
160static struct callout *nextsoftcheck;	/* Next callout to be checked. */
161static int psdiv, pscnt;		/* prof => stat divider */
162int psratio;				/* ratio: prof / stat */
163
164volatile struct	timeval time;
165volatile struct	timeval mono_time;
166
167/*
168 * Phase/frequency-lock loop (PLL/FLL) definitions
169 *
170 * The following variables are read and set by the ntp_adjtime() system
171 * call.
172 *
173 * time_state shows the state of the system clock, with values defined
174 * in the timex.h header file.
175 *
176 * time_status shows the status of the system clock, with bits defined
177 * in the timex.h header file.
178 *
179 * time_offset is used by the PLL/FLL to adjust the system time in small
180 * increments.
181 *
182 * time_constant determines the bandwidth or "stiffness" of the PLL.
183 *
184 * time_tolerance determines maximum frequency error or tolerance of the
185 * CPU clock oscillator and is a property of the architecture; however,
186 * in principle it could change as a result of the presence of external
187 * discipline signals, for instance.
188 *
189 * time_precision is usually equal to the kernel tick variable; however,
190 * in cases where a precision clock counter or external clock is
191 * available, the resolution can be much less than this and depend on
192 * whether the external clock is working or not.
193 *
194 * time_maxerror is initialized by a ntp_adjtime() call and increased by
195 * the kernel once each second to reflect the maximum error
196 * bound growth.
197 *
198 * time_esterror is set and read by the ntp_adjtime() call, but
199 * otherwise not used by the kernel.
200 */
201int time_status = STA_UNSYNC;	/* clock status bits */
202int time_state = TIME_OK;	/* clock state */
203long time_offset = 0;		/* time offset (us) */
204long time_constant = 0;		/* pll time constant */
205long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
206long time_precision = 1;	/* clock precision (us) */
207long time_maxerror = MAXPHASE;	/* maximum error (us) */
208long time_esterror = MAXPHASE;	/* estimated error (us) */
209
210/*
211 * The following variables establish the state of the PLL/FLL and the
212 * residual time and frequency offset of the local clock. The scale
213 * factors are defined in the timex.h header file.
214 *
215 * time_phase and time_freq are the phase increment and the frequency
216 * increment, respectively, of the kernel time variable at each tick of
217 * the clock.
218 *
219 * time_freq is set via ntp_adjtime() from a value stored in a file when
220 * the synchronization daemon is first started. Its value is retrieved
221 * via ntp_adjtime() and written to the file about once per hour by the
222 * daemon.
223 *
224 * time_adj is the adjustment added to the value of tick at each timer
225 * interrupt and is recomputed from time_phase and time_freq at each
226 * seconds rollover.
227 *
228 * time_reftime is the second's portion of the system time on the last
229 * call to ntp_adjtime(). It is used to adjust the time_freq variable
230 * and to increase the time_maxerror as the time since last update
231 * increases.
232 */
233static long time_phase = 0;		/* phase offset (scaled us) */
234long time_freq = 0;			/* frequency offset (scaled ppm) */
235static long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
236static long time_reftime = 0;		/* time at last adjustment (s) */
237
238#ifdef PPS_SYNC
239/*
240 * The following variables are used only if the kernel PPS discipline
241 * code is configured (PPS_SYNC). The scale factors are defined in the
242 * timex.h header file.
243 *
244 * pps_time contains the time at each calibration interval, as read by
245 * microtime(). pps_count counts the seconds of the calibration
246 * interval, the duration of which is nominally pps_shift in powers of
247 * two.
248 *
249 * pps_offset is the time offset produced by the time median filter
250 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
251 * this filter.
252 *
253 * pps_freq is the frequency offset produced by the frequency median
254 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
255 * by this filter.
256 *
257 * pps_usec is latched from a high resolution counter or external clock
258 * at pps_time. Here we want the hardware counter contents only, not the
259 * contents plus the time_tv.usec as usual.
260 *
261 * pps_valid counts the number of seconds since the last PPS update. It
262 * is used as a watchdog timer to disable the PPS discipline should the
263 * PPS signal be lost.
264 *
265 * pps_glitch counts the number of seconds since the beginning of an
266 * offset burst more than tick/2 from current nominal offset. It is used
267 * mainly to suppress error bursts due to priority conflicts between the
268 * PPS interrupt and timer interrupt.
269 *
270 * pps_intcnt counts the calibration intervals for use in the interval-
271 * adaptation algorithm. It's just too complicated for words.
272 */
273struct timeval pps_time;	/* kernel time at last interval */
274long pps_offset = 0;		/* pps time offset (us) */
275long pps_jitter = MAXTIME;	/* pps time dispersion (jitter) (us) */
276long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
277long pps_freq = 0;		/* frequency offset (scaled ppm) */
278long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
279long pps_ff[] = {0, 0, 0};	/* frequency offset median filter */
280long pps_usec = 0;		/* microsec counter at last interval */
281long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
282int pps_glitch = 0;		/* pps signal glitch counter */
283int pps_count = 0;		/* calibration interval counter (s) */
284int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
285int pps_intcnt = 0;		/* intervals at current duration */
286
287/*
288 * PPS signal quality monitors
289 *
290 * pps_jitcnt counts the seconds that have been discarded because the
291 * jitter measured by the time median filter exceeds the limit MAXTIME
292 * (100 us).
293 *
294 * pps_calcnt counts the frequency calibration intervals, which are
295 * variable from 4 s to 256 s.
296 *
297 * pps_errcnt counts the calibration intervals which have been discarded
298 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
299 * calibration interval jitter exceeds two ticks.
300 *
301 * pps_stbcnt counts the calibration intervals that have been discarded
302 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
303 */
304long pps_jitcnt = 0;		/* jitter limit exceeded */
305long pps_calcnt = 0;		/* calibration intervals */
306long pps_errcnt = 0;		/* calibration errors */
307long pps_stbcnt = 0;		/* stability limit exceeded */
308#endif /* PPS_SYNC */
309
310/* XXX none of this stuff works under FreeBSD */
311#ifdef EXT_CLOCK
312/*
313 * External clock definitions
314 *
315 * The following definitions and declarations are used only if an
316 * external clock (HIGHBALL or TPRO) is configured on the system.
317 */
318#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */
319
320/*
321 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
322 * interrupt and decremented once each second.
323 */
324int clock_count = 0;		/* CPU clock counter */
325
326#ifdef HIGHBALL
327/*
328 * The clock_offset and clock_cpu variables are used by the HIGHBALL
329 * interface. The clock_offset variable defines the offset between
330 * system time and the HIGHBALL counters. The clock_cpu variable contains
331 * the offset between the system clock and the HIGHBALL clock for use in
332 * disciplining the kernel time variable.
333 */
334extern struct timeval clock_offset; /* Highball clock offset */
335long clock_cpu = 0;		/* CPU clock adjust */
336#endif /* HIGHBALL */
337#endif /* EXT_CLOCK */
338
339/*
340 * hardupdate() - local clock update
341 *
342 * This routine is called by ntp_adjtime() to update the local clock
343 * phase and frequency. The implementation is of an adaptive-parameter,
344 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
345 * time and frequency offset estimates for each call. If the kernel PPS
346 * discipline code is configured (PPS_SYNC), the PPS signal itself
347 * determines the new time offset, instead of the calling argument.
348 * Presumably, calls to ntp_adjtime() occur only when the caller
349 * believes the local clock is valid within some bound (+-128 ms with
350 * NTP). If the caller's time is far different than the PPS time, an
351 * argument will ensue, and it's not clear who will lose.
352 *
353 * For uncompensated quartz crystal oscillators and nominal update
354 * intervals less than 1024 s, operation should be in phase-lock mode
355 * (STA_FLL = 0), where the loop is disciplined to phase. For update
356 * intervals greater than this, operation should be in frequency-lock
357 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
358 *
359 * Note: splclock() is in effect.
360 */
361void
362hardupdate(offset)
363	long offset;
364{
365	long ltemp, mtemp;
366
367	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
368		return;
369	ltemp = offset;
370#ifdef PPS_SYNC
371	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
372		ltemp = pps_offset;
373#endif /* PPS_SYNC */
374
375	/*
376	 * Scale the phase adjustment and clamp to the operating range.
377	 */
378	if (ltemp > MAXPHASE)
379		time_offset = MAXPHASE << SHIFT_UPDATE;
380	else if (ltemp < -MAXPHASE)
381		time_offset = -(MAXPHASE << SHIFT_UPDATE);
382	else
383		time_offset = ltemp << SHIFT_UPDATE;
384
385	/*
386	 * Select whether the frequency is to be controlled and in which
387	 * mode (PLL or FLL). Clamp to the operating range. Ugly
388	 * multiply/divide should be replaced someday.
389	 */
390	if (time_status & STA_FREQHOLD || time_reftime == 0)
391		time_reftime = time.tv_sec;
392	mtemp = time.tv_sec - time_reftime;
393	time_reftime = time.tv_sec;
394	if (time_status & STA_FLL) {
395		if (mtemp >= MINSEC) {
396			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
397			    SHIFT_UPDATE));
398			if (ltemp < 0)
399				time_freq -= -ltemp >> SHIFT_KH;
400			else
401				time_freq += ltemp >> SHIFT_KH;
402		}
403	} else {
404		if (mtemp < MAXSEC) {
405			ltemp *= mtemp;
406			if (ltemp < 0)
407				time_freq -= -ltemp >> (time_constant +
408				    time_constant + SHIFT_KF -
409				    SHIFT_USEC);
410			else
411				time_freq += ltemp >> (time_constant +
412				    time_constant + SHIFT_KF -
413				    SHIFT_USEC);
414		}
415	}
416	if (time_freq > time_tolerance)
417		time_freq = time_tolerance;
418	else if (time_freq < -time_tolerance)
419		time_freq = -time_tolerance;
420}
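/*
 * Worked example (illustrative only; it assumes the +-512 ms MAXPHASE
 * figure quoted in the hardclock() comments below): a caller passing an
 * offset of +700000 us is clamped, so time_offset becomes
 * MAXPHASE << SHIFT_UPDATE, while an offset of +100000 us is within
 * range and is simply scaled to 100000 << SHIFT_UPDATE.
 */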
421
422
423
424/*
425 * Initialize clock frequencies and start both clocks running.
426 */
427/* ARGSUSED*/
428static void
429initclocks(dummy)
430	void *dummy;
431{
432	register int i;
433
434	/*
435	 * Set divisors to 1 (normal case) and let the machine-specific
436	 * code do its bit.
437	 */
438	psdiv = pscnt = 1;
439	cpu_initclocks();
440
441	/*
442	 * Compute profhz/stathz, and fix profhz if needed.
443	 */
444	i = stathz ? stathz : hz;
445	if (profhz == 0)
446		profhz = i;
447	psratio = profhz / i;
448}
449
450/*
451 * The real-time timer, interrupting hz times per second.
452 */
453void
454hardclock(frame)
455	register struct clockframe *frame;
456{
457	register struct callout *p1;
458	register struct proc *p;
459
460	p = curproc;
461	if (p) {
462		register struct pstats *pstats;
463
464		/*
465		 * Run current process's virtual and profile time, as needed.
466		 */
467		pstats = p->p_stats;
468		if (CLKF_USERMODE(frame) &&
469		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
470		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
471			psignal(p, SIGVTALRM);
472		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
473		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
474			psignal(p, SIGPROF);
475	}
476
477	/*
478	 * If no separate statistics clock is available, run it from here.
479	 */
480	if (stathz == 0)
481		statclock(frame);
482
483	/*
484	 * Increment the time-of-day.
485	 */
486	ticks++;
487	{
488		int time_update;
489		struct timeval newtime = time;
490		long ltemp;
491
492		if (timedelta == 0) {
493			time_update = CPU_THISTICKLEN(tick);
494		} else {
495			time_update = CPU_THISTICKLEN(tick) + tickdelta;
496			timedelta -= tickdelta;
497		}
498		BUMPTIME(&mono_time, time_update);
499
500		/*
501		 * Compute the phase adjustment. If the low-order bits
502		 * (time_phase) of the update overflow, bump the high-order bits
503		 * (time_update).
504		 */
505		time_phase += time_adj;
506		if (time_phase <= -FINEUSEC) {
507		  ltemp = -time_phase >> SHIFT_SCALE;
508		  time_phase += ltemp << SHIFT_SCALE;
509		  time_update -= ltemp;
510		}
511		else if (time_phase >= FINEUSEC) {
512		  ltemp = time_phase >> SHIFT_SCALE;
513		  time_phase -= ltemp << SHIFT_SCALE;
514		  time_update += ltemp;
515		}
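		/*
		 * Illustrative note (not in the original comments): time_adj
		 * is accumulated here in units of 1/2^SHIFT_SCALE of a
		 * microsecond, so once time_phase has gathered at least one
		 * whole microsecond (FINEUSEC) in either direction, the whole
		 * microseconds are folded into time_update and only the
		 * fractional part is carried forward in time_phase.
		 */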
516
517		newtime.tv_usec += time_update;
518		/*
519		 * On rollover of the second the phase adjustment to be used for
520		 * the next second is calculated. Also, the maximum error is
521		 * increased by the tolerance. If the PPS frequency discipline
522		 * code is present, the phase is increased to compensate for the
523		 * CPU clock oscillator frequency error.
524		 *
525		 * On a 32-bit machine and given parameters in the timex.h
526		 * header file, the maximum phase adjustment is +-512 ms and
527 * maximum frequency offset is a tad less than +-512 ppm. On a
528		 * 64-bit machine, you shouldn't need to ask.
529		 */
530		if (newtime.tv_usec >= 1000000) {
531		  newtime.tv_usec -= 1000000;
532		  newtime.tv_sec++;
533		  time_maxerror += time_tolerance >> SHIFT_USEC;
534
535		  /*
536		   * Compute the phase adjustment for the next second. In
537		   * PLL mode, the offset is reduced by a fixed factor
538		   * times the time constant. In FLL mode the offset is
539		   * used directly. In either mode, the maximum phase
540		   * adjustment for each second is clamped so as to spread
541		   * the adjustment over not more than the number of
542		   * seconds between updates.
543		   */
544		  if (time_offset < 0) {
545		    ltemp = -time_offset;
546		    if (!(time_status & STA_FLL))
547			ltemp >>= SHIFT_KG + time_constant;
548		    if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
549			ltemp = (MAXPHASE / MINSEC) <<
550			    SHIFT_UPDATE;
551		    time_offset += ltemp;
552		    time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
553		  } else {
554		    ltemp = time_offset;
555		    if (!(time_status & STA_FLL))
556			ltemp >>= SHIFT_KG + time_constant;
557		    if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
558			ltemp = (MAXPHASE / MINSEC) <<
559			    SHIFT_UPDATE;
560		    time_offset -= ltemp;
561		    time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
562			SHIFT_UPDATE);
563		  }
564		    }
565
566		  /*
567		   * Compute the frequency estimate and additional phase
568		   * adjustment due to frequency error for the next
569		   * second. When the PPS signal is engaged, gnaw on the
570		   * watchdog counter and update the frequency computed by
571		   * the pll and the PPS signal.
572		   */
573#ifdef PPS_SYNC
574		  pps_valid++;
575		  if (pps_valid == PPS_VALID) {
576		    pps_jitter = MAXTIME;
577		    pps_stabil = MAXFREQ;
578		    time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
579				     STA_PPSWANDER | STA_PPSERROR);
580		  }
581		  ltemp = time_freq + pps_freq;
582#else
583		  ltemp = time_freq;
584#endif /* PPS_SYNC */
585		  if (ltemp < 0)
586		    time_adj -= -ltemp >>
587		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
588		  else
589		    time_adj += ltemp >>
590		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
591
592#if SHIFT_HZ == 7
593		  /*
594		   * When the CPU clock oscillator frequency is not a
595		   * power of two in Hz, the SHIFT_HZ is only an
596		   * approximate scale factor. In the SunOS kernel, this
597 * results in a PLL gain factor of 1/1.28 = 0.78 times what it
598		   * should be. In the following code the overall gain is
599		   * increased by a factor of 1.25, which results in a
600		   * residual error less than 3 percent.
601		   */
602		  /* Same thing applies for FreeBSD --GAW */
603		  if (hz == 100) {
604		    if (time_adj < 0)
605		      time_adj -= -time_adj >> 2;
606		    else
607		      time_adj += time_adj >> 2;
608		  }
609#endif /* SHIFT_HZ */
610
611		  /* XXX - this is really bogus, but can't be fixed until
612		     xntpd's idea of the system clock is fixed to know how
613		     the user wants leap seconds handled; in the mean time,
614		     we assume that users of NTP are running without proper
615		     leap second support (this is now the default anyway) */
616		  /*
617		   * Leap second processing. If in leap-insert state at
618		   * the end of the day, the system clock is set back one
619		   * second; if in leap-delete state, the system clock is
620		   * set ahead one second. The microtime() routine or
621		   * external clock driver will insure that reported time
622		   * is always monotonic. The ugly divides should be
623		   * replaced.
624		   */
625		  switch (time_state) {
626
627		  case TIME_OK:
628		    if (time_status & STA_INS)
629		      time_state = TIME_INS;
630		    else if (time_status & STA_DEL)
631		      time_state = TIME_DEL;
632		    break;
633
634		  case TIME_INS:
635		    if (newtime.tv_sec % 86400 == 0) {
636		      newtime.tv_sec--;
637		      time_state = TIME_OOP;
638		    }
639		    break;
640
641		  case TIME_DEL:
642		    if ((newtime.tv_sec + 1) % 86400 == 0) {
643		      newtime.tv_sec++;
644		      time_state = TIME_WAIT;
645		    }
646		    break;
647
648		  case TIME_OOP:
649		    time_state = TIME_WAIT;
650		    break;
651
652		  case TIME_WAIT:
653		    if (!(time_status & (STA_INS | STA_DEL)))
654		      time_state = TIME_OK;
655		  }
656		}
657		CPU_CLOCKUPDATE(&time, &newtime);
658	}
659
660	/*
661	 * Process callouts at a very low cpu priority, so we don't keep the
662	 * relatively high clock interrupt priority any longer than necessary.
663	 */
664	if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
665		if (CLKF_BASEPRI(frame)) {
666			/*
667			 * Save the overhead of a software interrupt;
668			 * it will happen as soon as we return, so do it now.
669			 */
670			(void)splsoftclock();
671			softclock();
672		} else
673			setsoftclock();
674	} else if (softticks + 1 == ticks) {
675		++softticks;
676	}
677}
678
679/*
680 * The callout mechanism is based on the work of Adam M. Costello and
681 * George Varghese, published in a technical report entitled "Redesigning
682 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
683 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
684 * used in this implementation was published by G.Varghese and A. Lauck in
685 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
686 * the Efficient Implementation of a Timer Facility" in the Proceedings of
687 * the 11th ACM Annual Symposium on Operating Systems Principles,
688 * Austin, Texas Nov 1987.
689 */
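/*
 * Illustrative sketch of the wheel (not part of the original citation):
 * callwheelsize is a power of two, so a callout expiring at c_time is
 * hashed into bucket c_time & callwheelmask.  With a hypothetical
 * callwheelsize of 256, a timeout 1000 ticks in the future shares a
 * bucket with callouts due 256, 512, ... ticks sooner or later;
 * softclock() walks the bucket once per tick and runs only the entries
 * whose c_time matches the tick being processed, skipping the rest.
 */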
690/*
691 * Software (low priority) clock interrupt.
692 * Run periodic events from timeout queue.
693 */
694/*ARGSUSED*/
695void
696softclock()
697{
698	register struct callout *c;
699	register struct callout_tailq *bucket;
700	register int s;
701	register int curticks;
702	register int steps;	/*
703				 * Number of steps taken since
704				 * we last allowed interrupts.
705				 */
706
707	#ifndef MAX_SOFTCLOCK_STEPS
708	#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
709	#endif /* MAX_SOFTCLOCK_STEPS */
710
711	steps = 0;
712	s = splhigh();
713	while (softticks != ticks) {
714		softticks++;
715		/*
716		 * softticks may be modified by hard clock, so cache
717		 * it while we work on a given bucket.
718		 */
719		curticks = softticks;
720		bucket = &callwheel[curticks & callwheelmask];
721		c = TAILQ_FIRST(bucket);
722		while (c) {
723			if (c->c_time != curticks) {
724				c = TAILQ_NEXT(c, c_links.tqe);
725				++steps;
726				if (steps >= MAX_SOFTCLOCK_STEPS) {
727					nextsoftcheck = c;
728					/* Give interrupts a chance. */
729					splx(s);
730					s = splhigh();
731					c = nextsoftcheck;
732					steps = 0;
733				}
734			} else {
735				void (*c_func)(void *);
736				void *c_arg;
737
738				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
739				TAILQ_REMOVE(bucket, c, c_links.tqe);
740				c_func = c->c_func;
741				c_arg = c->c_arg;
742				c->c_func = NULL;
743				SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
744				splx(s);
745				c_func(c_arg);
746				s = splhigh();
747				steps = 0;
748				c = nextsoftcheck;
749			}
750		}
751	}
752	nextsoftcheck = NULL;
753	splx(s);
754}
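/*
 * Illustrative note (not part of the original comments): nextsoftcheck is
 * the handshake between softclock() and untimeout().  Whenever softclock()
 * drops splhigh() to let interrupts run, untimeout() may remove a callout;
 * if that callout is the one softclock() was about to resume at,
 * untimeout() advances nextsoftcheck first, so the walk never resumes at a
 * freed entry.
 */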
755
756/*
757 * timeout --
758 *	Execute a function after a specified length of time.
759 *
760 * untimeout --
761 *	Cancel previous timeout function call.
762 *
763 * callout_handle_init --
764 *	Initialize a handle so that using it with untimeout is benign.
765 *
766 *	See AT&T BCI Driver Reference Manual for specification.  This
767 *	implementation differs from that one in that although an
768 *	identification value is returned from timeout, the original
769 *	arguments to timeout as well as the identifier are used to
770 *	identify entries for untimeout.
771 */
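/*
 * Illustrative usage (a sketch with hypothetical names, not part of the
 * original reference):
 *
 *	struct callout_handle ch;
 *
 *	callout_handle_init(&ch);
 *	ch = timeout(foo_timeout, sc, hz);
 *	...
 *	untimeout(foo_timeout, sc, ch);
 *
 * foo_timeout(sc) runs roughly one second (hz ticks) later unless the
 * matching untimeout() call, giving both the original arguments and the
 * handle, cancels it first.
 */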
772struct callout_handle
773timeout(ftn, arg, to_ticks)
774	timeout_t ftn;
775	void *arg;
776	register int to_ticks;
777{
778	int s;
779	struct callout *new;
780	struct callout_handle handle;
781
782	if (to_ticks <= 0)
783		to_ticks = 1;
784
785	/* Lock out the clock. */
786	s = splhigh();
787
788	/* Fill in the next free callout structure. */
789	new = SLIST_FIRST(&callfree);
790	if (new == NULL)
791		/* XXX Attempt to malloc first */
792		panic("timeout table full");
793
794	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
795	new->c_arg = arg;
796	new->c_func = ftn;
797	new->c_time = ticks + to_ticks;
798	TAILQ_INSERT_TAIL(&callwheel[new->c_time & callwheelmask],
799			  new, c_links.tqe);
800
801	splx(s);
802	handle.callout = new;
803	return (handle);
804}
805
806void
807untimeout(ftn, arg, handle)
808	timeout_t ftn;
809	void *arg;
810	struct callout_handle handle;
811{
812	register struct callout *p, *t;
813	register int s;
814
815	/*
816	 * Check for a handle that was initialized
817	 * by callout_handle_init, but never used
818	 * for a real timeout.
819	 */
820	if (handle.callout == NULL)
821		return;
822
823	s = splhigh();
824	if ((handle.callout->c_func == ftn)
825	 && (handle.callout->c_arg == arg)) {
826		if (nextsoftcheck == handle.callout) {
827			nextsoftcheck = TAILQ_NEXT(handle.callout, c_links.tqe);
828		}
829		TAILQ_REMOVE(&callwheel[handle.callout->c_time & callwheelmask],
830			     handle.callout, c_links.tqe);
831		handle.callout->c_func = NULL;
832		SLIST_INSERT_HEAD(&callfree, handle.callout, c_links.sle);
833	}
834	splx(s);
835}
836
837void
838callout_handle_init(struct callout_handle *handle)
839{
840	handle->callout = NULL;
841}
842
843void
844gettime(struct timeval *tvp)
845{
846	int s;
847
848	s = splclock();
849	/* XXX should use microtime() iff tv_usec is used. */
850	*tvp = time;
851	splx(s);
852}
853
854/*
855 * Compute number of hz until specified time.  Used to
856 * compute third argument to timeout() from an absolute time.
857 */
858int
859hzto(tv)
860	struct timeval *tv;
861{
862	register unsigned long ticks;
863	register long sec, usec;
864	int s;
865
866	/*
867	 * If the number of usecs in the whole seconds part of the time
868	 * difference fits in a long, then the total number of usecs will
869	 * fit in an unsigned long.  Compute the total and convert it to
870	 * ticks, rounding up and adding 1 to allow for the current tick
871	 * to expire.  Rounding also depends on unsigned long arithmetic
872	 * to avoid overflow.
873	 *
874	 * Otherwise, if the number of ticks in the whole seconds part of
875	 * the time difference fits in a long, then convert the parts to
876	 * ticks separately and add, using similar rounding methods and
877	 * overflow avoidance.  This method would work in the previous
878	 * case but it is slightly slower and assumes that hz is integral.
879	 *
880	 * Otherwise, round the time difference down to the maximum
881	 * representable value.
882	 *
883	 * If ints have 32 bits, then the maximum value for any timeout in
884	 * 10ms ticks is 248 days.
885	 */
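	/*
	 * Worked example (illustrative, assuming hz = 100 so tick = 10000):
	 * for a target 1 second and 5000 usec in the future, sec = 1 and
	 * usec = 5000, giving (1000000 + 5000 + 9999) / 10000 + 1 = 102
	 * ticks, i.e. the value is rounded up and padded by one tick for
	 * the partially elapsed current tick.
	 */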
886	s = splclock();
887	sec = tv->tv_sec - time.tv_sec;
888	usec = tv->tv_usec - time.tv_usec;
889	splx(s);
890	if (usec < 0) {
891		sec--;
892		usec += 1000000;
893	}
894	if (sec < 0) {
895#ifdef DIAGNOSTIC
896		printf("hzto: negative time difference %ld sec %ld usec\n",
897		       sec, usec);
898#endif
899		ticks = 1;
900	} else if (sec <= LONG_MAX / 1000000)
901		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
902			/ tick + 1;
903	else if (sec <= LONG_MAX / hz)
904		ticks = sec * hz
905			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
906	else
907		ticks = LONG_MAX;
908	if (ticks > INT_MAX)
909		ticks = INT_MAX;
910	return (ticks);
911}
912
913/*
914 * Start profiling on a process.
915 *
916 * Kernel profiling passes proc0 which never exits and hence
917 * keeps the profile clock running constantly.
918 */
919void
920startprofclock(p)
921	register struct proc *p;
922{
923	int s;
924
925	if ((p->p_flag & P_PROFIL) == 0) {
926		p->p_flag |= P_PROFIL;
927		if (++profprocs == 1 && stathz != 0) {
928			s = splstatclock();
929			psdiv = pscnt = psratio;
930			setstatclockrate(profhz);
931			splx(s);
932		}
933	}
934}
935
936/*
937 * Stop profiling on a process.
938 */
939void
940stopprofclock(p)
941	register struct proc *p;
942{
943	int s;
944
945	if (p->p_flag & P_PROFIL) {
946		p->p_flag &= ~P_PROFIL;
947		if (--profprocs == 0 && stathz != 0) {
948			s = splstatclock();
949			psdiv = pscnt = 1;
950			setstatclockrate(stathz);
951			splx(s);
952		}
953	}
954}
955
956/*
957 * Statistics clock.  Grab profile sample, and if divider reaches 0,
958 * do process and kernel statistics.
959 */
960void
961statclock(frame)
962	register struct clockframe *frame;
963{
964#ifdef GPROF
965	register struct gmonparam *g;
966#endif
967	register struct proc *p;
968	register int i;
969	struct pstats *pstats;
970	long rss;
971	struct rusage *ru;
972	struct vmspace *vm;
973
974	if (CLKF_USERMODE(frame)) {
975		p = curproc;
976		if (p->p_flag & P_PROFIL)
977			addupc_intr(p, CLKF_PC(frame), 1);
978		if (--pscnt > 0)
979			return;
980		/*
981		 * Came from user mode; CPU was in user state.
982		 * If this process is being profiled record the tick.
983		 */
984		p->p_uticks++;
985		if (p->p_nice > NZERO)
986			cp_time[CP_NICE]++;
987		else
988			cp_time[CP_USER]++;
989	} else {
990#ifdef GPROF
991		/*
992		 * Kernel statistics are just like addupc_intr, only easier.
993		 */
994		g = &_gmonparam;
995		if (g->state == GMON_PROF_ON) {
996			i = CLKF_PC(frame) - g->lowpc;
997			if (i < g->textsize) {
998				i /= HISTFRACTION * sizeof(*g->kcount);
999				g->kcount[i]++;
1000			}
1001		}
1002#endif
1003		if (--pscnt > 0)
1004			return;
1005		/*
1006		 * Came from kernel mode, so we were:
1007		 * - handling an interrupt,
1008		 * - doing syscall or trap work on behalf of the current
1009		 *   user process, or
1010		 * - spinning in the idle loop.
1011		 * Whichever it is, charge the time as appropriate.
1012		 * Note that we charge interrupts to the current process,
1013		 * regardless of whether they are ``for'' that process,
1014		 * so that we know how much of its real time was spent
1015		 * in ``non-process'' (i.e., interrupt) work.
1016		 */
1017		p = curproc;
1018		if (CLKF_INTR(frame)) {
1019			if (p != NULL)
1020				p->p_iticks++;
1021			cp_time[CP_INTR]++;
1022		} else if (p != NULL && !(p->p_flag & P_IDLEPROC)) {
1023			p->p_sticks++;
1024			cp_time[CP_SYS]++;
1025		} else
1026			cp_time[CP_IDLE]++;
1027	}
1028	pscnt = psdiv;
1029
1030	/*
1031	 * We maintain statistics shown by user-level statistics
1032	 * programs:  the amount of time in each cpu state, and
1033	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
1034	 *
1035	 * XXX	should either run linked list of drives, or (better)
1036	 *	grab timestamps in the start & done code.
1037	 */
1038	for (i = 0; i < DK_NDRIVE; i++)
1039		if (dk_busy & (1 << i))
1040			dk_time[i]++;
1041
1042	/*
1043	 * We adjust the priority of the current process.  The priority of
1044	 * a process gets worse as it accumulates CPU time.  The cpu usage
1045	 * estimator (p_estcpu) is increased here.  The formula for computing
1046	 * priorities (in kern_synch.c) will compute a different value each
1047	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
1048	 * quite quickly when the process is running (linearly), and decays
1049	 * away exponentially, at a rate which is proportionally slower when
1050 * the system is busy.  The basic principle is that the system will
1051	 * 90% forget that the process used a lot of CPU time in 5 * loadav
1052	 * seconds.  This causes the system to favor processes which haven't
1053	 * run much recently, and to round-robin among other processes.
1054	 */
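	/*
	 * Illustrative note (not in the original comment): p_estcpu is
	 * bumped once per statistics tick here, so a process that runs
	 * continuously gains roughly stathz estcpu per second, and
	 * resetpriority() is invoked on every fourth increment via the
	 * (p_estcpu & 3) == 0 test below.
	 */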
1055	if (p != NULL) {
1056		p->p_cpticks++;
1057		if (++p->p_estcpu == 0)
1058			p->p_estcpu--;
1059		if ((p->p_estcpu & 3) == 0) {
1060			resetpriority(p);
1061			if (p->p_priority >= PUSER)
1062				p->p_priority = p->p_usrpri;
1063		}
1064
1065		/* Update resource usage integrals and maximums. */
1066		if ((pstats = p->p_stats) != NULL &&
1067		    (ru = &pstats->p_ru) != NULL &&
1068		    (vm = p->p_vmspace) != NULL) {
1069			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
1070			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
1071			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
1072			rss = vm->vm_pmap.pm_stats.resident_count *
1073			      PAGE_SIZE / 1024;
1074			if (ru->ru_maxrss < rss)
1075				ru->ru_maxrss = rss;
1076		}
1077	}
1078}
1079
1080/*
1081 * Return information about system clocks.
1082 */
1083static int
1084sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
1085{
1086	struct clockinfo clkinfo;
1087	/*
1088	 * Construct clockinfo structure.
1089	 */
1090	clkinfo.hz = hz;
1091	clkinfo.tick = tick;
1092	clkinfo.tickadj = tickadj;
1093	clkinfo.profhz = profhz;
1094	clkinfo.stathz = stathz ? stathz : hz;
1095	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
1096}
1097
1098SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
1099	0, 0, sysctl_kern_clockrate, "S,clockinfo","");
1100
1101#ifdef PPS_SYNC
1102/*
1103 * hardpps() - discipline CPU clock oscillator to external PPS signal
1104 *
1105 * This routine is called at each PPS interrupt in order to discipline
1106 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1107 * and leaves it in a handy spot for the hardclock() routine. It
1108 * integrates successive PPS phase differences and calculates the
1109 * frequency offset. This is used in hardclock() to discipline the CPU
1110 * clock oscillator so that intrinsic frequency error is cancelled out.
1111 * The code requires the caller to capture the time and hardware counter
1112 * value at the on-time PPS signal transition.
1113 *
1114 * Note that, on some Unix systems, this routine runs at an interrupt
1115 * priority level higher than the timer interrupt routine hardclock().
1116 * Therefore, the variables used are distinct from the hardclock()
1117 * variables, except for certain exceptions: The PPS frequency pps_freq
1118 * and phase pps_offset variables are determined by this routine and
1119 * updated atomically. The time_tolerance variable can be considered a
1120 * constant, since it is infrequently changed, and then only when the
1121 * PPS signal is disabled. The watchdog counter pps_valid is updated
1122 * once per second by hardclock() and is atomically cleared in this
1123 * routine.
1124 */
1125void
1126hardpps(tvp, usec)
1127	struct timeval *tvp;		/* time at PPS */
1128	long usec;			/* hardware counter at PPS */
1129{
1130	long u_usec, v_usec, bigtick;
1131	long cal_sec, cal_usec;
1132
1133	/*
1134	 * An occasional glitch can be produced when the PPS interrupt
1135	 * occurs in the hardclock() routine before the time variable is
1136	 * updated. Here the offset is discarded when the difference
1137	 * between it and the last one is greater than tick/2, but not
1138	 * if the interval since the first discard exceeds 30 s.
1139	 */
1140	time_status |= STA_PPSSIGNAL;
1141	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1142	pps_valid = 0;
1143	u_usec = -tvp->tv_usec;
1144	if (u_usec < -500000)
1145		u_usec += 1000000;
1146	v_usec = pps_offset - u_usec;
1147	if (v_usec < 0)
1148		v_usec = -v_usec;
1149	if (v_usec > (tick >> 1)) {
1150		if (pps_glitch > MAXGLITCH) {
1151			pps_glitch = 0;
1152			pps_tf[2] = u_usec;
1153			pps_tf[1] = u_usec;
1154		} else {
1155			pps_glitch++;
1156			u_usec = pps_offset;
1157		}
1158	} else
1159		pps_glitch = 0;
1160
1161	/*
1162	 * A three-stage median filter is used to help deglitch the pps
1163	 * time. The median sample becomes the time offset estimate; the
1164	 * difference between the other two samples becomes the time
1165	 * dispersion (jitter) estimate.
1166	 */
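	/*
	 * Worked example (illustrative only): with filter contents
	 * pps_tf[] = {30, 10, 20} after the shift below, the median value
	 * 20 becomes pps_offset (the "0 2 1" case) and the jitter sample
	 * is the span of the other two samples, 30 - 10 = 20 us.
	 */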
1167	pps_tf[2] = pps_tf[1];
1168	pps_tf[1] = pps_tf[0];
1169	pps_tf[0] = u_usec;
1170	if (pps_tf[0] > pps_tf[1]) {
1171		if (pps_tf[1] > pps_tf[2]) {
1172			pps_offset = pps_tf[1];		/* 0 1 2 */
1173			v_usec = pps_tf[0] - pps_tf[2];
1174		} else if (pps_tf[2] > pps_tf[0]) {
1175			pps_offset = pps_tf[0];		/* 2 0 1 */
1176			v_usec = pps_tf[2] - pps_tf[1];
1177		} else {
1178			pps_offset = pps_tf[2];		/* 0 2 1 */
1179			v_usec = pps_tf[0] - pps_tf[1];
1180		}
1181	} else {
1182		if (pps_tf[1] < pps_tf[2]) {
1183			pps_offset = pps_tf[1];		/* 2 1 0 */
1184			v_usec = pps_tf[2] - pps_tf[0];
1185		} else  if (pps_tf[2] < pps_tf[0]) {
1186			pps_offset = pps_tf[0];		/* 1 0 2 */
1187			v_usec = pps_tf[1] - pps_tf[2];
1188		} else {
1189			pps_offset = pps_tf[2];		/* 1 2 0 */
1190			v_usec = pps_tf[1] - pps_tf[0];
1191		}
1192	}
1193	if (v_usec > MAXTIME)
1194		pps_jitcnt++;
1195	v_usec = (v_usec << PPS_AVG) - pps_jitter;
1196	if (v_usec < 0)
1197		pps_jitter -= -v_usec >> PPS_AVG;
1198	else
1199		pps_jitter += v_usec >> PPS_AVG;
1200	if (pps_jitter > (MAXTIME >> 1))
1201		time_status |= STA_PPSJITTER;
1202
1203	/*
1204	 * During the calibration interval adjust the starting time when
1205	 * the tick overflows. At the end of the interval compute the
1206	 * duration of the interval and the difference of the hardware
1207	 * counters at the beginning and end of the interval. This code
1208 * is deliciously complicated by the fact that valid differences may
1209	 * exceed the value of tick when using long calibration
1210	 * intervals and small ticks. Note that the counter can be
1211	 * greater than tick if caught at just the wrong instant, but
1212	 * the values returned and used here are correct.
1213	 */
1214	bigtick = (long)tick << SHIFT_USEC;
1215	pps_usec -= pps_freq;
1216	if (pps_usec >= bigtick)
1217		pps_usec -= bigtick;
1218	if (pps_usec < 0)
1219		pps_usec += bigtick;
1220	pps_time.tv_sec++;
1221	pps_count++;
1222	if (pps_count < (1 << pps_shift))
1223		return;
1224	pps_count = 0;
1225	pps_calcnt++;
1226	u_usec = usec << SHIFT_USEC;
1227	v_usec = pps_usec - u_usec;
1228	if (v_usec >= bigtick >> 1)
1229		v_usec -= bigtick;
1230	if (v_usec < -(bigtick >> 1))
1231		v_usec += bigtick;
1232	if (v_usec < 0)
1233		v_usec = -(-v_usec >> pps_shift);
1234	else
1235		v_usec = v_usec >> pps_shift;
1236	pps_usec = u_usec;
1237	cal_sec = tvp->tv_sec;
1238	cal_usec = tvp->tv_usec;
1239	cal_sec -= pps_time.tv_sec;
1240	cal_usec -= pps_time.tv_usec;
1241	if (cal_usec < 0) {
1242		cal_usec += 1000000;
1243		cal_sec--;
1244	}
1245	pps_time = *tvp;
1246
1247	/*
1248	 * Check for lost interrupts, noise, excessive jitter and
1249	 * excessive frequency error. The number of timer ticks during
1250	 * the interval may vary +-1 tick. Add to this a margin of one
1251	 * tick for the PPS signal jitter and maximum frequency
1252	 * deviation. If the limits are exceeded, the calibration
1253	 * interval is reset to the minimum and we start over.
1254	 */
1255	u_usec = (long)tick << 1;
1256	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
1257	    || (cal_sec == 0 && cal_usec < u_usec))
1258	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
1259		pps_errcnt++;
1260		pps_shift = PPS_SHIFT;
1261		pps_intcnt = 0;
1262		time_status |= STA_PPSERROR;
1263		return;
1264	}
1265
1266	/*
1267	 * A three-stage median filter is used to help deglitch the pps
1268	 * frequency. The median sample becomes the frequency offset
1269	 * estimate; the difference between the other two samples
1270	 * becomes the frequency dispersion (stability) estimate.
1271	 */
1272	pps_ff[2] = pps_ff[1];
1273	pps_ff[1] = pps_ff[0];
1274	pps_ff[0] = v_usec;
1275	if (pps_ff[0] > pps_ff[1]) {
1276		if (pps_ff[1] > pps_ff[2]) {
1277			u_usec = pps_ff[1];		/* 0 1 2 */
1278			v_usec = pps_ff[0] - pps_ff[2];
1279		} else if (pps_ff[2] > pps_ff[0]) {
1280			u_usec = pps_ff[0];		/* 2 0 1 */
1281			v_usec = pps_ff[2] - pps_ff[1];
1282		} else {
1283			u_usec = pps_ff[2];		/* 0 2 1 */
1284			v_usec = pps_ff[0] - pps_ff[1];
1285		}
1286	} else {
1287		if (pps_ff[1] < pps_ff[2]) {
1288			u_usec = pps_ff[1];		/* 2 1 0 */
1289			v_usec = pps_ff[2] - pps_ff[0];
1290		} else  if (pps_ff[2] < pps_ff[0]) {
1291			u_usec = pps_ff[0];		/* 1 0 2 */
1292			v_usec = pps_ff[1] - pps_ff[2];
1293		} else {
1294			u_usec = pps_ff[2];		/* 1 2 0 */
1295			v_usec = pps_ff[1] - pps_ff[0];
1296		}
1297	}
1298
1299	/*
1300	 * Here the frequency dispersion (stability) is updated. If it
1301	 * is less than one-fourth the maximum (MAXFREQ), the frequency
1302	 * offset is updated as well, but clamped to the tolerance. It
1303	 * will be processed later by the hardclock() routine.
1304	 */
1305	v_usec = (v_usec >> 1) - pps_stabil;
1306	if (v_usec < 0)
1307		pps_stabil -= -v_usec >> PPS_AVG;
1308	else
1309		pps_stabil += v_usec >> PPS_AVG;
1310	if (pps_stabil > MAXFREQ >> 2) {
1311		pps_stbcnt++;
1312		time_status |= STA_PPSWANDER;
1313		return;
1314	}
1315	if (time_status & STA_PPSFREQ) {
1316		if (u_usec < 0) {
1317			pps_freq -= -u_usec >> PPS_AVG;
1318			if (pps_freq < -time_tolerance)
1319				pps_freq = -time_tolerance;
1320			u_usec = -u_usec;
1321		} else {
1322			pps_freq += u_usec >> PPS_AVG;
1323			if (pps_freq > time_tolerance)
1324				pps_freq = time_tolerance;
1325		}
1326	}
1327
1328	/*
1329	 * Here the calibration interval is adjusted. If the maximum
1330	 * time difference is greater than tick / 4, reduce the interval
1331	 * by half. If this is not the case for four consecutive
1332	 * intervals, double the interval.
1333	 */
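	/*
	 * Illustrative note (not in the original comment): since pps_shift
	 * is bounded by PPS_SHIFT and PPS_SHIFTMAX, the 4 s to 256 s
	 * interval range cited earlier corresponds to shifts of 2 through
	 * 8, and halving or doubling the interval below is just a
	 * decrement or increment of pps_shift.
	 */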
1334	if (u_usec << pps_shift > bigtick >> 2) {
1335		pps_intcnt = 0;
1336		if (pps_shift > PPS_SHIFT)
1337			pps_shift--;
1338	} else if (pps_intcnt >= 4) {
1339		pps_intcnt = 0;
1340		if (pps_shift < PPS_SHIFTMAX)
1341			pps_shift++;
1342	} else
1343		pps_intcnt++;
1344}
1345#endif /* PPS_SYNC */
1346