kern_timeout.c revision 17342
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.25 1996/06/23 17:40:42 bde Exp $
 */

/* Portions of this software are covered by the following: */
/******************************************************************************
 *                                                                            *
 * Copyright (c) David L. Mills 1993, 1994                                    *
 *                                                                            *
 * Permission to use, copy, modify, and distribute this software and its      *
 * documentation for any purpose and without fee is hereby granted, provided  *
 * that the above copyright notice appears in all copies and that both the    *
 * copyright notice and this permission notice appear in supporting           *
 * documentation, and that the name University of Delaware not be used in     *
 * advertising or publicity pertaining to distribution of the software        *
 * without specific, written prior permission.  The University of Delaware    *
 * makes no representations about the suitability of this software for any    *
 * purpose.  It is provided "as is" without express or implied warranty.      *
 *                                                                            *
 *****************************************************************************/

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/* Exported to machdep.c. */
struct callout *callfree, *callout;

static struct callout calltodo;

/* Some of these don't belong here, but it's easiest to concentrate them. */
static long cp_time[CPUSTATES];
long dk_seek[DK_NDRIVE];
static long dk_time[DK_NDRIVE];
long dk_wds[DK_NDRIVE];
long dk_wpms[DK_NDRIVE];
long dk_xfer[DK_NDRIVE];

int dk_busy;
int dk_ndrive = 0;
char dk_names[DK_NDRIVE][DK_NAMELEN];

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
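
/*
 * Illustrative example (numbers assumed, not taken from this file):
 * with stathz = 128 and profhz = 1024, psratio is 8.  While profiling,
 * the statistics clock is sped up to profhz and only every 8th tick is
 * counted toward statistics, so statistics accumulate at the same rate
 * as before.
 */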

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
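
/*
 * For example, BUMPTIME(&mono_time, tick) advances the monotonic clock
 * by one tick's worth of microseconds, carrying any tv_usec overflow
 * into tv_sec.  The macro handles at most one second of carry, so the
 * usec argument must stay well below 1000000.
 */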

int	stathz;
int	profhz;
static int profprocs;
int	ticks;
static int psdiv, pscnt;	/* prof => stat divider */
int psratio;			/* ratio: prof / stat */

volatile struct	timeval time;
volatile struct	timeval mono_time;

/*
 * Phase-lock loop (PLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error
 * bound growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
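
/*
 * Hypothetical userland sketch (not part of this file): these
 * variables are normally read and set through the ntp_adjtime(2)
 * system call.  With modes set to 0 the call is a read-only query:
 *
 *	struct timex tx;
 *
 *	tx.modes = 0;
 *	if (ntp_adjtime(&tx) < 0)
 *		err(1, "ntp_adjtime");
 *	printf("offset %ld us, freq %ld (scaled ppm)\n",
 *	    tx.offset, tx.freq);
 */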
int time_status = STA_UNSYNC;	/* clock status bits */
int time_state = TIME_OK;	/* clock state */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable at each tick of
 * the clock.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed at each timer interrupt.
 *
 * time_reftime is the second's portion of the system time on the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
static long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
static long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
static long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS
 * discipline code is configured (PPS_SYNC). The scale factors are
 * defined in the timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion measured by this
 * filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion measured by
 * this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_count counts the seconds of the calibration interval, the
 * duration of which is pps_shift in powers of two.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* pps time dispersion (jitter) (us) */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_ff[] = {0, 0, 0};	/* frequency offset median filter */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

/* XXX none of this stuff works under FreeBSD */
#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock (HIGHBALL or TPRO) is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset; /* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. This is used to implement an adaptive-parameter,
 * first-order, type-II phase-lock loop. The code computes new time and
 * frequency offsets each time it is called. The hardclock() routine
 * amortizes these offsets at each tick interrupt. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different from the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For default SHIFT_UPDATE = 12, the offset is limited to +-512 ms, the
 * maximum interval between updates is 4096 s and the maximum frequency
 * offset is +-31.25 ms/s.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (mtemp > MAXSEC)
		mtemp = 0;

	/* ugly multiply should be replaced */
	if (ltemp < 0)
		time_freq -= (-ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	else
		time_freq += (ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}
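
/*
 * Worked example (illustrative numbers, not from this file): with
 * time_constant = 0 and no PPS signal, hardupdate(1000) sets
 * time_offset to 1000 << SHIFT_UPDATE.  At each subsequent rollover of
 * the second, hardclock() transfers time_offset >> (SHIFT_KG +
 * time_constant) back out of time_offset, so the phase error decays
 * exponentially toward zero instead of being slewed all at once.
 */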



/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int needsoft;

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and, if negative, represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.
	 */
	ticks++;
	{
		int time_update;
		struct timeval newtime = time;
		long ltemp;

		if (timedelta == 0) {
			time_update = CPU_THISTICKLEN(tick);
		} else {
			time_update = CPU_THISTICKLEN(tick) + tickdelta;
			timedelta -= tickdelta;
		}
		BUMPTIME(&mono_time, time_update);

		/*
		 * Compute the phase adjustment. If the low-order bits
		 * (time_phase) of the update overflow, bump the high-order bits
		 * (time_update).
		 */
		time_phase += time_adj;
		if (time_phase <= -FINEUSEC) {
		  ltemp = -time_phase >> SHIFT_SCALE;
		  time_phase += ltemp << SHIFT_SCALE;
		  time_update -= ltemp;
		}
		else if (time_phase >= FINEUSEC) {
		  ltemp = time_phase >> SHIFT_SCALE;
		  time_phase -= ltemp << SHIFT_SCALE;
		  time_update += ltemp;
		}

		newtime.tv_usec += time_update;
		/*
		 * On rollover of the second the phase adjustment to be used for
		 * the next second is calculated. Also, the maximum error is
		 * increased by the tolerance. If the PPS frequency discipline
		 * code is present, the phase is increased to compensate for the
		 * CPU clock oscillator frequency error.
		 *
		 * With SHIFT_SCALE = 23, the maximum frequency adjustment is
		 * +-256 us per tick, or 25.6 ms/s at a clock frequency of 100
		 * Hz. The time contribution is shifted right a minimum of two
		 * bits, while the frequency contribution is a right shift.
		 * Thus, overflow is prevented if the frequency contribution is
		 * limited to half the maximum or 15.625 ms/s.
		 */
		if (newtime.tv_usec >= 1000000) {
		  newtime.tv_usec -= 1000000;
		  newtime.tv_sec++;
		  time_maxerror += time_tolerance >> SHIFT_USEC;
		  if (time_offset < 0) {
		    ltemp = -time_offset >>
		      (SHIFT_KG + time_constant);
		    time_offset += ltemp;
		    time_adj = -ltemp <<
		      (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		  } else {
		    ltemp = time_offset >>
		      (SHIFT_KG + time_constant);
		    time_offset -= ltemp;
		    time_adj = ltemp <<
		      (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		  }
#ifdef PPS_SYNC
		  /*
		   * Gnaw on the watchdog counter and update the frequency
		   * computed by the pll and the PPS signal.
		   */
		  pps_valid++;
		  if (pps_valid == PPS_VALID) {
		    pps_jitter = MAXTIME;
		    pps_stabil = MAXFREQ;
		    time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				     STA_PPSWANDER | STA_PPSERROR);
		  }
		  ltemp = time_freq + pps_freq;
#else
		  ltemp = time_freq;
#endif /* PPS_SYNC */
		  if (ltemp < 0)
		    time_adj -= -ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
		  else
		    time_adj += ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

		  /*
		   * When the CPU clock oscillator frequency is not a
		   * power of two in Hz, the SHIFT_HZ is only an
		   * approximate scale factor. In the SunOS kernel, this
		   * results in a PLL gain factor of 1/1.28 = 0.78 of what it
		   * should be. In the following code the overall gain is
		   * increased by a factor of 1.25, which results in a
		   * residual error less than 3 percent.
		   */
		  /* Same thing applies for FreeBSD --GAW */
		  if (hz == 100) {
		    if (time_adj < 0)
		      time_adj -= -time_adj >> 2;
		    else
		      time_adj += time_adj >> 2;
		  }

		  /* XXX - this is really bogus, but can't be fixed until
		     xntpd's idea of the system clock is fixed to know how
		     the user wants leap seconds handled; in the mean time,
		     we assume that users of NTP are running without proper
		     leap second support (this is now the default anyway) */
		  /*
		   * Leap second processing. If in leap-insert state at
		   * the end of the day, the system clock is set back one
		   * second; if in leap-delete state, the system clock is
		   * set ahead one second. The microtime() routine or
		   * external clock driver will ensure that reported time
		   * is always monotonic. The ugly divides should be
		   * replaced.
		   */
		  switch (time_state) {

		  case TIME_OK:
		    if (time_status & STA_INS)
		      time_state = TIME_INS;
		    else if (time_status & STA_DEL)
		      time_state = TIME_DEL;
		    break;

		  case TIME_INS:
		    if (newtime.tv_sec % 86400 == 0) {
		      newtime.tv_sec--;
		      time_state = TIME_OOP;
		    }
		    break;

		  case TIME_DEL:
		    if ((newtime.tv_sec + 1) % 86400 == 0) {
		      newtime.tv_sec++;
		      time_state = TIME_WAIT;
		    }
		    break;

		  case TIME_OOP:
		    time_state = TIME_WAIT;
		    break;

		  case TIME_WAIT:
		    if (!(time_status & (STA_INS | STA_DEL)))
		      time_state = TIME_OK;
		  }
		}
		CPU_CLOCKUPDATE(&time, &newtime);
	}

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout; rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
 */
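
/*
 * Usage sketch (hypothetical handler and argument names):
 *
 *	timeout(foo_tick, sc, hz);	(run foo_tick(sc) in ~1 second)
 *	...
 *	untimeout(foo_tick, sc);	(cancel it before it fires)
 *
 * Since no handle is returned, the identical function/argument pair
 * must be passed to untimeout().
 */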
void
timeout(ftn, arg, ticks)
	timeout_t ftn;
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	timeout_t ftn;
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		printf("hzto: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return (ticks);
}
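
/*
 * Usage sketch (hypothetical): to schedule a callout at an absolute
 * time rather than after a relative number of ticks:
 *
 *	struct timeval when;	(filled in by the caller)
 *
 *	timeout(foo_tick, sc, hzto(&when));
 *
 * where foo_tick and sc are again placeholder names.
 */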

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
#endif
	register struct proc *p;
	register int i;
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;

	if (CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 *
	 * XXX	should either run a linked list of drives, or (better)
	 *	grab timestamps in the start & done code.
	 */
	for (i = 0; i < DK_NDRIVE; i++)
		if (dk_busy & (1 << i))
			dk_time[i]++;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}

		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
			rss = vm->vm_pmap.pm_stats.resident_count *
			      PAGE_SIZE / 1024;
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");
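
/*
 * Hypothetical userland sketch (not part of this file): the structure
 * exported above is fetched with sysctl(3), e.g.
 *
 *	struct clockinfo ci;
 *	size_t len = sizeof(ci);
 *	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *
 *	if (sysctl(mib, 2, &ci, &len, NULL, 0) == 0)
 *		printf("hz %d, tick %d us, stathz %d, profhz %d\n",
 *		    ci.hz, ci.tick, ci.stathz, ci.profhz);
 */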

/*#ifdef PPS_SYNC*/
#if 0
/* This code is completely bogus; if anybody ever wants to use it, get
 * the current version from Dave Mills. */

/*
 * hardpps() - discipline CPU clock oscillator to external pps signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It integrates successive
 * phase differences between the two oscillators and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware
 * counter value at the designated PPS signal transition.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= ntp_pll.ybar;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	ntp_pll.calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> ntp_pll.shift);
	else
		v_usec = v_usec >> ntp_pll.shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > ntp_pll.tolerance || v_usec < -ntp_pll.tolerance) {
		ntp_pll.jitcnt++;
		ntp_pll.shift = NTP_PLL.SHIFT;
		pps_dispinc = PPS_DISPINC;
		ntp_pll.intcnt = 0;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * signal. The median sample becomes the offset estimate; the
	 * difference between the other two samples becomes the
	 * dispersion estimate.
	 */
	pps_mf[2] = pps_mf[1];
	pps_mf[1] = pps_mf[0];
	pps_mf[0] = v_usec;
	if (pps_mf[0] > pps_mf[1]) {
		if (pps_mf[1] > pps_mf[2]) {
			u_usec = pps_mf[1];		/* 0 1 2 */
			v_usec = pps_mf[0] - pps_mf[2];
		} else if (pps_mf[2] > pps_mf[0]) {
			u_usec = pps_mf[0];		/* 2 0 1 */
			v_usec = pps_mf[2] - pps_mf[1];
		} else {
			u_usec = pps_mf[2];		/* 0 2 1 */
			v_usec = pps_mf[0] - pps_mf[1];
		}
	} else {
		if (pps_mf[1] < pps_mf[2]) {
			u_usec = pps_mf[1];		/* 2 1 0 */
			v_usec = pps_mf[2] - pps_mf[0];
		} else if (pps_mf[2] < pps_mf[0]) {
			u_usec = pps_mf[0];		/* 1 0 2 */
			v_usec = pps_mf[1] - pps_mf[2];
		} else {
			u_usec = pps_mf[2];		/* 1 2 0 */
			v_usec = pps_mf[1] - pps_mf[0];
		}
	}

	/*
	 * Here the dispersion average is updated. If it is less than
	 * the threshold pps_dispmax, the frequency average is updated
	 * as well, but clamped to the tolerance.
	 */
	v_usec = (v_usec >> 1) - ntp_pll.disp;
	if (v_usec < 0)
		ntp_pll.disp -= -v_usec >> PPS_AVG;
	else
		ntp_pll.disp += v_usec >> PPS_AVG;
	if (ntp_pll.disp > pps_dispmax) {
		ntp_pll.discnt++;
		return;
	}
	if (u_usec < 0) {
		ntp_pll.ybar -= -u_usec >> PPS_AVG;
		if (ntp_pll.ybar < -ntp_pll.tolerance)
			ntp_pll.ybar = -ntp_pll.tolerance;
		u_usec = -u_usec;
	} else {
		ntp_pll.ybar += u_usec >> PPS_AVG;
		if (ntp_pll.ybar > ntp_pll.tolerance)
			ntp_pll.ybar = ntp_pll.tolerance;
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick/4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << ntp_pll.shift > bigtick >> 2) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift > NTP_PLL.SHIFT) {
			ntp_pll.shift--;
			pps_dispinc <<= 1;
		}
	} else if (ntp_pll.intcnt >= 4) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift < NTP_PLL.SHIFTMAX) {
			ntp_pll.shift++;
			pps_dispinc >>= 1;
		}
	} else
		ntp_pll.intcnt++;
}
#endif /* PPS_SYNC */