/*
 * ntp_loopfilter.c - implements the NTP loop filter algorithm
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_unixtime.h"
#include "ntp_stdlib.h"

#include <stdio.h>
#include <ctype.h>

#include <signal.h>
#include <setjmp.h>

#if defined(VMS) && defined(VMS_LOCALUNIT)	/*wjm*/
#include "ntp_refclock.h"
#endif /* VMS */

#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif /* KERNEL_PLL */

/*
 * This is an implementation of the clock discipline algorithm described
 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
 * hybrid phase/frequency-lock loop. A number of sanity checks are
 * included to protect against timewarps, timespikes and general mayhem.
 * All units are in s and s/s, unless noted otherwise.
 */
#define CLOCK_MAX	.128	/* default step threshold (s) */
#define CLOCK_MINSTEP	900.	/* default stepout threshold (s) */
#define CLOCK_PANIC	1000.	/* default panic threshold (s) */
#define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
#define CLOCK_PLL	16.	/* PLL loop gain (log2) */
#define CLOCK_AVG	8.	/* parameter averaging constant */
#define CLOCK_FLL	.25	/* FLL loop gain */
#define	CLOCK_ALLAN	11	/* Allan intercept (log2 s) */
#define CLOCK_DAY	86400.	/* one day in seconds (s) */
#define CLOCK_JUNE	(CLOCK_DAY * 30) /* June in seconds (s) */
#define CLOCK_LIMIT	30	/* poll-adjust threshold */
#define CLOCK_PGATE	4.	/* poll-adjust gate */
#define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
#define	FREQTOD(x)	((x) / 65536e6) /* NTP to double */
#define	DTOFREQ(x)	((int32)((x) * 65536e6)) /* double to NTP */
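
/*
 * Illustrative sketch, not part of the original file: shows how the
 * FREQTOD() and DTOFREQ() macros above translate between the daemon's
 * frequency unit (s/s) and the 2^16-scaled parts-per-million value
 * exchanged with ntp_adjtime(). The LOOPFILTER_EXAMPLES guard is a
 * hypothetical macro used only to keep these sketches out of a normal
 * build.
 */
#ifdef LOOPFILTER_EXAMPLES
static void
example_freq_scaling(void)
{
	double	freq = 12.345e-6;	/* 12.345 PPM expressed in s/s */
	int32	kfreq;			/* kernel representation */

	/* 12.345e-6 * 65536e6 truncates to 809041 kernel units */
	kfreq = DTOFREQ(freq);

	/* converting back recovers the s/s value, within rounding */
	printf("s/s %.3e -> kernel %ld -> s/s %.3e\n",
	    freq, (long)kfreq, FREQTOD(kfreq));
}
#endif /* LOOPFILTER_EXAMPLES */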

/*
 * Clock discipline state machine. This is used to control the
 * synchronization behavior during initialization and following a
 * timewarp.
 *
 *	State	< step		> step		Comments
 *	========================================================
 *	NSET	FREQ		step, FREQ	freq not set
 *
 *	FSET	SYNC		step, SYNC	freq set
 *
 *	FREQ	if (mu < 900)	if (mu < 900)	set freq direct
 *		    ignore	    ignore
 *		else		else
 *		    freq, SYNC	    freq, step, SYNC
 *
 *	SYNC	SYNC		SPIK, ignore	adjust phase/freq
 *
 *	SPIK	SYNC		if (mu < 900)	adjust phase/freq
 *				    ignore
 *				else
 *				    step, SYNC
 */
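
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * LOOPFILTER_EXAMPLES guard): the transition table above restated as a
 * pure function. The names here are invented for the example; the real
 * logic is spread across local_clock() and rstclock() below and keyed
 * by the EVNT_* event codes.
 */
#ifdef LOOPFILTER_EXAMPLES
enum x_state { X_NSET, X_FSET, X_FREQ, X_SYNC, X_SPIK };

static enum x_state
example_transition(
	enum x_state st,	/* current state */
	int	big,		/* nonzero if |offset| > step threshold */
	double	mu		/* seconds since last state change */
	)
{
	switch (st) {
	case X_NSET:
		return (X_FREQ);	/* step first if big */
	case X_FSET:
		return (X_SYNC);	/* step first if big */
	case X_FREQ:
		return (mu < CLOCK_MINSTEP ? X_FREQ : X_SYNC);
	case X_SYNC:
		return (big ? X_SPIK : X_SYNC);
	case X_SPIK:
		if (!big)
			return (X_SYNC);
		return (mu < CLOCK_MINSTEP ? X_SPIK : X_SYNC);
	}
	return (st);
}
#endif /* LOOPFILTER_EXAMPLES */
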
/*
 * Kernel PLL/PPS state machine. This is used with the kernel PLL
 * modifications described in the documentation.
 *
 * If kernel support for the ntp_adjtime() system call is available, the
 * pll_control flag is set. The ntp_enable and kern_enable flags can be
 * set at configuration time or run time using ntpdc. If ntp_enable is
 * false, the discipline loop is unlocked and no corrections of any kind
 * are made. If both pll_control and kern_enable are set, the kernel
 * support is used as described above; if false, the kernel is bypassed
 * entirely and the daemon discipline is used instead.
 *
 * There have been three versions of the kernel discipline code. The
 * first (microkernel), now in Solaris, disciplines the clock in
 * microseconds. The second and third (nanokernel) discipline the clock
 * in nanoseconds. These versions are identified by the presence of the
 * symbol STA_PLL in the header file /usr/include/sys/timex.h. The third
 * and current version includes TAI offset and is identified by the
 * symbol NTP_API with value 4.
 *
 * Each PPS time/frequency discipline can be enabled by the atom driver
 * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are
 * set in the kernel status word; otherwise, these bits are cleared.
 * These bits are also cleared if the kernel reports an error.
 *
 * If an external clock is present, the clock driver sets STA_CLK in the
 * status word. When the local clock driver sees this bit, it updates
 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
 * set to zero, in which case the system clock is not adjusted. This is
 * also a signal for the external clock driver to discipline the system
 * clock. Unless specified otherwise, all times are in seconds.
 */
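
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * LOOPFILTER_EXAMPLES guard): how the control flags described above
 * select a correction path. It mirrors the tests made in local_clock()
 * and adj_host_clock() below but is a free-standing function written
 * for exposition only.
 */
#ifdef LOOPFILTER_EXAMPLES
static const char *
example_discipline_path(
	int	enable,		/* ntp_enable: discipline at all? */
	int	control,	/* pll_control: ntp_adjtime() works? */
	int	kernel		/* kern_enable: kernel use requested? */
	)
{
	if (!enable)
		return ("open loop: record offsets only");
	if (control && kernel)
		return ("kernel discipline via ntp_adjtime()");
	return ("daemon discipline via adj_systime()");
}
#endif /* LOOPFILTER_EXAMPLES */
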
/*
 * Program variables that can be tinkered.
 */
double	clock_max = CLOCK_MAX;	/* step threshold */
double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
double	clock_panic = CLOCK_PANIC; /* panic threshold */
double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
u_char	allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */

/*
 * Program variables
 */
static double clock_offset;	/* offset */
double	clock_jitter;		/* offset jitter */
double	drift_comp;		/* frequency (s/s) */
double	clock_stability;	/* frequency stability (wander) (s/s) */
double	clock_codec;		/* audio codec frequency (samples/s) */
static u_long clock_epoch;	/* last update */
u_int	sys_tai;		/* TAI offset from UTC */
static void rstclock (int, double); /* transition function */
static double direct_freq(double); /* direct set frequency */
static void set_freq(double);	/* set frequency */

#ifdef KERNEL_PLL
static struct timex ntv;	/* ntp_adjtime() parameters */
int	pll_status;		/* last kernel status bits */
#if defined(STA_NANO) && NTP_API == 4
static u_int loop_tai;		/* last TAI offset */
#endif /* STA_NANO */
#endif /* KERNEL_PLL */

/*
 * Clock state machine control flags
 */
int	ntp_enable = 1;		/* clock discipline enabled */
int	pll_control;		/* kernel support available */
int	kern_enable = 1;	/* kernel support enabled */
int	pps_enable;		/* kernel PPS discipline enabled */
int	ext_enable;		/* external clock enabled */
int	pps_stratum;		/* pps stratum */
int	allow_panic = FALSE;	/* allow panic correction */
int	mode_ntpdate = FALSE;	/* exit on first clock set */

/*
 * Clock state machine variables
 */
int	state;			/* clock discipline state */
u_char	sys_poll;		/* time constant/poll (log2 s) */
int	tc_counter;		/* jiggle counter */
double	last_offset;		/* last offset (s) */
static u_long last_step;	/* last clock step */

/*
 * Huff-n'-puff filter variables
 */
static double *sys_huffpuff;	/* huff-n'-puff filter */
static int sys_hufflen;		/* huff-n'-puff filter stages */
static int sys_huffptr;		/* huff-n'-puff filter pointer */
static double sys_mindly;	/* huff-n'-puff filter min delay */

#if defined(KERNEL_PLL)
/* Emacs cc-mode goes nuts if we split the next line... */
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    MOD_STATUS | MOD_TIMECONST)
#ifdef SIGSYS
static RETSIGTYPE pll_trap (int); /* configuration trap */
static struct sigaction sigsys;	/* current sigaction status */
static struct sigaction newsigsys; /* new sigaction status */
static sigjmp_buf env;		/* environment var. for pll_trap() */
#endif /* SIGSYS */
#endif /* KERNEL_PLL */

/*
 * init_loopfilter - initialize loop filter data
 */
void
init_loopfilter(void)
{
	/*
	 * Initialize state variables.
	 */
	sys_poll = ntp_minpoll;
	clock_jitter = LOGTOD(sys_precision);
}


/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1	update ignored: exceeds panic threshold
 * 0	update ignored: popcorn or exceeds step threshold
 * 1	clock was slewed
 * 2	clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdisp variable equal to the peer dispersion.
 */
int
local_clock(
	struct	peer *peer,	/* synch source peer structure */
	double	fp_offset	/* clock offset (s) */
	)
{
	int	rval;		/* return code */
	int	osys_poll;	/* old system poll */
	double	mu;		/* interval since last update */
	double	clock_frequency; /* clock frequency */
	double	dtemp, etemp;	/* double temps */
	char	tbuf[80];	/* report buffer */

	/*
	 * If the loop is open or the NIST LOCKCLOCK is in use,
	 * monitor and record the offsets anyway in order to determine
	 * the open-loop response and then go home.
	 */
#ifdef LOCKCLOCK
	return (0);

#else /* LOCKCLOCK */
	if (!ntp_enable) {
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		return (0);
	}

	/*
	 * If the clock is way off, a panic is declared. The
	 * clock_panic threshold defaults to 1000 s; if set to zero,
	 * the panic will never occur. The allow_panic flag defaults to
	 * FALSE, so the first panic will exit. It can be set TRUE by a
	 * command line option, in which case the clock will be set
	 * anyway and time marches on. But allow_panic will be set
	 * FALSE when an update is less than the step threshold, so
	 * subsequent panics will exit.
	 */
	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
	    !allow_panic) {
		snprintf(tbuf, sizeof(tbuf),
		    "%+.0f s; set clock manually within %.0f s.",
		    fp_offset, clock_panic);
		report_event(EVNT_SYSFAULT, NULL, tbuf);
		return (-1);
	}

	/*
	 * This section simulates ntpdate. If the offset exceeds the
	 * step threshold (128 ms), step the clock to that time and
	 * exit. Otherwise, slew the clock to that time and exit. Note
	 * that the slew will persist and eventually complete beyond the
	 * life of this program. Note that while ntpdate is active, the
	 * terminal does not detach, so the termination message prints
	 * directly to the terminal.
	 */
	if (mode_ntpdate) {
		if (fabs(fp_offset) > clock_max && clock_max > 0) {
			step_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
			    fp_offset);
			printf("ntpd: time set %+.6fs\n", fp_offset);
		} else {
			adj_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
			    fp_offset);
			printf("ntpd: time slew %+.6fs\n", fp_offset);
		}
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		exit (0);
	}

	/*
	 * The huff-n'-puff filter finds the lowest delay in the recent
	 * interval. This is used to correct the offset by one-half the
	 * difference between the sample delay and minimum delay. This
	 * is most effective if the delays are highly asymmetric,
	 * clockhopping is avoided and the clock frequency wander is
	 * relatively small. The correction is restated as a
	 * free-standing sketch following huffpuff() below.
	 */
	if (sys_huffpuff != NULL) {
		if (peer->delay < sys_huffpuff[sys_huffptr])
			sys_huffpuff[sys_huffptr] = peer->delay;
		if (peer->delay < sys_mindly)
			sys_mindly = peer->delay;
		if (fp_offset > 0)
			dtemp = -(peer->delay - sys_mindly) / 2;
		else
			dtemp = (peer->delay - sys_mindly) / 2;
		fp_offset += dtemp;
#ifdef DEBUG
		if (debug)
			printf(
		    "local_clock: size %d mindly %.6f huffpuff %.6f\n",
			    sys_hufflen, sys_mindly, dtemp);
#endif
	}

	/*
	 * Clock state machine transition function which defines how the
	 * system reacts to large phase and frequency excursions. There
	 * are two main regimes: when the offset exceeds the step
	 * threshold (128 ms) and when it does not. Under certain
	 * conditions updates are suspended until the stepout threshold
	 * (900 s) is exceeded. See the documentation on how these
	 * thresholds interact with commands and command line options.
	 *
	 * Note the kernel discipline is disabled if the step threshold
	 * is disabled or greater than 0.5 s, or in ntpdate mode.
	 */
	osys_poll = sys_poll;
	if (sys_poll < peer->minpoll)
		sys_poll = peer->minpoll;
	if (sys_poll > peer->maxpoll)
		sys_poll = peer->maxpoll;
	mu = current_time - clock_epoch;
	clock_frequency = drift_comp;
	rval = 1;
	if (fabs(fp_offset) > clock_max && clock_max > 0) {
		switch (state) {

		/*
		 * In SYNC state we ignore the first outlier and switch
		 * to SPIK state.
		 */
		case EVNT_SYNC:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_SPIK, NULL, tbuf);
			state = EVNT_SPIK;
			msyslog(LOG_NOTICE, "SYNC state ignoring %+.6f s",
			    fp_offset);
			return (0);

		/*
		 * In FREQ state we ignore outliers and inliers. At the
		 * first outlier after the stepout threshold, compute
		 * the apparent frequency correction and step the phase.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep) {
				msyslog(LOG_NOTICE, "FREQ state ignoring %+.6f s",
				    fp_offset);
				return (0);
			}
			clock_frequency = direct_freq(fp_offset);

			/* fall through to EVNT_SPIK */

		/*
		 * In SPIK state we ignore succeeding outliers until
		 * either an inlier is found or the stepout threshold is
		 * exceeded.
		 */
		case EVNT_SPIK:
			if (mu < clock_minstep) {
				msyslog(LOG_NOTICE, "SPIK state ignoring %+.6f s",
				    fp_offset);
				return (0);
			}

			/* fall through to default */

		/*
		 * We get here by default in NSET and FSET states and
		 * from above in FREQ or SPIK states.
		 *
		 * In NSET state an initial frequency correction is not
		 * available, usually because the frequency file has not
		 * yet been written. Since the time is outside the step
		 * threshold, the clock is stepped. The frequency will
		 * be set directly following the stepout interval.
		 *
		 * In FSET state the initial frequency has been set from
		 * the frequency file. Since the time is outside the
		 * step threshold, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 15 minutes to set the clock for
		 * the first time.
		 *
		 * In FREQ and SPIK states the stepout threshold has
		 * expired and the phase is still above the step
		 * threshold. Note that a single spike greater than the
		 * step threshold is always suppressed, even with a
		 * long time constant.
		 */
		default:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_CLOCKRESET, NULL, tbuf);
			step_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
			    fp_offset);
			reinit_timer();
			tc_counter = 0;
			clock_jitter = LOGTOD(sys_precision);
			rval = 2;
			if (state == EVNT_NSET || (current_time -
			    last_step) < clock_minstep * 2) {
				rstclock(EVNT_FREQ, 0);
				return (rval);
			}
			last_step = current_time;
			break;
		}
		rstclock(EVNT_SYNC, 0);
	} else {

		/*
		 * The offset is less than the step threshold. Calculate
		 * the jitter as the exponentially weighted offset
		 * differences. (The averaging formula is restated as a
		 * free-standing sketch following this function.)
		 */
		etemp = SQUARE(clock_jitter);
		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
		    LOGTOD(sys_precision)));
		clock_jitter = SQRT(etemp + (dtemp - etemp) /
		    CLOCK_AVG);
		switch (state) {

		/*
		 * In NSET state this is the first update received and
		 * the frequency has not been initialized. Adjust the
		 * phase, but do not adjust the frequency until after
		 * the stepout threshold.
		 */
		case EVNT_NSET:
			rstclock(EVNT_FREQ, fp_offset);
			break;

		/*
		 * In FSET state this is the first update received and
		 * the frequency has been initialized. Adjust the phase,
		 * but do not adjust the frequency until the next
		 * update.
		 */
		case EVNT_FSET:
			rstclock(EVNT_SYNC, fp_offset);
			break;

		/*
		 * In FREQ state ignore updates until the stepout
		 * threshold. After that, compute the new frequency, but
		 * do not adjust the phase or frequency until the next
		 * update.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep) {
				msyslog(LOG_NOTICE, "FREQ state ignoring %+.6f s",
				    fp_offset);
				return (0);
			}

			clock_frequency = direct_freq(fp_offset);
			rstclock(EVNT_SYNC, 0);
			break;

		/*
		 * We get here by default in SYNC and SPIK states. Here
		 * we compute the frequency update due to PLL and FLL
		 * contributions. (The update is restated as a
		 * free-standing sketch following this function.)
		 */
		default:
			allow_panic = FALSE;

			/*
			 * The FLL and PLL frequency gain constants
			 * depend on the time constant and Allan
			 * intercept. The PLL is always used, but
			 * becomes ineffective above the Allan intercept
			 * where the FLL becomes effective.
			 */
			if (sys_poll >= allan_xpt)
				clock_frequency += (fp_offset -
				    clock_offset) /
				    max(ULOGTOD(sys_poll), mu) *
				    CLOCK_FLL;

			/*
			 * The PLL frequency gain (numerator) depends on
			 * the minimum of the update interval and Allan
			 * intercept. This reduces the PLL gain when the
			 * FLL becomes effective.
			 */
			etemp = min(ULOGTOD(allan_xpt), mu);
			dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
			clock_frequency += fp_offset * etemp / (dtemp *
			    dtemp);
			rstclock(EVNT_SYNC, fp_offset);
			break;
		}
	}

#ifdef KERNEL_PLL
	/*
	 * This code segment works when clock adjustments are made using
	 * precision time kernel support and the ntp_adjtime() system
	 * call. This support is available in Solaris 2.6 and later,
	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
	 * DECstation 5000/240 and Alpha AXP, additional kernel
	 * modifications provide a true microsecond clock and nanosecond
	 * clock, respectively.
	 *
	 * Important note: The kernel discipline is used only if the
	 * step threshold is less than 0.5 s, as anything higher can
	 * lead to overflow problems. This might occur if some misguided
	 * lad set the step threshold to something ridiculous.
	 */
	if (pll_control && kern_enable) {

		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		memset(&ntv, 0, sizeof(ntv));
		if (ext_enable) {
			ntv.modes = MOD_STATUS;
		} else {
#ifdef STA_NANO
			ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
			ntv.modes = MOD_BITS;
#endif /* STA_NANO */
			if (clock_offset < 0)
				dtemp = -.5;
			else
				dtemp = .5;
#ifdef STA_NANO
			ntv.offset = (int32)(clock_offset * 1e9 +
			    dtemp);
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.offset = (int32)(clock_offset * 1e6 +
			    dtemp);
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
			ntv.esterror = (u_int32)(clock_jitter * 1e6);
			ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
			    sys_rootdisp) * 1e6);
			ntv.status = STA_PLL;

			/*
			 * Enable/disable the PPS if requested.
			 */
			if (pps_enable) {
				if (!(pll_status & STA_PPSTIME))
					report_event(EVNT_KERN,
					    NULL, "PPS enabled");
				ntv.status |= STA_PPSTIME | STA_PPSFREQ;
			} else {
				if (pll_status & STA_PPSTIME)
					report_event(EVNT_KERN,
					    NULL, "PPS disabled");
				ntv.status &= ~(STA_PPSTIME |
				    STA_PPSFREQ);
			}
			if (sys_leap == LEAP_ADDSECOND)
				ntv.status |= STA_INS;
			else if (sys_leap == LEAP_DELSECOND)
				ntv.status |= STA_DEL;
		}

		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pps. In any case, fetch the kernel offset,
		 * frequency and jitter.
		 */
		if (ntp_adjtime(&ntv) == TIME_ERROR) {
			if (!(ntv.status & STA_PPSSIGNAL))
				report_event(EVNT_KERN, NULL,
				    "PPS no signal");
		}
		pll_status = ntv.status;
#ifdef STA_NANO
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		clock_frequency = FREQTOD(ntv.freq);

		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
#ifdef STA_NANO
			clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
			clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
		}

#if defined(STA_NANO) && NTP_API == 4
		/*
		 * If the TAI changes, update the kernel TAI.
		 */
		if (loop_tai != sys_tai) {
			loop_tai = sys_tai;
			ntv.modes = MOD_TAI;
			ntv.constant = sys_tai;
			ntp_adjtime(&ntv);
		}
#endif /* STA_NANO */
	}
#endif /* KERNEL_PLL */

	/*
	 * Clamp the frequency within the tolerance range and calculate
	 * the frequency difference since the last update.
	 */
	if (fabs(clock_frequency) > NTP_MAXFREQ)
		msyslog(LOG_NOTICE,
		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
	dtemp = SQUARE(clock_frequency - drift_comp);
	if (clock_frequency > NTP_MAXFREQ)
		drift_comp = NTP_MAXFREQ;
	else if (clock_frequency < -NTP_MAXFREQ)
		drift_comp = -NTP_MAXFREQ;
	else
		drift_comp = clock_frequency;

	/*
	 * Calculate the wander as the exponentially weighted RMS
	 * frequency differences. Record the change for the frequency
	 * file update.
	 */
	etemp = SQUARE(clock_stability);
	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
	drift_file_sw = TRUE;

	/*
	 * Here we adjust the time constant by comparing the current
	 * offset with the clock jitter. If the offset is less than the
	 * clock jitter times a constant, then the averaging interval is
	 * increased; otherwise it is decreased. A bit of hysteresis
	 * helps calm the dance. Works best using burst mode.
	 */
	if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
		tc_counter += sys_poll;
		if (tc_counter > CLOCK_LIMIT) {
			tc_counter = CLOCK_LIMIT;
			if (sys_poll < peer->maxpoll) {
				tc_counter = 0;
				sys_poll++;
			}
		}
	} else {
		tc_counter -= sys_poll << 1;
		if (tc_counter < -CLOCK_LIMIT) {
			tc_counter = -CLOCK_LIMIT;
			if (sys_poll > peer->minpoll) {
				tc_counter = 0;
				sys_poll--;
			}
		}
	}

	/*
	 * If the time constant has changed, update the poll variables.
	 */
	if (osys_poll != sys_poll)
		poll_update(peer, sys_poll);

	/*
	 * Yibbidy, yibbidy, yibbidy; that's all folks.
	 */
	record_loop_stats(clock_offset, drift_comp, clock_jitter,
	    clock_stability, sys_poll);
#ifdef DEBUG
	if (debug)
		printf(
		    "local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
		    clock_offset, clock_jitter, drift_comp * 1e6,
		    clock_stability * 1e6, sys_poll);
#endif /* DEBUG */
	return (rval);
#endif /* LOCKCLOCK */
}
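
/*
 * Illustrative sketches, not part of the original file and enabled only
 * by the hypothetical LOOPFILTER_EXAMPLES guard: the two calculations
 * referenced from local_clock() above, restated as free-standing
 * functions with explicit parameters. ULOGTOD() is replaced by
 * ldexp(1., n) and the allan_xpt variable by its CLOCK_ALLAN default to
 * keep the sketches self-contained.
 */
#ifdef LOOPFILTER_EXAMPLES
#include <math.h>

/*
 * Exponentially weighted RMS average used for clock_jitter and
 * clock_stability: each sample moves the squared average 1/CLOCK_AVG
 * of the way toward the squared sample.
 */
static double
example_ewrms(
	double	avg,		/* previous RMS average */
	double	sample		/* new sample */
	)
{
	double	e = avg * avg;
	double	d = sample * sample;

	return (sqrt(e + (d - e) / CLOCK_AVG));
}

/*
 * Hybrid FLL/PLL frequency update computed in the default case of the
 * second switch in local_clock().
 */
static double
example_freq_update(
	double	freq,		/* current frequency (s/s) */
	double	offset,		/* new offset (s) */
	double	residual,	/* offset left from the last update (s) */
	double	mu,		/* interval since the last update (s) */
	int	poll		/* time constant (log2 s) */
	)
{
	double	tc = ldexp(1., poll);		/* 2^poll seconds */
	double	allan = ldexp(1., CLOCK_ALLAN);	/* Allan intercept (s) */
	double	dtemp;

	/* FLL term, effective only at or above the Allan intercept */
	if (poll >= CLOCK_ALLAN)
		freq += (offset - residual) / (mu > tc ? mu : tc) *
		    CLOCK_FLL;

	/* PLL term, with gain falling off as the time constant grows */
	dtemp = 4 * CLOCK_PLL * tc;
	freq += offset * (mu < allan ? mu : allan) / (dtemp * dtemp);
	return (freq);
}
#endif /* LOOPFILTER_EXAMPLES */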


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
	void
	)
{
	double	adjustment;

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive.
	 */
	sys_rootdisp += clock_phi;

#ifndef LOCKCLOCK
	/*
	 * If clock discipline is disabled or if the kernel is enabled,
	 * get out of Dodge quick.
	 */
	if (!ntp_enable || mode_ntpdate || (pll_control &&
	    kern_enable))
		return;

	/*
	 * Implement the phase and frequency adjustments. The gain
	 * factor (denominator) increases with poll interval, so is
	 * dominated by the FLL above the Allan intercept. (The
	 * amortization is restated as a sketch following this
	 * function.)
	 */
	adjustment = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
	clock_offset -= adjustment;
	adj_systime(adjustment + drift_comp);
#endif /* LOCKCLOCK */
}
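
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * LOOPFILTER_EXAMPLES guard): the once-per-second amortization done in
 * adj_host_clock() above. Each call hands 1/(CLOCK_PLL * 2^poll) of the
 * remaining offset to adj_systime() along with the frequency, so the
 * residual offset decays exponentially with a time constant of
 * CLOCK_PLL * 2^poll seconds.
 */
#ifdef LOOPFILTER_EXAMPLES
#include <math.h>

static double
example_amortize(
	double	*residual,	/* remaining offset (s), updated */
	double	freq,		/* frequency correction (s/s) */
	int	poll		/* time constant (log2 s) */
	)
{
	double	adjustment = *residual / (CLOCK_PLL * ldexp(1., poll));

	*residual -= adjustment;
	return (adjustment + freq);	/* slew to apply this second */
}
#endif /* LOOPFILTER_EXAMPLES */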


/*
 * Clock state machine. Enter new state and set state variables.
 */
static void
rstclock(
	int	trans,		/* new state */
	double	offset		/* new offset */
	)
{
#ifdef DEBUG
	if (debug > 1)
		printf("local_clock: mu %lu state %d poll %d count %d\n",
		    current_time - clock_epoch, trans, sys_poll,
		    tc_counter);
#endif
	if (trans != state && trans != EVNT_FSET)
		report_event(trans, NULL, NULL);
	state = trans;
	last_offset = clock_offset = offset;
	clock_epoch = current_time;
}

/*
 * direct_freq - calculate frequency directly
 *
 * This is very carefully done. When the offset is first computed at the
 * first update, a residual frequency component results. Subsequently,
 * updates are suppressed until the end of the measurement interval while
 * the offset is amortized. At the end of the interval the frequency is
 * calculated from the current offset, residual offset, length of the
 * interval and residual frequency component. At the same time the
 * frequency file is armed for update at the next hourly stats.
 */
static double
direct_freq(
	double	fp_offset
	)
{

#ifdef KERNEL_PLL
	/*
	 * If the kernel is enabled, we need the residual offset to
	 * calculate the frequency correction.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		ntp_adjtime(&ntv);
#ifdef STA_NANO
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		drift_comp = FREQTOD(ntv.freq);
	}
#endif /* KERNEL_PLL */
	set_freq((fp_offset - clock_offset) / (current_time -
	    clock_epoch) + drift_comp);
	wander_resid = 0;
	return (drift_comp);
}
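
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * LOOPFILTER_EXAMPLES guard): the measurement made by direct_freq()
 * above. After the stepout interval the frequency error is the change
 * in offset not already being amortized, divided by the length of the
 * interval, added to the frequency already in effect.
 */
#ifdef LOOPFILTER_EXAMPLES
static double
example_direct_freq(
	double	offset,		/* offset at the end of the interval (s) */
	double	residual,	/* offset still being amortized (s) */
	double	interval,	/* measurement interval (s) */
	double	freq		/* frequency already in effect (s/s) */
	)
{
	return ((offset - residual) / interval + freq);
}
#endif /* LOOPFILTER_EXAMPLES */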


/*
 * set_freq - set clock frequency
 */
static void
set_freq(
	double	freq		/* frequency update */
	)
{
	char	tbuf[80];

	drift_comp = freq;

#ifdef KERNEL_PLL
	/*
	 * If the kernel is enabled, update the kernel frequency.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		ntv.modes = MOD_FREQUENCY;
		ntv.freq = DTOFREQ(drift_comp);
		ntp_adjtime(&ntv);
		snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM",
		    drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
	} else {
		snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM",
		    drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
	}
#else /* KERNEL_PLL */
	snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp *
	    1e6);
	report_event(EVNT_FSET, NULL, tbuf);
#endif /* KERNEL_PLL */
}

/*
 * huffpuff - advance the huff-n'-puff filter to the next interval
 */
void
huffpuff()
{
	int i;

	if (sys_huffpuff == NULL)
		return;

	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
	sys_huffpuff[sys_huffptr] = 1e9;
	sys_mindly = 1e9;
	for (i = 0; i < sys_hufflen; i++) {
		if (sys_huffpuff[i] < sys_mindly)
			sys_mindly = sys_huffpuff[i];
	}
}
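
/*
 * Illustrative sketch, not part of the original file (hypothetical
 * LOOPFILTER_EXAMPLES guard): the huff-n'-puff correction applied in
 * local_clock(). Given a sample roundtrip delay and the minimum delay
 * remembered by the filter, the offset is corrected by half the delay
 * difference, against the sign of the offset, on the assumption that
 * the extra delay lies entirely on one leg of an asymmetric path.
 */
#ifdef LOOPFILTER_EXAMPLES
static double
example_huffpuff_correct(
	double	offset,		/* measured offset (s) */
	double	delay,		/* sample roundtrip delay (s) */
	double	mindly		/* minimum delay in the window (s) */
	)
{
	double	half = (delay - mindly) / 2;

	return (offset > 0 ? offset - half : offset + half);
}
#endif /* LOOPFILTER_EXAMPLES */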


/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
	int	item,
	double	freq
	)
{
	int i;

#ifdef DEBUG
	if (debug > 1)
		printf("loop_config: item %d freq %f\n", item, freq);
#endif
	switch (item) {

	/*
	 * We first assume the kernel supports the ntp_adjtime()
	 * syscall. If that syscall works, initialize the kernel time
	 * variables. Otherwise, continue leaving no harm behind.
	 */
	case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		if (mode_ntpdate)
			break;

		pll_control = 1;
		memset(&ntv, 0, sizeof(ntv));
		ntv.modes = MOD_BITS;
		ntv.status = STA_PLL;
		ntv.maxerror = MAXDISPERSE;
		ntv.esterror = MAXDISPERSE;
		ntv.constant = sys_poll;
#ifdef SIGSYS
		/*
		 * Use sigsetjmp() to save state and then call
		 * ntp_adjtime(); if it fails, then siglongjmp() is used
		 * to return control.
		 */
		newsigsys.sa_handler = pll_trap;
		newsigsys.sa_flags = 0;
		if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
			msyslog(LOG_ERR,
			    "sigaction() fails to save SIGSYS trap: %m");
			pll_control = 0;
		}
		if (sigsetjmp(env, 1) == 0)
			ntp_adjtime(&ntv);
		if ((sigaction(SIGSYS, &sigsys,
		    (struct sigaction *)NULL))) {
			msyslog(LOG_ERR,
			    "sigaction() fails to restore SIGSYS trap: %m");
			pll_control = 0;
		}
#else /* SIGSYS */
		ntp_adjtime(&ntv);
#endif /* SIGSYS */

		/*
		 * Save the result status and light up an external clock
		 * if available.
		 */
		pll_status = ntv.status;
		if (pll_control) {
#ifdef STA_NANO
			if (pll_status & STA_CLK)
				ext_enable = 1;
#endif /* STA_NANO */
			report_event(EVNT_KERN, NULL,
			    "kernel time sync enabled");
		}
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
		break;

	/*
	 * Initialize the frequency. If the frequency file is missing or
	 * broken, set the initial frequency to zero and set the state
	 * to NSET. Otherwise, set the initial frequency to the given
	 * value and the state to FSET.
	 */
	case LOOP_DRIFTCOMP:
#ifndef LOCKCLOCK
		if (freq > NTP_MAXFREQ || freq < -NTP_MAXFREQ) {
			set_freq(0);
			rstclock(EVNT_NSET, 0);
		} else {
			set_freq(freq);
			rstclock(EVNT_FSET, 0);
		}
#endif /* LOCKCLOCK */
		break;

	/*
	 * Disable the kernel at shutdown. The microkernel just abandons
	 * ship. The nanokernel carefully cleans up so applications can
	 * see this. Note the last programmed offset and frequency are
	 * left in place.
	 */
	case LOOP_KERN_CLEAR:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		if (pll_control && kern_enable) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_STATUS;
			ntv.status = STA_UNSYNC;
			ntp_adjtime(&ntv);
			report_event(EVNT_KERN, NULL,
			    "kernel time sync disabled");
		}
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
		break;

	/*
	 * Tinker command variables for Ulrich Windl. Very dangerous.
	 */
	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
		allan_xpt = (u_char)freq;
		break;

	case LOOP_CODEC:	/* audio codec frequency (codec) */
		clock_codec = freq / 1e6;
		break;

	case LOOP_PHI:		/* dispersion threshold (dispersion) */
		clock_phi = freq / 1e6;
		break;

	case LOOP_FREQ:		/* initial frequency (freq) */
		set_freq(freq / 1e6);
		rstclock(EVNT_FSET, 0);
		break;

	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
		if (freq < HUFFPUFF)
			freq = HUFFPUFF;
		sys_hufflen = (int)(freq / HUFFPUFF);
		sys_huffpuff = (double *)emalloc(sizeof(double) *
		    sys_hufflen);
		for (i = 0; i < sys_hufflen; i++)
			sys_huffpuff[i] = 1e9;
		sys_mindly = 1e9;
		break;

	case LOOP_PANIC:	/* panic threshold (panic) */
		clock_panic = freq;
		break;

	case LOOP_MAX:		/* step threshold (step) */
		clock_max = freq;
		if (clock_max == 0 || clock_max > 0.5)
			kern_enable = 0;
		break;

	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
		clock_minstep = freq;
		break;

	case LOOP_LEAP:		/* not used */
	default:
		msyslog(LOG_NOTICE,
		    "loop_config: unsupported option %d", item);
	}
}


#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
	int arg
	)
{
	pll_control = 0;
	siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */
