/*
 * ntp_loopfilter.c - implements the NTP loop filter algorithm
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#ifdef USE_SNPRINTB
# include <util.h>
#endif
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_unixtime.h"
#include "ntp_stdlib.h"
#include "timexsup.h"

#include <limits.h>
#include <stdio.h>
#include <ctype.h>

#include <signal.h>
#include <setjmp.h>

#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif /* KERNEL_PLL */

/*
 * This is an implementation of the clock discipline algorithm described
 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
 * hybrid phase/frequency-lock loop. A number of sanity checks are
 * included to protect against timewarps, timespikes and general mayhem.
 * All units are in s and s/s, unless noted otherwise.
 */
#define CLOCK_MAX	.128	/* default step threshold (s) */
#define CLOCK_MINSTEP	300.	/* default stepout threshold (s) */
#define CLOCK_PANIC	1000.	/* default panic threshold (s) */
#define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
#define CLOCK_PLL	16.	/* PLL loop gain (log2) */
#define CLOCK_AVG	8.	/* parameter averaging constant */
#define CLOCK_FLL	.25	/* FLL loop gain */
#define	CLOCK_FLOOR	.0005	/* startup offset floor (s) */
#define	CLOCK_ALLAN	11	/* Allan intercept (log2 s) */
#define CLOCK_LIMIT	30	/* poll-adjust threshold */
#define CLOCK_PGATE	4.	/* poll-adjust gate */
#define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
#define	FREQTOD(x)	((x) / 65536e6) /* NTP to double */
#define	DTOFREQ(x)	((int32)((x) * 65536e6)) /* double to NTP */
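/*
 * Conversion example (illustrative): the kernel expresses frequency in
 * PPM with a 16-bit fraction, hence the 65536e6 scale factor. A drift
 * of +10 PPM (1e-5 s/s) maps to DTOFREQ(1e-5) == 655360, and
 * FREQTOD(655360) recovers 1e-5 s/s.
 */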

/*
 * Clock discipline state machine. This is used to control the
 * synchronization behavior during initialization and following a
 * timewarp.
 *
 *	State	< step		> step		Comments
 *	========================================================
 *	NSET	FREQ		step, FREQ	freq not set
 *
 *	FSET	SYNC		step, SYNC	freq set
 *
 *	FREQ	if (mu < 900)	if (mu < 900)	set freq direct
 *		    ignore	    ignore
 *		else		else
 *		    freq, SYNC	    freq, step, SYNC
 *
 *	SYNC	SYNC		SPIK, ignore	adjust phase/freq
 *
 *	SPIK	SYNC		if (mu < 900)	adjust phase/freq
 *				    ignore
 *				step, SYNC
 */
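/*
 * For example (illustrative), a cold start without a frequency file
 * begins in NSET: the first update steps the clock if it is beyond the
 * step threshold and enters FREQ; once the stepout threshold has been
 * reached, the frequency is computed directly from the accumulated
 * offset and the machine settles into SYNC.
 */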
/*
 * Kernel PLL/PPS state machine. This is used with the kernel PLL
 * modifications described in the documentation.
 *
 * If kernel support for the ntp_adjtime() system call is available, the
 * ntp_control flag is set. The ntp_enable and kern_enable flags can be
 * set at configuration time or run time using ntpdc. If ntp_enable is
 * false, the discipline loop is unlocked and no corrections of any kind
 * are made. If both ntp_control and kern_enable are set, the kernel
 * support is used as described above; if false, the kernel is bypassed
 * entirely and the daemon discipline used instead.
 *
 * There have been three versions of the kernel discipline code. The
 * first (microkernel), now in Solaris, disciplines the clock in
 * microseconds. The second and third (nanokernel) discipline the clock
 * in nanoseconds. These versions are identified if the symbol STA_PLL
 * is present in the header file /usr/include/sys/timex.h. The third and
 * current version includes TAI offset and is identified by the symbol
 * NTP_API with value 4.
 *
 * Each PPS time/frequency discipline can be enabled by the atom driver
 * or another driver. If enabled, the STA_PPSTIME and STA_PPSFREQ bits
 * are set in the kernel status word; otherwise, these bits are cleared.
 * These bits are also cleared if the kernel reports an error.
 *
 * If an external clock is present, the clock driver sets STA_CLK in the
 * status word. When the local clock driver sees this bit, it updates
 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
 * set to zero, in which case the system clock is not adjusted. This is
 * also a signal for the external clock driver to discipline the system
 * clock. Unless specified otherwise, all times are in seconds.
 */
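/*
 * Illustrative sketch (not part of ntpd) of the read-only probe the
 * text above implies, assuming <sys/timex.h> defines struct timex and
 * the STA_* bits:
 *
 *	struct timex tx;
 *	memset(&tx, 0, sizeof(tx));
 *	tx.modes = 0;			// read-only query
 *	if (ntp_adjtime(&tx) < 0)
 *		;			// no kernel discipline available
 *	else if (tx.status & STA_NANO)
 *		;			// nanokernel, offsets in nanoseconds
 */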
/*
 * Program variables that can be tinkered.
 */
double	clock_max_back = CLOCK_MAX;	/* step threshold */
double	clock_max_fwd =  CLOCK_MAX;	/* step threshold */
double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
double	clock_panic = CLOCK_PANIC; /* panic threshold */
double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
u_char	allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */

/*
 * Program variables
 */
static double clock_offset;	/* offset */
double	clock_jitter;		/* offset jitter */
double	drift_comp;		/* frequency (s/s) */
static double init_drift_comp; /* initial frequency (PPM) */
double	clock_stability;	/* frequency stability (wander) (s/s) */
double	clock_codec;		/* audio codec frequency (samples/s) */
static u_long clock_epoch;	/* last update */
u_int	sys_tai;		/* TAI offset from UTC */
static int loop_started;	/* TRUE after LOOP_DRIFTINIT */
static void rstclock (int, double); /* transition function */
static double direct_freq(double); /* direct set frequency */
static void set_freq(double);	/* set frequency */
static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */
static char *this_file = NULL;

#ifdef KERNEL_PLL
static struct timex ntv;	/* ntp_adjtime() parameters */
int	pll_status;		/* last kernel status bits */
#if defined(STA_NANO) && NTP_API == 4
static u_int loop_tai;		/* last TAI offset */
#endif /* STA_NANO */
static	void	start_kern_loop(void);
static	void	stop_kern_loop(void);
#endif /* KERNEL_PLL */

/*
 * Clock state machine control flags
 */
int	ntp_enable = TRUE;	/* clock discipline enabled */
int	pll_control;		/* kernel support available */
int	kern_enable = TRUE;	/* kernel support enabled */
int	hardpps_enable;		/* kernel PPS discipline enabled */
int	ext_enable;		/* external clock enabled */
int	pps_stratum;		/* pps stratum */
int	kernel_status;		/* from ntp_adjtime */
int	force_step_once = FALSE; /* always step time once at startup (-G) */
int	mode_ntpdate = FALSE;	/* exit on first clock set (-q) */
int	freq_cnt;		/* initial frequency clamp */
int	freq_set;		/* initial set frequency switch */

/*
 * Clock state machine variables
 */
int	state = 0;		/* clock discipline state */
u_char	sys_poll;		/* time constant/poll (log2 s) */
int	tc_counter;		/* jiggle counter */
double	last_offset;		/* last offset (s) */

u_int	tc_twinlo;		/* TC step down not before this time */
u_int	tc_twinhi;		/* TC step up not before this time */

/*
 * Huff-n'-puff filter variables
 */
static double *sys_huffpuff;	/* huff-n'-puff filter */
static int sys_hufflen;		/* huff-n'-puff filter stages */
static int sys_huffptr;		/* huff-n'-puff filter pointer */
static double sys_mindly;	/* huff-n'-puff filter min delay */

#if defined(KERNEL_PLL)
/* Emacs cc-mode goes nuts if we split the next line... */
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    MOD_STATUS | MOD_TIMECONST)
#ifdef SIGSYS
static void pll_trap (int);	/* configuration trap */
static struct sigaction sigsys;	/* current sigaction status */
static struct sigaction newsigsys; /* new sigaction status */
static sigjmp_buf env;		/* environment var. for pll_trap() */
#endif /* SIGSYS */
#endif /* KERNEL_PLL */

static void
sync_status(const char *what, int ostatus, int nstatus)
{
	char obuf[256], nbuf[256], tbuf[1024];
#if defined(USE_SNPRINTB) && defined (STA_FMT)
	snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
	snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
#else
	snprintf(obuf, sizeof(obuf), "%04x", ostatus);
	snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
#endif
	snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
	report_event(EVNT_KERN, NULL, tbuf);
}
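/*
 * Without snprintb() the status words are logged as raw hex; for
 * example (illustrative, using the usual <sys/timex.h> bit values where
 * STA_PLL is 0x0001, STA_PPSFREQ 0x0002 and STA_PPSTIME 0x0004), a
 * "PPS enabled" transition might be reported as
 * "PPS enabled status: 0001 -> 0007".
 */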

/*
 * file_name - return pointer to non-relative portion of this C file pathname
 */
static char *file_name(void)
{
	if (this_file == NULL) {
	    (void)strncpy(relative_path, __FILE__, PATH_MAX);
	    for (this_file=relative_path;
		*this_file && ! isalnum((unsigned char)*this_file);
		this_file++) ;
	}
	return this_file;
}

/*
 * init_loopfilter - initialize loop filter data
 */
void
init_loopfilter(void)
{
	/*
	 * Initialize state variables.
	 */
	sys_poll = ntp_minpoll;
	clock_jitter = LOGTOD(sys_precision);
	freq_cnt = (int)clock_minstep;
}

#ifdef KERNEL_PLL
/*
 * ntp_adjtime_error_handler - process errors from ntp_adjtime
 */
static void
ntp_adjtime_error_handler(
	const char *caller,	/* name of calling function */
	struct timex *ptimex,	/* pointer to struct timex */
	int ret,		/* return value from ntp_adjtime */
	int saved_errno,	/* value of errno when ntp_adjtime returned */
	int pps_call,		/* ntp_adjtime call was PPS-related */
	int tai_call,		/* ntp_adjtime call was TAI-related */
	int line		/* line number of ntp_adjtime call */
	)
{
	char des[1024] = "";	/* Decoded Error Status */
	char *dbp, *ebp;

	dbp = des;
	ebp = dbp + sizeof(des);

	switch (ret) {
	    case -1:
		switch (saved_errno) {
		    case EFAULT:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx",
			    caller, file_name(), line,
			    (long)((void *)ptimex)
			);
		    break;
		    case EINVAL:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld",
			    caller, file_name(), line,
			    (long)(ptimex->constant)
			);
		    break;
		    case EPERM:
			if (tai_call) {
			    errno = saved_errno;
			    msyslog(LOG_ERR,
				"%s: ntp_adjtime(TAI) failed: %m",
				caller);
			}
			errno = saved_errno;
			msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m",
			    caller, file_name(), line
			);
		    break;
		    default:
			msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call",
			    caller, file_name(), line,
			    saved_errno
			);
		    break;
		}
	    break;
#ifdef TIME_OK
	    case TIME_OK: /* 0: synchronized, no leap second warning */
		/* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */
	    break;
#else
# warning TIME_OK is not defined
#endif
#ifdef TIME_INS
	    case TIME_INS: /* 1: positive leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second insertion scheduled");
	    break;
#else
# warning TIME_INS is not defined
#endif
#ifdef TIME_DEL
	    case TIME_DEL: /* 2: negative leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second deletion scheduled");
	    break;
#else
# warning TIME_DEL is not defined
#endif
#ifdef TIME_OOP
	    case TIME_OOP: /* 3: leap second in progress */
		msyslog(LOG_INFO, "kernel reports leap second in progress");
	    break;
#else
# warning TIME_OOP is not defined
#endif
#ifdef TIME_WAIT
	    case TIME_WAIT: /* 4: leap second has occurred */
		msyslog(LOG_INFO, "kernel reports leap second has occurred");
	    break;
#else
# warning TIME_WAIT is not defined
#endif
#ifdef TIME_ERROR
#if 0

from the reference implementation of ntp_gettime():

		// Hardware or software error
        if ((time_status & (STA_UNSYNC | STA_CLOCKERR))

	/*
         * PPS signal lost when either time or frequency synchronization
         * requested
         */
	|| (time_status & (STA_PPSFREQ | STA_PPSTIME)
	    && !(time_status & STA_PPSSIGNAL))

        /*
         * PPS jitter exceeded when time synchronization requested
         */
	|| (time_status & STA_PPSTIME &&
            time_status & STA_PPSJITTER)

        /*
         * PPS wander exceeded or calibration error when frequency
         * synchronization requested
         */
	|| (time_status & STA_PPSFREQ &&
            time_status & (STA_PPSWANDER | STA_PPSERROR)))
                return (TIME_ERROR);

or, from ntp_adjtime():

	if (  (time_status & (STA_UNSYNC | STA_CLOCKERR))
	    || (time_status & (STA_PPSFREQ | STA_PPSTIME)
		&& !(time_status & STA_PPSSIGNAL))
	    || (time_status & STA_PPSTIME
		&& time_status & STA_PPSJITTER)
	    || (time_status & STA_PPSFREQ
		&& time_status & (STA_PPSWANDER | STA_PPSERROR))
	   )
		return (TIME_ERROR);
#endif

	    case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */
				/* error (see status word) */

		if (ptimex->status & STA_UNSYNC)
			xsbprintf(&dbp, ebp, "%sClock Unsynchronized",
				 (*des) ? "; " : "");

		if (ptimex->status & STA_CLOCKERR)
		    xsbprintf(&dbp, ebp, "%sClock Error",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSFREQ)
		    xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but no PPS",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSTIME)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but no PPS signal",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSTIME
		    && ptimex->status & STA_PPSJITTER)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but PPS Jitter exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSWANDER)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but PPS Wander exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSERROR)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but Calibration error detected",
				  (*des) ? "; " : "");

		if (pps_call && !(ptimex->status & STA_PPSSIGNAL))
			report_event(EVNT_KERN, NULL,
			    "no PPS signal");
		DPRINTF(1, ("kernel loop status %#x (%s)\n",
			ptimex->status, des));
		/*
		 * This code may be returned when ntp_adjtime() has just
		 * been called for the first time, quite a while after
		 * startup, when ntpd just starts to discipline the kernel
		 * time. In this case the occurrence of this message
		 * can be pretty confusing.
		 *
		 * HMS: How about a message when we begin kernel processing:
		 *    Determining kernel clock state...
		 * so an initial TIME_ERROR message is less confusing,
		 * or skipping the first message (ugh),
		 * or ???
		 * msyslog(LOG_INFO, "kernel reports time synchronization lost");
		 */
		msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s",
			ptimex->status, des);
	    break;
#else
# warning TIME_ERROR is not defined
#endif
	    default:
		msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d",
		    caller, file_name(), line,
		    ret,
		    __func__, __LINE__
		);
	    break;
	}
	return;
}
#endif

/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1	update ignored: exceeds panic threshold
 * 0	update ignored: popcorn or exceeds step threshold
 * 1	clock was slewed
 * 2	clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdisp variable equal to the peer dispersion.
 */
int
local_clock(
	struct	peer *peer,	/* synch source peer structure */
	double	fp_offset	/* clock offset (s) */
	)
{
	int	rval;		/* return code */
	int	osys_poll;	/* old system poll */
	int	ntp_adj_ret;	/* returned by ntp_adjtime */
	double	mu;		/* interval since last update */
	double	clock_frequency; /* clock frequency */
	double	dtemp, etemp;	/* double temps */
	char	tbuf[80];	/* report buffer */

	(void)ntp_adj_ret; /* not always used below... */
	/*
	 * If the loop is opened or the NIST LOCKCLOCK is in use,
	 * monitor and record the offsets anyway in order to determine
	 * the open-loop response and then go home.
	 */
#ifndef LOCKCLOCK
	if (!ntp_enable)
#endif /* not LOCKCLOCK */
	{
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		return (0);
	}

#ifndef LOCKCLOCK
	/*
	 * If the clock is way off, panic is declared. The clock_panic
	 * defaults to 1000 s; if set to zero, the panic will never
	 * occur. The allow_panic defaults to FALSE, so the first panic
	 * will exit. It can be set TRUE by a command line option, in
	 * which case the clock will be set anyway and time marches on.
	 * But, allow_panic will be set FALSE when the update is less
	 * than the step threshold; so, subsequent panics will exit.
	 */
	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
	    !allow_panic) {
		snprintf(tbuf, sizeof(tbuf),
		    "%+.0f s; set clock manually within %.0f s.",
		    fp_offset, clock_panic);
		report_event(EVNT_SYSFAULT, NULL, tbuf);
		return (-1);
	}

	allow_panic = FALSE;

	/*
	 * This section simulates ntpdate. If the offset exceeds the
	 * step threshold (128 ms), step the clock to that time and
	 * exit. Otherwise, slew the clock to that time and exit. Note
	 * that the slew will persist and eventually complete beyond the
	 * life of this program. Note that while ntpdate is active, the
	 * terminal does not detach, so the termination message prints
	 * directly to the terminal.
	 */
	if (mode_ntpdate) {
		if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
		   || (-fp_offset > clock_max_back && clock_max_back > 0)) {
			step_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
			    fp_offset);
			printf("ntpd: time set %+.6fs\n", fp_offset);
		} else {
			adj_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
			    fp_offset);
			printf("ntpd: time slew %+.6fs\n", fp_offset);
		}
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		exit (0);
	}

	/*
	 * The huff-n'-puff filter finds the lowest delay in the recent
	 * interval. This is used to correct the offset by one-half the
	 * difference between the sample delay and minimum delay. This
	 * is most effective if the delays are highly asymmetric,
	 * clockhopping is avoided and the clock frequency wander is
	 * relatively small.
	 */
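	/*
	 * Worked example (illustrative): with a filter minimum delay of
	 * 10 ms and a sample delay of 30 ms, a positive offset is
	 * reduced by (30 - 10) / 2 = 10 ms and a negative offset is
	 * raised by the same amount, offsetting the bias of a path
	 * congested in only one direction.
	 */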
	if (sys_huffpuff != NULL) {
		if (peer->delay < sys_huffpuff[sys_huffptr])
			sys_huffpuff[sys_huffptr] = peer->delay;
		if (peer->delay < sys_mindly)
			sys_mindly = peer->delay;
		if (fp_offset > 0)
			dtemp = -(peer->delay - sys_mindly) / 2;
		else
			dtemp = (peer->delay - sys_mindly) / 2;
		fp_offset += dtemp;
		DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n",
			    sys_hufflen, sys_mindly, dtemp));
	}

	/*
	 * Clock state machine transition function which defines how the
	 * system reacts to large phase and frequency excursion. There
	 * are two main regimes: when the offset exceeds the step
	 * threshold (128 ms) and when it does not. Under certain
	 * conditions updates are suspended until the stepout threshold
	 * (900 s) is exceeded. See the documentation on how these
	 * thresholds interact with commands and command line options.
	 *
	 * Note the kernel is disabled if step is disabled or greater
	 * than 0.5 s or in ntpdate mode.
	 */
	osys_poll = sys_poll;
	if (sys_poll < peer->minpoll)
		sys_poll = peer->minpoll;
	if (sys_poll > peer->maxpoll)
		sys_poll = peer->maxpoll;
	mu = current_time - clock_epoch;
	clock_frequency = drift_comp;
	rval = 1;
	if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
	   || (-fp_offset > clock_max_back && clock_max_back > 0)
	   || force_step_once ) {
		if (force_step_once) {
			force_step_once = FALSE;  /* we want this only once after startup */
			msyslog(LOG_NOTICE, "Doing initial time step");
		}

		switch (state) {

		/*
		 * In SYNC state we ignore the first outlier and switch
		 * to SPIK state.
		 */
		case EVNT_SYNC:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_SPIK, NULL, tbuf);
			state = EVNT_SPIK;
			return (0);

		/*
		 * In FREQ state we ignore outliers and inliers. At the
		 * first outlier after the stepout threshold, compute
		 * the apparent frequency correction and step the phase.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = direct_freq(fp_offset);

			/* fall through to EVNT_SPIK */

		/*
		 * In SPIK state we ignore succeeding outliers until
		 * either an inlier is found or the stepout threshold is
		 * exceeded.
		 */
		case EVNT_SPIK:
			if (mu < clock_minstep)
				return (0);

			/* fall through to default */

		/*
		 * We get here by default in NSET and FSET states and
		 * from above in FREQ or SPIK states.
		 *
		 * In NSET state an initial frequency correction is not
		 * available, usually because the frequency file has not
		 * yet been written. Since the time is outside the step
		 * threshold, the clock is stepped. The frequency will
		 * be set directly following the stepout interval.
		 *
		 * In FSET state the initial frequency has been set from
		 * the frequency file. Since the time is outside the
		 * step threshold, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 15 minutes to set the clock for
		 * the first time.
		 *
		 * In FREQ and SPIK states the stepout threshold has
		 * expired and the phase is still above the step
		 * threshold. Note that a single spike greater than the
		 * step threshold is always suppressed, even with a
		 * long time constant.
		 */
		default:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_CLOCKRESET, NULL, tbuf);
			step_systime(fp_offset);
			reinit_timer();
			tc_counter = 0;
			clock_jitter = LOGTOD(sys_precision);
			rval = 2;
			if (state == EVNT_NSET) {
				rstclock(EVNT_FREQ, 0);
				return (rval);
			}
			break;
		}
		rstclock(EVNT_SYNC, 0);
	} else {
		/*
		 * The offset is less than the step threshold. Calculate
		 * the jitter as the exponentially weighted offset
		 * differences.
		 */
		etemp = SQUARE(clock_jitter);
		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
		    LOGTOD(sys_precision)));
		clock_jitter = SQRT(etemp + (dtemp - etemp) /
		    CLOCK_AVG);
		switch (state) {

		/*
		 * In NSET state this is the first update received and
		 * the frequency has not been initialized. Adjust the
		 * phase, but do not adjust the frequency until after
		 * the stepout threshold.
		 */
		case EVNT_NSET:
			adj_systime(fp_offset);
			rstclock(EVNT_FREQ, fp_offset);
			break;

		/*
		 * In FREQ state ignore updates until the stepout
		 * threshold. After that, compute the new frequency, but
		 * do not adjust the frequency until the holdoff counter
		 * decrements to zero.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = direct_freq(fp_offset);
			/* fall through */

		/*
		 * We get here by default in FSET, SPIK and SYNC states.
		 * Here compute the frequency update due to PLL and FLL
		 * contributions. Note, we avoid frequency discipline at
		 * startup until the initial transient has subsided.
		 */
		default:
			if (freq_cnt == 0) {

				/*
				 * The FLL and PLL frequency gain constants
				 * depend on the time constant and Allan
				 * intercept. The PLL is always used, but
				 * becomes ineffective above the Allan intercept
				 * where the FLL becomes effective.
				 */
				if (sys_poll >= allan_xpt)
					clock_frequency +=
					      (fp_offset - clock_offset)
					    / ( max(ULOGTOD(sys_poll), mu)
					       * CLOCK_FLL);

				/*
				 * The PLL frequency gain (numerator) depends on
				 * the minimum of the update interval and Allan
				 * intercept. This reduces the PLL gain when the
				 * FLL becomes effective.
				 */
				etemp = min(ULOGTOD(allan_xpt), mu);
				dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
				clock_frequency +=
				    fp_offset * etemp / (dtemp * dtemp);
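				/*
				 * Worked example (illustrative): with
				 * sys_poll = 6 (64 s), mu = 64 s and the
				 * default allan_xpt = 11 (2048 s), only
				 * the PLL term applies: etemp =
				 * min(2048, 64) = 64 and dtemp =
				 * 4 * 16 * 64 = 4096, so the frequency
				 * moves by fp_offset * 64 / 4096^2,
				 * about 3.8e-6 of the offset per update.
				 */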
			}
			rstclock(EVNT_SYNC, fp_offset);
			if (fabs(fp_offset) < CLOCK_FLOOR)
				freq_cnt = 0;
			break;
		}
	}

#ifdef KERNEL_PLL
	/*
	 * This code segment works when clock adjustments are made using
	 * precision time kernel support and the ntp_adjtime() system
	 * call. This support is available in Solaris 2.6 and later,
	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
	 * DECstation 5000/240 and Alpha AXP, additional kernel
	 * modifications provide a true microsecond clock and nanosecond
	 * clock, respectively.
	 *
	 * Important note: The kernel discipline is used only if the
	 * step threshold is less than 0.5 s, as anything higher can
	 * lead to overflow problems. This might occur if some misguided
	 * lad set the step threshold to something ridiculous.
	 */
	if (pll_control && kern_enable && freq_cnt == 0) {

		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		ZERO(ntv);
		if (ext_enable) {
			ntv.modes = MOD_STATUS;
		} else {
			ntv.modes = MOD_BITS;
			ntv.offset = var_long_from_dbl(
			    clock_offset, &ntv.modes);
#ifdef STA_NANO
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
			if (ntv.constant < 0)
				ntv.constant = 0;

			ntv.esterror = usec_long_from_dbl(
				clock_jitter);
			ntv.maxerror = usec_long_from_dbl(
				sys_rootdelay / 2 + sys_rootdisp);
			ntv.status = STA_PLL;

			/*
			 * Enable/disable the PPS if requested.
			 */
			if (hardpps_enable) {
				ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
				if (!(pll_status & STA_PPSTIME))
					sync_status("PPS enabled",
						pll_status,
						ntv.status);
			} else {
				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
				if (pll_status & STA_PPSTIME)
					sync_status("PPS disabled",
						pll_status,
						ntv.status);
			}
			if (sys_leap == LEAP_ADDSECOND)
				ntv.status |= STA_INS;
			else if (sys_leap == LEAP_DELSECOND)
				ntv.status |= STA_DEL;
		}

		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pps. In any case, fetch the kernel offset,
		 * frequency and jitter.
		 */
		ntp_adj_ret = ntp_adjtime(&ntv);
		/*
		 * A squeal is a return status < 0, or a state change.
		 */
		if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
			kernel_status = ntp_adj_ret;
			ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1);
		}
		pll_status = ntv.status;
		clock_offset = dbl_from_var_long(ntv.offset, ntv.status);
		clock_frequency = FREQTOD(ntv.freq);

		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
			clock_jitter = dbl_from_var_long(
				ntv.jitter, ntv.status);
		}

#if defined(STA_NANO) && NTP_API == 4
		/*
		 * If the TAI changes, update the kernel TAI.
		 */
		if (loop_tai != sys_tai) {
			loop_tai = sys_tai;
			ntv.modes = MOD_TAI;
			ntv.constant = sys_tai;
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
			}
		}
#endif /* STA_NANO */
	}
#endif /* KERNEL_PLL */

	/*
	 * Clamp the frequency within the tolerance range and calculate
	 * the frequency difference since the last update.
	 */
	if (fabs(clock_frequency) > NTP_MAXFREQ)
		msyslog(LOG_NOTICE,
		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
	dtemp = SQUARE(clock_frequency - drift_comp);
	if (clock_frequency > NTP_MAXFREQ)
		drift_comp = NTP_MAXFREQ;
	else if (clock_frequency < -NTP_MAXFREQ)
		drift_comp = -NTP_MAXFREQ;
	else
		drift_comp = clock_frequency;

	/*
	 * Calculate the wander as the exponentially weighted RMS
	 * frequency differences. Record the change for the frequency
	 * file update.
	 */
	etemp = SQUARE(clock_stability);
	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

	/*
	 * Here we adjust the time constant by comparing the current
	 * offset with the clock jitter. If the offset is less than the
	 * clock jitter times a constant, then the averaging interval is
	 * increased, otherwise it is decreased. A bit of hysteresis
	 * helps calm the dance. Works best using burst mode. Don't
	 * fiddle with the poll during the startup clamp period.
	 * [Bug 3615] also observe time gates to avoid eager stepping
	 */
	if (freq_cnt > 0) {
		tc_counter = 0;
		tc_twinlo  = current_time;
		tc_twinhi  = current_time;
	} else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
		tc_counter += sys_poll;
		if (tc_counter > CLOCK_LIMIT) {
			tc_counter = CLOCK_LIMIT;
			if (sys_poll < peer->maxpoll)
				sys_poll += (current_time >= tc_twinhi);
		}
	} else {
		tc_counter -= sys_poll << 1;
		if (tc_counter < -CLOCK_LIMIT) {
			tc_counter = -CLOCK_LIMIT;
			if (sys_poll > peer->minpoll)
				sys_poll -= (current_time >= tc_twinlo);
		}
	}

	/*
	 * If the time constant has changed, update the poll variables.
	 *
	 * [bug 3615] also set new time gates
	 * The time limit for stepping down will be half the TC interval
	 * or 60 secs from now, whichever is bigger, and the step up time
	 * limit will be half the TC interval after the step down limit.
	 *
	 * The 'sys_poll' value affects the servo loop gain, and
	 * overshooting sys_poll slows it down unnecessarily.  Stepping
	 * down too fast also has bad effects.
	 *
	 * The 'tc_counter' dance itself is something that *should*
	 * happen *once* every (1 << sys_poll) seconds, I think, but
	 * that's not how it works right now, and adding time guards
	 * seems the least intrusive way to handle this.
	 */
	if (osys_poll != sys_poll) {
		u_int deadband = 1u << (sys_poll - 1);
		tc_counter = 0;
		tc_twinlo  = current_time + max(deadband, 60);
		tc_twinhi  = tc_twinlo + deadband;
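		/*
		 * Illustrative example: at sys_poll = 10 the deadband
		 * is 512 s, so another step down is not allowed for
		 * max(512, 60) = 512 s and a step up not for a further
		 * 512 s after that.
		 */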
		poll_update(peer, sys_poll, 0);
	}

	/*
	 * Yibbidy, yibbbidy, yibbidy; that'h all folks.
	 */
	record_loop_stats(clock_offset, drift_comp, clock_jitter,
	    clock_stability, sys_poll);
	DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
		    clock_offset, clock_jitter, drift_comp * 1e6,
		    clock_stability * 1e6, sys_poll));
	return (rval);
#endif /* not LOCKCLOCK */
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
	void
	)
{
	double	offset_adj;
	double	freq_adj;

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive. During the startup clamp period,
	 * the time constant is clamped at 2.
	 */
	sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
	if (!ntp_enable || mode_ntpdate)
		return;
	/*
	 * Determine the phase adjustment. The gain factor (denominator)
	 * increases with poll interval, so is dominated by the FLL
	 * above the Allan intercept. Note the reduced time constant at
	 * startup.
	 */
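	/*
	 * For example (illustrative): in SYNC state with sys_poll = 6,
	 * the gain denominator is CLOCK_PLL * 64 = 1024, so roughly
	 * 0.1% of the remaining clock_offset is amortized each second;
	 * during the startup clamp the denominator is 16 * 2 = 32.
	 */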
	if (state != EVNT_SYNC) {
		offset_adj = 0.;
	} else if (freq_cnt > 0) {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
		freq_cnt--;
#ifdef KERNEL_PLL
	} else if (pll_control && kern_enable) {
		offset_adj = 0.;
#endif /* KERNEL_PLL */
	} else {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
	}

	/*
	 * If the kernel discipline is enabled the frequency correction
	 * drift_comp has already been engaged via ntp_adjtime() in
	 * set_freq().  Otherwise it is a component of the adj_systime()
	 * offset.
	 */
#ifdef KERNEL_PLL
	if (pll_control && kern_enable)
		freq_adj = 0.;
	else
#endif /* KERNEL_PLL */
		freq_adj = drift_comp;

	/* Bound absolute value of total adjustment to NTP_MAXFREQ. */
	if (offset_adj + freq_adj > NTP_MAXFREQ)
		offset_adj = NTP_MAXFREQ - freq_adj;
	else if (offset_adj + freq_adj < -NTP_MAXFREQ)
		offset_adj = -NTP_MAXFREQ - freq_adj;

	clock_offset -= offset_adj;
	/*
	 * Windows port adj_systime() must be called each second,
	 * even if the argument is zero, to ease emulation of
	 * adjtime() using Windows' slew API which controls the rate
	 * but does not automatically stop slewing when an offset
	 * has decayed to zero.
	 */
	DEBUG_INSIST(enable_panic_check == TRUE);
	enable_panic_check = FALSE;
	adj_systime(offset_adj + freq_adj);
	enable_panic_check = TRUE;
#endif /* LOCKCLOCK */
}


/*
 * Clock state machine. Enter new state and set state variables.
 */
static void
rstclock(
	int	trans,		/* new state */
	double	offset		/* new offset */
	)
{
	DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
		    current_time - clock_epoch, trans, sys_poll,
		    tc_counter));
	if (trans != state && trans != EVNT_FSET)
		report_event(trans, NULL, NULL);
#ifdef HAVE_WORKING_FORK
	if (trans != state && EVNT_SYNC == trans) {
		/*
		 * If our parent process is waiting for the
		 * first clock sync, send them home satisfied.
		 */
		if (daemon_pipe[1] != -1) {
			if (2 != write(daemon_pipe[1], "S\n", 2)) {
				msyslog(LOG_ERR, "daemon failed to notify parent ntpd (--wait-sync)");
			}
			close(daemon_pipe[1]);
			daemon_pipe[1] = -1;
		}
	}
#endif /* HAVE_WORKING_FORK */

	state = trans;
	last_offset = clock_offset = offset;
	clock_epoch = current_time;
}


/*
 * direct_freq - calculate frequency directly
 *
 * This is very carefully done. When the offset is first computed at the
 * first update, a residual frequency component results. Subsequently,
 * updates are suppressed until the end of the measurement interval while
 * the offset is amortized. At the end of the interval the frequency is
 * calculated from the current offset, residual offset, length of the
 * interval and residual frequency component. At the same time the
 * frequency file is armed for update at the next hourly stats.
 */
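/*
 * For example (illustrative): an offset of +0.03 s accumulated over a
 * 300 s measurement interval yields a direct frequency estimate of
 * 0.03 / 300 = 1e-4 s/s, i.e. +100 PPM.
 */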
static double
direct_freq(
	double	fp_offset
	)
{
	set_freq(fp_offset / (current_time - clock_epoch));

	return drift_comp;
}


/*
 * set_freq - set clock frequency correction
 *
 * Used to step the frequency correction at startup, possibly again once
 * the frequency is measured (that is, transitioning from EVNT_NSET to
 * EVNT_FSET), and finally to switch between daemon and kernel loop
 * discipline at runtime.
 *
 * When the kernel loop discipline is available but the daemon loop is
 * in use, the kernel frequency correction is disabled (set to 0) to
 * ensure drift_comp is applied by only one of the loops.
 */
static void
set_freq(
	double	freq		/* frequency update */
	)
{
	const char *	loop_desc;
	int ntp_adj_ret;

	(void)ntp_adj_ret; /* not always used below... */
	drift_comp = freq;
	loop_desc = "ntpd";
#ifdef KERNEL_PLL
	if (pll_control) {
		ZERO(ntv);
		ntv.modes = MOD_FREQUENCY;
		if (kern_enable) {
			loop_desc = "kernel";
			ntv.freq = DTOFREQ(drift_comp);
		}
		if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
		    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
		}
	}
#endif /* KERNEL_PLL */
	mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
	    drift_comp * 1e6);
}


#ifdef KERNEL_PLL
static void
start_kern_loop(void)
{
	static int atexit_done;
	int ntp_adj_ret;

	pll_control = TRUE;
	ZERO(ntv);
	ntv.modes = MOD_BITS;
	ntv.status = STA_PLL | STA_UNSYNC;
	ntv.maxerror = MAXDISPERSE * 1.0e6;
	ntv.esterror = MAXDISPERSE * 1.0e6;
	ntv.constant = sys_poll;
	/*             ^^^^^^^^ why is it that here constant is
	 * unconditionally set to sys_poll, whereas elsewhere it is
	 * modified depending on nanosecond vs. microsecond kernel?
	 */
	/* [bug 3699] make sure kernel PLL sees our initial drift compensation */
	if (freq_set) {
		ntv.modes |= MOD_FREQUENCY;
		ntv.freq = DTOFREQ(drift_comp);
	}
#ifdef SIGSYS
	/*
	 * Use sigsetjmp() to save state and then call ntp_adjtime(); if
	 * it fails, then pll_trap() will set pll_control FALSE before
	 * returning control using siglongjmp().
	 */
	newsigsys.sa_handler = pll_trap;
	newsigsys.sa_flags = 0;
	if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
		msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
		pll_control = FALSE;
	} else {
		if (sigsetjmp(env, 1) == 0) {
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
			}
		}
		if (sigaction(SIGSYS, &sigsys, NULL)) {
			msyslog(LOG_ERR,
			    "sigaction() restore SIGSYS: %m");
			pll_control = FALSE;
		}
	}
#else /* SIGSYS */
	if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
	    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
	}
#endif /* SIGSYS */

	/*
	 * Save the result status and light up an external clock
	 * if available.
	 */
	pll_status = ntv.status;
	if (pll_control) {
		if (!atexit_done) {
			atexit_done = TRUE;
			atexit(&stop_kern_loop);
		}
#ifdef STA_NANO
		if (pll_status & STA_CLK)
			ext_enable = TRUE;
#endif /* STA_NANO */
		report_event(EVNT_KERN, NULL,
		    "kernel time sync enabled");
	}
}
#endif	/* KERNEL_PLL */


#ifdef KERNEL_PLL
static void
stop_kern_loop(void)
{
	if (pll_control && kern_enable)
		report_event(EVNT_KERN, NULL,
		    "kernel time sync disabled");
}
#endif	/* KERNEL_PLL */


/*
 * select_loop() - choose kernel or daemon loop discipline.
 */
void
select_loop(
	int	use_kern_loop
	)
{
	if (kern_enable == use_kern_loop)
		return;
#ifdef KERNEL_PLL
	if (pll_control && !use_kern_loop)
		stop_kern_loop();
#endif
	kern_enable = use_kern_loop;
#ifdef KERNEL_PLL
	if (pll_control && use_kern_loop)
		start_kern_loop();
#endif
	/*
	 * If this loop selection change occurs after initial startup,
	 * call set_freq() to switch the frequency compensation to or
	 * from the kernel loop.
	 */
#ifdef KERNEL_PLL
	if (pll_control && loop_started)
		set_freq(drift_comp);
#endif
}


/*
 * huff-n'-puff filter
 */
void
huffpuff(void)
{
	int i;

	if (sys_huffpuff == NULL)
		return;

	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
	sys_huffpuff[sys_huffptr] = 1e9;
	sys_mindly = 1e9;
	for (i = 0; i < sys_hufflen; i++) {
		if (sys_huffpuff[i] < sys_mindly)
			sys_mindly = sys_huffpuff[i];
	}
}


/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
	int	item,
	double	freq
	)
{
	int	i;
	double	ftemp;

	DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq));
	switch (item) {

	/*
	 * We first assume the kernel supports the ntp_adjtime()
	 * syscall. If that syscall works, initialize the kernel time
	 * variables. Otherwise, continue leaving no harm behind.
	 */
	case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		if (mode_ntpdate)
			break;

		start_kern_loop();
#endif /* KERNEL_PLL */

		/*
		 * Initialize frequency if given; otherwise, begin frequency
		 * calibration phase.
		 */
		ftemp = init_drift_comp / 1e6;
		if (ftemp > NTP_MAXFREQ)
			ftemp = NTP_MAXFREQ;
		else if (ftemp < -NTP_MAXFREQ)
			ftemp = -NTP_MAXFREQ;
		set_freq(ftemp);
		if (freq_set)
			rstclock(EVNT_FSET, 0);
		else
			rstclock(EVNT_NSET, 0);
		loop_started = TRUE;
#endif /* LOCKCLOCK */
		break;

	case LOOP_KERN_CLEAR:
#if 0		/* XXX: needs more review, and how can we get here? */
#ifndef LOCKCLOCK
# ifdef KERNEL_PLL
		if (pll_control && kern_enable) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_STATUS;
			ntv.status = STA_UNSYNC;
			ntp_adjtime(&ntv);
			sync_status("kernel time sync disabled",
				pll_status,
				ntv.status);
		   }
# endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
#endif
		break;

	/*
	 * Tinker command variables for Ulrich Windl. Very dangerous.
	 */
	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
		allan_xpt = (u_char)freq;
		break;

	case LOOP_CODEC:	/* audio codec frequency (codec) */
		clock_codec = freq / 1e6;
		break;

	case LOOP_PHI:		/* dispersion threshold (dispersion) */
		clock_phi = freq / 1e6;
		break;

	case LOOP_FREQ:		/* initial frequency (freq) */
		init_drift_comp = freq;
		freq_set = 1;
		break;

	case LOOP_NOFREQ:	/* remove any initial drift comp spec */
		init_drift_comp = 0;
		freq_set = 0;
		break;

	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
		if (freq < HUFFPUFF)
			freq = HUFFPUFF;
		sys_hufflen = (int)(freq / HUFFPUFF);
		sys_huffpuff = eallocarray(sys_hufflen, sizeof(sys_huffpuff[0]));
		for (i = 0; i < sys_hufflen; i++)
			sys_huffpuff[i] = 1e9;
		sys_mindly = 1e9;
		break;

	case LOOP_PANIC:	/* panic threshold (panic) */
		clock_panic = freq;
		break;

	case LOOP_MAX:		/* step threshold (step) */
		clock_max_fwd = clock_max_back = freq;
		if (freq == 0 || freq > 0.5)
			select_loop(FALSE);
		break;

	case LOOP_MAX_BACK:	/* step threshold (step) */
		clock_max_back = freq;
		/*
		 * Leave using the kernel discipline code unless both
		 * limits are massive.  This assumes the reason to stop
		 * using it is that it's pointless, not that it goes wrong.
		 */
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MAX_FWD:	/* step threshold (step) */
		clock_max_fwd = freq;
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
		if (freq < CLOCK_MINSTEP)
			clock_minstep = CLOCK_MINSTEP;
		else
			clock_minstep = freq;
		break;

	case LOOP_TICK:		/* tick increment (tick) */
		set_sys_tick_precision(freq);
		break;

	case LOOP_LEAP:		/* not used, fall through */
	default:
		msyslog(LOG_NOTICE,
		    "loop_config: unsupported option %d", item);
	}
}


#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
	int arg
	)
{
	pll_control = FALSE;
	siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */
