/*
 * ntp_loopfilter.c - implements the NTP loop filter algorithm
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#ifdef USE_SNPRINTB
# include <util.h>
#endif
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_unixtime.h"
#include "ntp_stdlib.h"
#include "timexsup.h"

#include <limits.h>
#include <stdio.h>
#include <ctype.h>

#include <signal.h>
#include <setjmp.h>

#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif /* KERNEL_PLL */

/*
 * This is an implementation of the clock discipline algorithm described
 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
 * hybrid phase/frequency-lock loop. A number of sanity checks are
 * included to protect against timewarps, timespikes and general mayhem.
 * All units are in s and s/s, unless noted otherwise.
 */
#define CLOCK_MAX	.128	/* default step threshold (s) */
#define CLOCK_MINSTEP	300.	/* default stepout threshold (s) */
#define CLOCK_PANIC	1000.	/* default panic threshold (s) */
#define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
#define CLOCK_PLL	16.	/* PLL loop gain (log2) */
#define CLOCK_AVG	8.	/* parameter averaging constant */
#define CLOCK_FLL	.25	/* FLL loop gain */
#define	CLOCK_FLOOR	.0005	/* startup offset floor (s) */
#define	CLOCK_ALLAN	11	/* Allan intercept (log2 s) */
#define CLOCK_LIMIT	30	/* poll-adjust threshold */
#define CLOCK_PGATE	4.	/* poll-adjust gate */
#define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
#define	FREQTOD(x)	((x) / 65536e6) /* NTP to double */
#define	DTOFREQ(x)	((int32)((x) * 65536e6)) /* double to NTP */
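
/*
 * Illustrative sketch (not compiled): the kernel exchanges frequency as
 * a 32-bit value in units of 2^-16 PPM, which is where the 65536e6
 * scale factor above comes from.  A daemon frequency of +5 PPM
 * round-trips through the macros as follows; the variable names exist
 * only for this example.
 */
#if 0
	double	f = 5e-6;		/* +5 PPM expressed in s/s */
	int32	k = DTOFREQ(f);		/* 5e-6 * 65536e6 == 327680 */
	double	g = FREQTOD(k);		/* 327680 / 65536e6 == 5e-6 s/s */
#endif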

/*
 * Clock discipline state machine. This is used to control the
 * synchronization behavior during initialization and following a
 * timewarp.
 *
 *	State	< step		> step		Comments
 *	========================================================
 *	NSET	FREQ		step, FREQ	freq not set
 *
 *	FSET	SYNC		step, SYNC	freq set
 *
 *	FREQ	if (mu < 900)	if (mu < 900)	set freq direct
 *		    ignore	    ignore
 *		else		else
 *		    freq, SYNC	    freq, step, SYNC
 *
 *	SYNC	SYNC		SPIK, ignore	adjust phase/freq
 *
 *	SPIK	SYNC		if (mu < 900)	adjust phase/freq
 *				    ignore
 *				step, SYNC
 */
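
/*
 * Reading the table: for example, a daemon starting in NSET (no
 * frequency file) moves to FREQ on its first update, stepping first if
 * the offset exceeds the step threshold; FREQ then ignores updates
 * until the stepout threshold has passed so the frequency can be
 * measured directly.  A rough, illustrative sketch of the "< step"
 * column using the EVNT_* codes that encode these states below (the
 * offset variable is hypothetical):
 */
#if 0
	switch (state) {
	case EVNT_NSET:	rstclock(EVNT_FREQ, offset);	break;	/* freq not set */
	case EVNT_FSET:	rstclock(EVNT_SYNC, offset);	break;	/* freq set */
	case EVNT_FREQ:	/* after the stepout: set freq directly, then ... */
	case EVNT_SPIK:	rstclock(EVNT_SYNC, offset);	break;	/* resume SYNC */
	case EVNT_SYNC:	/* adjust phase/frequency, stay in SYNC */	break;
	}
#endif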
/*
 * Kernel PLL/PPS state machine. This is used with the kernel PLL
 * modifications described in the documentation.
 *
 * If kernel support for the ntp_adjtime() system call is available, the
 * pll_control flag is set. The ntp_enable and kern_enable flags can be
 * set at configuration time or run time using ntpdc. If ntp_enable is
 * false, the discipline loop is unlocked and no corrections of any kind
 * are made. If both pll_control and kern_enable are set, the kernel
 * support is used as described above; if false, the kernel is bypassed
 * entirely and the daemon discipline is used instead.
 *
 * There have been three versions of the kernel discipline code. The
 * first (microkernel), now in Solaris, disciplines the clock in
 * microseconds. The second and third (nanokernel) discipline the clock
 * in nanoseconds. These versions are identified by the symbol STA_PLL
 * being present in the header file /usr/include/sys/timex.h. The third
 * and current version includes the TAI offset and is identified by the
 * symbol NTP_API with value 4.
 *
 * Each PPS time/frequency discipline can be enabled by the atom driver
 * or another driver. If enabled, the STA_PPSTIME and STA_PPSFREQ bits
 * are set in the kernel status word; otherwise, these bits are cleared.
 * These bits are also cleared if the kernel reports an error.
 *
 * If an external clock is present, the clock driver sets STA_CLK in the
 * status word. When the local clock driver sees this bit, it updates
 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
 * set to zero, in which case the system clock is not adjusted. This is
 * also a signal for the external clock driver to discipline the system
 * clock. Unless specified otherwise, all times are in seconds.
 */
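
/*
 * Hedged sketch of the external-clock case just described (not part of
 * the build): when the driver has set STA_CLK, the kernel update asks
 * only for status and leaves STA_PLL unrequested, so ntp_adjtime()
 * does not steer the system clock itself.
 */
#if 0
	struct timex tx;

	ZERO(tx);
	tx.modes = MOD_STATUS;		/* report status; STA_PLL not requested */
	if (ntp_adjtime(&tx) < 0)
		msyslog(LOG_ERR, "ntp_adjtime: %m");
#endif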
/*
 * Program variables that can be tinkered.
 */
double	clock_max_back = CLOCK_MAX;	/* step threshold */
double	clock_max_fwd =  CLOCK_MAX;	/* step threshold */
double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
double	clock_panic = CLOCK_PANIC; /* panic threshold */
double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
u_char	allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */

/*
 * Program variables
 */
static double clock_offset;	/* offset */
double	clock_jitter;		/* offset jitter */
double	drift_comp;		/* frequency (s/s) */
static double init_drift_comp; /* initial frequency (PPM) */
double	clock_stability;	/* frequency stability (wander) (s/s) */
double	clock_codec;		/* audio codec frequency (samples/s) */
static u_long clock_epoch;	/* last update */
u_int	sys_tai;		/* TAI offset from UTC */
static int loop_started;	/* TRUE after LOOP_DRIFTINIT */
static void rstclock (int, double); /* transition function */
static double direct_freq(double); /* direct set frequency */
static void set_freq(double);	/* set frequency */
#ifndef PATH_MAX
# define PATH_MAX MAX_PATH
#endif
static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */
static char *this_file = NULL;

#ifdef KERNEL_PLL
static struct timex ntv;	/* ntp_adjtime() parameters */
int	pll_status;		/* last kernel status bits */
#if defined(STA_NANO) && NTP_API == 4
static u_int loop_tai;		/* last TAI offset */
#endif /* STA_NANO */
static	void	start_kern_loop(void);
static	void	stop_kern_loop(void);
#endif /* KERNEL_PLL */

/*
 * Clock state machine control flags
 */
int	ntp_enable = TRUE;	/* clock discipline enabled */
int	pll_control;		/* kernel support available */
int	kern_enable = TRUE;	/* kernel support enabled */
int	hardpps_enable;		/* kernel PPS discipline enabled */
int	ext_enable;		/* external clock enabled */
int	pps_stratum;		/* pps stratum */
int	kernel_status;		/* from ntp_adjtime */
int	force_step_once = FALSE; /* always step time once at startup (-G) */
int	mode_ntpdate = FALSE;	/* exit on first clock set (-q) */
int	freq_cnt;		/* initial frequency clamp */
int	freq_set;		/* initial set frequency switch */

/*
 * Clock state machine variables
 */
int	state = 0;		/* clock discipline state */
u_char	sys_poll;		/* time constant/poll (log2 s) */
int	tc_counter;		/* jiggle counter */
double	last_offset;		/* last offset (s) */

u_int	tc_twinlo;		/* TC step down not before this time */
u_int	tc_twinhi;		/* TC step up not before this time */

/*
 * Huff-n'-puff filter variables
 */
static double *sys_huffpuff;	/* huff-n'-puff filter */
static int sys_hufflen;		/* huff-n'-puff filter stages */
static int sys_huffptr;		/* huff-n'-puff filter pointer */
static double sys_mindly;	/* huff-n'-puff filter min delay */

#if defined(KERNEL_PLL)
/* Emacs cc-mode goes nuts if we split the next line... */
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    MOD_STATUS | MOD_TIMECONST)
#ifdef SIGSYS
static void pll_trap (int);	/* configuration trap */
static struct sigaction sigsys;	/* current sigaction status */
static struct sigaction newsigsys; /* new sigaction status */
static sigjmp_buf env;		/* environment var. for pll_trap() */
#endif /* SIGSYS */
#endif /* KERNEL_PLL */

static void
sync_status(const char *what, int ostatus, int nstatus)
{
	char obuf[256], nbuf[256], tbuf[1024];
#if defined(USE_SNPRINTB) && defined (STA_FMT)
	snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
	snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
#else
	snprintf(obuf, sizeof(obuf), "%04x", ostatus);
	snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
#endif
	snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
	report_event(EVNT_KERN, NULL, tbuf);
}

/*
 * file_name - return pointer to non-relative portion of this C file pathname
 */
static char *file_name(void)
{
	if (this_file == NULL) {
	    (void)strncpy(relative_path, __FILE__, PATH_MAX);
	    for (this_file=relative_path;
		*this_file && ! isalnum((unsigned char)*this_file);
		this_file++) ;
	}
	return this_file;
}

/*
 * init_loopfilter - initialize loop filter data
 */
void
init_loopfilter(void)
{
	/*
	 * Initialize state variables.
	 */
	sys_poll = ntp_minpoll;
	clock_jitter = LOGTOD(sys_precision);
	freq_cnt = (int)clock_minstep;
}

#ifdef KERNEL_PLL
/*
 * ntp_adjtime_error_handler - process errors from ntp_adjtime
 */
static void
ntp_adjtime_error_handler(
	const char *caller,	/* name of calling function */
	struct timex *ptimex,	/* pointer to struct timex */
	int ret,		/* return value from ntp_adjtime */
	int saved_errno,	/* value of errno when ntp_adjtime returned */
	int pps_call,		/* ntp_adjtime call was PPS-related */
	int tai_call,		/* ntp_adjtime call was TAI-related */
	int line		/* line number of ntp_adjtime call */
	)
{
	char des[1024] = "";	/* Decoded Error Status */
	char *dbp, *ebp;

	dbp = des;
	ebp = dbp + sizeof(des);

	switch (ret) {
	    case -1:
		switch (saved_errno) {
		    case EFAULT:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx",
			    caller, file_name(), line,
			    (long)((void *)ptimex)
			);
		    break;
		    case EINVAL:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld",
			    caller, file_name(), line,
			    (long)(ptimex->constant)
			);
		    break;
		    case EPERM:
			if (tai_call) {
			    errno = saved_errno;
			    msyslog(LOG_ERR,
				"%s: ntp_adjtime(TAI) failed: %m",
				caller);
			}
			errno = saved_errno;
			msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m",
			    caller, file_name(), line
			);
		    break;
		    default:
			msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call",
			    caller, file_name(), line,
			    saved_errno
			);
		    break;
		}
	    break;
#ifdef TIME_OK
	    case TIME_OK: /* 0: synchronized, no leap second warning */
		/* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */
	    break;
#else
# warning TIME_OK is not defined
#endif
#ifdef TIME_INS
	    case TIME_INS: /* 1: positive leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second insertion scheduled");
	    break;
#else
# warning TIME_INS is not defined
#endif
#ifdef TIME_DEL
	    case TIME_DEL: /* 2: negative leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second deletion scheduled");
	    break;
#else
# warning TIME_DEL is not defined
#endif
#ifdef TIME_OOP
	    case TIME_OOP: /* 3: leap second in progress */
		msyslog(LOG_INFO, "kernel reports leap second in progress");
	    break;
#else
# warning TIME_OOP is not defined
#endif
#ifdef TIME_WAIT
	    case TIME_WAIT: /* 4: leap second has occurred */
		msyslog(LOG_INFO, "kernel reports leap second has occurred");
	    break;
#else
# warning TIME_WAIT is not defined
#endif
#ifdef TIME_ERROR
#if 0

from the reference implementation of ntp_gettime():

		// Hardware or software error
        if ((time_status & (STA_UNSYNC | STA_CLOCKERR))

	/*
         * PPS signal lost when either time or frequency synchronization
         * requested
         */
	|| (time_status & (STA_PPSFREQ | STA_PPSTIME)
	    && !(time_status & STA_PPSSIGNAL))

        /*
         * PPS jitter exceeded when time synchronization requested
         */
	|| (time_status & STA_PPSTIME &&
            time_status & STA_PPSJITTER)

        /*
         * PPS wander exceeded or calibration error when frequency
         * synchronization requested
         */
	|| (time_status & STA_PPSFREQ &&
            time_status & (STA_PPSWANDER | STA_PPSERROR)))
                return (TIME_ERROR);

or, from ntp_adjtime():

	if (  (time_status & (STA_UNSYNC | STA_CLOCKERR))
	    || (time_status & (STA_PPSFREQ | STA_PPSTIME)
		&& !(time_status & STA_PPSSIGNAL))
	    || (time_status & STA_PPSTIME
		&& time_status & STA_PPSJITTER)
	    || (time_status & STA_PPSFREQ
		&& time_status & (STA_PPSWANDER | STA_PPSERROR))
	   )
		return (TIME_ERROR);
#endif

	    case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */
				/* error (see status word) */

		if (ptimex->status & STA_UNSYNC)
			xsbprintf(&dbp, ebp, "%sClock Unsynchronized",
				 (*des) ? "; " : "");

		if (ptimex->status & STA_CLOCKERR)
		    xsbprintf(&dbp, ebp, "%sClock Error",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSFREQ)
		    xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but no PPS",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSTIME)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but no PPS signal",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSTIME
		    && ptimex->status & STA_PPSJITTER)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but PPS Jitter exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSWANDER)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but PPS Wander exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSERROR)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but Calibration error detected",
				  (*des) ? "; " : "");

		if (pps_call && !(ptimex->status & STA_PPSSIGNAL))
			report_event(EVNT_KERN, NULL,
			    "no PPS signal");
		DPRINTF(1, ("kernel loop status %#x (%s)\n",
			ptimex->status, des));
		/*
		 * This code may be returned when ntp_adjtime() has just
		 * been called for the first time, quite a while after
		 * startup, when ntpd just starts to discipline the kernel
		 * time. In this case the occurrence of this message
		 * can be pretty confusing.
		 *
		 * HMS: How about a message when we begin kernel processing:
		 *    Determining kernel clock state...
		 * so an initial TIME_ERROR message is less confusing,
		 * or skipping the first message (ugh),
		 * or ???
		 * msyslog(LOG_INFO, "kernel reports time synchronization lost");
		 */
		msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s",
			ptimex->status, des);
	    break;
#else
# warning TIME_ERROR is not defined
#endif
	    default:
		msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d",
		    caller, file_name(), line,
		    ret,
		    __func__, __LINE__
		);
	    break;
	}
	return;
}
#endif

/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1	update ignored: exceeds panic threshold
 * 0	update ignored: popcorn or exceeds step threshold
 * 1	clock was slewed
 * 2	clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdisp variable equal to the peer dispersion.
 */
int
local_clock(
	struct	peer *peer,	/* synch source peer structure */
	double	fp_offset	/* clock offset (s) */
	)
{
	int	rval;		/* return code */
	int	osys_poll;	/* old system poll */
	int	ntp_adj_ret;	/* returned by ntp_adjtime */
	double	mu;		/* interval since last update */
	double	clock_frequency; /* clock frequency */
	double	dtemp, etemp;	/* double temps */
	char	tbuf[80];	/* report buffer */

	(void)ntp_adj_ret; /* not always used below... */
	/*
	 * If the loop is opened or the NIST LOCKCLOCK is in use,
	 * monitor and record the offsets anyway in order to determine
	 * the open-loop response and then go home.
	 */
#ifndef LOCKCLOCK
	if (!ntp_enable)
#endif /* not LOCKCLOCK */
	{
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		return (0);
	}

#ifndef LOCKCLOCK
	/*
	 * If the clock is way off, panic is declared. The clock_panic
	 * defaults to 1000 s; if set to zero, the panic will never
	 * occur. The allow_panic defaults to FALSE, so the first panic
	 * will exit. It can be set TRUE by a command line option, in
	 * which case the clock will be set anyway and time marches on.
	 * But, allow_panic will be set FALSE when the update is less
	 * than the step threshold; so, subsequent panics will exit.
	 */
	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
	    !allow_panic) {
		snprintf(tbuf, sizeof(tbuf),
		    "%+.0f s; set clock manually within %.0f s.",
		    fp_offset, clock_panic);
		report_event(EVNT_SYSFAULT, NULL, tbuf);
		return (-1);
	}

	allow_panic = FALSE;

	/*
	 * This section simulates ntpdate. If the offset exceeds the
	 * step threshold (128 ms), step the clock to that time and
	 * exit. Otherwise, slew the clock to that time and exit. Note
	 * that the slew will persist and eventually complete beyond the
	 * life of this program. Note that while ntpdate is active, the
	 * terminal does not detach, so the termination message prints
	 * directly to the terminal.
	 */
	if (mode_ntpdate) {
		if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
		   || (-fp_offset > clock_max_back && clock_max_back > 0)) {
			step_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
			    fp_offset);
			printf("ntpd: time set %+.6fs\n", fp_offset);
		} else {
			adj_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
			    fp_offset);
			printf("ntpd: time slew %+.6fs\n", fp_offset);
		}
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		exit (0);
	}

	/*
	 * The huff-n'-puff filter finds the lowest delay in the recent
	 * interval. This is used to correct the offset by one-half the
	 * difference between the sample delay and minimum delay. This
	 * is most effective if the delays are highly asymmetric,
	 * clockhopping is avoided and the clock frequency wander is
	 * relatively small.
	 */
	if (sys_huffpuff != NULL) {
		if (peer->delay < sys_huffpuff[sys_huffptr])
			sys_huffpuff[sys_huffptr] = peer->delay;
		if (peer->delay < sys_mindly)
			sys_mindly = peer->delay;
		if (fp_offset > 0)
			dtemp = -(peer->delay - sys_mindly) / 2;
		else
			dtemp = (peer->delay - sys_mindly) / 2;
		fp_offset += dtemp;
		DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n",
			    sys_hufflen, sys_mindly, dtemp));
	}
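
	/*
	 * Worked example of the correction above (illustrative numbers):
	 * if the minimum delay seen lately is 10 ms and this sample's
	 * delay is 30 ms, half the excess is 10 ms, so a positive offset
	 * is reduced by 10 ms and a negative offset is increased by the
	 * same amount.
	 */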

	/*
	 * Clock state machine transition function which defines how the
	 * system reacts to large phase and frequency excursion. There
	 * are two main regimes: when the offset exceeds the step
	 * threshold (128 ms) and when it does not. Under certain
	 * conditions updates are suspended until the stepout threshold
	 * (900 s) is exceeded. See the documentation on how these
	 * thresholds interact with commands and command line options.
	 *
	 * Note the kernel is disabled if step is disabled or greater
	 * than 0.5 s or in ntpdate mode.
	 */
	osys_poll = sys_poll;
	if (sys_poll < peer->minpoll)
		sys_poll = peer->minpoll;
	if (sys_poll > peer->maxpoll)
		sys_poll = peer->maxpoll;
	mu = current_time - clock_epoch;
	clock_frequency = drift_comp;
	rval = 1;
	if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
	   || (-fp_offset > clock_max_back && clock_max_back > 0)
	   || force_step_once ) {
		if (force_step_once) {
			force_step_once = FALSE;  /* we want this only once after startup */
			msyslog(LOG_NOTICE, "Doing initial time step" );
		}

		switch (state) {

		/*
		 * In SYNC state we ignore the first outlier and switch
		 * to SPIK state.
		 */
		case EVNT_SYNC:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_SPIK, NULL, tbuf);
			state = EVNT_SPIK;
			return (0);

		/*
		 * In FREQ state we ignore outliers and inlyers. At the
		 * first outlier after the stepout threshold, compute
		 * the apparent frequency correction and step the phase.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = direct_freq(fp_offset);

			/* fall through to EVNT_SPIK */

		/*
		 * In SPIK state we ignore succeeding outliers until
		 * either an inlyer is found or the stepout threshold is
		 * exceeded.
		 */
		case EVNT_SPIK:
			if (mu < clock_minstep)
				return (0);

			/* fall through to default */

		/*
		 * We get here by default in NSET and FSET states and
		 * from above in FREQ or SPIK states.
		 *
		 * In NSET state an initial frequency correction is not
		 * available, usually because the frequency file has not
		 * yet been written. Since the time is outside the step
		 * threshold, the clock is stepped. The frequency will
		 * be set directly following the stepout interval.
		 *
		 * In FSET state the initial frequency has been set from
		 * the frequency file. Since the time is outside the
		 * step threshold, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 15 minutes to set the clock for
		 * the first time.
		 *
		 * In FREQ and SPIK states the stepout threshold has
		 * expired and the phase is still above the step
		 * threshold. Note that a single spike greater than the
		 * step threshold is always suppressed, even with a
		 * long time constant.
		 */
		default:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_CLOCKRESET, NULL, tbuf);
			step_systime(fp_offset);
			reinit_timer();
			tc_counter = 0;
			clock_jitter = LOGTOD(sys_precision);
			rval = 2;
			if (state == EVNT_NSET) {
				rstclock(EVNT_FREQ, 0);
				return (rval);
			}
			break;
		}
		rstclock(EVNT_SYNC, 0);
	} else {
		/*
		 * The offset is less than the step threshold. Calculate
		 * the jitter as the exponentially weighted offset
		 * differences.
		 */
		etemp = SQUARE(clock_jitter);
		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
		    LOGTOD(sys_precision)));
		clock_jitter = SQRT(etemp + (dtemp - etemp) /
		    CLOCK_AVG);
		switch (state) {

		/*
		 * In NSET state this is the first update received and
		 * the frequency has not been initialized. Adjust the
		 * phase, but do not adjust the frequency until after
		 * the stepout threshold.
		 */
		case EVNT_NSET:
			adj_systime(fp_offset);
			rstclock(EVNT_FREQ, fp_offset);
			break;

		/*
		 * In FREQ state ignore updates until the stepout
		 * threshold. After that, compute the new frequency, but
		 * do not adjust the frequency until the holdoff counter
		 * decrements to zero.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = direct_freq(fp_offset);
			/* fall through */

		/*
		 * We get here by default in FSET, SPIK and SYNC states.
		 * Here compute the frequency update due to PLL and FLL
		 * contributions. Note, we avoid frequency discipline at
		 * startup until the initial transient has subsided.
		 */
		default:
			if (freq_cnt == 0) {

				/*
				 * The FLL and PLL frequency gain constants
				 * depend on the time constant and Allan
				 * intercept. The PLL is always used, but
				 * becomes ineffective above the Allan intercept
				 * where the FLL becomes effective.
				 */
				if (sys_poll >= allan_xpt)
					clock_frequency +=
					      (fp_offset - clock_offset)
					    / ( max(ULOGTOD(sys_poll), mu)
					       * CLOCK_FLL);

				/*
				 * The PLL frequency gain (numerator) depends on
				 * the minimum of the update interval and Allan
				 * intercept. This reduces the PLL gain when the
				 * FLL becomes effective.
				 */
				etemp = min(ULOGTOD(allan_xpt), mu);
				dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
				clock_frequency +=
				    fp_offset * etemp / (dtemp * dtemp);
			}
			rstclock(EVNT_SYNC, fp_offset);
			if (fabs(fp_offset) < CLOCK_FLOOR)
				freq_cnt = 0;
			break;
		}
	}
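
	/*
	 * Worked example of the PLL term above (illustrative numbers):
	 * with sys_poll = 6 (64 s), mu = 64 s and the default Allan
	 * intercept of 2048 s, etemp = min(2048, 64) = 64 and
	 * dtemp = 4 * 16 * 64 = 4096, so a 10 ms offset nudges the
	 * frequency by 0.01 * 64 / 4096^2, roughly 0.04 PPM.  Above the
	 * Allan intercept the FLL term, computed from the offset change
	 * over the update interval, dominates instead.
	 */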

#ifdef KERNEL_PLL
	/*
	 * This code segment works when clock adjustments are made using
	 * precision time kernel support and the ntp_adjtime() system
	 * call. This support is available in Solaris 2.6 and later,
	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
	 * DECstation 5000/240 and Alpha AXP, additional kernel
	 * modifications provide a true microsecond clock and nanosecond
	 * clock, respectively.
	 *
	 * Important note: The kernel discipline is used only if the
	 * step threshold is less than 0.5 s, as anything higher can
	 * lead to overflow problems. This might occur if some misguided
	 * lad set the step threshold to something ridiculous.
	 */
	if (pll_control && kern_enable && freq_cnt == 0) {

		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		ZERO(ntv);
		if (ext_enable) {
			ntv.modes = MOD_STATUS;
		} else {
			ntv.modes = MOD_BITS;
			ntv.offset = var_long_from_dbl(
			    clock_offset, &ntv.modes);
#ifdef STA_NANO
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
			if (ntv.constant < 0)
				ntv.constant = 0;

			ntv.esterror = usec_long_from_dbl(
				clock_jitter);
			ntv.maxerror = usec_long_from_dbl(
				sys_rootdelay / 2 + sys_rootdisp);
			ntv.status = STA_PLL;

			/*
			 * Enable/disable the PPS if requested.
			 */
			if (hardpps_enable) {
				ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
				if (!(pll_status & STA_PPSTIME))
					sync_status("PPS enabled",
						pll_status,
						ntv.status);
			} else {
				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
				if (pll_status & STA_PPSTIME)
					sync_status("PPS disabled",
						pll_status,
						ntv.status);
			}
			if (sys_leap == LEAP_ADDSECOND)
				ntv.status |= STA_INS;
			else if (sys_leap == LEAP_DELSECOND)
				ntv.status |= STA_DEL;
		}

		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pps. In any case, fetch the kernel offset,
		 * frequency and jitter.
		 */
		ntp_adj_ret = ntp_adjtime(&ntv);
		/*
		 * A squeal is a return status < 0, or a state change.
		 */
		if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
			kernel_status = ntp_adj_ret;
			ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1);
		}
		pll_status = ntv.status;
		clock_offset = dbl_from_var_long(ntv.offset, ntv.status);
		clock_frequency = FREQTOD(ntv.freq);

		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
			clock_jitter = dbl_from_var_long(
				ntv.jitter, ntv.status);
		}

#if defined(STA_NANO) && NTP_API == 4
		/*
		 * If the TAI changes, update the kernel TAI.
		 */
		if (loop_tai != sys_tai) {
			loop_tai = sys_tai;
			ntv.modes = MOD_TAI;
			ntv.constant = sys_tai;
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
			}
		}
#endif /* STA_NANO */
	}
#endif /* KERNEL_PLL */

	/*
	 * Clamp the frequency within the tolerance range and calculate
	 * the frequency difference since the last update.
	 */
	if (fabs(clock_frequency) > NTP_MAXFREQ)
		msyslog(LOG_NOTICE,
		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
	dtemp = SQUARE(clock_frequency - drift_comp);
	if (clock_frequency > NTP_MAXFREQ)
		drift_comp = NTP_MAXFREQ;
	else if (clock_frequency < -NTP_MAXFREQ)
		drift_comp = -NTP_MAXFREQ;
	else
		drift_comp = clock_frequency;

	/*
	 * Calculate the wander as the exponentially weighted RMS
	 * frequency differences. Record the change for the frequency
	 * file update.
	 */
	etemp = SQUARE(clock_stability);
	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
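
	/*
	 * Worked example: starting from zero wander, a one-time 1 PPM
	 * change in frequency gives dtemp = 1e-12, so the new wander is
	 * SQRT(1e-12 / 8), roughly 0.35 PPM, decaying back toward zero
	 * over subsequent quiet updates.
	 */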

	/*
	 * Here we adjust the time constant by comparing the current
	 * offset with the clock jitter. If the offset is less than the
	 * clock jitter times a constant, then the averaging interval is
	 * increased, otherwise it is decreased. A bit of hysteresis
	 * helps calm the dance. Works best using burst mode. Don't
	 * fiddle with the poll during the startup clamp period.
	 * [Bug 3615] also observe time gates to avoid eager stepping
	 */
	if (freq_cnt > 0) {
		tc_counter = 0;
		tc_twinlo  = current_time;
		tc_twinhi  = current_time;
	} else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
		tc_counter += sys_poll;
		if (tc_counter > CLOCK_LIMIT) {
			tc_counter = CLOCK_LIMIT;
			if (sys_poll < peer->maxpoll)
				sys_poll += (current_time >= tc_twinhi);
		}
	} else {
		tc_counter -= sys_poll << 1;
		if (tc_counter < -CLOCK_LIMIT) {
			tc_counter = -CLOCK_LIMIT;
			if (sys_poll > peer->minpoll)
				sys_poll -= (current_time >= tc_twinlo);
		}
	}

	/*
	 * If the time constant has changed, update the poll variables.
	 *
	 * [bug 3615] also set new time gates
	 * The time limit for stepping down will be half the TC interval
	 * or 60 secs from now, whatever is bigger, and the step up time
	 * limit will be half the TC interval after the step down limit.
	 *
	 * The 'sys_poll' value affects the servo loop gain, and
	 * overshooting sys_poll slows it down unnecessarily.  Stepping
	 * down too fast also has bad effects.
	 *
	 * The 'tc_counter' dance itself is something that *should*
	 * happen *once* every (1 << sys_poll) seconds, I think, but
	 * that's not how it works right now, and adding time guards
	 * seems the least intrusive way to handle this.
	 */
	if (osys_poll != sys_poll) {
		u_int deadband = 1u << (sys_poll - 1);
		tc_counter = 0;
		tc_twinlo  = current_time + max(deadband, 60);
		tc_twinhi  = tc_twinlo + deadband;
		poll_update(peer, sys_poll, 0);
	}
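
	/*
	 * Worked example of the gates above: if the poll exponent just
	 * moved from 6 to 7, deadband = 1 << 6 = 64 s, so the next step
	 * down is allowed no sooner than 64 s from now and the next
	 * step up no sooner than 128 s from now.
	 */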

	/*
	 * Yibbidy, yibbbidy, yibbidy; that'h all folks.
	 */
	record_loop_stats(clock_offset, drift_comp, clock_jitter,
	    clock_stability, sys_poll);
	DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
		    clock_offset, clock_jitter, drift_comp * 1e6,
		    clock_stability * 1e6, sys_poll));
	return (rval);
#endif /* not LOCKCLOCK */
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
	void
	)
{
	double	offset_adj;
	double	freq_adj;

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive. During the startup clamp period, the
	 * time constant is clamped at 2.
	 */
	sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
	if (!ntp_enable || mode_ntpdate)
		return;
	/*
	 * Determine the phase adjustment. The gain factor (denominator)
	 * increases with poll interval, so is dominated by the FLL
	 * above the Allan intercept. Note the reduced time constant at
	 * startup.
	 */
	if (state != EVNT_SYNC) {
		offset_adj = 0.;
	} else if (freq_cnt > 0) {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
		freq_cnt--;
#ifdef KERNEL_PLL
	} else if (pll_control && kern_enable) {
		offset_adj = 0.;
#endif /* KERNEL_PLL */
	} else {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
	}
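
	/*
	 * Worked example for the daemon loop (illustrative numbers): in
	 * SYNC state with sys_poll = 6 and a residual clock_offset of
	 * 10 ms, this second's slew is 0.01 / (16 * 64), about 9.8 us;
	 * the remaining offset decays exponentially at that rate.
	 */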

	/*
	 * If the kernel discipline is enabled the frequency correction
	 * drift_comp has already been engaged via ntp_adjtime() in
	 * set_freq().  Otherwise it is a component of the adj_systime()
	 * offset.
	 */
#ifdef KERNEL_PLL
	if (pll_control && kern_enable)
		freq_adj = 0.;
	else
#endif /* KERNEL_PLL */
		freq_adj = drift_comp;

	/* Bound absolute value of total adjustment to NTP_MAXFREQ. */
	if (offset_adj + freq_adj > NTP_MAXFREQ)
		offset_adj = NTP_MAXFREQ - freq_adj;
	else if (offset_adj + freq_adj < -NTP_MAXFREQ)
		offset_adj = -NTP_MAXFREQ - freq_adj;

	clock_offset -= offset_adj;
	/*
	 * Windows port adj_systime() must be called each second,
	 * even if the argument is zero, to ease emulation of
	 * adjtime() using Windows' slew API which controls the rate
	 * but does not automatically stop slewing when an offset
	 * has decayed to zero.
	 */
	DEBUG_INSIST(enable_panic_check == TRUE);
	enable_panic_check = FALSE;
	adj_systime(offset_adj + freq_adj);
	enable_panic_check = TRUE;
#endif /* LOCKCLOCK */
}


/*
 * Clock state machine. Enter new state and set state variables.
 */
static void
rstclock(
	int	trans,		/* new state */
	double	offset		/* new offset */
	)
{
	DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
		    current_time - clock_epoch, trans, sys_poll,
		    tc_counter));
	if (trans != state && trans != EVNT_FSET)
		report_event(trans, NULL, NULL);
	state = trans;
	last_offset = clock_offset = offset;
	clock_epoch = current_time;
}


/*
 * direct_freq - calculate frequency directly
 *
 * This is very carefully done. When the offset is first computed at the
 * first update, a residual frequency component results. Subsequently,
 * updates are suppressed until the end of the measurement interval while
 * the offset is amortized. At the end of the interval the frequency is
 * calculated from the current offset, residual offset, length of the
 * interval and residual frequency component. At the same time the
 * frequency file is armed for update at the next hourly stats.
 */
static double
direct_freq(
	double	fp_offset
	)
{
	set_freq(fp_offset / (current_time - clock_epoch));

	return drift_comp;
}
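
/*
 * Worked example: an offset of +5 ms accumulated over a 300 s stepout
 * interval implies a frequency error of 0.005 / 300, about 16.7 PPM,
 * which set_freq() installs as the new drift_comp.
 */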


/*
 * set_freq - set clock frequency correction
 *
 * Used to step the frequency correction at startup, possibly again once
 * the frequency is measured (that is, transitioning from EVNT_NSET to
 * EVNT_FSET), and finally to switch between daemon and kernel loop
 * discipline at runtime.
 *
 * When the kernel loop discipline is available but the daemon loop is
 * in use, the kernel frequency correction is disabled (set to 0) to
 * ensure drift_comp is applied by only one of the loops.
 */
static void
set_freq(
	double	freq		/* frequency update */
	)
{
	const char *	loop_desc;
	int ntp_adj_ret;

	(void)ntp_adj_ret; /* not always used below... */
	drift_comp = freq;
	loop_desc = "ntpd";
#ifdef KERNEL_PLL
	if (pll_control) {
		ZERO(ntv);
		ntv.modes = MOD_FREQUENCY;
		if (kern_enable) {
			loop_desc = "kernel";
			ntv.freq = DTOFREQ(drift_comp);
		}
		if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
		    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
		}
	}
#endif /* KERNEL_PLL */
	mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
	    drift_comp * 1e6);
}


#ifdef KERNEL_PLL
static void
start_kern_loop(void)
{
	static int atexit_done;
	int ntp_adj_ret;

	pll_control = TRUE;
	ZERO(ntv);
	ntv.modes = MOD_BITS;
	ntv.status = STA_PLL | STA_UNSYNC;
	ntv.maxerror = MAXDISPERSE * 1.0e6;
	ntv.esterror = MAXDISPERSE * 1.0e6;
	ntv.constant = sys_poll;
	/*             ^^^^^^^^ why is it that here constant is
	 * unconditionally set to sys_poll, whereas elsewhere it is
	 * modified depending on nanosecond vs. microsecond kernel?
	 */
#ifdef SIGSYS
	/*
	 * Use sigsetjmp() to save state and then call ntp_adjtime(); if
	 * it fails, then pll_trap() will set pll_control FALSE before
	 * returning control using siglongjmp().
	 */
	newsigsys.sa_handler = pll_trap;
	newsigsys.sa_flags = 0;
	if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
		msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
		pll_control = FALSE;
	} else {
		if (sigsetjmp(env, 1) == 0) {
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
			}
		}
		if (sigaction(SIGSYS, &sigsys, NULL)) {
			msyslog(LOG_ERR,
			    "sigaction() restore SIGSYS: %m");
			pll_control = FALSE;
		}
	}
#else /* SIGSYS */
	if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
	    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
	}
#endif /* SIGSYS */

	/*
	 * Save the result status and light up an external clock
	 * if available.
	 */
	pll_status = ntv.status;
	if (pll_control) {
		if (!atexit_done) {
			atexit_done = TRUE;
			atexit(&stop_kern_loop);
		}
#ifdef STA_NANO
		if (pll_status & STA_CLK)
			ext_enable = TRUE;
#endif /* STA_NANO */
		report_event(EVNT_KERN, NULL,
		    "kernel time sync enabled");
	}
}
#endif	/* KERNEL_PLL */


#ifdef KERNEL_PLL
static void
stop_kern_loop(void)
{
	if (pll_control && kern_enable)
		report_event(EVNT_KERN, NULL,
		    "kernel time sync disabled");
}
#endif	/* KERNEL_PLL */


/*
 * select_loop() - choose kernel or daemon loop discipline.
 */
void
select_loop(
	int	use_kern_loop
	)
{
	if (kern_enable == use_kern_loop)
		return;
#ifdef KERNEL_PLL
	if (pll_control && !use_kern_loop)
		stop_kern_loop();
#endif
	kern_enable = use_kern_loop;
#ifdef KERNEL_PLL
	if (pll_control && use_kern_loop)
		start_kern_loop();
#endif
	/*
	 * If this loop selection change occurs after initial startup,
	 * call set_freq() to switch the frequency compensation to or
	 * from the kernel loop.
	 */
#ifdef KERNEL_PLL
	if (pll_control && loop_started)
		set_freq(drift_comp);
#endif
}


/*
 * huff-n'-puff filter
 */
void
huffpuff(void)
{
	int i;

	if (sys_huffpuff == NULL)
		return;

	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
	sys_huffpuff[sys_huffptr] = 1e9;
	sys_mindly = 1e9;
	for (i = 0; i < sys_hufflen; i++) {
		if (sys_huffpuff[i] < sys_mindly)
			sys_mindly = sys_huffpuff[i];
	}
}


/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
	int	item,
	double	freq
	)
{
	int	i;
	double	ftemp;

	DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq));
	switch (item) {

	/*
	 * We first assume the kernel supports the ntp_adjtime()
	 * syscall. If that syscall works, initialize the kernel time
	 * variables. Otherwise, continue leaving no harm behind.
	 */
	case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		if (mode_ntpdate)
			break;

		start_kern_loop();
#endif /* KERNEL_PLL */

		/*
		 * Initialize frequency if given; otherwise, begin frequency
		 * calibration phase.
		 */
		ftemp = init_drift_comp / 1e6;
		if (ftemp > NTP_MAXFREQ)
			ftemp = NTP_MAXFREQ;
		else if (ftemp < -NTP_MAXFREQ)
			ftemp = -NTP_MAXFREQ;
		set_freq(ftemp);
		if (freq_set)
			rstclock(EVNT_FSET, 0);
		else
			rstclock(EVNT_NSET, 0);
		loop_started = TRUE;
#endif /* LOCKCLOCK */
		break;

	case LOOP_KERN_CLEAR:
#if 0		/* XXX: needs more review, and how can we get here? */
#ifndef LOCKCLOCK
# ifdef KERNEL_PLL
		if (pll_control && kern_enable) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_STATUS;
			ntv.status = STA_UNSYNC;
			ntp_adjtime(&ntv);
			sync_status("kernel time sync disabled",
				pll_status,
				ntv.status);
		   }
# endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
#endif
		break;

	/*
	 * Tinker command variables for Ulrich Windl. Very dangerous.
	 */
	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
		allan_xpt = (u_char)freq;
		break;

	case LOOP_CODEC:	/* audio codec frequency (codec) */
		clock_codec = freq / 1e6;
		break;

	case LOOP_PHI:		/* dispersion threshold (dispersion) */
		clock_phi = freq / 1e6;
		break;

	case LOOP_FREQ:		/* initial frequency (freq) */
		init_drift_comp = freq;
		freq_set++;
		break;

	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
		if (freq < HUFFPUFF)
			freq = HUFFPUFF;
		sys_hufflen = (int)(freq / HUFFPUFF);
		sys_huffpuff = eallocarray(sys_hufflen, sizeof(sys_huffpuff[0]));
		for (i = 0; i < sys_hufflen; i++)
			sys_huffpuff[i] = 1e9;
		sys_mindly = 1e9;
		break;

	case LOOP_PANIC:	/* panic threshold (panic) */
		clock_panic = freq;
		break;

	case LOOP_MAX:		/* step threshold (step) */
		clock_max_fwd = clock_max_back = freq;
		if (freq == 0 || freq > 0.5)
			select_loop(FALSE);
		break;

	case LOOP_MAX_BACK:	/* step threshold (step) */
		clock_max_back = freq;
		/*
		 * Leave using the kernel discipline code unless both
		 * limits are massive.  This assumes the reason to stop
		 * using it is that it's pointless, not that it goes wrong.
		 */
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MAX_FWD:	/* step threshold (step) */
		clock_max_fwd = freq;
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
		if (freq < CLOCK_MINSTEP)
			clock_minstep = CLOCK_MINSTEP;
		else
			clock_minstep = freq;
		break;

	case LOOP_TICK:		/* tick increment (tick) */
		set_sys_tick_precision(freq);
		break;

	case LOOP_LEAP:		/* not used, fall through */
	default:
		msyslog(LOG_NOTICE,
		    "loop_config: unsupported option %d", item);
	}
}


#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
	int arg
	)
{
	pll_control = FALSE;
	siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */