1/*
2 * ntp_loopfilter.c - implements the NTP loop filter algorithm
3 *
4 * ATTENTION: Get approval from Dave Mills on all changes to this file!
5 *
6 */
7#ifdef HAVE_CONFIG_H
8# include <config.h>
9#endif
10
11#include "ntpd.h"
12#include "ntp_io.h"
13#include "ntp_unixtime.h"
14#include "ntp_stdlib.h"
15
16#include <stdio.h>
17#include <ctype.h>
18
19#include <signal.h>
20#include <setjmp.h>
21
22#if defined(VMS) && defined(VMS_LOCALUNIT) /*wjm*/
23#include "ntp_refclock.h"
24#endif /* VMS */
25
26#ifdef KERNEL_PLL
27#include "ntp_syscall.h"
28#endif /* KERNEL_PLL */
29
30/*
31 * This is an implementation of the clock discipline algorithm described
32 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
33 * hybrid phase/frequency-lock loop. A number of sanity checks are
34 * included to protect against timewarps, timespikes and general mayhem.
35 * All units are in s and s/s, unless noted otherwise.
36 */
/*
 * Tunable defaults for the clock discipline. Values marked "threshold"
 * can be overridden at run time via the tinker command / command line.
 */
#define CLOCK_MAX	.128	/* default step threshold (s) */
#define CLOCK_MINSTEP	900.	/* default stepout threshold (s) */
#define CLOCK_PANIC	1000.	/* default panic threshold (s) */
#define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
#define CLOCK_PLL	16.	/* PLL loop gain */
#define CLOCK_FLL	8.	/* FLL loop gain */
#define CLOCK_AVG	4.	/* parameter averaging constant */
#define CLOCK_ALLAN	1500.	/* compromise Allan intercept (s) */
#define CLOCK_DAY	86400.	/* one day in seconds (s) */
#define CLOCK_LIMIT	30	/* poll-adjust threshold */
#define CLOCK_PGATE	4.	/* poll-adjust gate */
#define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
49
/*
 * Clock discipline state machine. This is used to control the
 * synchronization behavior during initialization and following a
 * timewarp.
 *
 *	State	< max	> max		Comments
 *	====================================================
 *	NSET	FREQ	FREQ		no ntp.drift
 *
 *	FSET	TSET	if (allow) TSET,	ntp.drift
 *			else FREQ
 *
 *	TSET	SYNC	FREQ		time set
 *
 *	FREQ	SYNC	if (mu < 900) FREQ	calculate frequency
 *			else if (allow) TSET
 *			else FREQ
 *
 *	SYNC	SYNC	if (mu < 900) SYNC	normal state
 *			else SPIK
 *
 *	SPIK	SYNC	if (allow) TSET	spike detector
 *			else FREQ
 */
#define S_NSET	0		/* clock never set */
#define S_FSET	1		/* frequency set from the drift file */
#define S_TSET	2		/* time set */
#define S_FREQ	3		/* frequency mode */
#define S_SYNC	4		/* clock synchronized */
#define S_SPIK	5		/* spike detected */

/*
 * Kernel PLL/PPS state machine. This is used with the kernel PLL
 * modifications described in the README.kernel file.
 *
 * If kernel support for the ntp_adjtime() system call is available, the
 * ntp_control flag is set. The ntp_enable and kern_enable flags can be
 * set at configuration time or run time using ntpdc. If ntp_enable is
 * false, the discipline loop is unlocked and no corrections of any kind
 * are made. If both ntp_control and kern_enable are set, the kernel
 * support is used as described above; if false, the kernel is bypassed
 * entirely and the daemon PLL used instead.
 *
 * Each update to a prefer peer sets pps_stratum if it survives the
 * intersection algorithm and its time is within range. The PPS time
 * discipline is enabled (STA_PPSTIME bit set in the status word) when
 * pps_stratum is true and the PPS frequency discipline is enabled. If
 * the PPS time discipline is enabled and the kernel reports a PPS
 * signal is present, the pps_control variable is set to the current
 * time. If the current time is later than pps_control by PPS_MAXAGE
 * (120 s), this variable is set to zero.
 *
 * If an external clock is present, the clock driver sets STA_CLK in the
 * status word. When the local clock driver sees this bit, it updates
 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
 * set to zero, in which case the system clock is not adjusted. This is
 * also a signal for the external clock driver to discipline the system
 * clock.
 */
109/*
110 * Program variables that can be tinkered.
111 */
111double clock_max = CLOCK_MAX; /* max offset before step (s) */
112double clock_panic = CLOCK_PANIC; /* max offset before panic (s) */
112double clock_max = CLOCK_MAX; /* step threshold (s) */
113double clock_minstep = CLOCK_MINSTEP; /* stepout threshold (s) */
114double clock_panic = CLOCK_PANIC; /* panic threshold (s) */
115double clock_phi = CLOCK_PHI; /* dispersion rate (s/s) */
114double clock_minstep = CLOCK_MINSTEP; /* step timeout (s) */
115u_char allan_xpt = CLOCK_ALLAN; /* minimum Allan intercept (log2 s) */
116double allan_xpt = CLOCK_ALLAN; /* Allan intercept (s) */
117
118/*
118 * Hybrid PLL/FLL parameters. These were chosen by experiment using a
119 * MatLab program. The parameters were fudged to match a pure PLL at
120 * poll intervals of 64 s and lower and a pure FLL at poll intervals of
121 * 4096 s and higher. Between these extremes the parameters were chosen
122 * as a geometric series of intervals while holding the overshoot to
123 * less than 5 percent.
124 */
125static double fll[] = {0., 1./64, 1./32, 1./16, 1./8, 1./4, 1.};
126static double pll[] = {1., 1.4, 2., 2.8, 4.1, 7., 12.};
127
128/*
119 * Program variables
120 */
121static double clock_offset; /* clock offset adjustment (s) */
122double drift_comp; /* clock frequency (s/s) */
123double clock_stability; /* clock stability (s/s) */
124u_long pps_control; /* last pps sample time */
135static void rstclock P((int, double, double)); /* transition function */
125static void rstclock P((int, u_long, double)); /* transition function */
126
127#ifdef KERNEL_PLL
128struct timex ntv; /* kernel API parameters */
129int pll_status; /* status bits for kernel pll */
130int pll_nano; /* nanosecond kernel switch */
131#endif /* KERNEL_PLL */
132
133/*
134 * Clock state machine control flags
135 */
136int ntp_enable; /* clock discipline enabled */
137int pll_control; /* kernel support available */
138int kern_enable; /* kernel support enabled */
139int pps_enable; /* kernel PPS discipline enabled */
140int ext_enable; /* external clock enabled */
141int pps_stratum; /* pps stratum */
152int allow_step = TRUE; /* allow step correction */
142int allow_panic = FALSE; /* allow panic correction */
143int mode_ntpdate = FALSE; /* exit on first clock set */
144
145/*
146 * Clock state machine variables
147 */
159u_char sys_minpoll = NTP_MINDPOLL; /* min sys poll interval (log2 s) */
148u_char sys_poll = NTP_MINDPOLL; /* system poll interval (log2 s) */
149int state; /* clock discipline state */
162int tc_counter; /* poll-adjust counter */
150int tc_counter; /* hysteresis counter */
151u_long last_time; /* time of last clock update (s) */
152double last_offset; /* last clock offset (s) */
153double sys_jitter; /* system RMS jitter (s) */
154
155/*
156 * Huff-n'-puff filter variables
157 */
158static double *sys_huffpuff; /* huff-n'-puff filter */
159static int sys_hufflen; /* huff-n'-puff filter stages */
160static int sys_huffptr; /* huff-n'-puff filter pointer */
161static double sys_mindly; /* huff-n'-puff filter min delay */
162
163#if defined(KERNEL_PLL)
164/* Emacs cc-mode goes nuts if we split the next line... */
165#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
166 MOD_STATUS | MOD_TIMECONST)
167#ifdef SIGSYS
168static void pll_trap P((int)); /* configuration trap */
169static struct sigaction sigsys; /* current sigaction status */
170static struct sigaction newsigsys; /* new sigaction status */
171static sigjmp_buf env; /* environment var. for pll_trap() */
172#endif /* SIGSYS */
173#endif /* KERNEL_PLL */
174
175/*
176 * init_loopfilter - initialize loop filter data
177 */
178void
179init_loopfilter(void)
180{
181 /*
182 * Initialize state variables. Initially, we expect no drift
183 * file, so set the state to S_NSET.
184 */
185 rstclock(S_NSET, current_time, 0);
186}
187
188/*
189 * local_clock - the NTP logical clock loop filter. Returns 1 if the
190 * clock was stepped, 0 if it was slewed and -1 if it is hopeless.
191 *
192 * LOCKCLOCK: The only thing this routine does is set the
193 * sys_rootdispersion variable equal to the peer dispersion.
194 */
195int
196local_clock(
197 struct peer *peer, /* synch source peer structure */
198 double fp_offset, /* clock offset (s) */
199 double epsil /* jittter (square s*s) */
200 )
201{
211 double mu; /* interval since last update (s) */
202 u_long mu; /* interval since last update (s) */
203 double oerror; /* previous error estimate */
204 double flladj; /* FLL frequency adjustment (ppm) */
205 double plladj; /* PLL frequency adjustment (ppm) */
206 double clock_frequency; /* clock frequency adjustment (ppm) */
207 double dtemp, etemp; /* double temps */
208 int retval; /* return value */
218 int i;
209
210 /*
211 * If the loop is opened, monitor and record the offsets
212 * anyway in order to determine the open-loop response.
213 */
214#ifdef DEBUG
215 if (debug)
216 printf(
227 "local_clock: assocID %d off %.6f jit %.6f sta %d\n",
217 "local_clock: assocID %d offset %.9f jitter %.9f state %d\n",
218 peer->associd, fp_offset, SQRT(epsil), state);
219#endif
220#ifdef LOCKCLOCK
221 sys_rootdispersion = peer->rootdispersion;
222 return (0);
223
224#else /* LOCKCLOCK */
225 if (!ntp_enable) {
226 record_loop_stats(fp_offset, drift_comp, SQRT(epsil),
227 clock_stability, sys_poll);
228 return (0);
229 }
230
231 /*
232 * If the clock is way off, panic is declared. The clock_panic
233 * defaults to 1000 s; if set to zero, the panic will never
234 * occur. The allow_panic defaults to FALSE, so the first panic
235 * will exit. It can be set TRUE by a command line option, in
236 * which case the clock will be set anyway and time marches on.
237 * But, allow_panic will be set it FALSE when the update is
238 * within the step range; so, subsequent panics will exit.
239 */
240 if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
241 !allow_panic) {
242 msyslog(LOG_ERR,
243 "time correction of %.0f seconds exceeds sanity limit (%.0f); set clock manually to the correct UTC time.",
244 fp_offset, clock_panic);
245 return (-1);
246 }
247
248 /*
249 * If simulating ntpdate, set the clock directly, rather than
250 * using the discipline. The clock_max defines the step
251 * threshold, above which the clock will be stepped instead of
252 * slewed. The value defaults to 128 ms, but can be set to even
253 * unreasonable values. If set to zero, the clock will never be
254 * stepped.
255 *
256 * Note that if ntpdate is active, the terminal does not detach,
257 * so the termination comments print directly to the console.
258 */
259 if (mode_ntpdate) {
265 if (allow_step && fabs(fp_offset) > clock_max &&
266 clock_max > 0) {
260 if (fabs(fp_offset) > clock_max && clock_max > 0) {
261 step_systime(fp_offset);
268 NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
269 msyslog(LOG_NOTICE, "time reset %.6f s",
262 msyslog(LOG_NOTICE, "time reset %+.6f s",
263 fp_offset);
271 printf("ntpd: time reset %.6fs\n", fp_offset);
264 printf("ntpd: time set %+.6fs\n", fp_offset);
265 } else {
266 adj_systime(fp_offset);
274 NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
275 msyslog(LOG_NOTICE, "time slew %.6f s",
267 msyslog(LOG_NOTICE, "time slew %+.6f s",
268 fp_offset);
277 printf("ntpd: time slew %.6fs\n", fp_offset);
269 printf("ntpd: time slew %+.6fs\n", fp_offset);
270 }
271 record_loop_stats(fp_offset, drift_comp, SQRT(epsil),
272 clock_stability, sys_poll);
273 exit (0);
274 }
275
276 /*
277 * If the clock has never been set, set it and initialize the
278 * discipline parameters. We then switch to frequency mode to
279 * speed the inital convergence process. If lucky, after an hour
280 * the ntp.drift file is created and initialized and we don't
281 * get here again.
282 */
283 if (state == S_NSET) {
292 step_systime(fp_offset);
293 NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
294 msyslog(LOG_NOTICE, "time set %.6f s", fp_offset);
284 if (fabs(fp_offset) > clock_max && clock_max > 0) {
285 step_systime(fp_offset);
286 msyslog(LOG_NOTICE, "time reset %+.6f s",
287 fp_offset);
288 reinit_timer();
289 }
290 rstclock(S_FREQ, peer->epoch, 0);
291 return (1);
292 }
293
294 /*
295 * Update the jitter estimate.
296 */
297 oerror = sys_jitter;
298 dtemp = SQUARE(sys_jitter);
299 sys_jitter = SQRT(dtemp + (epsil - dtemp) / CLOCK_AVG);
300
301 /*
302 * The huff-n'-puff filter finds the lowest delay in the recent
303 * interval. This is used to correct the offset by one-half the
304 * difference between the sample delay and minimum delay. This
305 * is most effective if the delays are highly assymetric and
306 * clockhopping is avoided and the clock frequency wander is
307 * relatively small.
308 */
309 if (sys_huffpuff != NULL) {
310 if (peer->delay < sys_huffpuff[sys_huffptr])
311 sys_huffpuff[sys_huffptr] = peer->delay;
312 if (peer->delay < sys_mindly)
313 sys_mindly = peer->delay;
314 if (fp_offset > 0)
315 dtemp = -(peer->delay - sys_mindly) / 2;
316 else
317 dtemp = (peer->delay - sys_mindly) / 2;
318 fp_offset += dtemp;
319#ifdef DEBUG
320 if (debug)
321 printf(
322 "local_clock: size %d mindly %.6f huffpuff %.6f\n",
323 sys_hufflen, sys_mindly, dtemp);
324#endif
325 }
326
327 /*
328 * Clock state machine transition function. This is where the
329 * action is and defines how the system reacts to large phase
330 * and frequency errors. There are two main regimes: when the
331 * offset exceeds the step threshold and when it does not.
332 * However, if the step threshold is set to zero, a step will
333 * never occur. See the instruction manual for the details how
334 * these actions interact with the command line options.
335 */
336 retval = 0;
337 if (sys_poll > peer->maxpoll)
338 sys_poll = peer->maxpoll;
339 else if (sys_poll < peer->minpoll)
340 sys_poll = peer->minpoll;
341 clock_frequency = flladj = plladj = 0;
342 mu = peer->epoch - last_time;
343 if (fabs(fp_offset) > clock_max && clock_max > 0) {
344 switch (state) {
345
346 /*
347 * In S_TSET state the time has been set at the last
348 * valid update and the offset at that time set to zero.
349 * If following that we cruise outside the capture
350 * range, assume a really bad frequency error and switch
351 * to S_FREQ state.
352 */
353 case S_TSET:
354 state = S_FREQ;
355 break;
356
357 /*
358 * In S_SYNC state we ignore outlyers. At the first
359 * outlyer after the stepout threshold, switch to S_SPIK
360 * state.
361 */
362 case S_SYNC:
363 if (mu < clock_minstep)
364 return (0);
365 state = S_SPIK;
366 return (0);
367
368 /*
369 * In S_FREQ state we ignore outlyers. At the first
370 * outlyer after 900 s, compute the apparent phase and
371 * frequency correction.
372 */
373 case S_FREQ:
374 if (mu < clock_minstep)
375 return (0);
376 /* fall through to S_SPIK */
377
378 /*
379 * In S_SPIK state a large correction is necessary.
380 * Since the outlyer may be due to a large frequency
381 * error, compute the apparent frequency correction.
382 */
383 case S_SPIK:
384 clock_frequency = (fp_offset - clock_offset) /
385 mu;
386 /* fall through to default */
387
388 /*
389 * We get here directly in S_FSET state and indirectly
390 * from S_FREQ and S_SPIK states. The clock is either
391 * reset or shaken, but never stirred.
392 */
393 default:
399 if (allow_step) {
400 step_systime(fp_offset);
401 NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
402 msyslog(LOG_NOTICE, "time reset %.6f s",
403 fp_offset);
404 rstclock(S_TSET, peer->epoch, 0);
405 retval = 1;
406 } else {
407 NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
408 msyslog(LOG_NOTICE, "time slew %.6f s",
409 fp_offset);
410 rstclock(S_FREQ, peer->epoch,
411 fp_offset);
412 }
394 step_systime(fp_offset);
395 msyslog(LOG_NOTICE, "time reset %+.6f s",
396 fp_offset);
397 reinit_timer();
398 rstclock(S_TSET, peer->epoch, 0);
399 retval = 1;
400 break;
401 }
402 } else {
403 switch (state) {
404
405 /*
406 * In S_FSET state this is the first update. Adjust the
407 * phase, but don't adjust the frequency until the next
408 * update.
409 */
410 case S_FSET:
411 rstclock(S_TSET, peer->epoch, fp_offset);
412 break;
413
414 /*
415 * In S_FREQ state ignore updates until the stepout
416 * threshold. After that, correct the phase and
417 * frequency and switch to S_SYNC state.
418 */
419 case S_FREQ:
420 if (mu < clock_minstep)
421 return (0);
422 clock_frequency = (fp_offset - clock_offset) /
423 mu;
424 rstclock(S_SYNC, peer->epoch, fp_offset);
425 break;
426
427 /*
428 * Either the clock has just been set or the previous
429 * update was a spike and ignored. Since this update is
430 * not an outlyer, fold the tent and resume life.
431 */
432 case S_TSET:
433 case S_SPIK:
434 state = S_SYNC;
435 /* fall through to default */
436
437 /*
438 * We come here in the normal case for linear phase and
452 * frequency adjustments. If the offset exceeds the
453 * previous time error estimate by CLOCK_SGATE and the
454 * interval since the last update is less than twice the
455 * poll interval, consider the update a popcorn spike
456 * and ignore it.
439 * frequency adjustments. If the difference between the
440 * last offset and the current one exceeds the jitter by
441 * CLOCK_SGATE and the interval since the last update is
442 * less than twice the system poll interval, consider
443 * the update a popcorn spike and ignore it..
444 */
445 default:
446 allow_panic = FALSE;
460 if (fabs(fp_offset - last_offset) >
461 CLOCK_SGATE * oerror && mu <
462 ULOGTOD(sys_poll + 1)) {
447 dtemp = fabs(fp_offset - last_offset);
448/*
449 if (dtemp > CLOCK_SGATE * oerror && mu <
450 (u_long) ULOGTOD(sys_poll + 1)) {
451#ifdef DEBUG
452 if (debug)
453 printf(
454 "local_clock: popcorn %.6f %.6f\n",
467 fabs(fp_offset -
468 last_offset), CLOCK_SGATE *
469 oerror);
455 dtemp, oerror);
456#endif
457 last_offset = fp_offset;
458 return (0);
459 }
460*/
461
462 /*
476 * Compute the FLL and PLL frequency adjustments
477 * conditioned on intricate weighting factors.
478 * The gain factors depend on the poll interval
479 * and Allan intercept. For the FLL, the
480 * averaging interval is clamped to a minimum of
481 * 1024 s and the gain increased in stages from
482 * zero for poll intervals below half the Allan
483 * intercept to unity above twice the Allan
484 * intercept. For the PLL, the averaging
485 * interval is clamped not to exceed the poll
486 * interval. No gain factor is necessary, since
487 * the frequency steering above the Allan
488 * intercept is negligible. Particularly for the
489 * PLL, these measures allow oversampling, but
490 * not undersampling and insure stability even
491 * when the rules of fair engagement are broken.
463 * The FLL and PLL frequency gain constants
464 * depend on the poll interval and Allan
465 * intercept. The PLL constant is calculated
466 * throughout the poll interval range, but the
467 * update interval is clamped so as not to
468 * exceed the poll interval. The FLL gain is
469 * zero below one-half the Allan intercept and
470 * unity at MAXPOLL. It decreases as 1 /
471 * (MAXPOLL + 1 - poll interval) in a feeble
472 * effort to match the loop stiffness to the
473 * Allan wobble. Particularly for the PLL, these
474 * measures allow oversampling, but not
475 * undersampling and insure stability even when
476 * the rules of fair engagement are broken.
477 */
493 i = sys_poll - allan_xpt + 4;
494 if (i < 0)
495 i = 0;
496 else if (i > 6)
497 i = 6;
498 etemp = fll[i];
499 dtemp = max(mu, ULOGTOD(allan_xpt));
500 flladj = (fp_offset - clock_offset) * etemp /
501 (dtemp * CLOCK_FLL);
502 dtemp = ULOGTOD(SHIFT_PLL + 2 + sys_poll);
503 etemp = min(mu, ULOGTOD(sys_poll));
478 if (ULOGTOD(sys_poll) > allan_xpt / 2) {
479 dtemp = NTP_MAXPOLL + 1 - sys_poll;
480 flladj = (fp_offset - clock_offset) /
481 (max(mu, allan_xpt) * dtemp);
482 }
483 etemp = min(mu, (u_long)ULOGTOD(sys_poll));
484 dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
485 plladj = fp_offset * etemp / (dtemp * dtemp);
486 last_time = peer->epoch;
487 last_offset = clock_offset = fp_offset;
488 break;
489 }
490 }
491
511#if defined(KERNEL_PLL)
492#ifdef KERNEL_PLL
493 /*
494 * This code segment works when clock adjustments are made using
495 * precision time kernel support and the ntp_adjtime() system
496 * call. This support is available in Solaris 2.6 and later,
497 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
498 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
499 * DECstation 5000/240 and Alpha AXP, additional kernel
500 * modifications provide a true microsecond clock and nanosecond
501 * clock, respectively.
502 */
503 if (pll_control && kern_enable) {
504
505 /*
506 * We initialize the structure for the ntp_adjtime()
507 * system call. We have to convert everything to
508 * microseconds or nanoseconds first. Do not update the
509 * system variables if the ext_enable flag is set. In
510 * this case, the external clock driver will update the
511 * variables, which will be read later by the local
512 * clock driver. Afterwards, remember the time and
513 * frequency offsets for jitter and stability values and
514 * to update the drift file.
515 */
516 memset(&ntv, 0, sizeof(ntv));
517 if (ext_enable) {
518 ntv.modes = MOD_STATUS;
519 } else {
520 ntv.modes = MOD_BITS;
521 if (clock_offset < 0)
522 dtemp = -.5;
523 else
524 dtemp = .5;
525 if (pll_nano) {
526 ntv.offset = (int32)(clock_offset *
527 1e9 + dtemp);
528 ntv.constant = sys_poll;
529 } else {
530 ntv.offset = (int32)(clock_offset *
531 1e6 + dtemp);
532 ntv.constant = sys_poll - 4;
533 }
534 if (clock_frequency != 0) {
535 ntv.modes |= MOD_FREQUENCY;
536 ntv.freq = (int32)((clock_frequency +
537 drift_comp) * 65536e6);
538 }
539 ntv.esterror = (u_int32)(sys_jitter * 1e6);
540 ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
541 sys_rootdispersion) * 1e6);
542 ntv.status = STA_PLL;
543
544 /*
545 * Set the leap bits in the status word.
546 */
547 if (sys_leap == LEAP_NOTINSYNC) {
548 ntv.status |= STA_UNSYNC;
549 } else if (calleapwhen(sys_reftime.l_ui) <
550 CLOCK_DAY) {
551 if (sys_leap & LEAP_ADDSECOND)
552 ntv.status |= STA_INS;
553 else if (sys_leap & LEAP_DELSECOND)
554 ntv.status |= STA_DEL;
555 }
556
557 /*
558 * Switch to FLL mode if the poll interval is
559 * greater than MAXDPOLL, so that the kernel
560 * loop behaves as the daemon loop; viz.,
561 * selects the FLL when necessary, etc. For
562 * legacy only.
563 */
564 if (sys_poll > NTP_MAXDPOLL)
565 ntv.status |= STA_FLL;
566
567 /*
568 * If the PPS signal is up and enabled, light
569 * the frequency bit. If the PPS driver is
570 * working, light the phase bit as well. If not,
571 * douse the lights, since somebody else may
572 * have left the switch on.
573 */
574 if (pps_enable && pll_status & STA_PPSSIGNAL) {
575 ntv.status |= STA_PPSFREQ;
576 if (pps_stratum < STRATUM_UNSPEC)
577 ntv.status |= STA_PPSTIME;
578 } else {
579 ntv.status &= ~(STA_PPSFREQ |
580 STA_PPSTIME);
581 }
582 }
583
584 /*
585 * Pass the stuff to the kernel. If it squeals, turn off
586 * the pigs. In any case, fetch the kernel offset and
587 * frequency and pretend we did it here.
588 */
589 if (ntp_adjtime(&ntv) == TIME_ERROR) {
590 if (ntv.status != pll_status)
610 msyslog(LOG_ERR,
611 "kernel time discipline status change %x",
591 NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
592 msyslog(LOG_NOTICE,
593 "kernel time sync disabled %04x",
594 ntv.status);
595 ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
596 } else {
597 if (ntv.status != pll_status)
598 NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
599 msyslog(LOG_NOTICE,
600 "kernel time sync enabled %04x",
601 ntv.status);
602 }
603 pll_status = ntv.status;
604 if (pll_nano)
605 clock_offset = ntv.offset / 1e9;
606 else
607 clock_offset = ntv.offset / 1e6;
608 clock_frequency = ntv.freq / 65536e6 - drift_comp;
609 flladj = plladj = 0;
610
611 /*
612 * If the kernel PPS is lit, monitor its performance.
613 */
614 if (ntv.status & STA_PPSTIME) {
615 pps_control = current_time;
616 if (pll_nano)
617 sys_jitter = ntv.jitter / 1e9;
618 else
619 sys_jitter = ntv.jitter / 1e6;
620 }
621 }
622#endif /* KERNEL_PLL */
623
624 /*
625 * Adjust the clock frequency and calculate the stability. If
626 * kernel support is available, we use the results of the kernel
627 * discipline instead of the PLL/FLL discipline. In this case,
628 * drift_comp is a sham and used only for updating the drift
629 * file and for billboard eye candy.
630 */
643 etemp = clock_frequency + flladj + plladj;
644 drift_comp += etemp;
645 if (drift_comp > NTP_MAXFREQ)
631 dtemp = clock_frequency + flladj + plladj;
632 etemp = drift_comp + dtemp;
633 if (etemp > NTP_MAXFREQ)
634 drift_comp = NTP_MAXFREQ;
647 else if (drift_comp <= -NTP_MAXFREQ)
635 else if (etemp <= -NTP_MAXFREQ)
636 drift_comp = -NTP_MAXFREQ;
649 dtemp = SQUARE(clock_stability);
650 etemp = SQUARE(etemp) - dtemp;
651 clock_stability = SQRT(dtemp + etemp / CLOCK_AVG);
637 else
638 drift_comp = etemp;
639 if (fabs(etemp) > NTP_MAXFREQ)
640 NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
641 msyslog(LOG_NOTICE,
642 "frequency error %.0f PPM exceeds tolerance %.0f PPM",
643 etemp * 1e6, NTP_MAXFREQ * 1e6);
644
645 etemp = SQUARE(clock_stability);
646 dtemp = SQUARE(dtemp);
647 clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
648
649 /*
650 * In SYNC state, adjust the poll interval. The trick here is to
651 * compare the apparent frequency change induced by the system
652 * jitter over the poll interval, or fritter, to the frequency
653 * stability. If the fritter is greater than the stability,
654 * phase noise predominates and the averaging interval is
655 * increased; otherwise, it is decreased. A bit of hysteresis
656 * helps calm the dance. Works best using burst mode.
657 */
658 if (state == S_SYNC) {
663 if (sys_jitter / ULOGTOD(sys_poll) > clock_stability &&
659 if (sys_jitter > ULOGTOD(sys_poll) * clock_stability &&
660 fabs(clock_offset) < CLOCK_PGATE * sys_jitter) {
661 tc_counter += sys_poll;
662 if (tc_counter > CLOCK_LIMIT) {
663 tc_counter = CLOCK_LIMIT;
664 if (sys_poll < peer->maxpoll) {
665 tc_counter = 0;
666 sys_poll++;
667 }
668 }
669 } else {
670 tc_counter -= sys_poll << 1;
671 if (tc_counter < -CLOCK_LIMIT) {
672 tc_counter = -CLOCK_LIMIT;
673 if (sys_poll > peer->minpoll) {
674 tc_counter = 0;
675 sys_poll--;
676 }
677 }
678 }
679 }
680
681 /*
682 * Update the system time variables.
683 */
688 dtemp = peer->disp + sys_jitter;
689 if ((peer->flags & FLAG_REFCLOCK) == 0 && dtemp < MINDISPERSE)
684 dtemp = peer->disp + (current_time - peer->epoch) * clock_phi +
685 sys_jitter + fabs(last_offset);
686 if (!(peer->flags & FLAG_REFCLOCK) && dtemp < MINDISPERSE)
687 dtemp = MINDISPERSE;
688 sys_rootdispersion = peer->rootdispersion + dtemp;
689 record_loop_stats(last_offset, drift_comp, sys_jitter,
690 clock_stability, sys_poll);
691
692#ifdef DEBUG
693 if (debug)
694 printf(
697 "local_clock: mu %.0f noi %.3f stb %.3f pol %d cnt %d\n",
698 mu, sys_jitter * 1e6, clock_stability * 1e6, sys_poll,
695 "local_clock: mu %lu rootjit %.6f stab %.3f poll %d count %d\n",
696 mu, dtemp, clock_stability * 1e6, sys_poll,
697 tc_counter);
698#endif /* DEBUG */
699 return (retval);
700#endif /* LOCKCLOCK */
701}
702
703
704/*
705 * adj_host_clock - Called once every second to update the local clock.
706 *
707 * LOCKCLOCK: The only thing this routine does is increment the
708 * sys_rootdispersion variable.
709 */
710void
711adj_host_clock(
712 void
713 )
714{
713 double adjustment;
714 int i;
715 double adjustment;
716
717 /*
718 * Update the dispersion since the last update. In contrast to
719 * NTPv3, NTPv4 does not declare unsynchronized after one day,
720 * since the dispersion check serves this function. Also,
721 * since the poll interval can exceed one day, the old test
722 * would be counterproductive. Note we do this even with
723 * external clocks, since the clock driver will recompute the
724 * maximum error and the local clock driver will pick it up and
725 * pass to the common refclock routines. Very elegant.
726 */
727 sys_rootdispersion += clock_phi;
728
729#ifndef LOCKCLOCK
730 /*
731 * Declare PPS kernel unsync if the pps signal has not been
732 * heard for a few minutes.
733 */
734 if (pps_control && current_time - pps_control > PPS_MAXAGE) {
735 if (pps_control)
734 NLOG(NLOG_SYSEVENT) /* conditional if clause */
735 msyslog(LOG_INFO, "pps sync disabled");
736 NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
737 msyslog(LOG_NOTICE, "pps sync disabled");
738 pps_control = 0;
739 }
738 if (!ntp_enable)
739 return;
740
741 /*
742 * If the phase-lock loop is implemented in the kernel, we
743 * have no business going further.
742 * If NTP is disabled or ntpdate mode enabled or the kernel
743 * discipline enabled, we have no business going further.
744 */
745 if (pll_control && kern_enable)
745 if (!ntp_enable || mode_ntpdate || (pll_control && kern_enable))
746 return;
747
748 /*
749 * Intricate wrinkle for legacy only. If the local clock driver
750 * is in use and selected for synchronization, somebody else may
751 * tinker the adjtime() syscall. If this is the case, the driver
752 * is marked prefer and we have to avoid calling adjtime(),
753 * since that may truncate the other guy's requests.
754 */
755 if (sys_peer != 0) {
756 if (sys_peer->refclktype == REFCLK_LOCALCLOCK &&
757 sys_peer->flags & FLAG_PREFER)
758 return;
759 }
760
761 /*
762 * This ugly bit of business is necessary in order to move the
763 * pole frequency higher in FLL mode. This is necessary for loop
764 * stability.
762 * Implement the phase and frequency adjustments. Note the
763 * black art formerly practiced here has been whitewashed.
764 */
766 i = sys_poll - allan_xpt + 4;
767 if (i < 0)
768 i = 0;
769 else if (i > 6)
770 i = 6;
771 adjustment = clock_offset / (pll[i] * ULOGTOD(SHIFT_PLL +
772 sys_poll));
765 adjustment = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
766 clock_offset -= adjustment;
767 adj_systime(adjustment + drift_comp);
768#endif /* LOCKCLOCK */
769}
770
771
772/*
773 * Clock state machine. Enter new state and set state variables.
774 */
775static void
776rstclock(
777 int trans, /* new state */
784 double epoch, /* last time */
778 u_long epoch, /* last time */
779 double offset /* last offset */
780 )
781{
782 tc_counter = 0;
783 sys_poll = NTP_MINPOLL;
784 state = trans;
785 last_time = epoch;
786 last_offset = clock_offset = offset;
787#ifdef DEBUG
788 if (debug)
789 printf("local_clock: at %lu state %d\n", last_time,
790 trans);
791#endif
792}
793
794
795/*
796 * huff-n'-puff filter
797 */
798void
799huffpuff()
800{
801 int i;
802
803 if (sys_huffpuff == NULL)
804 return;
805 sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
806 sys_huffpuff[sys_huffptr] = 1e9;
807 sys_mindly = 1e9;
808 for (i = 0; i < sys_hufflen; i++) {
809 if (sys_huffpuff[i] < sys_mindly)
810 sys_mindly = sys_huffpuff[i];
811 }
812}
813
814
815/*
816 * loop_config - configure the loop filter
817 *
818 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
819 */
820void
821loop_config(
822 int item,
823 double freq
824 )
825{
826 int i;
827
828 switch (item) {
829
830 case LOOP_DRIFTINIT:
831
832#ifndef LOCKCLOCK
833#ifdef KERNEL_PLL
834 /*
835 * Assume the kernel supports the ntp_adjtime() syscall.
836 * If that syscall works, initialize the kernel
837 * variables. Otherwise, continue leaving no harm
838 * behind. While at it, ask to set nanosecond mode. If
839 * the kernel agrees, rejoice; othewise, it does only
840 * microseconds.
841 *
842 * Call out the safety patrol. If ntpdate mode or if the
843 * step threshold has been changed by the -x option or
844 * tinker command, kernel discipline is unsafe, so don't
845 * do any of this stuff.
846 */
847 if (mode_ntpdate || clock_max != CLOCK_MAX)
848 break;
849
850 pll_control = 1;
851 memset(&ntv, 0, sizeof(ntv));
852#ifdef STA_NANO
853 ntv.modes = MOD_BITS | MOD_NANO;
854#else
855 ntv.modes = MOD_BITS;
856#endif /* STA_NANO */
857 ntv.maxerror = MAXDISPERSE;
858 ntv.esterror = MAXDISPERSE;
859 ntv.status = STA_UNSYNC;
860#ifdef SIGSYS
861 /*
862 * Use sigsetjmp() to save state and then call
863 * ntp_adjtime(); if it fails, then siglongjmp() is used
864 * to return control
865 */
866 newsigsys.sa_handler = pll_trap;
867 newsigsys.sa_flags = 0;
868 if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
869 msyslog(LOG_ERR,
870 "sigaction() fails to save SIGSYS trap: %m");
871 pll_control = 0;
872 }
873 if (sigsetjmp(env, 1) == 0)
874 ntp_adjtime(&ntv);
875 if ((sigaction(SIGSYS, &sigsys,
876 (struct sigaction *)NULL))) {
877 msyslog(LOG_ERR,
878 "sigaction() fails to restore SIGSYS trap: %m");
879 pll_control = 0;
880 }
881#else /* SIGSYS */
882 ntp_adjtime(&ntv);
883#endif /* SIGSYS */
884 pll_status = ntv.status;
885 if (pll_control) {
886#ifdef STA_NANO
887 if (pll_status & STA_NANO)
888 pll_nano = 1;
889 if (pll_status & STA_CLK)
890 ext_enable = 1;
891#endif /* STA_NANO */
882 msyslog(LOG_NOTICE,
883 "kernel time discipline status %04x",
892 NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
893 msyslog(LOG_INFO,
894 "kernel time sync status %04x",
895 pll_status);
896 }
897#endif /* KERNEL_PLL */
898#endif /* LOCKCLOCK */
899 break;
900
901 case LOOP_DRIFTCOMP:
902
903#ifndef LOCKCLOCK
904 /*
892 * Initialize the kernel frequency and clamp to
893 * reasonable value. Also set the initial state to
894 * S_FSET to indicated the frequency has been
895 * initialized from the previously saved drift file.
905 * If the frequency value is reasonable, set the initial
906 * frequency to the given value and the state to S_FSET.
907 * Otherwise, the drift file may be missing or broken,
908 * so set the frequency to zero. This erases past
909 * history should somebody break something.
910 */
897 rstclock(S_FSET, current_time, 0);
898 drift_comp = freq;
899 if (drift_comp > NTP_MAXFREQ)
900 drift_comp = NTP_MAXFREQ;
901 if (drift_comp < -NTP_MAXFREQ)
902 drift_comp = -NTP_MAXFREQ;
911 if (freq <= NTP_MAXFREQ && freq >= -NTP_MAXFREQ) {
912 drift_comp = freq;
913 rstclock(S_FSET, current_time, 0);
914 } else {
915 drift_comp = 0;
916 }
917
918#ifdef KERNEL_PLL
919 /*
920 * Sanity check. If the kernel is enabled, load the
921 * frequency and light up the loop. If not, set the
922 * kernel frequency to zero and leave the loop dark. In
923 * either case set the time to zero to cancel any
924 * previous nonsense.
925 */
926 if (pll_control) {
927 memset((char *)&ntv, 0, sizeof(ntv));
928 ntv.modes = MOD_OFFSET | MOD_FREQUENCY;
929 if (kern_enable) {
930 ntv.modes |= MOD_STATUS;
931 ntv.status = STA_PLL;
932 ntv.freq = (int32)(drift_comp *
933 65536e6);
934 }
935 (void)ntp_adjtime(&ntv);
936 }
937#endif /* KERNEL_PLL */
938#endif /* LOCKCLOCK */
939 break;
940
941 /*
942 * Special tinker variables for Ulrich Windl. Very dangerous.
943 */
944 case LOOP_MAX: /* step threshold */
945 clock_max = freq;
946 break;
947
933 case LOOP_PANIC: /* panic exit threshold */
948 case LOOP_PANIC: /* panic threshold */
949 clock_panic = freq;
950 break;
951
952 case LOOP_PHI: /* dispersion rate */
953 clock_phi = freq;
954 break;
955
956 case LOOP_MINSTEP: /* watchdog bark */
957 clock_minstep = freq;
958 break;
959
945 case LOOP_MINPOLL: /* ephemeral association poll */
946 if (freq < NTP_MINPOLL)
947 freq = NTP_MINPOLL;
948 sys_minpoll = (u_char)freq;
960 case LOOP_ALLAN: /* Allan intercept */
961 allan_xpt = freq;
962 break;
950
951 case LOOP_ALLAN: /* minimum Allan intercept */
952 if (freq < CLOCK_ALLAN)
953 freq = CLOCK_ALLAN;
954 allan_xpt = (u_char)freq;
955 break;
963
964 case LOOP_HUFFPUFF: /* huff-n'-puff filter length */
965 if (freq < HUFFPUFF)
966 freq = HUFFPUFF;
967 sys_hufflen = (int)(freq / HUFFPUFF);
968 sys_huffpuff = (double *)emalloc(sizeof(double) *
969 sys_hufflen);
970 for (i = 0; i < sys_hufflen; i++)
971 sys_huffpuff[i] = 1e9;
972 sys_mindly = 1e9;
973 break;
974
975 case LOOP_FREQ: /* initial frequency */
976 drift_comp = freq / 1e6;
977 rstclock(S_FSET, current_time, 0);
978 break;
979 }
980}
981
982
#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 *
 * Runs as the SIGSYS handler installed around the probe ntp_adjtime()
 * call in loop_config(): it marks the kernel discipline as unavailable
 * and unwinds back to the sigsetjmp() point. The signal-number
 * argument is unused.
 */
static RETSIGTYPE
pll_trap(
	int arg
	)
{
	/* kernel PLL syscall is missing; fall back to adjtime() emulation */
	pll_control = 0;
	siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */