/*
 * ntp_loopfilter.c - implements the NTP loop filter algorithm
 *
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_unixtime.h"
#include "ntp_stdlib.h"

#include <stdio.h>
#include <ctype.h>

#include <signal.h>
#include <setjmp.h>

#if defined(VMS) && defined(VMS_LOCALUNIT)	/* wjm */
#include "ntp_refclock.h"
#endif /* VMS */

#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif /* KERNEL_PLL */

/*
 * This is an implementation of the clock discipline algorithm described
 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
 * hybrid phase/frequency-lock loop. A number of sanity checks are
 * included to protect against timewarps, timespikes and general mayhem.
 * All units are in s and s/s, unless noted otherwise.
 */
#define CLOCK_MAX	.128	/* default step offset (s) */
#define CLOCK_PANIC	1000.	/* default panic offset (s) */
#define CLOCK_PHI	15e-6	/* max frequency error (s/s) */
#define SHIFT_PLL	4	/* PLL loop gain (shift) */
#define CLOCK_FLL	8.	/* FLL loop gain */
#define CLOCK_AVG	4.	/* parameter averaging constant */
#define CLOCK_MINSEC	256.	/* min FLL update interval (s) */
#define CLOCK_MINSTEP	900.	/* step-change timeout (s) */
#define CLOCK_DAY	86400.	/* one day of seconds (s) */
#define CLOCK_LIMIT	30	/* poll-adjust threshold */
#define CLOCK_PGATE	4.	/* poll-adjust gate */
#define CLOCK_ALLAN	10	/* min Allan intercept (log2 s) */
#define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */

/*
 * Clock discipline state machine. This is used to control the
 * synchronization behavior during initialization and following a
 * timewarp.
 *
 * State   < max    > max                  Comments
 * ====================================================================
 * NSET    FREQ     FREQ                   no ntp.drift
 *
 * FSET    TSET     if (allow) TSET,       ntp.drift
 *                  else FREQ
 *
 * TSET    SYNC     FREQ                   time set
 *
 * FREQ    SYNC     if (mu < 900) FREQ     calculate frequency
 *                  else if (allow) TSET
 *                  else FREQ
 *
 * SYNC    SYNC     if (mu < 900) SYNC     normal state
 *                  else SPIK
 *
 * SPIK    SYNC     if (allow) TSET        spike detector
 *                  else FREQ
 */
#define S_NSET	0	/* clock never set */
#define S_FSET	1	/* frequency set from the drift file */
#define S_TSET	2	/* time set */
#define S_FREQ	3	/* frequency mode */
#define S_SYNC	4	/* clock synchronized */
#define S_SPIK	5	/* spike detected */
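/*
 * Note: transitions that also reset the poll interval, counters and
 * offset history go through rstclock() below; the simpler transitions
 * in local_clock() assign the state variable directly.
 */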

/*
 * Kernel PLL/PPS state machine. This is used with the kernel PLL
 * modifications described in the README.kernel file.
 *
 * If kernel support for the ntp_adjtime() system call is available, the
 * pll_control flag is set. The ntp_enable and kern_enable flags can be
 * set at configuration time or run time using ntpdc. If ntp_enable is
 * false, the discipline loop is unlocked and no corrections of any kind
 * are made. If both pll_control and kern_enable are set, the kernel
 * support is used as described above; if false, the kernel is bypassed
 * entirely and the daemon PLL used instead.
 *
 * Each update to a prefer peer sets pps_stratum if it survives the
 * intersection algorithm and its time is within range. The PPS time
 * discipline is enabled (STA_PPSTIME bit set in the status word) when
 * pps_stratum is true and the PPS frequency discipline is enabled. If
 * the PPS time discipline is enabled and the kernel reports a PPS
 * signal is present, the pps_control variable is set to the current
 * time. If the current time is later than pps_control by PPS_MAXAGE
 * (120 s), this variable is set to zero.
 *
 * If an external clock is present, the clock driver sets STA_CLK in the
 * status word. When the local clock driver sees this bit, it updates
 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
 * set to zero, in which case the system clock is not adjusted. This is
 * also a signal for the external clock driver to discipline the system
 * clock.
 */
/*
 * Program variables that can be tinkered.
 */
double clock_max = CLOCK_MAX;		/* max offset before step (s) */
double clock_panic = CLOCK_PANIC;	/* max offset before panic (s) */
double clock_phi = CLOCK_PHI;		/* dispersion rate (s/s) */
double clock_minstep = CLOCK_MINSTEP;	/* step timeout (s) */
u_char allan_xpt = CLOCK_ALLAN;		/* minimum Allan intercept (log2 s) */

/*
 * Hybrid PLL/FLL parameters. These were chosen by experiment using a
 * MatLab program. The parameters were fudged to match a pure PLL at
 * poll intervals of 64 s and lower and a pure FLL at poll intervals of
 * 4096 s and higher. Between these extremes the parameters were chosen
 * as a geometric series of intervals while holding the overshoot to
 * less than 5 percent.
 */
static double fll[] = {0., 1./64, 1./32, 1./16, 1./8, 1./4, 1.};
static double pll[] = {1., 1.4, 2., 2.8, 4.1, 7., 12.};
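/*
 * Both tables are indexed by i = sys_poll - allan_xpt + 4, clamped to
 * the range [0, 6], so the FLL gain ramps from 0 to 1 and the PLL time
 * constant stretches as the poll interval rises through the Allan
 * intercept.
 */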

/*
 * Program variables
 */
static double clock_offset;	/* clock offset adjustment (s) */
double drift_comp;		/* clock frequency (s/s) */
double clock_stability;		/* clock stability (s/s) */
u_long pps_control;		/* last pps sample time */
static void rstclock P((int, double, double)); /* transition function */

#ifdef KERNEL_PLL
struct timex ntv;		/* kernel API parameters */
int pll_status;			/* status bits for kernel pll */
int pll_nano;			/* nanosecond kernel switch */
#endif /* KERNEL_PLL */

/*
 * Clock state machine control flags
 */
int ntp_enable;			/* clock discipline enabled */
int pll_control;		/* kernel support available */
int kern_enable;		/* kernel support enabled */
int pps_enable;			/* kernel PPS discipline enabled */
int ext_enable;			/* external clock enabled */
int pps_stratum;		/* pps stratum */
int allow_step = TRUE;		/* allow step correction */
int allow_panic = FALSE;	/* allow panic correction */
int mode_ntpdate = FALSE;	/* exit on first clock set */

/*
 * Clock state machine variables
 */
u_char sys_minpoll = NTP_MINDPOLL; /* min sys poll interval (log2 s) */
u_char sys_poll = NTP_MINDPOLL;	/* system poll interval (log2 s) */
int state;			/* clock discipline state */
int tc_counter;			/* poll-adjust counter */
u_long last_time;		/* time of last clock update (s) */
double last_offset;		/* last clock offset (s) */
double sys_jitter;		/* system RMS jitter (s) */

/*
 * Huff-n'-puff filter variables
 */
static double *sys_huffpuff;	/* huff-n'-puff filter */
static int sys_hufflen;		/* huff-n'-puff filter stages */
static int sys_huffptr;		/* huff-n'-puff filter pointer */
static double sys_mindly;	/* huff-n'-puff filter min delay */

#if defined(KERNEL_PLL)
/* Emacs cc-mode goes nuts if we split the next line... */
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    MOD_STATUS | MOD_TIMECONST)
#ifdef SIGSYS
static void pll_trap P((int));	/* configuration trap */
static struct sigaction sigsys;	/* current sigaction status */
static struct sigaction newsigsys; /* new sigaction status */
static sigjmp_buf env;		/* environment var. for pll_trap() */
#endif /* SIGSYS */
#endif /* KERNEL_PLL */

/*
 * init_loopfilter - initialize loop filter data
 */
void
init_loopfilter(void)
{
        /*
         * Initialize state variables. Initially, we expect no drift
         * file, so set the state to S_NSET.
         */
        rstclock(S_NSET, current_time, 0);
}

/*
 * local_clock - the NTP logical clock loop filter. Returns 1 if the
 * clock was stepped, 0 if it was slewed and -1 if it is hopeless.
 */
int
local_clock(
        struct peer *peer,	/* synch source peer structure */
        double fp_offset,	/* clock offset (s) */
        double epsil		/* jitter (square s*s) */
        )
{
        double mu;		/* interval since last update (s) */
        double oerror;		/* previous error estimate */
        double flladj;		/* FLL frequency adjustment (s/s) */
        double plladj;		/* PLL frequency adjustment (s/s) */
        double clock_frequency;	/* clock frequency adjustment (s/s) */
        double dtemp, etemp;	/* double temps */
        int retval;		/* return value */
        int i;

        /*
         * If the loop is opened, monitor and record the offsets
         * anyway in order to determine the open-loop response.
         */
#ifdef DEBUG
        if (debug)
                printf(
                    "local_clock: assocID %d off %.6f jit %.6f sta %d\n",
                    peer->associd, fp_offset, SQRT(epsil), state);
#endif
        if (!ntp_enable) {
                record_loop_stats(fp_offset, drift_comp, SQRT(epsil),
                    clock_stability, sys_poll);
                return (0);
        }

        /*
         * If the clock is way off, a panic is declared. The clock_panic
         * threshold defaults to 1000 s; if set to zero, the panic will
         * never occur. The allow_panic flag defaults to FALSE, so the
         * first panic will exit. It can be set TRUE by a command line
         * option, in which case the clock will be set anyway and time
         * marches on. But, allow_panic will be set FALSE when the
         * update is within the step range, so subsequent panics will
         * exit.
         */
        if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
            !allow_panic) {
                msyslog(LOG_ERR,
                    "time correction of %.0f seconds exceeds sanity limit (%.0f); set clock manually to the correct UTC time.",
                    fp_offset, clock_panic);
                return (-1);
        }

        /*
         * If simulating ntpdate, set the clock directly, rather than
         * using the discipline. The clock_max defines the step
         * threshold, above which the clock will be stepped instead of
         * slewed. The value defaults to 128 ms, but can be set to even
         * unreasonable values. If set to zero, the clock will never be
         * stepped.
         *
         * Note that if ntpdate is active, the terminal does not detach,
         * so the termination comments print directly to the console.
         */
        if (mode_ntpdate) {
                if (allow_step && fabs(fp_offset) > clock_max &&
                    clock_max > 0) {
                        step_systime(fp_offset);
                        NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
                            msyslog(LOG_NOTICE, "time reset %.6f s",
                            fp_offset);
                        printf("ntpd: time reset %.6fs\n", fp_offset);
                } else {
                        adj_systime(fp_offset);
                        NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
                            msyslog(LOG_NOTICE, "time slew %.6f s",
                            fp_offset);
                        printf("ntpd: time slew %.6fs\n", fp_offset);
                }
                record_loop_stats(fp_offset, drift_comp, SQRT(epsil),
                    clock_stability, sys_poll);
                exit (0);
        }

        /*
         * If the clock has never been set, set it and initialize the
         * discipline parameters. We then switch to frequency mode to
         * speed the initial convergence process. If lucky, after an
         * hour the ntp.drift file is created and initialized and we
         * don't get here again.
         */
        if (state == S_NSET) {
                step_systime(fp_offset);
                NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
                    msyslog(LOG_NOTICE, "time set %.6f s", fp_offset);
                rstclock(S_FREQ, peer->epoch, 0);
                return (1);
        }

        /*
         * Update the jitter estimate.
         */
        oerror = sys_jitter;
        dtemp = SQUARE(sys_jitter);
        sys_jitter = SQRT(dtemp + (epsil - dtemp) / CLOCK_AVG);

        /*
         * The huff-n'-puff filter finds the lowest delay in the recent
         * interval. This is used to correct the offset by one-half the
         * difference between the sample delay and minimum delay. This
         * is most effective if the delays are highly asymmetric,
         * clockhopping is avoided and the clock frequency wander is
         * relatively small.
         */
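        /*
         * Concretely, the offset is nudged toward zero by
         * (peer->delay - sys_mindly) / 2, i.e. half the excess of the
         * current round-trip delay over the recent minimum, with the
         * sign chosen opposite to that of the offset.
         */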
        if (sys_huffpuff != NULL) {
                if (peer->delay < sys_huffpuff[sys_huffptr])
                        sys_huffpuff[sys_huffptr] = peer->delay;
                if (peer->delay < sys_mindly)
                        sys_mindly = peer->delay;
                if (fp_offset > 0)
                        dtemp = -(peer->delay - sys_mindly) / 2;
                else
                        dtemp = (peer->delay - sys_mindly) / 2;
                fp_offset += dtemp;
#ifdef DEBUG
                if (debug)
                        printf(
                            "local_clock: size %d mindly %.6f huffpuff %.6f\n",
                            sys_hufflen, sys_mindly, dtemp);
#endif
        }

        /*
         * Clock state machine transition function. This is where the
         * action is and defines how the system reacts to large phase
         * and frequency errors. There are two main regimes: when the
         * offset exceeds the step threshold and when it does not.
         * However, if the step threshold is set to zero, a step will
         * never occur. See the instruction manual for details on how
         * these actions interact with the command line options.
         */
        retval = 0;
        if (sys_poll > peer->maxpoll)
                sys_poll = peer->maxpoll;
        else if (sys_poll < peer->minpoll)
                sys_poll = peer->minpoll;
        clock_frequency = flladj = plladj = 0;
        mu = peer->epoch - last_time;
        if (fabs(fp_offset) > clock_max && clock_max > 0) {
                switch (state) {

                /*
                 * In S_TSET state the time has been set at the last
                 * valid update and the offset at that time set to zero.
                 * If following that we cruise outside the capture
                 * range, assume a really bad frequency error and switch
                 * to S_FREQ state.
                 */
                case S_TSET:
                        state = S_FREQ;
                        break;

                /*
                 * In S_SYNC state we ignore outliers. At the first
                 * outlier after the stepout threshold, switch to S_SPIK
                 * state.
                 */
                case S_SYNC:
                        if (mu < clock_minstep)
                                return (0);
                        state = S_SPIK;
                        return (0);

                /*
                 * In S_FREQ state we ignore outliers. At the first
                 * outlier after 900 s, compute the apparent phase and
                 * frequency correction.
                 */
                case S_FREQ:
                        if (mu < clock_minstep)
                                return (0);
                        /* fall through to S_SPIK */

                /*
                 * In S_SPIK state a large correction is necessary.
                 * Since the outlier may be due to a large frequency
                 * error, compute the apparent frequency correction.
                 */
                case S_SPIK:
                        clock_frequency = (fp_offset - clock_offset) /
                            mu;
                        /* fall through to default */

                /*
                 * We get here directly in S_FSET state and indirectly
                 * from S_FREQ and S_SPIK states. The clock is either
                 * reset or shaken, but never stirred.
                 */
                default:
                        if (allow_step) {
                                step_systime(fp_offset);
                                NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
                                    msyslog(LOG_NOTICE, "time reset %.6f s",
                                    fp_offset);
                                rstclock(S_TSET, peer->epoch, 0);
                                retval = 1;
                        } else {
                                NLOG(NLOG_SYNCEVENT|NLOG_SYSEVENT)
                                    msyslog(LOG_NOTICE, "time slew %.6f s",
                                    fp_offset);
                                rstclock(S_FREQ, peer->epoch,
                                    fp_offset);
                        }
                        break;
                }
        } else {
                switch (state) {

                /*
                 * In S_FSET state this is the first update. Adjust the
                 * phase, but don't adjust the frequency until the next
                 * update.
                 */
                case S_FSET:
                        rstclock(S_TSET, peer->epoch, fp_offset);
                        break;

                /*
                 * In S_FREQ state ignore updates until the stepout
                 * threshold. After that, correct the phase and
                 * frequency and switch to S_SYNC state.
                 */
                case S_FREQ:
                        if (mu < clock_minstep)
                                return (0);
                        clock_frequency = (fp_offset - clock_offset) /
                            mu;
                        rstclock(S_SYNC, peer->epoch, fp_offset);
                        break;

                /*
                 * Either the clock has just been set or the previous
                 * update was a spike and ignored. Since this update is
                 * not an outlier, fold the tent and resume life.
                 */
                case S_TSET:
                case S_SPIK:
                        state = S_SYNC;
                        /* fall through to default */

                /*
                 * We come here in the normal case for linear phase and
                 * frequency adjustments. If the offset differs from the
                 * last offset by more than CLOCK_SGATE times the
                 * previous jitter estimate and the interval since the
                 * last update is less than twice the poll interval,
                 * consider the update a popcorn spike and ignore it.
                 */
                default:
                        allow_panic = FALSE;
                        if (fabs(fp_offset - last_offset) >
                            CLOCK_SGATE * oerror && mu <
                            ULOGTOD(sys_poll + 1)) {
#ifdef DEBUG
                                if (debug)
                                        printf(
                                            "local_clock: popcorn %.6f %.6f\n",
                                            fabs(fp_offset -
                                            last_offset), CLOCK_SGATE *
                                            oerror);
#endif
                                last_offset = fp_offset;
                                return (0);
                        }

                        /*
                         * Compute the FLL and PLL frequency adjustments
                         * conditioned on intricate weighting factors.
                         * The gain factors depend on the poll interval
                         * and Allan intercept. For the FLL, the
                         * averaging interval is clamped to a minimum of
                         * 1024 s and the gain increased in stages from
                         * zero for poll intervals below half the Allan
                         * intercept to unity above twice the Allan
                         * intercept. For the PLL, the averaging
                         * interval is clamped not to exceed the poll
                         * interval. No gain factor is necessary, since
                         * the frequency steering above the Allan
                         * intercept is negligible. Particularly for the
                         * PLL, these measures allow oversampling, but
                         * not undersampling and ensure stability even
                         * when the rules of fair engagement are broken.
                         */
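                        /*
                         * In summary, the code below computes
                         *   flladj = (offset - clock_offset) * fll[i] /
                         *            (max(mu, 2^allan_xpt) * CLOCK_FLL)
                         *   plladj = offset * min(mu, 2^sys_poll) /
                         *            (2^(SHIFT_PLL + 2 + sys_poll))^2
                         * with i = sys_poll - allan_xpt + 4 clamped to
                         * the range [0, 6].
                         */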
                        i = sys_poll - allan_xpt + 4;
                        if (i < 0)
                                i = 0;
                        else if (i > 6)
                                i = 6;
                        etemp = fll[i];
                        dtemp = max(mu, ULOGTOD(allan_xpt));
                        flladj = (fp_offset - clock_offset) * etemp /
                            (dtemp * CLOCK_FLL);
                        dtemp = ULOGTOD(SHIFT_PLL + 2 + sys_poll);
                        etemp = min(mu, ULOGTOD(sys_poll));
                        plladj = fp_offset * etemp / (dtemp * dtemp);
                        last_time = peer->epoch;
                        last_offset = clock_offset = fp_offset;
                        break;
                }
        }

#if defined(KERNEL_PLL)
        /*
         * This code segment works when clock adjustments are made using
         * precision time kernel support and the ntp_adjtime() system
         * call. This support is available in Solaris 2.6 and later,
         * Digital Unix 4.0 and later, FreeBSD, Linux and specially
         * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
         * DECstation 5000/240 and Alpha AXP, additional kernel
         * modifications provide a true microsecond clock and nanosecond
         * clock, respectively.
         */
        if (pll_control && kern_enable) {

                /*
                 * We initialize the structure for the ntp_adjtime()
                 * system call. We have to convert everything to
                 * microseconds or nanoseconds first. Do not update the
                 * system variables if the ext_enable flag is set. In
                 * this case, the external clock driver will update the
                 * variables, which will be read later by the local
                 * clock driver. Afterwards, remember the time and
                 * frequency offsets for jitter and stability values and
                 * to update the drift file.
                 */
                memset(&ntv, 0, sizeof(ntv));
                if (ext_enable) {
                        ntv.modes = MOD_STATUS;
                } else {
                        ntv.modes = MOD_BITS;
                        if (clock_offset < 0)
                                dtemp = -.5;
                        else
                                dtemp = .5;
                        if (pll_nano) {
                                ntv.offset = (int32)(clock_offset *
                                    1e9 + dtemp);
                                ntv.constant = sys_poll;
                        } else {
                                ntv.offset = (int32)(clock_offset *
                                    1e6 + dtemp);
                                ntv.constant = sys_poll - 4;
                        }
                        if (clock_frequency != 0) {
                                ntv.modes |= MOD_FREQUENCY;
                                ntv.freq = (int32)((clock_frequency +
                                    drift_comp) * 65536e6);
                        }
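                        /*
                         * ntv.freq is expressed in the kernel's scaled
                         * units: parts per million with a 16-bit
                         * fraction, hence the factor 65536e6 to convert
                         * from s/s.
                         */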
                        ntv.esterror = (u_int32)(sys_jitter * 1e6);
                        ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                            sys_rootdispersion) * 1e6);
                        ntv.status = STA_PLL;

                        /*
                         * Set the leap bits in the status word.
                         */
                        if (sys_leap == LEAP_NOTINSYNC) {
                                ntv.status |= STA_UNSYNC;
                        } else if (calleapwhen(sys_reftime.l_ui) <
                            CLOCK_DAY) {
                                if (sys_leap & LEAP_ADDSECOND)
                                        ntv.status |= STA_INS;
                                else if (sys_leap & LEAP_DELSECOND)
                                        ntv.status |= STA_DEL;
                        }

                        /*
                         * Switch to FLL mode if the poll interval is
                         * greater than NTP_MAXDPOLL, so that the kernel
                         * loop behaves as the daemon loop; viz.,
                         * selects the FLL when necessary, etc. For
                         * legacy only.
                         */
                        if (sys_poll > NTP_MAXDPOLL)
                                ntv.status |= STA_FLL;

                        /*
                         * If the PPS signal is up and enabled, light
                         * the frequency bit. If the PPS driver is
                         * working, light the phase bit as well. If not,
                         * douse the lights, since somebody else may
                         * have left the switch on.
                         */
                        if (pps_enable && pll_status & STA_PPSSIGNAL) {
                                ntv.status |= STA_PPSFREQ;
                                if (pps_stratum < STRATUM_UNSPEC)
                                        ntv.status |= STA_PPSTIME;
                        } else {
                                ntv.status &= ~(STA_PPSFREQ |
                                    STA_PPSTIME);
                        }
                }

                /*
                 * Pass the stuff to the kernel. If it squeals, turn off
                 * the pigs. In any case, fetch the kernel offset and
                 * frequency and pretend we did it here.
                 */
                if (ntp_adjtime(&ntv) == TIME_ERROR) {
                        if (ntv.status != pll_status)
                                msyslog(LOG_ERR,
                                    "kernel time discipline status change %x",
                                    ntv.status);
                        ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
                }
                pll_status = ntv.status;
                if (pll_nano)
                        clock_offset = ntv.offset / 1e9;
                else
                        clock_offset = ntv.offset / 1e6;
                clock_frequency = ntv.freq / 65536e6 - drift_comp;
                flladj = plladj = 0;

                /*
                 * If the kernel PPS is lit, monitor its performance.
                 */
                if (ntv.status & STA_PPSTIME) {
                        pps_control = current_time;
                        if (pll_nano)
                                sys_jitter = ntv.jitter / 1e9;
                        else
                                sys_jitter = ntv.jitter / 1e6;
                }
        }
#endif /* KERNEL_PLL */

        /*
         * Adjust the clock frequency and calculate the stability. If
         * kernel support is available, we use the results of the kernel
         * discipline instead of the PLL/FLL discipline. In this case,
         * drift_comp is a sham and used only for updating the drift
         * file and for billboard eye candy.
         */
        etemp = clock_frequency + flladj + plladj;
        drift_comp += etemp;
        if (drift_comp > NTP_MAXFREQ)
                drift_comp = NTP_MAXFREQ;
        else if (drift_comp <= -NTP_MAXFREQ)
                drift_comp = -NTP_MAXFREQ;
        dtemp = SQUARE(clock_stability);
        etemp = SQUARE(etemp) - dtemp;
        clock_stability = SQRT(dtemp + etemp / CLOCK_AVG);
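        /*
         * clock_stability is therefore an exponential average (time
         * constant CLOCK_AVG) of the RMS frequency change applied at
         * each update.
         */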

        /*
         * In SYNC state, adjust the poll interval. The trick here is to
         * compare the apparent frequency change induced by the system
         * jitter over the poll interval, or fritter, to the frequency
         * stability. If the fritter is greater than the stability,
         * phase noise predominates and the averaging interval is
         * increased; otherwise, it is decreased. A bit of hysteresis
         * helps calm the dance. Works best using burst mode.
         */
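        /*
         * Concretely: when phase noise dominates (and the offset is
         * within CLOCK_PGATE times the jitter), tc_counter accumulates
         * by sys_poll per update and the poll interval is doubled once
         * it reaches CLOCK_LIMIT; otherwise it drains twice as fast and
         * the interval is halved at -CLOCK_LIMIT.
         */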
        if (state == S_SYNC) {
                if (sys_jitter / ULOGTOD(sys_poll) > clock_stability &&
                    fabs(clock_offset) < CLOCK_PGATE * sys_jitter) {
                        tc_counter += sys_poll;
                        if (tc_counter > CLOCK_LIMIT) {
                                tc_counter = CLOCK_LIMIT;
                                if (sys_poll < peer->maxpoll) {
                                        tc_counter = 0;
                                        sys_poll++;
                                }
                        }
                } else {
                        tc_counter -= sys_poll << 1;
                        if (tc_counter < -CLOCK_LIMIT) {
                                tc_counter = -CLOCK_LIMIT;
                                if (sys_poll > peer->minpoll) {
                                        tc_counter = 0;
                                        sys_poll--;
                                }
                        }
                }
        }

        /*
         * Update the system time variables.
         */
        dtemp = peer->disp + sys_jitter;
        if ((peer->flags & FLAG_REFCLOCK) == 0 && dtemp < MINDISPERSE)
                dtemp = MINDISPERSE;
        sys_rootdispersion = peer->rootdispersion + dtemp;
        record_loop_stats(last_offset, drift_comp, sys_jitter,
            clock_stability, sys_poll);
#ifdef DEBUG
        if (debug)
                printf(
                    "local_clock: mu %.0f noi %.3f stb %.3f pol %d cnt %d\n",
                    mu, sys_jitter * 1e6, clock_stability * 1e6, sys_poll,
                    tc_counter);
#endif /* DEBUG */
        return (retval);
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 */
void
adj_host_clock(
        void
        )
{
        double adjustment;
        int i;

        /*
         * Update the dispersion since the last update. In contrast to
         * NTPv3, NTPv4 does not declare unsynchronized after one day,
         * since the dispersion check serves this function. Also,
         * since the poll interval can exceed one day, the old test
         * would be counterproductive. Note we do this even with
         * external clocks, since the clock driver will recompute the
         * maximum error and the local clock driver will pick it up and
         * pass to the common refclock routines. Very elegant.
         */
        sys_rootdispersion += clock_phi;

        /*
         * Declare PPS kernel unsync if the pps signal has not been
         * heard for a few minutes.
         */
        if (pps_control && current_time - pps_control > PPS_MAXAGE) {
                if (pps_control)
                        NLOG(NLOG_SYSEVENT) /* conditional if clause */
                            msyslog(LOG_INFO, "pps sync disabled");
                pps_control = 0;
        }
        if (!ntp_enable)
                return;

        /*
         * If the phase-lock loop is implemented in the kernel, we
         * have no business going further.
         */
        if (pll_control && kern_enable)
                return;

        /*
         * Intricate wrinkle for legacy only. If the local clock driver
         * is in use and selected for synchronization, somebody else may
         * tinker with the adjtime() syscall. If this is the case, the
         * driver is marked prefer and we have to avoid calling
         * adjtime(), since that may truncate the other guy's requests.
         */
        if (sys_peer != 0) {
                if (sys_peer->refclktype == REFCLK_LOCALCLOCK &&
                    sys_peer->flags & FLAG_PREFER)
                        return;
        }

        /*
         * This ugly bit of business is necessary in order to move the
         * pole frequency higher in FLL mode. This is necessary for loop
         * stability.
         */
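        /*
         * Each second the residual clock_offset decays by the factor
         * 1 / (pll[i] * 2^(SHIFT_PLL + sys_poll)), and the per-second
         * slew handed to adj_systime() is that decrement plus the
         * frequency correction drift_comp.
         */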
        i = sys_poll - allan_xpt + 4;
        if (i < 0)
                i = 0;
        else if (i > 6)
                i = 6;
        adjustment = clock_offset / (pll[i] * ULOGTOD(SHIFT_PLL +
            sys_poll));
        clock_offset -= adjustment;
        adj_systime(adjustment + drift_comp);
}


/*
 * Clock state machine. Enter new state and set state variables.
 */
static void
rstclock(
        int trans,		/* new state */
        double epoch,		/* last time */
        double offset		/* last offset */
        )
{
        tc_counter = 0;
        sys_poll = NTP_MINPOLL;
        state = trans;
        last_time = epoch;
        last_offset = clock_offset = offset;
}


/*
 * huff-n'-puff filter
 */
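/*
 * Each call retires the oldest delay bin (resetting its slot to a
 * large sentinel) and recomputes sys_mindly as the minimum delay over
 * all bins.
 */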
void
huffpuff()
{
        int i;

        if (sys_huffpuff == NULL)
                return;
        sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
        sys_huffpuff[sys_huffptr] = 1e9;
        sys_mindly = 1e9;
        for (i = 0; i < sys_hufflen; i++) {
                if (sys_huffpuff[i] < sys_mindly)
                        sys_mindly = sys_huffpuff[i];
        }
}


/*
 * loop_config - configure the loop filter
 */
void
loop_config(
        int item,
        double freq
        )
{
        int i;

        switch (item) {

        case LOOP_DRIFTINIT:

#ifdef KERNEL_PLL
                /*
                 * Assume the kernel supports the ntp_adjtime() syscall.
                 * If that syscall works, initialize the kernel
                 * variables. Otherwise, continue leaving no harm
                 * behind. While at it, ask to set nanosecond mode. If
                 * the kernel agrees, rejoice; otherwise, it does only
                 * microseconds.
                 */
                pll_control = 1;
                memset(&ntv, 0, sizeof(ntv));
#ifdef STA_NANO
                ntv.modes = MOD_BITS | MOD_NANO;
#else
                ntv.modes = MOD_BITS;
#endif /* STA_NANO */
                ntv.maxerror = MAXDISPERSE;
                ntv.esterror = MAXDISPERSE;
                ntv.status = STA_UNSYNC;
#ifdef SIGSYS
                /*
                 * Use sigsetjmp() to save state and then call
                 * ntp_adjtime(); if it fails, then siglongjmp() is used
                 * to return control.
                 */
                newsigsys.sa_handler = pll_trap;
                newsigsys.sa_flags = 0;
                if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
                        msyslog(LOG_ERR,
                            "sigaction() fails to save SIGSYS trap: %m");
                        pll_control = 0;
                }
                if (sigsetjmp(env, 1) == 0)
                        ntp_adjtime(&ntv);
                if ((sigaction(SIGSYS, &sigsys,
                    (struct sigaction *)NULL))) {
                        msyslog(LOG_ERR,
                            "sigaction() fails to restore SIGSYS trap: %m");
                        pll_control = 0;
                }
#else /* SIGSYS */
                ntp_adjtime(&ntv);
#endif /* SIGSYS */
                pll_status = ntv.status;
                if (pll_control) {
#ifdef STA_NANO
                        if (pll_status & STA_NANO)
                                pll_nano = 1;
                        if (pll_status & STA_CLK)
                                ext_enable = 1;
#endif /* STA_NANO */
                        msyslog(LOG_NOTICE,
                            "kernel time discipline status %04x",
                            pll_status);
                }
#endif /* KERNEL_PLL */
                break;

        case LOOP_DRIFTCOMP:

                /*
                 * Initialize the kernel frequency and clamp to a
                 * reasonable value. Also set the initial state to
                 * S_FSET to indicate the frequency has been
                 * initialized from the previously saved drift file.
                 */
                rstclock(S_FSET, current_time, 0);
                drift_comp = freq;
                if (drift_comp > NTP_MAXFREQ)
                        drift_comp = NTP_MAXFREQ;
                if (drift_comp < -NTP_MAXFREQ)
                        drift_comp = -NTP_MAXFREQ;

#ifdef KERNEL_PLL
                /*
                 * Sanity check. If the kernel is enabled, load the
                 * frequency and light up the loop. If not, set the
                 * kernel frequency to zero and leave the loop dark. In
                 * either case set the time to zero to cancel any
                 * previous nonsense.
                 */
                if (pll_control) {
                        memset((char *)&ntv, 0, sizeof(ntv));
                        ntv.modes = MOD_OFFSET | MOD_FREQUENCY;
                        if (kern_enable) {
                                ntv.modes |= MOD_STATUS;
                                ntv.status = STA_PLL;
                                ntv.freq = (int32)(drift_comp *
                                    65536e6);
                        }
                        (void)ntp_adjtime(&ntv);
                }
#endif /* KERNEL_PLL */
                break;

        /*
         * Special tinker variables for Ulrich Windl. Very dangerous.
         */
        case LOOP_MAX:			/* step threshold */
                clock_max = freq;
                break;

        case LOOP_PANIC:		/* panic exit threshold */
                clock_panic = freq;
                break;

        case LOOP_PHI:			/* dispersion rate */
                clock_phi = freq;
                break;

        case LOOP_MINSTEP:		/* watchdog bark */
                clock_minstep = freq;
                break;

        case LOOP_MINPOLL:		/* ephemeral association poll */
                if (freq < NTP_MINPOLL)
                        freq = NTP_MINPOLL;
                sys_minpoll = (u_char)freq;
                break;

        case LOOP_ALLAN:		/* minimum Allan intercept */
                if (freq < CLOCK_ALLAN)
                        freq = CLOCK_ALLAN;
                allan_xpt = (u_char)freq;
                break;

        case LOOP_HUFFPUFF:		/* huff-n'-puff filter length */
                if (freq < HUFFPUFF)
                        freq = HUFFPUFF;
                sys_hufflen = (int)(freq / HUFFPUFF);
                sys_huffpuff = (double *)emalloc(sizeof(double) *
                    sys_hufflen);
                for (i = 0; i < sys_hufflen; i++)
                        sys_huffpuff[i] = 1e9;
                sys_mindly = 1e9;
                break;
        }
}


#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
        int arg
        )
{
        pll_control = 0;
        siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */