ntp_loopfilter.c revision 344884
1/* 2 * ntp_loopfilter.c - implements the NTP loop filter algorithm 3 * 4 * ATTENTION: Get approval from Dave Mills on all changes to this file! 5 * 6 */ 7#ifdef HAVE_CONFIG_H 8# include <config.h> 9#endif 10 11#ifdef USE_SNPRINTB 12# include <util.h> 13#endif 14#include "ntpd.h" 15#include "ntp_io.h" 16#include "ntp_unixtime.h" 17#include "ntp_stdlib.h" 18 19#include <limits.h> 20#include <stdio.h> 21#include <ctype.h> 22 23#include <signal.h> 24#include <setjmp.h> 25 26#ifdef KERNEL_PLL 27#include "ntp_syscall.h" 28#endif /* KERNEL_PLL */ 29 30/* 31 * This is an implementation of the clock discipline algorithm described 32 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter, 33 * hybrid phase/frequency-lock loop. A number of sanity checks are 34 * included to protect against timewarps, timespikes and general mayhem. 35 * All units are in s and s/s, unless noted otherwise. 36 */ 37#define CLOCK_MAX .128 /* default step threshold (s) */ 38#define CLOCK_MINSTEP 300. /* default stepout threshold (s) */ 39#define CLOCK_PANIC 1000. /* default panic threshold (s) */ 40#define CLOCK_PHI 15e-6 /* max frequency error (s/s) */ 41#define CLOCK_PLL 16. /* PLL loop gain (log2) */ 42#define CLOCK_AVG 8. /* parameter averaging constant */ 43#define CLOCK_FLL .25 /* FLL loop gain */ 44#define CLOCK_FLOOR .0005 /* startup offset floor (s) */ 45#define CLOCK_ALLAN 11 /* Allan intercept (log2 s) */ 46#define CLOCK_LIMIT 30 /* poll-adjust threshold */ 47#define CLOCK_PGATE 4. /* poll-adjust gate */ 48#define PPS_MAXAGE 120 /* kernel pps signal timeout (s) */ 49#define FREQTOD(x) ((x) / 65536e6) /* NTP to double */ 50#define DTOFREQ(x) ((int32)((x) * 65536e6)) /* double to NTP */ 51 52/* 53 * Clock discipline state machine. This is used to control the 54 * synchronization behavior during initialization and following a 55 * timewarp. 56 * 57 * State < step > step Comments 58 * ======================================================== 59 * NSET FREQ step, FREQ freq not set 60 * 61 * FSET SYNC step, SYNC freq set 62 * 63 * FREQ if (mu < 900) if (mu < 900) set freq direct 64 * ignore ignore 65 * else else 66 * freq, SYNC freq, step, SYNC 67 * 68 * SYNC SYNC SPIK, ignore adjust phase/freq 69 * 70 * SPIK SYNC if (mu < 900) adjust phase/freq 71 * ignore 72 * step, SYNC 73 */ 74/* 75 * Kernel PLL/PPS state machine. This is used with the kernel PLL 76 * modifications described in the documentation. 77 * 78 * If kernel support for the ntp_adjtime() system call is available, the 79 * ntp_control flag is set. The ntp_enable and kern_enable flags can be 80 * set at configuration time or run time using ntpdc. If ntp_enable is 81 * false, the discipline loop is unlocked and no corrections of any kind 82 * are made. If both ntp_control and kern_enable are set, the kernel 83 * support is used as described above; if false, the kernel is bypassed 84 * entirely and the daemon discipline used instead. 85 * 86 * There have been three versions of the kernel discipline code. The 87 * first (microkernel) now in Solaris discipilnes the microseconds. The 88 * second and third (nanokernel) disciplines the clock in nanoseconds. 89 * These versions are identifed if the symbol STA_PLL is present in the 90 * header file /usr/include/sys/timex.h. The third and current version 91 * includes TAI offset and is identified by the symbol NTP_API with 92 * value 4. 93 * 94 * Each PPS time/frequency discipline can be enabled by the atom driver 95 * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are 96 * set in the kernel status word; otherwise, these bits are cleared. 97 * These bits are also cleard if the kernel reports an error. 98 * 99 * If an external clock is present, the clock driver sets STA_CLK in the 100 * status word. When the local clock driver sees this bit, it updates 101 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit 102 * set to zero, in which case the system clock is not adjusted. This is 103 * also a signal for the external clock driver to discipline the system 104 * clock. Unless specified otherwise, all times are in seconds. 105 */ 106/* 107 * Program variables that can be tinkered. 108 */ 109double clock_max_back = CLOCK_MAX; /* step threshold */ 110double clock_max_fwd = CLOCK_MAX; /* step threshold */ 111double clock_minstep = CLOCK_MINSTEP; /* stepout threshold */ 112double clock_panic = CLOCK_PANIC; /* panic threshold */ 113double clock_phi = CLOCK_PHI; /* dispersion rate (s/s) */ 114u_char allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */ 115 116/* 117 * Program variables 118 */ 119static double clock_offset; /* offset */ 120double clock_jitter; /* offset jitter */ 121double drift_comp; /* frequency (s/s) */ 122static double init_drift_comp; /* initial frequency (PPM) */ 123double clock_stability; /* frequency stability (wander) (s/s) */ 124double clock_codec; /* audio codec frequency (samples/s) */ 125static u_long clock_epoch; /* last update */ 126u_int sys_tai; /* TAI offset from UTC */ 127static int loop_started; /* TRUE after LOOP_DRIFTINIT */ 128static void rstclock (int, double); /* transition function */ 129static double direct_freq(double); /* direct set frequency */ 130static void set_freq(double); /* set frequency */ 131#ifndef PATH_MAX 132# define PATH_MAX MAX_PATH 133#endif 134static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */ 135static char *this_file = NULL; 136 137#ifdef KERNEL_PLL 138static struct timex ntv; /* ntp_adjtime() parameters */ 139int pll_status; /* last kernel status bits */ 140#if defined(STA_NANO) && NTP_API == 4 141static u_int loop_tai; /* last TAI offset */ 142#endif /* STA_NANO */ 143static void start_kern_loop(void); 144static void stop_kern_loop(void); 145#endif /* KERNEL_PLL */ 146 147/* 148 * Clock state machine control flags 149 */ 150int ntp_enable = TRUE; /* clock discipline enabled */ 151int pll_control; /* kernel support available */ 152int kern_enable = TRUE; /* kernel support enabled */ 153int hardpps_enable; /* kernel PPS discipline enabled */ 154int ext_enable; /* external clock enabled */ 155int pps_stratum; /* pps stratum */ 156int kernel_status; /* from ntp_adjtime */ 157int force_step_once = FALSE; /* always step time once at startup (-G) */ 158int mode_ntpdate = FALSE; /* exit on first clock set (-q) */ 159int freq_cnt; /* initial frequency clamp */ 160int freq_set; /* initial set frequency switch */ 161 162/* 163 * Clock state machine variables 164 */ 165int state = 0; /* clock discipline state */ 166u_char sys_poll; /* time constant/poll (log2 s) */ 167int tc_counter; /* jiggle counter */ 168double last_offset; /* last offset (s) */ 169 170/* 171 * Huff-n'-puff filter variables 172 */ 173static double *sys_huffpuff; /* huff-n'-puff filter */ 174static int sys_hufflen; /* huff-n'-puff filter stages */ 175static int sys_huffptr; /* huff-n'-puff filter pointer */ 176static double sys_mindly; /* huff-n'-puff filter min delay */ 177 178#if defined(KERNEL_PLL) 179/* Emacs cc-mode goes nuts if we split the next line... */ 180#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \ 181 MOD_STATUS | MOD_TIMECONST) 182#ifdef SIGSYS 183static void pll_trap (int); /* configuration trap */ 184static struct sigaction sigsys; /* current sigaction status */ 185static struct sigaction newsigsys; /* new sigaction status */ 186static sigjmp_buf env; /* environment var. for pll_trap() */ 187#endif /* SIGSYS */ 188#endif /* KERNEL_PLL */ 189 190static void 191sync_status(const char *what, int ostatus, int nstatus) 192{ 193 char obuf[256], nbuf[256], tbuf[1024]; 194#if defined(USE_SNPRINTB) && defined (STA_FMT) 195 snprintb(obuf, sizeof(obuf), STA_FMT, ostatus); 196 snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus); 197#else 198 snprintf(obuf, sizeof(obuf), "%04x", ostatus); 199 snprintf(nbuf, sizeof(nbuf), "%04x", nstatus); 200#endif 201 snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf); 202 report_event(EVNT_KERN, NULL, tbuf); 203} 204 205/* 206 * file_name - return pointer to non-relative portion of this C file pathname 207 */ 208static char *file_name(void) 209{ 210 if (this_file == NULL) { 211 (void)strncpy(relative_path, __FILE__, PATH_MAX); 212 for (this_file=relative_path; 213 *this_file && ! isalnum((unsigned char)*this_file); 214 this_file++) ; 215 } 216 return this_file; 217} 218 219/* 220 * init_loopfilter - initialize loop filter data 221 */ 222void 223init_loopfilter(void) 224{ 225 /* 226 * Initialize state variables. 227 */ 228 sys_poll = ntp_minpoll; 229 clock_jitter = LOGTOD(sys_precision); 230 freq_cnt = (int)clock_minstep; 231} 232 233#ifdef KERNEL_PLL 234/* 235 * ntp_adjtime_error_handler - process errors from ntp_adjtime 236 */ 237static void 238ntp_adjtime_error_handler( 239 const char *caller, /* name of calling function */ 240 struct timex *ptimex, /* pointer to struct timex */ 241 int ret, /* return value from ntp_adjtime */ 242 int saved_errno, /* value of errno when ntp_adjtime returned */ 243 int pps_call, /* ntp_adjtime call was PPS-related */ 244 int tai_call, /* ntp_adjtime call was TAI-related */ 245 int line /* line number of ntp_adjtime call */ 246 ) 247{ 248 char des[1024] = ""; /* Decoded Error Status */ 249 char *dbp, *ebp; 250 251 dbp = des; 252 ebp = dbp + sizeof(des); 253 254 switch (ret) { 255 case -1: 256 switch (saved_errno) { 257 case EFAULT: 258 msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx", 259 caller, file_name(), line, 260 (long)((void *)ptimex) 261 ); 262 break; 263 case EINVAL: 264 msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld", 265 caller, file_name(), line, 266 (long)(ptimex->constant) 267 ); 268 break; 269 case EPERM: 270 if (tai_call) { 271 errno = saved_errno; 272 msyslog(LOG_ERR, 273 "%s: ntp_adjtime(TAI) failed: %m", 274 caller); 275 } 276 errno = saved_errno; 277 msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m", 278 caller, file_name(), line 279 ); 280 break; 281 default: 282 msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call", 283 caller, file_name(), line, 284 saved_errno 285 ); 286 break; 287 } 288 break; 289#ifdef TIME_OK 290 case TIME_OK: /* 0: synchronized, no leap second warning */ 291 /* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */ 292 break; 293#else 294# warning TIME_OK is not defined 295#endif 296#ifdef TIME_INS 297 case TIME_INS: /* 1: positive leap second warning */ 298 msyslog(LOG_INFO, "kernel reports leap second insertion scheduled"); 299 break; 300#else 301# warning TIME_INS is not defined 302#endif 303#ifdef TIME_DEL 304 case TIME_DEL: /* 2: negative leap second warning */ 305 msyslog(LOG_INFO, "kernel reports leap second deletion scheduled"); 306 break; 307#else 308# warning TIME_DEL is not defined 309#endif 310#ifdef TIME_OOP 311 case TIME_OOP: /* 3: leap second in progress */ 312 msyslog(LOG_INFO, "kernel reports leap second in progress"); 313 break; 314#else 315# warning TIME_OOP is not defined 316#endif 317#ifdef TIME_WAIT 318 case TIME_WAIT: /* 4: leap second has occured */ 319 msyslog(LOG_INFO, "kernel reports leap second has occurred"); 320 break; 321#else 322# warning TIME_WAIT is not defined 323#endif 324#ifdef TIME_ERROR 325#if 0 326 327from the reference implementation of ntp_gettime(): 328 329 // Hardware or software error 330 if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) 331 332 /* 333 * PPS signal lost when either time or frequency synchronization 334 * requested 335 */ 336 || (time_status & (STA_PPSFREQ | STA_PPSTIME) 337 && !(time_status & STA_PPSSIGNAL)) 338 339 /* 340 * PPS jitter exceeded when time synchronization requested 341 */ 342 || (time_status & STA_PPSTIME && 343 time_status & STA_PPSJITTER) 344 345 /* 346 * PPS wander exceeded or calibration error when frequency 347 * synchronization requested 348 */ 349 || (time_status & STA_PPSFREQ && 350 time_status & (STA_PPSWANDER | STA_PPSERROR))) 351 return (TIME_ERROR); 352 353or, from ntp_adjtime(): 354 355 if ( (time_status & (STA_UNSYNC | STA_CLOCKERR)) 356 || (time_status & (STA_PPSFREQ | STA_PPSTIME) 357 && !(time_status & STA_PPSSIGNAL)) 358 || (time_status & STA_PPSTIME 359 && time_status & STA_PPSJITTER) 360 || (time_status & STA_PPSFREQ 361 && time_status & (STA_PPSWANDER | STA_PPSERROR)) 362 ) 363 return (TIME_ERROR); 364#endif 365 366 case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */ 367 /* error (see status word) */ 368 369 if (ptimex->status & STA_UNSYNC) 370 xsbprintf(&dbp, ebp, "%sClock Unsynchronized", 371 (*des) ? "; " : ""); 372 373 if (ptimex->status & STA_CLOCKERR) 374 xsbprintf(&dbp, ebp, "%sClock Error", 375 (*des) ? "; " : ""); 376 377 if (!(ptimex->status & STA_PPSSIGNAL) 378 && ptimex->status & STA_PPSFREQ) 379 xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but no PPS", 380 (*des) ? "; " : ""); 381 382 if (!(ptimex->status & STA_PPSSIGNAL) 383 && ptimex->status & STA_PPSTIME) 384 xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but no PPS signal", 385 (*des) ? "; " : ""); 386 387 if ( ptimex->status & STA_PPSTIME 388 && ptimex->status & STA_PPSJITTER) 389 xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but PPS Jitter exceeded", 390 (*des) ? "; " : ""); 391 392 if ( ptimex->status & STA_PPSFREQ 393 && ptimex->status & STA_PPSWANDER) 394 xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but PPS Wander exceeded", 395 (*des) ? "; " : ""); 396 397 if ( ptimex->status & STA_PPSFREQ 398 && ptimex->status & STA_PPSERROR) 399 xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but Calibration error detected", 400 (*des) ? "; " : ""); 401 402 if (pps_call && !(ptimex->status & STA_PPSSIGNAL)) 403 report_event(EVNT_KERN, NULL, 404 "no PPS signal"); 405 DPRINTF(1, ("kernel loop status %#x (%s)\n", 406 ptimex->status, des)); 407 /* 408 * This code may be returned when ntp_adjtime() has just 409 * been called for the first time, quite a while after 410 * startup, when ntpd just starts to discipline the kernel 411 * time. In this case the occurrence of this message 412 * can be pretty confusing. 413 * 414 * HMS: How about a message when we begin kernel processing: 415 * Determining kernel clock state... 416 * so an initial TIME_ERROR message is less confising, 417 * or skipping the first message (ugh), 418 * or ??? 419 * msyslog(LOG_INFO, "kernel reports time synchronization lost"); 420 */ 421 msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s", 422 ptimex->status, des); 423 break; 424#else 425# warning TIME_ERROR is not defined 426#endif 427 default: 428 msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d", 429 caller, file_name(), line, 430 ret, 431 __func__, __LINE__ 432 ); 433 break; 434 } 435 return; 436} 437#endif 438 439/* 440 * local_clock - the NTP logical clock loop filter. 441 * 442 * Return codes: 443 * -1 update ignored: exceeds panic threshold 444 * 0 update ignored: popcorn or exceeds step threshold 445 * 1 clock was slewed 446 * 2 clock was stepped 447 * 448 * LOCKCLOCK: The only thing this routine does is set the 449 * sys_rootdisp variable equal to the peer dispersion. 450 */ 451int 452local_clock( 453 struct peer *peer, /* synch source peer structure */ 454 double fp_offset /* clock offset (s) */ 455 ) 456{ 457 int rval; /* return code */ 458 int osys_poll; /* old system poll */ 459 int ntp_adj_ret; /* returned by ntp_adjtime */ 460 double mu; /* interval since last update */ 461 double clock_frequency; /* clock frequency */ 462 double dtemp, etemp; /* double temps */ 463 char tbuf[80]; /* report buffer */ 464 465 (void)ntp_adj_ret; /* not always used below... */ 466 /* 467 * If the loop is opened or the NIST LOCKCLOCK is in use, 468 * monitor and record the offsets anyway in order to determine 469 * the open-loop response and then go home. 470 */ 471#ifndef LOCKCLOCK 472 if (!ntp_enable) 473#endif /* not LOCKCLOCK */ 474 { 475 record_loop_stats(fp_offset, drift_comp, clock_jitter, 476 clock_stability, sys_poll); 477 return (0); 478 } 479 480#ifndef LOCKCLOCK 481 /* 482 * If the clock is way off, panic is declared. The clock_panic 483 * defaults to 1000 s; if set to zero, the panic will never 484 * occur. The allow_panic defaults to FALSE, so the first panic 485 * will exit. It can be set TRUE by a command line option, in 486 * which case the clock will be set anyway and time marches on. 487 * But, allow_panic will be set FALSE when the update is less 488 * than the step threshold; so, subsequent panics will exit. 489 */ 490 if (fabs(fp_offset) > clock_panic && clock_panic > 0 && 491 !allow_panic) { 492 snprintf(tbuf, sizeof(tbuf), 493 "%+.0f s; set clock manually within %.0f s.", 494 fp_offset, clock_panic); 495 report_event(EVNT_SYSFAULT, NULL, tbuf); 496 return (-1); 497 } 498 499 allow_panic = FALSE; 500 501 /* 502 * This section simulates ntpdate. If the offset exceeds the 503 * step threshold (128 ms), step the clock to that time and 504 * exit. Otherwise, slew the clock to that time and exit. Note 505 * that the slew will persist and eventually complete beyond the 506 * life of this program. Note that while ntpdate is active, the 507 * terminal does not detach, so the termination message prints 508 * directly to the terminal. 509 */ 510 if (mode_ntpdate) { 511 if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0) 512 || (-fp_offset > clock_max_back && clock_max_back > 0)) { 513 step_systime(fp_offset); 514 msyslog(LOG_NOTICE, "ntpd: time set %+.6f s", 515 fp_offset); 516 printf("ntpd: time set %+.6fs\n", fp_offset); 517 } else { 518 adj_systime(fp_offset); 519 msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s", 520 fp_offset); 521 printf("ntpd: time slew %+.6fs\n", fp_offset); 522 } 523 record_loop_stats(fp_offset, drift_comp, clock_jitter, 524 clock_stability, sys_poll); 525 exit (0); 526 } 527 528 /* 529 * The huff-n'-puff filter finds the lowest delay in the recent 530 * interval. This is used to correct the offset by one-half the 531 * difference between the sample delay and minimum delay. This 532 * is most effective if the delays are highly assymetric and 533 * clockhopping is avoided and the clock frequency wander is 534 * relatively small. 535 */ 536 if (sys_huffpuff != NULL) { 537 if (peer->delay < sys_huffpuff[sys_huffptr]) 538 sys_huffpuff[sys_huffptr] = peer->delay; 539 if (peer->delay < sys_mindly) 540 sys_mindly = peer->delay; 541 if (fp_offset > 0) 542 dtemp = -(peer->delay - sys_mindly) / 2; 543 else 544 dtemp = (peer->delay - sys_mindly) / 2; 545 fp_offset += dtemp; 546 DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n", 547 sys_hufflen, sys_mindly, dtemp)); 548 } 549 550 /* 551 * Clock state machine transition function which defines how the 552 * system reacts to large phase and frequency excursion. There 553 * are two main regimes: when the offset exceeds the step 554 * threshold (128 ms) and when it does not. Under certain 555 * conditions updates are suspended until the stepout theshold 556 * (900 s) is exceeded. See the documentation on how these 557 * thresholds interact with commands and command line options. 558 * 559 * Note the kernel is disabled if step is disabled or greater 560 * than 0.5 s or in ntpdate mode. 561 */ 562 osys_poll = sys_poll; 563 if (sys_poll < peer->minpoll) 564 sys_poll = peer->minpoll; 565 if (sys_poll > peer->maxpoll) 566 sys_poll = peer->maxpoll; 567 mu = current_time - clock_epoch; 568 clock_frequency = drift_comp; 569 rval = 1; 570 if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0) 571 || (-fp_offset > clock_max_back && clock_max_back > 0) 572 || force_step_once ) { 573 if (force_step_once) { 574 force_step_once = FALSE; /* we want this only once after startup */ 575 msyslog(LOG_NOTICE, "Doing intital time step" ); 576 } 577 578 switch (state) { 579 580 /* 581 * In SYNC state we ignore the first outlier and switch 582 * to SPIK state. 583 */ 584 case EVNT_SYNC: 585 snprintf(tbuf, sizeof(tbuf), "%+.6f s", 586 fp_offset); 587 report_event(EVNT_SPIK, NULL, tbuf); 588 state = EVNT_SPIK; 589 return (0); 590 591 /* 592 * In FREQ state we ignore outliers and inlyers. At the 593 * first outlier after the stepout threshold, compute 594 * the apparent frequency correction and step the phase. 595 */ 596 case EVNT_FREQ: 597 if (mu < clock_minstep) 598 return (0); 599 600 clock_frequency = direct_freq(fp_offset); 601 602 /* fall through to EVNT_SPIK */ 603 604 /* 605 * In SPIK state we ignore succeeding outliers until 606 * either an inlyer is found or the stepout threshold is 607 * exceeded. 608 */ 609 case EVNT_SPIK: 610 if (mu < clock_minstep) 611 return (0); 612 613 /* fall through to default */ 614 615 /* 616 * We get here by default in NSET and FSET states and 617 * from above in FREQ or SPIK states. 618 * 619 * In NSET state an initial frequency correction is not 620 * available, usually because the frequency file has not 621 * yet been written. Since the time is outside the step 622 * threshold, the clock is stepped. The frequency will 623 * be set directly following the stepout interval. 624 * 625 * In FSET state the initial frequency has been set from 626 * the frequency file. Since the time is outside the 627 * step threshold, the clock is stepped immediately, 628 * rather than after the stepout interval. Guys get 629 * nervous if it takes 15 minutes to set the clock for 630 * the first time. 631 * 632 * In FREQ and SPIK states the stepout threshold has 633 * expired and the phase is still above the step 634 * threshold. Note that a single spike greater than the 635 * step threshold is always suppressed, even with a 636 * long time constant. 637 */ 638 default: 639 snprintf(tbuf, sizeof(tbuf), "%+.6f s", 640 fp_offset); 641 report_event(EVNT_CLOCKRESET, NULL, tbuf); 642 step_systime(fp_offset); 643 reinit_timer(); 644 tc_counter = 0; 645 clock_jitter = LOGTOD(sys_precision); 646 rval = 2; 647 if (state == EVNT_NSET) { 648 rstclock(EVNT_FREQ, 0); 649 return (rval); 650 } 651 break; 652 } 653 rstclock(EVNT_SYNC, 0); 654 } else { 655 /* 656 * The offset is less than the step threshold. Calculate 657 * the jitter as the exponentially weighted offset 658 * differences. 659 */ 660 etemp = SQUARE(clock_jitter); 661 dtemp = SQUARE(max(fabs(fp_offset - last_offset), 662 LOGTOD(sys_precision))); 663 clock_jitter = SQRT(etemp + (dtemp - etemp) / 664 CLOCK_AVG); 665 switch (state) { 666 667 /* 668 * In NSET state this is the first update received and 669 * the frequency has not been initialized. Adjust the 670 * phase, but do not adjust the frequency until after 671 * the stepout threshold. 672 */ 673 case EVNT_NSET: 674 adj_systime(fp_offset); 675 rstclock(EVNT_FREQ, fp_offset); 676 break; 677 678 /* 679 * In FREQ state ignore updates until the stepout 680 * threshold. After that, compute the new frequency, but 681 * do not adjust the frequency until the holdoff counter 682 * decrements to zero. 683 */ 684 case EVNT_FREQ: 685 if (mu < clock_minstep) 686 return (0); 687 688 clock_frequency = direct_freq(fp_offset); 689 /* fall through */ 690 691 /* 692 * We get here by default in FSET, SPIK and SYNC states. 693 * Here compute the frequency update due to PLL and FLL 694 * contributions. Note, we avoid frequency discipline at 695 * startup until the initial transient has subsided. 696 */ 697 default: 698 if (freq_cnt == 0) { 699 700 /* 701 * The FLL and PLL frequency gain constants 702 * depend on the time constant and Allan 703 * intercept. The PLL is always used, but 704 * becomes ineffective above the Allan intercept 705 * where the FLL becomes effective. 706 */ 707 if (sys_poll >= allan_xpt) 708 clock_frequency += 709 (fp_offset - clock_offset) 710 / ( max(ULOGTOD(sys_poll), mu) 711 * CLOCK_FLL); 712 713 /* 714 * The PLL frequency gain (numerator) depends on 715 * the minimum of the update interval and Allan 716 * intercept. This reduces the PLL gain when the 717 * FLL becomes effective. 718 */ 719 etemp = min(ULOGTOD(allan_xpt), mu); 720 dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll); 721 clock_frequency += 722 fp_offset * etemp / (dtemp * dtemp); 723 } 724 rstclock(EVNT_SYNC, fp_offset); 725 if (fabs(fp_offset) < CLOCK_FLOOR) 726 freq_cnt = 0; 727 break; 728 } 729 } 730 731#ifdef KERNEL_PLL 732 /* 733 * This code segment works when clock adjustments are made using 734 * precision time kernel support and the ntp_adjtime() system 735 * call. This support is available in Solaris 2.6 and later, 736 * Digital Unix 4.0 and later, FreeBSD, Linux and specially 737 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the 738 * DECstation 5000/240 and Alpha AXP, additional kernel 739 * modifications provide a true microsecond clock and nanosecond 740 * clock, respectively. 741 * 742 * Important note: The kernel discipline is used only if the 743 * step threshold is less than 0.5 s, as anything higher can 744 * lead to overflow problems. This might occur if some misguided 745 * lad set the step threshold to something ridiculous. 746 */ 747 if (pll_control && kern_enable && freq_cnt == 0) { 748 749 /* 750 * We initialize the structure for the ntp_adjtime() 751 * system call. We have to convert everything to 752 * microseconds or nanoseconds first. Do not update the 753 * system variables if the ext_enable flag is set. In 754 * this case, the external clock driver will update the 755 * variables, which will be read later by the local 756 * clock driver. Afterwards, remember the time and 757 * frequency offsets for jitter and stability values and 758 * to update the frequency file. 759 */ 760 ZERO(ntv); 761 if (ext_enable) { 762 ntv.modes = MOD_STATUS; 763 } else { 764#ifdef STA_NANO 765 ntv.modes = MOD_BITS | MOD_NANO; 766#else /* STA_NANO */ 767 ntv.modes = MOD_BITS; 768#endif /* STA_NANO */ 769 if (clock_offset < 0) 770 dtemp = -.5; 771 else 772 dtemp = .5; 773#ifdef STA_NANO 774 ntv.offset = (int32)(clock_offset * 1e9 + 775 dtemp); 776 ntv.constant = sys_poll; 777#else /* STA_NANO */ 778 ntv.offset = (int32)(clock_offset * 1e6 + 779 dtemp); 780 ntv.constant = sys_poll - 4; 781#endif /* STA_NANO */ 782 if (ntv.constant < 0) 783 ntv.constant = 0; 784 785 ntv.esterror = (u_int32)(clock_jitter * 1e6); 786 ntv.maxerror = (u_int32)((sys_rootdelay / 2 + 787 sys_rootdisp) * 1e6); 788 ntv.status = STA_PLL; 789 790 /* 791 * Enable/disable the PPS if requested. 792 */ 793 if (hardpps_enable) { 794 ntv.status |= (STA_PPSTIME | STA_PPSFREQ); 795 if (!(pll_status & STA_PPSTIME)) 796 sync_status("PPS enabled", 797 pll_status, 798 ntv.status); 799 } else { 800 ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ); 801 if (pll_status & STA_PPSTIME) 802 sync_status("PPS disabled", 803 pll_status, 804 ntv.status); 805 } 806 if (sys_leap == LEAP_ADDSECOND) 807 ntv.status |= STA_INS; 808 else if (sys_leap == LEAP_DELSECOND) 809 ntv.status |= STA_DEL; 810 } 811 812 /* 813 * Pass the stuff to the kernel. If it squeals, turn off 814 * the pps. In any case, fetch the kernel offset, 815 * frequency and jitter. 816 */ 817 ntp_adj_ret = ntp_adjtime(&ntv); 818 /* 819 * A squeal is a return status < 0, or a state change. 820 */ 821 if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) { 822 kernel_status = ntp_adj_ret; 823 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1); 824 } 825 pll_status = ntv.status; 826#ifdef STA_NANO 827 clock_offset = ntv.offset / 1e9; 828#else /* STA_NANO */ 829 clock_offset = ntv.offset / 1e6; 830#endif /* STA_NANO */ 831 clock_frequency = FREQTOD(ntv.freq); 832 833 /* 834 * If the kernel PPS is lit, monitor its performance. 835 */ 836 if (ntv.status & STA_PPSTIME) { 837#ifdef STA_NANO 838 clock_jitter = ntv.jitter / 1e9; 839#else /* STA_NANO */ 840 clock_jitter = ntv.jitter / 1e6; 841#endif /* STA_NANO */ 842 } 843 844#if defined(STA_NANO) && NTP_API == 4 845 /* 846 * If the TAI changes, update the kernel TAI. 847 */ 848 if (loop_tai != sys_tai) { 849 loop_tai = sys_tai; 850 ntv.modes = MOD_TAI; 851 ntv.constant = sys_tai; 852 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) { 853 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1); 854 } 855 } 856#endif /* STA_NANO */ 857 } 858#endif /* KERNEL_PLL */ 859 860 /* 861 * Clamp the frequency within the tolerance range and calculate 862 * the frequency difference since the last update. 863 */ 864 if (fabs(clock_frequency) > NTP_MAXFREQ) 865 msyslog(LOG_NOTICE, 866 "frequency error %.0f PPM exceeds tolerance %.0f PPM", 867 clock_frequency * 1e6, NTP_MAXFREQ * 1e6); 868 dtemp = SQUARE(clock_frequency - drift_comp); 869 if (clock_frequency > NTP_MAXFREQ) 870 drift_comp = NTP_MAXFREQ; 871 else if (clock_frequency < -NTP_MAXFREQ) 872 drift_comp = -NTP_MAXFREQ; 873 else 874 drift_comp = clock_frequency; 875 876 /* 877 * Calculate the wander as the exponentially weighted RMS 878 * frequency differences. Record the change for the frequency 879 * file update. 880 */ 881 etemp = SQUARE(clock_stability); 882 clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG); 883 884 /* 885 * Here we adjust the time constant by comparing the current 886 * offset with the clock jitter. If the offset is less than the 887 * clock jitter times a constant, then the averaging interval is 888 * increased, otherwise it is decreased. A bit of hysteresis 889 * helps calm the dance. Works best using burst mode. Don't 890 * fiddle with the poll during the startup clamp period. 891 */ 892 if (freq_cnt > 0) { 893 tc_counter = 0; 894 } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) { 895 tc_counter += sys_poll; 896 if (tc_counter > CLOCK_LIMIT) { 897 tc_counter = CLOCK_LIMIT; 898 if (sys_poll < peer->maxpoll) { 899 tc_counter = 0; 900 sys_poll++; 901 } 902 } 903 } else { 904 tc_counter -= sys_poll << 1; 905 if (tc_counter < -CLOCK_LIMIT) { 906 tc_counter = -CLOCK_LIMIT; 907 if (sys_poll > peer->minpoll) { 908 tc_counter = 0; 909 sys_poll--; 910 } 911 } 912 } 913 914 /* 915 * If the time constant has changed, update the poll variables. 916 */ 917 if (osys_poll != sys_poll) 918 poll_update(peer, sys_poll); 919 920 /* 921 * Yibbidy, yibbbidy, yibbidy; that'h all folks. 922 */ 923 record_loop_stats(clock_offset, drift_comp, clock_jitter, 924 clock_stability, sys_poll); 925 DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n", 926 clock_offset, clock_jitter, drift_comp * 1e6, 927 clock_stability * 1e6, sys_poll)); 928 return (rval); 929#endif /* not LOCKCLOCK */ 930} 931 932 933/* 934 * adj_host_clock - Called once every second to update the local clock. 935 * 936 * LOCKCLOCK: The only thing this routine does is increment the 937 * sys_rootdisp variable. 938 */ 939void 940adj_host_clock( 941 void 942 ) 943{ 944 double offset_adj; 945 double freq_adj; 946 947 /* 948 * Update the dispersion since the last update. In contrast to 949 * NTPv3, NTPv4 does not declare unsynchronized after one day, 950 * since the dispersion check serves this function. Also, 951 * since the poll interval can exceed one day, the old test 952 * would be counterproductive. During the startup clamp period, the 953 * time constant is clamped at 2. 954 */ 955 sys_rootdisp += clock_phi; 956#ifndef LOCKCLOCK 957 if (!ntp_enable || mode_ntpdate) 958 return; 959 /* 960 * Determine the phase adjustment. The gain factor (denominator) 961 * increases with poll interval, so is dominated by the FLL 962 * above the Allan intercept. Note the reduced time constant at 963 * startup. 964 */ 965 if (state != EVNT_SYNC) { 966 offset_adj = 0.; 967 } else if (freq_cnt > 0) { 968 offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1)); 969 freq_cnt--; 970#ifdef KERNEL_PLL 971 } else if (pll_control && kern_enable) { 972 offset_adj = 0.; 973#endif /* KERNEL_PLL */ 974 } else { 975 offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll)); 976 } 977 978 /* 979 * If the kernel discipline is enabled the frequency correction 980 * drift_comp has already been engaged via ntp_adjtime() in 981 * set_freq(). Otherwise it is a component of the adj_systime() 982 * offset. 983 */ 984#ifdef KERNEL_PLL 985 if (pll_control && kern_enable) 986 freq_adj = 0.; 987 else 988#endif /* KERNEL_PLL */ 989 freq_adj = drift_comp; 990 991 /* Bound absolute value of total adjustment to NTP_MAXFREQ. */ 992 if (offset_adj + freq_adj > NTP_MAXFREQ) 993 offset_adj = NTP_MAXFREQ - freq_adj; 994 else if (offset_adj + freq_adj < -NTP_MAXFREQ) 995 offset_adj = -NTP_MAXFREQ - freq_adj; 996 997 clock_offset -= offset_adj; 998 /* 999 * Windows port adj_systime() must be called each second, 1000 * even if the argument is zero, to ease emulation of 1001 * adjtime() using Windows' slew API which controls the rate 1002 * but does not automatically stop slewing when an offset 1003 * has decayed to zero. 1004 */ 1005 DEBUG_INSIST(enable_panic_check == TRUE); 1006 enable_panic_check = FALSE; 1007 adj_systime(offset_adj + freq_adj); 1008 enable_panic_check = TRUE; 1009#endif /* LOCKCLOCK */ 1010} 1011 1012 1013/* 1014 * Clock state machine. Enter new state and set state variables. 1015 */ 1016static void 1017rstclock( 1018 int trans, /* new state */ 1019 double offset /* new offset */ 1020 ) 1021{ 1022 DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n", 1023 current_time - clock_epoch, trans, sys_poll, 1024 tc_counter)); 1025 if (trans != state && trans != EVNT_FSET) 1026 report_event(trans, NULL, NULL); 1027 state = trans; 1028 last_offset = clock_offset = offset; 1029 clock_epoch = current_time; 1030} 1031 1032 1033/* 1034 * calc_freq - calculate frequency directly 1035 * 1036 * This is very carefully done. When the offset is first computed at the 1037 * first update, a residual frequency component results. Subsequently, 1038 * updates are suppresed until the end of the measurement interval while 1039 * the offset is amortized. At the end of the interval the frequency is 1040 * calculated from the current offset, residual offset, length of the 1041 * interval and residual frequency component. At the same time the 1042 * frequenchy file is armed for update at the next hourly stats. 1043 */ 1044static double 1045direct_freq( 1046 double fp_offset 1047 ) 1048{ 1049 set_freq(fp_offset / (current_time - clock_epoch)); 1050 1051 return drift_comp; 1052} 1053 1054 1055/* 1056 * set_freq - set clock frequency correction 1057 * 1058 * Used to step the frequency correction at startup, possibly again once 1059 * the frequency is measured (that is, transitioning from EVNT_NSET to 1060 * EVNT_FSET), and finally to switch between daemon and kernel loop 1061 * discipline at runtime. 1062 * 1063 * When the kernel loop discipline is available but the daemon loop is 1064 * in use, the kernel frequency correction is disabled (set to 0) to 1065 * ensure drift_comp is applied by only one of the loops. 1066 */ 1067static void 1068set_freq( 1069 double freq /* frequency update */ 1070 ) 1071{ 1072 const char * loop_desc; 1073 int ntp_adj_ret; 1074 1075 (void)ntp_adj_ret; /* not always used below... */ 1076 drift_comp = freq; 1077 loop_desc = "ntpd"; 1078#ifdef KERNEL_PLL 1079 if (pll_control) { 1080 ZERO(ntv); 1081 ntv.modes = MOD_FREQUENCY; 1082 if (kern_enable) { 1083 loop_desc = "kernel"; 1084 ntv.freq = DTOFREQ(drift_comp); 1085 } 1086 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) { 1087 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1); 1088 } 1089 } 1090#endif /* KERNEL_PLL */ 1091 mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc, 1092 drift_comp * 1e6); 1093} 1094 1095 1096#ifdef KERNEL_PLL 1097static void 1098start_kern_loop(void) 1099{ 1100 static int atexit_done; 1101 int ntp_adj_ret; 1102 1103 pll_control = TRUE; 1104 ZERO(ntv); 1105 ntv.modes = MOD_BITS; 1106 ntv.status = STA_PLL | STA_UNSYNC; 1107 ntv.maxerror = MAXDISPERSE * 1.0e6; 1108 ntv.esterror = MAXDISPERSE * 1.0e6; 1109 ntv.constant = sys_poll; 1110 /* ^^^^^^^^ why is it that here constant is 1111 * unconditionally set to sys_poll, whereas elsewhere is is 1112 * modified depending on nanosecond vs. microsecond kernel? 1113 */ 1114#ifdef SIGSYS 1115 /* 1116 * Use sigsetjmp() to save state and then call ntp_adjtime(); if 1117 * it fails, then pll_trap() will set pll_control FALSE before 1118 * returning control using siglogjmp(). 1119 */ 1120 newsigsys.sa_handler = pll_trap; 1121 newsigsys.sa_flags = 0; 1122 if (sigaction(SIGSYS, &newsigsys, &sigsys)) { 1123 msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m"); 1124 pll_control = FALSE; 1125 } else { 1126 if (sigsetjmp(env, 1) == 0) { 1127 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) { 1128 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1); 1129 } 1130 } 1131 if (sigaction(SIGSYS, &sigsys, NULL)) { 1132 msyslog(LOG_ERR, 1133 "sigaction() restore SIGSYS: %m"); 1134 pll_control = FALSE; 1135 } 1136 } 1137#else /* SIGSYS */ 1138 if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) { 1139 ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1); 1140 } 1141#endif /* SIGSYS */ 1142 1143 /* 1144 * Save the result status and light up an external clock 1145 * if available. 1146 */ 1147 pll_status = ntv.status; 1148 if (pll_control) { 1149 if (!atexit_done) { 1150 atexit_done = TRUE; 1151 atexit(&stop_kern_loop); 1152 } 1153#ifdef STA_NANO 1154 if (pll_status & STA_CLK) 1155 ext_enable = TRUE; 1156#endif /* STA_NANO */ 1157 report_event(EVNT_KERN, NULL, 1158 "kernel time sync enabled"); 1159 } 1160} 1161#endif /* KERNEL_PLL */ 1162 1163 1164#ifdef KERNEL_PLL 1165static void 1166stop_kern_loop(void) 1167{ 1168 if (pll_control && kern_enable) 1169 report_event(EVNT_KERN, NULL, 1170 "kernel time sync disabled"); 1171} 1172#endif /* KERNEL_PLL */ 1173 1174 1175/* 1176 * select_loop() - choose kernel or daemon loop discipline. 1177 */ 1178void 1179select_loop( 1180 int use_kern_loop 1181 ) 1182{ 1183 if (kern_enable == use_kern_loop) 1184 return; 1185#ifdef KERNEL_PLL 1186 if (pll_control && !use_kern_loop) 1187 stop_kern_loop(); 1188#endif 1189 kern_enable = use_kern_loop; 1190#ifdef KERNEL_PLL 1191 if (pll_control && use_kern_loop) 1192 start_kern_loop(); 1193#endif 1194 /* 1195 * If this loop selection change occurs after initial startup, 1196 * call set_freq() to switch the frequency compensation to or 1197 * from the kernel loop. 1198 */ 1199#ifdef KERNEL_PLL 1200 if (pll_control && loop_started) 1201 set_freq(drift_comp); 1202#endif 1203} 1204 1205 1206/* 1207 * huff-n'-puff filter 1208 */ 1209void 1210huffpuff(void) 1211{ 1212 int i; 1213 1214 if (sys_huffpuff == NULL) 1215 return; 1216 1217 sys_huffptr = (sys_huffptr + 1) % sys_hufflen; 1218 sys_huffpuff[sys_huffptr] = 1e9; 1219 sys_mindly = 1e9; 1220 for (i = 0; i < sys_hufflen; i++) { 1221 if (sys_huffpuff[i] < sys_mindly) 1222 sys_mindly = sys_huffpuff[i]; 1223 } 1224} 1225 1226 1227/* 1228 * loop_config - configure the loop filter 1229 * 1230 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops. 1231 */ 1232void 1233loop_config( 1234 int item, 1235 double freq 1236 ) 1237{ 1238 int i; 1239 double ftemp; 1240 1241 DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq)); 1242 switch (item) { 1243 1244 /* 1245 * We first assume the kernel supports the ntp_adjtime() 1246 * syscall. If that syscall works, initialize the kernel time 1247 * variables. Otherwise, continue leaving no harm behind. 1248 */ 1249 case LOOP_DRIFTINIT: 1250#ifndef LOCKCLOCK 1251#ifdef KERNEL_PLL 1252 if (mode_ntpdate) 1253 break; 1254 1255 start_kern_loop(); 1256#endif /* KERNEL_PLL */ 1257 1258 /* 1259 * Initialize frequency if given; otherwise, begin frequency 1260 * calibration phase. 1261 */ 1262 ftemp = init_drift_comp / 1e6; 1263 if (ftemp > NTP_MAXFREQ) 1264 ftemp = NTP_MAXFREQ; 1265 else if (ftemp < -NTP_MAXFREQ) 1266 ftemp = -NTP_MAXFREQ; 1267 set_freq(ftemp); 1268 if (freq_set) 1269 rstclock(EVNT_FSET, 0); 1270 else 1271 rstclock(EVNT_NSET, 0); 1272 loop_started = TRUE; 1273#endif /* LOCKCLOCK */ 1274 break; 1275 1276 case LOOP_KERN_CLEAR: 1277#if 0 /* XXX: needs more review, and how can we get here? */ 1278#ifndef LOCKCLOCK 1279# ifdef KERNEL_PLL 1280 if (pll_control && kern_enable) { 1281 memset((char *)&ntv, 0, sizeof(ntv)); 1282 ntv.modes = MOD_STATUS; 1283 ntv.status = STA_UNSYNC; 1284 ntp_adjtime(&ntv); 1285 sync_status("kernel time sync disabled", 1286 pll_status, 1287 ntv.status); 1288 } 1289# endif /* KERNEL_PLL */ 1290#endif /* LOCKCLOCK */ 1291#endif 1292 break; 1293 1294 /* 1295 * Tinker command variables for Ulrich Windl. Very dangerous. 1296 */ 1297 case LOOP_ALLAN: /* Allan intercept (log2) (allan) */ 1298 allan_xpt = (u_char)freq; 1299 break; 1300 1301 case LOOP_CODEC: /* audio codec frequency (codec) */ 1302 clock_codec = freq / 1e6; 1303 break; 1304 1305 case LOOP_PHI: /* dispersion threshold (dispersion) */ 1306 clock_phi = freq / 1e6; 1307 break; 1308 1309 case LOOP_FREQ: /* initial frequency (freq) */ 1310 init_drift_comp = freq; 1311 freq_set++; 1312 break; 1313 1314 case LOOP_HUFFPUFF: /* huff-n'-puff length (huffpuff) */ 1315 if (freq < HUFFPUFF) 1316 freq = HUFFPUFF; 1317 sys_hufflen = (int)(freq / HUFFPUFF); 1318 sys_huffpuff = eallocarray(sys_hufflen, sizeof(sys_huffpuff[0])); 1319 for (i = 0; i < sys_hufflen; i++) 1320 sys_huffpuff[i] = 1e9; 1321 sys_mindly = 1e9; 1322 break; 1323 1324 case LOOP_PANIC: /* panic threshold (panic) */ 1325 clock_panic = freq; 1326 break; 1327 1328 case LOOP_MAX: /* step threshold (step) */ 1329 clock_max_fwd = clock_max_back = freq; 1330 if (freq == 0 || freq > 0.5) 1331 select_loop(FALSE); 1332 break; 1333 1334 case LOOP_MAX_BACK: /* step threshold (step) */ 1335 clock_max_back = freq; 1336 /* 1337 * Leave using the kernel discipline code unless both 1338 * limits are massive. This assumes the reason to stop 1339 * using it is that it's pointless, not that it goes wrong. 1340 */ 1341 if ( (clock_max_back == 0 || clock_max_back > 0.5) 1342 || (clock_max_fwd == 0 || clock_max_fwd > 0.5)) 1343 select_loop(FALSE); 1344 break; 1345 1346 case LOOP_MAX_FWD: /* step threshold (step) */ 1347 clock_max_fwd = freq; 1348 if ( (clock_max_back == 0 || clock_max_back > 0.5) 1349 || (clock_max_fwd == 0 || clock_max_fwd > 0.5)) 1350 select_loop(FALSE); 1351 break; 1352 1353 case LOOP_MINSTEP: /* stepout threshold (stepout) */ 1354 if (freq < CLOCK_MINSTEP) 1355 clock_minstep = CLOCK_MINSTEP; 1356 else 1357 clock_minstep = freq; 1358 break; 1359 1360 case LOOP_TICK: /* tick increment (tick) */ 1361 set_sys_tick_precision(freq); 1362 break; 1363 1364 case LOOP_LEAP: /* not used, fall through */ 1365 default: 1366 msyslog(LOG_NOTICE, 1367 "loop_config: unsupported option %d", item); 1368 } 1369} 1370 1371 1372#if defined(KERNEL_PLL) && defined(SIGSYS) 1373/* 1374 * _trap - trap processor for undefined syscalls 1375 * 1376 * This nugget is called by the kernel when the SYS_ntp_adjtime() 1377 * syscall bombs because the silly thing has not been implemented in 1378 * the kernel. In this case the phase-lock loop is emulated by 1379 * the stock adjtime() syscall and a lot of indelicate abuse. 1380 */ 1381static RETSIGTYPE 1382pll_trap( 1383 int arg 1384 ) 1385{ 1386 pll_control = FALSE; 1387 siglongjmp(env, 1); 1388} 1389#endif /* KERNEL_PLL && SIGSYS */ 1390