/* kern_ntptime.c revision 44776 */
1/*********************************************************************** 2 * * 3 * Copyright (c) David L. Mills 1993-1998 * 4 * * 5 * Permission to use, copy, modify, and distribute this software and * 6 * its documentation for any purpose and without fee is hereby * 7 * granted, provided that the above copyright notice appears in all * 8 * copies and that both the copyright notice and this permission * 9 * notice appear in supporting documentation, and that the name * 10 * University of Delaware not be used in advertising or publicity * 11 * pertaining to distribution of the software without specific, * 12 * written prior permission. The University of Delaware makes no * 13 * representations about the suitability this software for any * 14 * purpose. It is provided "as is" without express or implied * 15 * warranty. * 16 * * 17 **********************************************************************/ 18 19/* 20 * Adapted from the original sources for FreeBSD and timecounters by: 21 * Poul-Henning Kamp <phk@FreeBSD.org>. 22 * 23 * The 32bit version of the "LP" macros seems a bit past its "sell by" 24 * date so I have retained only the 64bit version and included it directly 25 * in this file. 26 * 27 * Only minor changes done to interface with the timecounters over in 28 * sys/kern/kern_clock.c. Some of the comments below may be (even more) 29 * confusing and/or plain wrong in that context. 
30 */ 31 32#include "opt_ntp.h" 33 34#include <sys/param.h> 35#include <sys/systm.h> 36#include <sys/sysproto.h> 37#include <sys/kernel.h> 38#include <sys/proc.h> 39#include <sys/time.h> 40#include <sys/timex.h> 41#include <sys/timepps.h> 42#include <sys/sysctl.h> 43 44/* 45 * Single-precision macros for 64-bit machines 46 */ 47typedef long long l_fp; 48#define L_ADD(v, u) ((v) += (u)) 49#define L_SUB(v, u) ((v) -= (u)) 50#define L_ADDHI(v, a) ((v) += (long long)(a) << 32) 51#define L_NEG(v) ((v) = -(v)) 52#define L_RSHIFT(v, n) \ 53 do { \ 54 if ((v) < 0) \ 55 (v) = -(-(v) >> (n)); \ 56 else \ 57 (v) = (v) >> (n); \ 58 } while (0) 59#define L_MPY(v, a) ((v) *= (a)) 60#define L_CLR(v) ((v) = 0) 61#define L_ISNEG(v) ((v) < 0) 62#define L_LINT(v, a) ((v) = (long long)(a) << 32) 63#define L_GINT(v) ((v) < 0 ? -(-(v) >> 32) : (v) >> 32) 64 65/* 66 * Generic NTP kernel interface 67 * 68 * These routines constitute the Network Time Protocol (NTP) interfaces 69 * for user and daemon application programs. The ntp_gettime() routine 70 * provides the time, maximum error (synch distance) and estimated error 71 * (dispersion) to client user application programs. The ntp_adjtime() 72 * routine is used by the NTP daemon to adjust the system clock to an 73 * externally derived time. The time offset and related variables set by 74 * this routine are used by other routines in this module to adjust the 75 * phase and frequency of the clock discipline loop which controls the 76 * system clock. 77 * 78 * When the kernel time is reckoned directly in nanoseconds (NANO 79 * defined), the time at each tick interrupt is derived directly from 80 * the kernel time variable. When the kernel time is reckoned in 81 * microseconds, (NANO undefined), the time is derived from the kernel 82 * time variable together with a variable representing the leftover 83 * nanoseconds at the last tick interrupt. 
In either case, the current 84 * nanosecond time is reckoned from these values plus an interpolated 85 * value derived by the clock routines in another architecture-specific 86 * module. The interpolation can use either a dedicated counter or a 87 * processor cycle counter (PCC) implemented in some architectures. 88 * 89 * Note that all routines must run at priority splclock or higher. 90 */ 91 92/* 93 * Phase/frequency-lock loop (PLL/FLL) definitions 94 * 95 * The nanosecond clock discipline uses two variable types, time 96 * variables and frequency variables. Both types are represented as 64- 97 * bit fixed-point quantities with the decimal point between two 32-bit 98 * halves. On a 32-bit machine, each half is represented as a single 99 * word and mathematical operations are done using multiple-precision 100 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is 101 * used. 102 * 103 * A time variable is a signed 64-bit fixed-point number in ns and 104 * fraction. It represents the remaining time offset to be amortized 105 * over succeeding tick interrupts. The maximum time offset is about 106 * 0.512 s and the resolution is about 2.3e-10 ns. 107 * 108 * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 109 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 110 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 111 * |s s s| ns | 112 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 113 * | fraction | 114 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 115 * 116 * A frequency variable is a signed 64-bit fixed-point number in ns/s 117 * and fraction. It represents the ns and fraction to be added to the 118 * kernel time variable at each second. The maximum frequency offset is 119 * about +-512000 ns/s and the resolution is about 2.3e-10 ns/s. 
120 * 121 * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 122 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 123 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 124 * |s s s s s s s s s s s s s| ns/s | 125 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 126 * | fraction | 127 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 128 */ 129/* 130 * The following variables establish the state of the PLL/FLL and the 131 * residual time and frequency offset of the local clock. 132 */ 133#define SHIFT_PLL 4 /* PLL loop gain (shift) */ 134#define SHIFT_FLL 2 /* FLL loop gain (shift) */ 135 136static int time_state = TIME_OK; /* clock state */ 137static int time_status = STA_UNSYNC; /* clock status bits */ 138static long time_constant; /* poll interval (shift) (s) */ 139static long time_precision = 1; /* clock precision (ns) */ 140static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */ 141static long time_esterror = MAXPHASE / 1000; /* estimated error (us) */ 142static long time_reftime; /* time at last adjustment (s) */ 143static long time_tick; /* nanoseconds per tick (ns) */ 144static l_fp time_offset; /* time offset (ns) */ 145static l_fp time_freq; /* frequency offset (ns/s) */ 146 147#ifdef PPS_SYNC 148/* 149 * The following variables are used when a pulse-per-second (PPS) signal 150 * is available and connected via a modem control lead. They establish 151 * the engineering parameters of the clock discipline loop when 152 * controlled by the PPS signal. 
153 */ 154#define PPS_FAVG 2 /* min freq avg interval (s) (shift) */ 155#define PPS_FAVGMAX 8 /* max freq avg interval (s) (shift) */ 156#define PPS_PAVG 4 /* phase avg interval (s) (shift) */ 157#define PPS_VALID 120 /* PPS signal watchdog max (s) */ 158#define MAXTIME 500000 /* max PPS error (jitter) (ns) */ 159#define MAXWANDER 500000 /* max PPS wander (ns/s/s) */ 160 161struct ppstime { 162 long sec; /* PPS seconds */ 163 long nsec; /* PPS nanoseconds */ 164 long count; /* PPS nanosecond counter */ 165}; 166static struct ppstime pps_tf[3]; /* phase median filter */ 167static struct ppstime pps_filt; /* phase offset */ 168static l_fp pps_freq; /* scaled frequency offset (ns/s) */ 169static long pps_offacc; /* offset accumulator */ 170static long pps_jitter; /* scaled time dispersion (ns) */ 171static long pps_stabil; /* scaled frequency dispersion (ns/s) */ 172static long pps_lastcount; /* last counter offset */ 173static long pps_lastsec; /* time at last calibration (s) */ 174static int pps_valid; /* signal watchdog counter */ 175static int pps_shift = PPS_FAVG; /* interval duration (s) (shift) */ 176static int pps_intcnt; /* wander counter */ 177static int pps_offcnt; /* offset accumulator counter */ 178 179/* 180 * PPS signal quality monitors 181 */ 182static long pps_calcnt; /* calibration intervals */ 183static long pps_jitcnt; /* jitter limit exceeded */ 184static long pps_stbcnt; /* stability limit exceeded */ 185static long pps_errcnt; /* calibration errors */ 186#endif /* PPS_SYNC */ 187/* 188 * End of phase/frequency-lock loop (PLL/FLL) definitions 189 */ 190 191static void ntp_init(void); 192static void hardupdate(long offset); 193 194/* 195 * ntp_gettime() - NTP user application interface 196 * 197 * See the timex.h header file for synopsis and API description. 
198 */ 199static int 200ntp_sysctl SYSCTL_HANDLER_ARGS 201{ 202 struct ntptimeval ntv; /* temporary structure */ 203 struct timespec atv; /* nanosecond time */ 204 205 nanotime(&atv); 206 ntv.time.tv_sec = atv.tv_sec; 207 ntv.time.tv_nsec = atv.tv_nsec; 208 ntv.maxerror = time_maxerror; 209 ntv.esterror = time_esterror; 210 ntv.time_state = time_state; 211 212 /* 213 * Status word error decode. If any of these conditions occur, 214 * an error is returned, instead of the status word. Most 215 * applications will care only about the fact the system clock 216 * may not be trusted, not about the details. 217 * 218 * Hardware or software error 219 */ 220 if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) || 221 222 /* 223 * PPS signal lost when either time or frequency synchronization 224 * requested 225 */ 226 (time_status & (STA_PPSFREQ | STA_PPSTIME) && 227 !(time_status & STA_PPSSIGNAL)) || 228 229 /* 230 * PPS jitter exceeded when time synchronization requested 231 */ 232 (time_status & STA_PPSTIME && 233 time_status & STA_PPSJITTER) || 234 235 /* 236 * PPS wander exceeded or calibration error when frequency 237 * synchronization requested 238 */ 239 (time_status & STA_PPSFREQ && 240 time_status & (STA_PPSWANDER | STA_PPSERROR))) 241 ntv.time_state = TIME_ERROR; 242 return (sysctl_handle_opaque(oidp, &ntv, sizeof ntv, req)); 243} 244 245SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, ""); 246SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE|CTLFLAG_RD, 247 0, sizeof(struct ntptimeval) , ntp_sysctl, "S,ntptimeval", ""); 248 249 250/* 251 * ntp_adjtime() - NTP daemon application interface 252 * 253 * See the timex.h header file for synopsis and API description. 
254 */ 255#ifndef _SYS_SYSPROTO_H_ 256struct ntp_adjtime_args { 257 struct timex *tp; 258}; 259#endif 260 261int 262ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap) 263{ 264 struct timex ntv; /* temporary structure */ 265 int modes; /* mode bits from structure */ 266 int s; /* caller priority */ 267 int error; 268 269 error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv)); 270 if (error) 271 return(error); 272 273 /* 274 * Update selected clock variables - only the superuser can 275 * change anything. Note that there is no error checking here on 276 * the assumption the superuser should know what it is doing. 277 */ 278 modes = ntv.modes; 279 if (modes) 280 error = suser(p->p_cred->pc_ucred, &p->p_acflag); 281 if (error) 282 return (error); 283 s = splclock(); 284 if (modes & MOD_FREQUENCY) { 285 L_LINT(time_freq, ntv.freq / SCALE_PPM); 286#ifdef PPS_SYNC 287 pps_freq = time_freq; 288#endif /* PPS_SYNC */ 289 } 290 if (modes & MOD_MAXERROR) 291 time_maxerror = ntv.maxerror; 292 if (modes & MOD_ESTERROR) 293 time_esterror = ntv.esterror; 294 if (modes & MOD_STATUS) { 295 time_status &= STA_RONLY; 296 time_status |= ntv.status & ~STA_RONLY; 297 } 298 if (modes & MOD_TIMECONST) 299 time_constant = ntv.constant; 300 if (modes & MOD_NANO) 301 time_status |= STA_NANO; 302 if (modes & MOD_MICRO) 303 time_status &= ~STA_NANO; 304 if (modes & MOD_CLKB) 305 time_status |= STA_CLK; 306 if (modes & MOD_CLKA) 307 time_status &= ~STA_CLK; 308 if (modes & MOD_OFFSET) { 309 if (time_status & STA_NANO) 310 hardupdate(ntv.offset); 311 else 312 hardupdate(ntv.offset * 1000); 313 } 314 315 /* 316 * Retrieve all clock variables 317 */ 318 if (time_status & STA_NANO) 319 ntv.offset = L_GINT(time_offset); 320 else 321 ntv.offset = L_GINT(time_offset) / 1000; 322 ntv.freq = L_GINT(time_freq) * SCALE_PPM; 323 ntv.maxerror = time_maxerror; 324 ntv.esterror = time_esterror; 325 ntv.status = time_status; 326 if (ntv.constant < 0) 327 time_constant = 0; 328 else if (ntv.constant 
> MAXTC) 329 time_constant = MAXTC; 330 else 331 time_constant = ntv.constant; 332 if (time_status & STA_NANO) 333 ntv.precision = time_precision; 334 else 335 ntv.precision = time_precision / 1000; 336 ntv.tolerance = MAXFREQ * SCALE_PPM; 337#ifdef PPS_SYNC 338 ntv.shift = pps_shift; 339 ntv.ppsfreq = L_GINT(pps_freq) * SCALE_PPM; 340 ntv.jitter = pps_jitter; 341 if (time_status & STA_NANO) 342 ntv.jitter = pps_jitter; 343 else 344 ntv.jitter = pps_jitter / 1000; 345 ntv.stabil = pps_stabil; 346 ntv.calcnt = pps_calcnt; 347 ntv.errcnt = pps_errcnt; 348 ntv.jitcnt = pps_jitcnt; 349 ntv.stbcnt = pps_stbcnt; 350#endif /* PPS_SYNC */ 351 splx(s); 352 353 error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv)); 354 if (error) 355 return (error); 356 357 /* 358 * Status word error decode. See comments in 359 * ntp_gettime() routine. 360 */ 361 if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) || 362 (time_status & (STA_PPSFREQ | STA_PPSTIME) && 363 !(time_status & STA_PPSSIGNAL)) || 364 (time_status & STA_PPSTIME && 365 time_status & STA_PPSJITTER) || 366 (time_status & STA_PPSFREQ && 367 time_status & (STA_PPSWANDER | STA_PPSERROR))) 368 return (TIME_ERROR); 369 return (time_state); 370} 371 372/* 373 * second_overflow() - called after ntp_tick_adjust() 374 * 375 * This routine is ordinarily called immediately following the above 376 * routine ntp_tick_adjust(). While these two routines are normally 377 * combined, they are separated here only for the purposes of 378 * simulation. 379 */ 380void 381ntp_update_second(struct timecounter *tcp) 382{ 383 u_int32_t *newsec; 384 l_fp ftemp, time_adj; /* 32/64-bit temporaries */ 385 386 newsec = &tcp->tc_offset_sec; 387 time_maxerror += MAXFREQ / 1000; 388 389 /* 390 * Leap second processing. If in leap-insert state at 391 * the end of the day, the system clock is set back one 392 * second; if in leap-delete state, the system clock is 393 * set ahead one second. 
The nano_time() routine or 394 * external clock driver will insure that reported time 395 * is always monotonic. 396 */ 397 switch (time_state) { 398 399 /* 400 * No warning. 401 */ 402 case TIME_OK: 403 if (time_status & STA_INS) 404 time_state = TIME_INS; 405 else if (time_status & STA_DEL) 406 time_state = TIME_DEL; 407 break; 408 409 /* 410 * Insert second 23:59:60 following second 411 * 23:59:59. 412 */ 413 case TIME_INS: 414 if (!(time_status & STA_INS)) 415 time_state = TIME_OK; 416 else if ((*newsec) % 86400 == 0) { 417 (*newsec)--; 418 time_state = TIME_OOP; 419 } 420 break; 421 422 /* 423 * Delete second 23:59:59. 424 */ 425 case TIME_DEL: 426 if (!(time_status & STA_DEL)) 427 time_state = TIME_OK; 428 else if (((*newsec) + 1) % 86400 == 0) { 429 (*newsec)++; 430 time_state = TIME_WAIT; 431 } 432 break; 433 434 /* 435 * Insert second in progress. 436 */ 437 case TIME_OOP: 438 time_state = TIME_WAIT; 439 break; 440 441 /* 442 * Wait for status bits to clear. 443 */ 444 case TIME_WAIT: 445 if (!(time_status & (STA_INS | STA_DEL))) 446 time_state = TIME_OK; 447 } 448 449 /* 450 * Compute the total time adjustment for the next 451 * second in ns. The offset is reduced by a factor 452 * depending on FLL or PLL mode and whether the PPS 453 * signal is operating. Note that the value is in effect 454 * scaled by the clock frequency, since the adjustment 455 * is added at each tick interrupt. 
456 */ 457 ftemp = time_offset; 458#ifdef PPS_SYNC 459 if (time_status & STA_PPSTIME && time_status & 460 STA_PPSSIGNAL) 461 L_RSHIFT(ftemp, PPS_FAVG); 462 else if (time_status & STA_MODE) 463#else 464 if (time_status & STA_MODE) 465#endif /* PPS_SYNC */ 466 L_RSHIFT(ftemp, SHIFT_FLL); 467 else 468 L_RSHIFT(ftemp, SHIFT_PLL + time_constant); 469 time_adj = ftemp; 470 L_SUB(time_offset, ftemp); 471 L_ADD(time_adj, time_freq); 472 tcp->tc_adjustment = time_adj; 473#ifdef PPS_SYNC 474 if (pps_valid > 0) 475 pps_valid--; 476 else 477 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | 478 STA_PPSWANDER | STA_PPSERROR); 479#endif /* PPS_SYNC */ 480} 481 482/* 483 * ntp_init() - initialize variables and structures 484 * 485 * This routine must be called after the kernel variables hz and tick 486 * are set or changed and before the next tick interrupt. In this 487 * particular implementation, these values are assumed set elsewhere in 488 * the kernel. The design allows the clock frequency and tick interval 489 * to be changed while the system is running. So, this routine should 490 * probably be integrated with the code that does that. 491 */ 492static void 493ntp_init() 494{ 495 496 /* 497 * The following variable must be initialized any time the 498 * kernel variable hz is changed. 499 */ 500 time_tick = NANOSECOND / hz; 501 502 /* 503 * The following variables are initialized only at startup. Only 504 * those structures not cleared by the compiler need to be 505 * initialized, and these only in the simulator. In the actual 506 * kernel, any nonzero values here will quickly evaporate. 
507 */ 508 L_CLR(time_offset); 509 L_CLR(time_freq); 510#ifdef PPS_SYNC 511 pps_filt.sec = pps_filt.nsec = pps_filt.count = 0; 512 pps_tf[0] = pps_tf[1] = pps_tf[2] = pps_filt; 513 L_CLR(pps_freq); 514#endif /* PPS_SYNC */ 515} 516 517SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, ntp_init, NULL) 518 519/* 520 * hardupdate() - local clock update 521 * 522 * This routine is called by ntp_adjtime() to update the local clock 523 * phase and frequency. The implementation is of an adaptive-parameter, 524 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new 525 * time and frequency offset estimates for each call. If the kernel PPS 526 * discipline code is configured (PPS_SYNC), the PPS signal itself 527 * determines the new time offset, instead of the calling argument. 528 * Presumably, calls to ntp_adjtime() occur only when the caller 529 * believes the local clock is valid within some bound (+-128 ms with 530 * NTP). If the caller's time is far different than the PPS time, an 531 * argument will ensue, and it's not clear who will lose. 532 * 533 * For uncompensated quartz crystal oscillators and nominal update 534 * intervals less than 256 s, operation should be in phase-lock mode, 535 * where the loop is disciplined to phase. For update intervals greater 536 * than 1024 s, operation should be in frequency-lock mode, where the 537 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode 538 * is selected by the STA_MODE status bit. 539 */ 540static void 541hardupdate(offset) 542 long offset; /* clock offset (ns) */ 543{ 544 long ltemp, mtemp; 545 l_fp ftemp; 546 547 /* 548 * Select how the phase is to be controlled and from which 549 * source. If the PPS signal is present and enabled to 550 * discipline the time, the PPS offset is used; otherwise, the 551 * argument offset is used. 
552 */ 553 ltemp = offset; 554 if (ltemp > MAXPHASE) 555 ltemp = MAXPHASE; 556 else if (ltemp < -MAXPHASE) 557 ltemp = -MAXPHASE; 558 if (!(time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)) 559 L_LINT(time_offset, ltemp); 560 561 /* 562 * Select how the frequency is to be controlled and in which 563 * mode (PLL or FLL). If the PPS signal is present and enabled 564 * to discipline the frequency, the PPS frequency is used; 565 * otherwise, the argument offset is used to compute it. 566 */ 567 if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) { 568 time_reftime = time_second; 569 return; 570 } 571 if (time_status & STA_FREQHOLD || time_reftime == 0) 572 time_reftime = time_second; 573 mtemp = time_second - time_reftime; 574 if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC) 575 ) { 576 L_LINT(ftemp, (ltemp << 4) / mtemp); 577 L_RSHIFT(ftemp, SHIFT_FLL + 4); 578 L_ADD(time_freq, ftemp); 579 time_status |= STA_MODE; 580 } else { 581 L_LINT(ftemp, ltemp); 582 L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1); 583 L_MPY(ftemp, mtemp); 584 L_ADD(time_freq, ftemp); 585 time_status &= ~STA_MODE; 586 } 587 time_reftime = time_second; 588 if (L_GINT(time_freq) > MAXFREQ) 589 L_LINT(time_freq, MAXFREQ); 590 else if (L_GINT(time_freq) < -MAXFREQ) 591 L_LINT(time_freq, -MAXFREQ); 592} 593 594#ifdef PPS_SYNC 595/* 596 * hardpps() - discipline CPU clock oscillator to external PPS signal 597 * 598 * This routine is called at each PPS interrupt in order to discipline 599 * the CPU clock oscillator to the PPS signal. It measures the PPS phase 600 * and leaves it in a handy spot for the hardclock() routine. It 601 * integrates successive PPS phase differences and calculates the 602 * frequency offset. This is used in hardclock() to discipline the CPU 603 * clock oscillator so that the intrinsic frequency error is cancelled 604 * out. 
The code requires the caller to capture the time and 605 * architecture-dependent hardware counter values in nanoseconds at the 606 * on-time PPS signal transition. 607 * 608 * Note that, on some Unix systems this routine runs at an interrupt 609 * priority level higher than the timer interrupt routine hardclock(). 610 * Therefore, the variables used are distinct from the hardclock() 611 * variables, except for the actual time and frequency variables, which 612 * are determined by this routine and updated atomically. 613 */ 614void 615hardpps(tsp, nsec) 616 struct timespec *tsp; /* time at PPS */ 617 long nsec; /* hardware counter at PPS */ 618{ 619 long u_sec, u_nsec, v_nsec; /* temps */ 620 l_fp ftemp; 621 622 /* 623 * The signal is first processed by a frequency discriminator 624 * which rejects noise and input signals with frequencies 625 * outside the range 1 +-MAXFREQ PPS. If two hits occur in the 626 * same second, we ignore the later hit; if not and a hit occurs 627 * outside the range gate, keep the later hit but do not 628 * process it. 629 */ 630 time_status |= STA_PPSSIGNAL | STA_PPSJITTER; 631 time_status &= ~(STA_PPSWANDER | STA_PPSERROR); 632 pps_valid = PPS_VALID; 633 u_sec = tsp->tv_sec; 634 u_nsec = tsp->tv_nsec; 635 if (u_nsec >= (NANOSECOND >> 1)) { 636 u_nsec -= NANOSECOND; 637 u_sec++; 638 } 639 v_nsec = u_nsec - pps_tf[0].nsec; 640 if (u_sec == pps_tf[0].sec && v_nsec < -MAXFREQ) { 641 return; 642 } 643 pps_tf[2] = pps_tf[1]; 644 pps_tf[1] = pps_tf[0]; 645 pps_tf[0].sec = u_sec; 646 pps_tf[0].nsec = u_nsec; 647 648 /* 649 * Compute the difference between the current and previous 650 * counter values. If the difference exceeds 0.5 s, assume it 651 * has wrapped around, so correct 1.0 s. If the result exceeds 652 * the tick interval, the sample point has crossed a tick 653 * boundary during the last second, so correct the tick. Very 654 * intricate. 
655 */ 656 u_nsec = nsec; 657 if (u_nsec > (NANOSECOND >> 1)) 658 u_nsec -= NANOSECOND; 659 else if (u_nsec < -(NANOSECOND >> 1)) 660 u_nsec += NANOSECOND; 661#if 0 662 if (u_nsec > (time_tick >> 1)) 663 u_nsec -= time_tick; 664 else if (u_nsec < -(time_tick >> 1)) 665 u_nsec += time_tick; 666#endif 667 pps_tf[0].count = pps_tf[1].count + u_nsec; 668 if (v_nsec > MAXFREQ) { 669 return; 670 } 671 time_status &= ~STA_PPSJITTER; 672 673 /* 674 * A three-stage median filter is used to help denoise the PPS 675 * time. The median sample becomes the time offset estimate; the 676 * difference between the other two samples becomes the time 677 * dispersion (jitter) estimate. 678 */ 679 if (pps_tf[0].nsec > pps_tf[1].nsec) { 680 if (pps_tf[1].nsec > pps_tf[2].nsec) { 681 pps_filt = pps_tf[1]; /* 0 1 2 */ 682 u_nsec = pps_tf[0].nsec - pps_tf[2].nsec; 683 } else if (pps_tf[2].nsec > pps_tf[0].nsec) { 684 pps_filt = pps_tf[0]; /* 2 0 1 */ 685 u_nsec = pps_tf[2].nsec - pps_tf[1].nsec; 686 } else { 687 pps_filt = pps_tf[2]; /* 0 2 1 */ 688 u_nsec = pps_tf[0].nsec - pps_tf[1].nsec; 689 } 690 } else { 691 if (pps_tf[1].nsec < pps_tf[2].nsec) { 692 pps_filt = pps_tf[1]; /* 2 1 0 */ 693 u_nsec = pps_tf[2].nsec - pps_tf[0].nsec; 694 } else if (pps_tf[2].nsec < pps_tf[0].nsec) { 695 pps_filt = pps_tf[0]; /* 1 0 2 */ 696 u_nsec = pps_tf[1].nsec - pps_tf[2].nsec; 697 } else { 698 pps_filt = pps_tf[2]; /* 1 2 0 */ 699 u_nsec = pps_tf[1].nsec - pps_tf[0].nsec; 700 } 701 } 702 703 /* 704 * Nominal jitter is due to PPS signal noise and interrupt 705 * latency. If it exceeds the jitter limit, the sample is 706 * discarded. otherwise, if so enabled, the time offset is 707 * updated. The offsets are accumulated over the phase averaging 708 * interval to improve accuracy. The jitter is averaged only for 709 * performance monitoring. We can tolerate a modest loss of data 710 * here without degrading time accuracy. 
711 */ 712 if (u_nsec > MAXTIME) { 713 time_status |= STA_PPSJITTER; 714 pps_jitcnt++; 715 } else if (time_status & STA_PPSTIME) { 716 pps_offacc -= pps_filt.nsec; 717 pps_offcnt++; 718 } 719 if (pps_offcnt >= (1 << PPS_PAVG)) { 720 if (time_status & STA_PPSTIME) { 721 L_LINT(time_offset, pps_offacc); 722 L_RSHIFT(time_offset, PPS_PAVG); 723 } 724 pps_offacc = 0; 725 pps_offcnt = 0; 726 727 } 728 pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG; 729 u_sec = pps_tf[0].sec - pps_lastsec; 730 if (u_sec < (1 << pps_shift)) 731 return; 732 733 /* 734 * At the end of the calibration interval the difference between 735 * the first and last counter values becomes the scaled 736 * frequency. It will later be divided by the length of the 737 * interval to determine the frequency update. If the frequency 738 * exceeds a sanity threshold, or if the actual calibration 739 * interval is not equal to the expected length, the data are 740 * discarded. We can tolerate a modest loss of data here without 741 * degrading frequency ccuracy. 742 */ 743 pps_calcnt++; 744 v_nsec = -pps_filt.count; 745 pps_lastsec = pps_tf[0].sec; 746 pps_tf[0].count = 0; 747 u_nsec = MAXFREQ << pps_shift; 748 if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << 749 pps_shift)) { 750 time_status |= STA_PPSERROR; 751 pps_errcnt++; 752 return; 753 } 754 755 /* 756 * If the actual calibration interval is not equal to the 757 * expected length, the data are discarded. If the wander is 758 * less than the wander threshold for four consecutive 759 * intervals, the interval is doubled; if it is greater than the 760 * threshold for four consecutive intervals, the interval is 761 * halved. The scaled frequency offset is converted to frequency 762 * offset. The stability metric is calculated as the average of 763 * recent frequency changes, but is used only for performance 764 * monitoring. 
765 */ 766 L_LINT(ftemp, v_nsec); 767 L_RSHIFT(ftemp, pps_shift); 768 L_SUB(ftemp, pps_freq); 769 u_nsec = L_GINT(ftemp); 770 if (u_nsec > MAXWANDER) { 771 L_LINT(ftemp, MAXWANDER); 772 pps_intcnt--; 773 time_status |= STA_PPSWANDER; 774 pps_stbcnt++; 775 } else if (u_nsec < -MAXWANDER) { 776 L_LINT(ftemp, -MAXWANDER); 777 pps_intcnt--; 778 time_status |= STA_PPSWANDER; 779 pps_stbcnt++; 780 } else { 781 pps_intcnt++; 782 } 783 if (pps_intcnt >= 4) { 784 pps_intcnt = 4; 785 if (pps_shift < PPS_FAVGMAX) { 786 pps_shift++; 787 pps_intcnt = 0; 788 } 789 } else if (pps_intcnt <= -4) { 790 pps_intcnt = -4; 791 if (pps_shift > PPS_FAVG) { 792 pps_shift--; 793 pps_intcnt = 0; 794 } 795 } 796 if (u_nsec < 0) 797 u_nsec = -u_nsec; 798 pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG; 799 800 /* 801 * The frequency offset is averaged into the PPS frequency. If 802 * enabled, the system clock frequency is updated as well. 803 */ 804 L_RSHIFT(ftemp, PPS_FAVG); 805 L_ADD(pps_freq, ftemp); 806 u_nsec = L_GINT(pps_freq); 807 if (u_nsec > MAXFREQ) 808 L_LINT(pps_freq, MAXFREQ); 809 else if (u_nsec < -MAXFREQ) 810 L_LINT(pps_freq, -MAXFREQ); 811 if (time_status & STA_PPSFREQ) 812 time_freq = pps_freq; 813} 814#endif /* PPS_SYNC */ 815