1/*
2 * ntp_refclock - processing support for reference clocks
3 */
4#ifdef HAVE_CONFIG_H
5# include <config.h>
6#endif
7
8#include "ntpd.h"
9#include "ntp_io.h"
10#include "ntp_unixtime.h"
11#include "ntp_tty.h"
12#include "ntp_refclock.h"
13#include "ntp_stdlib.h"
14#include "ntp_assert.h"
15#include "timespecops.h"
16
17#include <stdio.h>
18
19#ifdef HAVE_SYS_IOCTL_H
20# include <sys/ioctl.h>
21#endif /* HAVE_SYS_IOCTL_H */
22
23#ifdef REFCLOCK
24
25#ifdef KERNEL_PLL
26#include "ntp_syscall.h"
27#endif /* KERNEL_PLL */
28
29#ifdef HAVE_PPSAPI
30#include "ppsapi_timepps.h"
31#include "refclock_atom.h"
32#endif /* HAVE_PPSAPI */
33
34/*
35 * Reference clock support is provided here by maintaining the fiction
36 * that the clock is actually a peer.  As no packets are exchanged with
37 * a reference clock, however, we replace the transmit, receive and
38 * packet procedures with separate code to simulate them.  Routines
39 * refclock_transmit() and refclock_receive() maintain the peer
40 * variables in a state analogous to an actual peer and pass reference
41 * clock data on through the filters.  Routines refclock_peer() and
42 * refclock_unpeer() are called to initialize and terminate reference
 * clock associations.  A set of utility routines is included to open
 * serial devices, process sample data, and perform various debugging
 * functions.
46 *
47 * The main interface used by these routines is the refclockproc
 * structure, which contains for most drivers the decimal equivalents
 * of the year, day, hour, minute, second and millisecond/microsecond
 * decoded from the ASCII timecode.  Additional information includes
51 * the receive timestamp, exception report, statistics tallies, etc.
52 * In addition, there may be a driver-specific unit structure used for
53 * local control of the device.
54 *
55 * The support routines are passed a pointer to the peer structure,
56 * which is used for all peer-specific processing and contains a
57 * pointer to the refclockproc structure, which in turn contains a
58 * pointer to the unit structure, if used.  The peer structure is
59 * identified by an interface address in the dotted quad form
60 * 127.127.t.u, where t is the clock type and u the unit.
61 */
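/*
 * For example, an association whose source address is 127.127.20.3
 * selects clock type 20, unit 3; refclock_newpeer() below then
 * dispatches to refclock_conf[20]->clock_start(3, peer).  (The type
 * number used here is purely illustrative.)
 */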
62#define FUDGEFAC	.1	/* fudge correction factor */
63#define LF		0x0a	/* ASCII LF */
64
65int	cal_enable;		/* enable refclock calibrate */
66
67/*
68 * Forward declarations
69 */
70static int  refclock_cmpl_fp (const void *, const void *);
71static int  refclock_sample (struct refclockproc *);
72static int  refclock_ioctl(int, u_int);
73static void refclock_checkburst(struct peer *, struct refclockproc *);
74
/* circular buffer functions
 *
 * circular buffer management comes in two flavours: one for buffer
 * sizes that are a power of two (index masking) and one for all other
 * sizes (modulo arithmetic).
 */
80
81#if MAXSTAGE & (MAXSTAGE - 1)
82
83static void clk_add_sample(
84	struct refclockproc * const	pp,
85	double				sv
86	)
87{
88	pp->coderecv = (pp->coderecv + 1) % MAXSTAGE;
89	if (pp->coderecv == pp->codeproc)
90		pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
91	pp->filter[pp->coderecv] = sv;
92}
93
94static double clk_pop_sample(
95	struct refclockproc * const	pp
96	)
97{
98	if (pp->coderecv == pp->codeproc)
99		return 0; /* Maybe a NaN would be better? */
100	pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
101	return pp->filter[pp->codeproc];
102}
103
104static inline u_int clk_cnt_sample(
105	struct refclockproc * const	pp
106	)
107{
108	u_int retv = pp->coderecv - pp->codeproc;
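	/* If coderecv has wrapped around behind codeproc, the unsigned
	 * subtraction above underflows; adding MAXSTAGE (which itself
	 * wraps mod 2^32) restores the true sample count.
	 */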
109	if (retv > MAXSTAGE)
110		retv += MAXSTAGE;
111	return retv;
112}
113
114#else
115
116static inline void clk_add_sample(
117	struct refclockproc * const	pp,
118	double				sv
119	)
120{
121	pp->coderecv  = (pp->coderecv + 1) & (MAXSTAGE - 1);
122	if (pp->coderecv == pp->codeproc)
123		pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
124	pp->filter[pp->coderecv] = sv;
125}
126
127static inline double clk_pop_sample(
128	struct refclockproc * const	pp
129	)
130{
131	if (pp->coderecv == pp->codeproc)
132		return 0; /* Maybe a NaN would be better? */
133	pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
134	return pp->filter[pp->codeproc];
135}
136
137static inline u_int clk_cnt_sample(
138	struct refclockproc * const	pp
139	)
140{
141	return (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
142}
143
144#endif
145
146/*
 * refclock_report - note the occurrence of an event
148 *
149 * This routine presently just remembers the report and logs it, but
150 * does nothing heroic for the trap handler. It tries to be a good
151 * citizen and bothers the system log only if things change.
152 */
153void
154refclock_report(
155	struct peer *peer,
156	int code
157	)
158{
159	struct refclockproc *pp;
160
161	pp = peer->procptr;
162	if (pp == NULL)
163		return;
164
165	switch (code) {
166
167	case CEVNT_TIMEOUT:
168		pp->noreply++;
169		break;
170
171	case CEVNT_BADREPLY:
172		pp->badformat++;
173		break;
174
175	case CEVNT_FAULT:
176		break;
177
178	case CEVNT_BADDATE:
179	case CEVNT_BADTIME:
180		pp->baddata++;
181		break;
182
183	default:
184		/* ignore others */
185		break;
186	}
187	if ((code != CEVNT_NOMINAL) && (pp->lastevent < 15))
188		pp->lastevent++;
189	if (pp->currentstatus != code) {
190		pp->currentstatus = (u_char)code;
191		report_event(PEVNT_CLOCK, peer, ceventstr(code));
192	}
193}
194
195
196/*
197 * init_refclock - initialize the reference clock drivers
198 *
199 * This routine calls each of the drivers in turn to initialize internal
200 * variables, if necessary. Most drivers have nothing to say at this
201 * point.
202 */
203void
204init_refclock(void)
205{
206	int i;
207
208	for (i = 0; i < (int)num_refclock_conf; i++)
209		if (refclock_conf[i]->clock_init != noentry)
210			(refclock_conf[i]->clock_init)();
211}
212
213
214/*
215 * refclock_newpeer - initialize and start a reference clock
216 *
217 * This routine allocates and initializes the interface structure which
218 * supports a reference clock in the form of an ordinary NTP peer. A
219 * driver-specific support routine completes the initialization, if
220 * used. Default peer variables which identify the clock and establish
 * its reference ID and stratum are set here. It returns one on success
 * and zero if the clock address is invalid or already in use, if
 * insufficient resources are available, or if the driver rejects the
 * unit.
225 */
226int
227refclock_newpeer(
228	struct peer *peer	/* peer structure pointer */
229	)
230{
231	struct refclockproc *pp;
232	u_char clktype;
233	int unit;
234
235	/*
236	 * Check for valid clock address. If already running, shut it
237	 * down first.
238	 */
239	if (!ISREFCLOCKADR(&peer->srcadr)) {
240		msyslog(LOG_ERR,
241			"refclock_newpeer: clock address %s invalid",
242			stoa(&peer->srcadr));
243		return (0);
244	}
245	clktype = (u_char)REFCLOCKTYPE(&peer->srcadr);
246	unit = REFCLOCKUNIT(&peer->srcadr);
247	if (clktype >= num_refclock_conf ||
248		refclock_conf[clktype]->clock_start == noentry) {
249		msyslog(LOG_ERR,
			"refclock_newpeer: clock type %d invalid",
251			clktype);
252		return (0);
253	}
254
255	/*
256	 * Allocate and initialize interface structure
257	 */
258	pp = emalloc_zero(sizeof(*pp));
259	peer->procptr = pp;
260
261	/*
262	 * Initialize structures
263	 */
264	peer->refclktype = clktype;
265	peer->refclkunit = (u_char)unit;
266	peer->flags |= FLAG_REFCLOCK;
267	peer->leap = LEAP_NOTINSYNC;
268	peer->stratum = STRATUM_REFCLOCK;
269	peer->ppoll = peer->maxpoll;
270	pp->type = clktype;
271	pp->conf = refclock_conf[clktype];
272	pp->timestarted = current_time;
273	pp->io.fd = -1;
274
275	/*
276	 * Set peer.pmode based on the hmode. For appearances only.
277	 */
278	switch (peer->hmode) {
279	case MODE_ACTIVE:
280		peer->pmode = MODE_PASSIVE;
281		break;
282
283	default:
284		peer->pmode = MODE_SERVER;
285		break;
286	}
287
288	/*
289	 * Do driver dependent initialization. The above defaults
290	 * can be wiggled, then finish up for consistency.
291	 */
292	if (!((refclock_conf[clktype]->clock_start)(unit, peer))) {
293		refclock_unpeer(peer);
294		return (0);
295	}
296	peer->refid = pp->refid;
297	return (1);
298}
299
300
301/*
302 * refclock_unpeer - shut down a clock
303 */
304void
305refclock_unpeer(
306	struct peer *peer	/* peer structure pointer */
307	)
308{
309	u_char clktype;
310	int unit;
311
312	/*
313	 * Wiggle the driver to release its resources, then give back
314	 * the interface structure.
315	 */
316	if (NULL == peer->procptr)
317		return;
318
319	clktype = peer->refclktype;
320	unit = peer->refclkunit;
321	if (refclock_conf[clktype]->clock_shutdown != noentry)
322		(refclock_conf[clktype]->clock_shutdown)(unit, peer);
323	free(peer->procptr);
324	peer->procptr = NULL;
325}
326
327
328/*
329 * refclock_timer - called once per second for housekeeping.
330 */
331void
332refclock_timer(
333	struct peer *p
334	)
335{
336	struct refclockproc *	pp;
337	int			unit;
338
339	unit = p->refclkunit;
340	pp = p->procptr;
341	if (pp->conf->clock_timer != noentry)
342		(*pp->conf->clock_timer)(unit, p);
343	if (pp->action != NULL && pp->nextaction <= current_time)
344		(*pp->action)(p);
345}
346
347
348/*
349 * refclock_transmit - simulate the transmit procedure
350 *
 * This routine implements the NTP transmit procedure for a reference
 * clock. It provides a mechanism to call the driver at the NTP poll
 * interval, as well as a reachability mechanism to detect a broken
 * radio or other madness.
355 */
356void
357refclock_transmit(
358	struct peer *peer	/* peer structure pointer */
359	)
360{
361	u_char clktype;
362	int unit;
363
364	clktype = peer->refclktype;
365	unit = peer->refclkunit;
366	peer->sent++;
367	get_systime(&peer->xmt);
368
369	/*
370	 * This is a ripoff of the peer transmit routine, but
371	 * specialized for reference clocks. We do a little less
372	 * protocol here and call the driver-specific transmit routine.
373	 */
374	if (peer->burst == 0) {
375		u_char oreach;
376#ifdef DEBUG
377		if (debug)
			printf("refclock_transmit: at %lu %s\n",
379			    current_time, stoa(&(peer->srcadr)));
380#endif
381
382		/*
383		 * Update reachability and poll variables like the
384		 * network code.
385		 */
386		oreach = peer->reach & 0xfe;
387		peer->reach <<= 1;
388		if (!(peer->reach & 0x0f))
389			clock_filter(peer, 0., 0., MAXDISPERSE);
390		peer->outdate = current_time;
391		if (!peer->reach) {
392			if (oreach) {
393				report_event(PEVNT_UNREACH, peer, NULL);
394				peer->timereachable = current_time;
395			}
396		} else {
397			if (peer->flags & FLAG_BURST)
398				peer->burst = NSTAGE;
399		}
400	} else {
401		peer->burst--;
402	}
403	peer->procptr->inpoll = TRUE;
404	if (refclock_conf[clktype]->clock_poll != noentry)
405		(refclock_conf[clktype]->clock_poll)(unit, peer);
406	poll_update(peer, peer->hpoll, 0);
407}
408
409
410/*
411 * Compare two doubles - used with qsort()
412 */
413static int
414refclock_cmpl_fp(
415	const void *p1,
416	const void *p2
417	)
418{
419	const double *dp1 = (const double *)p1;
420	const double *dp2 = (const double *)p2;
421
422	if (*dp1 < *dp2)
423		return -1;
424	if (*dp1 > *dp2)
425		return 1;
426	return 0;
427}
428
429/*
430 * Get number of available samples
431 */
432int
433refclock_samples_avail(
434	struct refclockproc const * pp
435	)
436{
437	u_int	na;
438
439#   if MAXSTAGE & (MAXSTAGE - 1)
440
441	na = pp->coderecv - pp->codeproc;
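	/* same index-wraparound handling as in clk_cnt_sample() above */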
442	if (na > MAXSTAGE)
443		na += MAXSTAGE;
444
445#   else
446
447	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
448
449#   endif
450	return na;
451}
452
453/*
454 * Expire (remove) samples from the tail (oldest samples removed)
455 *
456 * Returns number of samples deleted
457 */
458int
459refclock_samples_expire(
460	struct refclockproc * pp,
461	int                   nd
462	)
463{
464	u_int	na;
465
466	if (nd <= 0)
467		return 0;
468
469#   if MAXSTAGE & (MAXSTAGE - 1)
470
471	na = pp->coderecv - pp->codeproc;
472	if (na > MAXSTAGE)
473		na += MAXSTAGE;
	if ((u_int)nd > na)
		nd = (int)na;
476	pp->codeproc = (pp->codeproc + nd) % MAXSTAGE;
477
478#   else
479
480	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
481	if ((u_int)nd > na)
482		nd = (int)na;
483	pp->codeproc = (pp->codeproc + nd) & (MAXSTAGE - 1);
484
485#   endif
486	return nd;
487}
488
489/*
490 * refclock_process_offset - update median filter
491 *
492 * This routine uses the given offset and timestamps to construct a new
493 * entry in the median filter circular buffer. Samples that overflow the
494 * filter are quietly discarded.
495 */
496void
497refclock_process_offset(
498	struct refclockproc *pp,	/* refclock structure pointer */
499	l_fp lasttim,			/* last timecode timestamp */
500	l_fp lastrec,			/* last receive timestamp */
501	double fudge
502	)
503{
504	l_fp lftemp;
505	double doffset;
506
507	pp->lastrec = lastrec;
508	lftemp = lasttim;
509	L_SUB(&lftemp, &lastrec);
510	LFPTOD(&lftemp, doffset);
511	clk_add_sample(pp, doffset + fudge);
512	refclock_checkburst(pp->io.srcclock, pp);
513}
514
515
516/*
517 * refclock_process - process a sample from the clock
518 * refclock_process_f - refclock_process with other than time1 fudge
519 *
520 * This routine converts the timecode in the form days, hours, minutes,
521 * seconds and milliseconds/microseconds to internal timestamp format,
522 * then constructs a new entry in the median filter circular buffer.
523 * Return success (1) if the data are correct and consistent with the
524 * conventional calendar.
525 *
 * Important for PPS users: Normally, pp->lastrec is set to the system
 * time when the on-time character is received, pp->year, ...,
 * pp->second are decoded, and the seconds fraction is kept in pp->nsec
 * (in nanoseconds). When a PPS offset is available, pp->nsec is forced
 * to zero and the fraction for pp->lastrec is set to the PPS offset.
531 */
532int
533refclock_process_f(
534	struct refclockproc *pp,	/* refclock structure pointer */
535	double fudge
536	)
537{
538	l_fp offset, ltemp;
539
540	/*
541	 * Compute the timecode timestamp from the days, hours, minutes,
542	 * seconds and milliseconds/microseconds of the timecode. Use
543	 * clocktime() for the aggregate seconds and the msec/usec for
	 * the fraction, when present. Note that this code relies on the
	 * system time for the year and does not use the year of the
	 * timecode.
547	 */
548	if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
549		pp->lastrec.l_ui, &pp->yearstart, &offset.l_ui))
550		return (0);
551
552	offset.l_uf = 0;
553	DTOLFP(pp->nsec / 1e9, &ltemp);
554	L_ADD(&offset, &ltemp);
555	refclock_process_offset(pp, offset, pp->lastrec, fudge);
556	return (1);
557}
558
559
560int
561refclock_process(
562	struct refclockproc *pp		/* refclock structure pointer */
563)
564{
565	return refclock_process_f(pp, pp->fudgetime1);
566}
567
568
569/*
570 * refclock_sample - process a pile of samples from the clock
571 *
572 * This routine implements a recursive median filter to suppress spikes
573 * in the data, as well as determine a performance statistic. It
574 * calculates the mean offset and RMS jitter. A time adjustment
575 * fudgetime1 can be added to the final offset to compensate for various
576 * systematic errors. The routine returns the number of samples
577 * processed, which could be zero.
578 */
579static int
580refclock_sample(
581	struct refclockproc *pp		/* refclock structure pointer */
582	)
583{
584	size_t	i, j, k, m, n;
585	double	off[MAXSTAGE];
586	double	offset;
587
588	/*
589	 * Copy the raw offsets and sort into ascending order. Don't do
590	 * anything if the buffer is empty.
591	 */
592	n = 0;
593	while (pp->codeproc != pp->coderecv)
594		off[n++] = clk_pop_sample(pp);
595	if (n == 0)
596		return (0);
597
598	if (n > 1)
599		qsort(off, n, sizeof(off[0]), refclock_cmpl_fp);
600
601	/*
602	 * Reject the furthest from the median of the samples until
603	 * approximately 60 percent of the samples remain.
604	 */
605	i = 0; j = n;
606	m = n - (n * 4) / 10;
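	/* m = number of samples to keep, roughly 60 percent of n */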
607	while ((j - i) > m) {
608		offset = off[(j + i) / 2];
609		if (off[j - 1] - offset < offset - off[i])
610			i++;	/* reject low end */
611		else
612			j--;	/* reject high end */
613	}
614
615	/*
616	 * Determine the offset and jitter.
617	 */
618	pp->offset = off[i];
619	pp->jitter = 0;
620	for (k = i + 1; k < j; k++) {
621		pp->offset += off[k];
622		pp->jitter += SQUARE(off[k] - off[k - 1]);
623	}
624	pp->offset /= m;
	m -= (m > 1);	/* only (m-1) terms contribute to the jitter! */
626	pp->jitter = max(SQRT(pp->jitter / m), LOGTOD(sys_precision));
627
628	/*
	 * If the source has a jitter that cannot be estimated, because
	 * it is not statistical jitter, the source will be detected as a
	 * falseticker sooner or later.  Enforcing a minimal jitter value
	 * avoids an estimate that is too low while still detecting higher
	 * jitter.
633	 *
634	 * Note that this changes the refclock samples and ends up in the
635	 * clock dispersion, not the clock jitter, despite being called
636	 * jitter.  To see the modified values, check the NTP clock variable
637	 * "filtdisp", not "jitter".
638	 */
639	pp->jitter = max(pp->jitter, pp->fudgeminjitter);
640
641#ifdef DEBUG
642	if (debug)
643		printf(
644		    "refclock_sample: n %d offset %.6f disp %.6f jitter %.6f\n",
645		    (int)n, pp->offset, pp->disp, pp->jitter);
646#endif
647	return (int)n;
648}
649
650
651/*
652 * refclock_receive - simulate the receive and packet procedures
653 *
654 * This routine simulates the NTP receive and packet procedures for a
655 * reference clock. This provides a mechanism in which the ordinary NTP
656 * filter, selection and combining algorithms can be used to suppress
657 * misbehaving radios and to mitigate between them when more than one is
658 * available for backup.
659 */
660void
661refclock_receive(
662	struct peer *peer	/* peer structure pointer */
663	)
664{
665	struct refclockproc *pp;
666
667#ifdef DEBUG
668	if (debug)
669		printf("refclock_receive: at %lu %s\n",
670		    current_time, stoa(&peer->srcadr));
671#endif
672
673	/*
674	 * Do a little sanity dance and update the peer structure. Groom
675	 * the median filter samples and give the data to the clock
676	 * filter.
677	 */
678	pp = peer->procptr;
679	pp->inpoll = FALSE;
680	peer->leap = pp->leap;
681	if (peer->leap == LEAP_NOTINSYNC)
682		return;
683
684	peer->received++;
685	peer->timereceived = current_time;
686	if (!peer->reach) {
687		report_event(PEVNT_REACH, peer, NULL);
688		peer->timereachable = current_time;
689	}
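	/* Update the reach shift register: if the low bit is already
	 * set, shift a fresh 1 in; otherwise just set the low bit.
	 */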
690	peer->reach = (peer->reach << (peer->reach & 1)) | 1;
691	peer->reftime = pp->lastref;
692	peer->aorg = pp->lastrec;
693	peer->rootdisp = pp->disp;
694	get_systime(&peer->dst);
695	if (!refclock_sample(pp))
696		return;
697
698	clock_filter(peer, pp->offset, 0., pp->jitter);
699	if (cal_enable && fabs(last_offset) < sys_mindisp && sys_peer !=
700	    NULL) {
701		if (sys_peer->refclktype == REFCLK_ATOM_PPS &&
702		    peer->refclktype != REFCLK_ATOM_PPS)
703			pp->fudgetime1 -= pp->offset * FUDGEFAC;
704	}
705}
706
707
708/*
709 * refclock_gtlin - groom next input line and extract timestamp
710 *
711 * This routine processes the timecode received from the clock and
 * strips the parity bit and control characters. It returns the number
 * of characters in the line followed by a NUL character ('\0'), which
 * is not included in the count. In case of an empty line, the previous
 * line is preserved.
716 */
717int
718refclock_gtlin(
719	struct recvbuf *rbufp,	/* receive buffer pointer */
720	char	*lineptr,	/* current line pointer */
721	int	bmax,		/* remaining characters in line */
722	l_fp	*tsptr		/* pointer to timestamp returned */
723	)
724{
725	const char *sp, *spend;
726	char	   *dp, *dpend;
727	int         dlen;
728
729	if (bmax <= 0)
730		return (0);
731
732	dp    = lineptr;
733	dpend = dp + bmax - 1; /* leave room for NUL pad */
734	sp    = (const char *)rbufp->recv_buffer;
735	spend = sp + rbufp->recv_length;
736
737	while (sp != spend && dp != dpend) {
738		char c;
739
740		c = *sp++ & 0x7f;
741		if (c >= 0x20 && c < 0x7f)
742			*dp++ = c;
743	}
744	/* Get length of data written to the destination buffer. If
745	 * zero, do *not* place a NUL byte to preserve the previous
746	 * buffer content.
747	 */
748	dlen = dp - lineptr;
749	if (dlen)
750	    *dp  = '\0';
751	*tsptr = rbufp->recv_time;
752	DPRINTF(2, ("refclock_gtlin: fd %d time %s timecode %d %s\n",
753		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), dlen,
754		    (dlen != 0)
755			? lineptr
756			: ""));
757	return (dlen);
758}
759
760
761/*
762 * refclock_gtraw - get next line/chunk of data
763 *
 * This routine returns the raw data received from the clock in either
 * canonical or raw mode. The terminal interface routines map CR to LF.
 * In canonical mode this results in two lines, one containing data
 * followed by LF and another containing only LF. In raw mode the
 * interface routines can deliver arbitrary chunks of data from one
 * character to a maximum specified by the calling routine. In either
 * mode the routine returns the number of characters in the line
 * followed by a NUL character ('\0'), which is not included in the
 * count.
773 *
774 * *tsptr receives a copy of the buffer timestamp.
775 */
776int
777refclock_gtraw(
778	struct recvbuf *rbufp,	/* receive buffer pointer */
779	char	*lineptr,	/* current line pointer */
780	int	bmax,		/* remaining characters in line */
781	l_fp	*tsptr		/* pointer to timestamp returned */
782	)
783{
784	if (bmax <= 0)
785		return (0);
786	bmax -= 1; /* leave room for trailing NUL */
787	if (bmax > rbufp->recv_length)
788		bmax = rbufp->recv_length;
789	memcpy(lineptr, rbufp->recv_buffer, bmax);
790	lineptr[bmax] = '\0';
791
792	*tsptr = rbufp->recv_time;
793	DPRINTF(2, ("refclock_gtraw: fd %d time %s timecode %d %s\n",
794		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), bmax,
795		    lineptr));
796	return (bmax);
797}
798
799
800/*
801 * indicate_refclock_packet()
802 *
803 * Passes a fragment of refclock input read from the device to the
804 * driver direct input routine, which may consume it (batch it for
805 * queuing once a logical unit is assembled).  If it is not so
806 * consumed, queue it for the driver's receive entrypoint.
807 *
808 * The return value is TRUE if the data has been consumed as a fragment
809 * and should not be counted as a received packet.
810 */
811int
812indicate_refclock_packet(
813	struct refclockio *	rio,
814	struct recvbuf *	rb
815	)
816{
817	/* Does this refclock use direct input routine? */
818	if (rio->io_input != NULL && (*rio->io_input)(rb) == 0) {
819		/*
820		 * data was consumed - nothing to pass up
821		 * into block input machine
822		 */
823		freerecvbuf(rb);
824
825		return TRUE;
826	}
827	add_full_recv_buffer(rb);
828
829	return FALSE;
830}
831
832
833/*
834 * process_refclock_packet()
835 *
 * Used for deferred processing of 'io_input' on systems where threading
 * is used (notably Windows). It acts as a trampoline for the real calls
 * to the refclock functions.
839 */
840#ifdef HAVE_IO_COMPLETION_PORT
841void
842process_refclock_packet(
843	struct recvbuf * rb
844	)
845{
846	struct refclockio * rio;
847
848	/* get the refclockio structure from the receive buffer */
849	rio  = &rb->recv_peer->procptr->io;
850
851	/* call 'clock_recv' if either there is no input function or the
852	 * raw input function tells us to feed the packet to the
853	 * receiver.
854	 */
855	if (rio->io_input == NULL || (*rio->io_input)(rb) != 0) {
856		rio->recvcount++;
857		packets_received++;
858		handler_pkts++;
859		(*rio->clock_recv)(rb);
860	}
861}
862#endif	/* HAVE_IO_COMPLETION_PORT */
863
864
/*
 * The following code does not apply to Windows (SYS_WINNT) or VxWorks ...
 */
868#if !defined(SYS_VXWORKS) && !defined(SYS_WINNT)
869#if defined(HAVE_TERMIOS) || defined(HAVE_SYSV_TTYS) || defined(HAVE_BSD_TTYS)
870
871/*
872 * refclock_open - open serial port for reference clock
873 *
874 * This routine opens a serial port for I/O and sets default options. It
875 * returns the file descriptor if successful, or logs an error and
876 * returns -1.
877 */
878int
879refclock_open(
880	const char	*dev,	/* device name pointer */
881	u_int		speed,	/* serial port speed (code) */
882	u_int		lflags	/* line discipline flags */
883	)
884{
885	int	fd;
886	int	omode;
887#ifdef O_NONBLOCK
888	char	trash[128];	/* litter bin for old input data */
889#endif
890
891	/*
892	 * Open serial port and set default options
893	 */
894	omode = O_RDWR;
895#ifdef O_NONBLOCK
896	omode |= O_NONBLOCK;
897#endif
898#ifdef O_NOCTTY
899	omode |= O_NOCTTY;
900#endif
901
902	fd = open(dev, omode, 0777);
	/* refclock_open() historically returned 0 on failure; avoid handing out fd 0. */
904	if (0 == fd) {
905		fd = dup(0);
906		SAVE_ERRNO(
907			close(0);
908		)
909	}
910	if (fd < 0) {
911		SAVE_ERRNO(
912			msyslog(LOG_ERR, "refclock_open %s: %m", dev);
913		)
914		return -1;
915	}
916	if (!refclock_setup(fd, speed, lflags)) {
917		close(fd);
918		return -1;
919	}
920	if (!refclock_ioctl(fd, lflags)) {
921		close(fd);
922		return -1;
923	}
924#ifdef O_NONBLOCK
925	/*
926	 * We want to make sure there is no pending trash in the input
927	 * buffer. Since we have non-blocking IO available, this is a
928	 * good moment to read and dump all available outdated stuff
929	 * that might have become toxic for the driver.
930	 */
931	while (read(fd, trash, sizeof(trash)) > 0 || errno == EINTR)
932		/*NOP*/;
933#endif
934	return fd;
935}
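
/*
 * Illustrative sketch only (kept out of the build): a driver's
 * clock_start() routine typically opens its device with
 * refclock_open() and registers the descriptor with the I/O machinery.
 * The device name, line speed, discipline flags and the
 * example_receive() handler below are assumptions for illustration,
 * not part of this file.
 */
#if 0
static int
example_start(
	int		unit,
	struct peer *	peer
	)
{
	struct refclockproc *pp;
	char	device[32];
	int	fd;

	pp = peer->procptr;
	snprintf(device, sizeof(device), "/dev/example%d", unit);
	fd = refclock_open(device, B9600, LDISC_CLK);
	if (fd <= 0)
		return (0);	/* driver declines the unit */

	pp->io.clock_recv = example_receive;	/* hypothetical handler */
	pp->io.srcclock = peer;
	pp->io.datalen = 0;
	pp->io.fd = fd;
	if (!io_addclock(&pp->io)) {
		close(fd);
		pp->io.fd = -1;
		return (0);
	}
	return (1);
}
#endif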
936
937
938/*
939 * refclock_setup - initialize terminal interface structure
940 */
941int
942refclock_setup(
943	int	fd,		/* file descriptor */
944	u_int	speed,		/* serial port speed (code) */
945	u_int	lflags		/* line discipline flags */
946	)
947{
948	int	i;
949	TTY	ttyb, *ttyp;
950
951	/*
952	 * By default, the serial line port is initialized in canonical
953	 * (line-oriented) mode at specified line speed, 8 bits and no
954	 * parity. LF ends the line and CR is mapped to LF. The break,
955	 * erase and kill functions are disabled. There is a different
956	 * section for each terminal interface, as selected at compile
957	 * time. The flag bits can be used to set raw mode and echo.
958	 */
959	ttyp = &ttyb;
960#ifdef HAVE_TERMIOS
961
962	/*
963	 * POSIX serial line parameters (termios interface)
964	 */
965	if (tcgetattr(fd, ttyp) < 0) {
966		SAVE_ERRNO(
967			msyslog(LOG_ERR,
968				"refclock_setup fd %d tcgetattr: %m",
969				fd);
970		)
971		return FALSE;
972	}
973
974	/*
975	 * Set canonical mode and local connection; set specified speed,
976	 * 8 bits and no parity; map CR to NL; ignore break.
977	 */
978	if (speed) {
979		u_int	ltemp = 0;
980
981		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
982		ttyp->c_oflag = 0;
983		ttyp->c_cflag = CS8 | CLOCAL | CREAD;
984		if (lflags & LDISC_7O1) {
985			/* HP Z3801A needs 7-bit, odd parity */
986			ttyp->c_cflag = CS7 | PARENB | PARODD | CLOCAL | CREAD;
987		}
988		cfsetispeed(&ttyb, speed);
989		cfsetospeed(&ttyb, speed);
990		for (i = 0; i < NCCS; ++i)
991			ttyp->c_cc[i] = '\0';
992
993#if defined(TIOCMGET) && !defined(SCO5_CLOCK)
994
995		/*
996		 * If we have modem control, check to see if modem leads
997		 * are active; if so, set remote connection. This is
998		 * necessary for the kernel pps mods to work.
999		 */
1000		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
1001			msyslog(LOG_ERR,
1002			    "refclock_setup fd %d TIOCMGET: %m", fd);
1003#ifdef DEBUG
1004		if (debug)
1005			printf("refclock_setup fd %d modem status: 0x%x\n",
1006			    fd, ltemp);
1007#endif
1008		if (ltemp & TIOCM_DSR && lflags & LDISC_REMOTE)
1009			ttyp->c_cflag &= ~CLOCAL;
1010#endif /* TIOCMGET */
1011	}
1012
	/*
	 * Set raw and echo modes. These can be changed on the fly.
	 */
1016	ttyp->c_lflag = ICANON;
1017	if (lflags & LDISC_RAW) {
1018		ttyp->c_lflag = 0;
1019		ttyp->c_iflag = 0;
1020		ttyp->c_cc[VMIN] = 1;
1021	}
1022	if (lflags & LDISC_ECHO)
1023		ttyp->c_lflag |= ECHO;
1024	if (tcsetattr(fd, TCSANOW, ttyp) < 0) {
1025		SAVE_ERRNO(
1026			msyslog(LOG_ERR,
1027				"refclock_setup fd %d TCSANOW: %m",
1028				fd);
1029		)
1030		return FALSE;
1031	}
1032
1033	/*
	 * Flush the input and output buffers to discard any outdated
	 * stuff that might have become toxic for the driver. A failure
	 * is logged, but otherwise we keep our fingers crossed.
1037	 */
1038	if (tcflush(fd, TCIOFLUSH) < 0)
1039		msyslog(LOG_ERR, "refclock_setup fd %d tcflush(): %m",
1040			fd);
1041#endif /* HAVE_TERMIOS */
1042
1043#ifdef HAVE_SYSV_TTYS
1044
1045	/*
1046	 * System V serial line parameters (termio interface)
1047	 *
1048	 */
1049	if (ioctl(fd, TCGETA, ttyp) < 0) {
1050		SAVE_ERRNO(
1051			msyslog(LOG_ERR,
1052				"refclock_setup fd %d TCGETA: %m",
1053				fd);
1054		)
1055		return FALSE;
1056	}
1057
1058	/*
1059	 * Set canonical mode and local connection; set specified speed,
1060	 * 8 bits and no parity; map CR to NL; ignore break.
1061	 */
1062	if (speed) {
1063		u_int	ltemp = 0;
1064
1065		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
1066		ttyp->c_oflag = 0;
1067		ttyp->c_cflag = speed | CS8 | CLOCAL | CREAD;
1068		for (i = 0; i < NCCS; ++i)
1069			ttyp->c_cc[i] = '\0';
1070
1071#if defined(TIOCMGET) && !defined(SCO5_CLOCK)
1072
1073		/*
1074		 * If we have modem control, check to see if modem leads
1075		 * are active; if so, set remote connection. This is
1076		 * necessary for the kernel pps mods to work.
1077		 */
1078		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
1079			msyslog(LOG_ERR,
1080			    "refclock_setup fd %d TIOCMGET: %m", fd);
1081#ifdef DEBUG
1082		if (debug)
1083			printf("refclock_setup fd %d modem status: %x\n",
1084			    fd, ltemp);
1085#endif
1086		if (ltemp & TIOCM_DSR)
1087			ttyp->c_cflag &= ~CLOCAL;
1088#endif /* TIOCMGET */
1089	}
1090
	/*
	 * Set raw and echo modes. These can be changed on the fly.
	 */
1094	ttyp->c_lflag = ICANON;
1095	if (lflags & LDISC_RAW) {
1096		ttyp->c_lflag = 0;
1097		ttyp->c_iflag = 0;
1098		ttyp->c_cc[VMIN] = 1;
1099	}
1100	if (ioctl(fd, TCSETA, ttyp) < 0) {
1101		SAVE_ERRNO(
1102			msyslog(LOG_ERR,
1103				"refclock_setup fd %d TCSETA: %m", fd);
1104		)
1105		return FALSE;
1106	}
1107#endif /* HAVE_SYSV_TTYS */
1108
1109#ifdef HAVE_BSD_TTYS
1110
1111	/*
1112	 * 4.3bsd serial line parameters (sgttyb interface)
1113	 */
1114	if (ioctl(fd, TIOCGETP, (char *)ttyp) < 0) {
1115		SAVE_ERRNO(
1116			msyslog(LOG_ERR,
1117				"refclock_setup fd %d TIOCGETP: %m",
1118				fd);
1119		)
1120		return FALSE;
1121	}
1122	if (speed)
1123		ttyp->sg_ispeed = ttyp->sg_ospeed = speed;
1124	ttyp->sg_flags = EVENP | ODDP | CRMOD;
1125	if (ioctl(fd, TIOCSETP, (char *)ttyp) < 0) {
1126		SAVE_ERRNO(
1127			msyslog(LOG_ERR, "refclock_setup TIOCSETP: %m");
1128		)
1129		return FALSE;
1130	}
1131#endif /* HAVE_BSD_TTYS */
1132	return(1);
1133}
1134#endif /* HAVE_TERMIOS || HAVE_SYSV_TTYS || HAVE_BSD_TTYS */
1135
1136
1137/*
1138 * refclock_ioctl - set serial port control functions
1139 *
1140 * This routine attempts to hide the internal, system-specific details
1141 * of serial ports. It can handle POSIX (termios), SYSV (termio) and BSD
1142 * (sgtty) interfaces with varying degrees of success. The routine sets
1143 * up optional features such as tty_clk. The routine returns TRUE if
1144 * successful.
1145 */
1146int
1147refclock_ioctl(
1148	int	fd, 		/* file descriptor */
1149	u_int	lflags		/* line discipline flags */
1150	)
1151{
1152	/*
1153	 * simply return TRUE if no UNIX line discipline is supported
1154	 */
1155	DPRINTF(1, ("refclock_ioctl: fd %d flags 0x%x\n", fd, lflags));
1156
1157	return TRUE;
1158}
1159#endif /* !defined(SYS_VXWORKS) && !defined(SYS_WINNT) */
1160
1161
1162/*
1163 * refclock_control - set and/or return clock values
1164 *
1165 * This routine is used mainly for debugging. It returns designated
1166 * values from the interface structure that can be displayed using
1167 * ntpdc and the clockstat command. It can also be used to initialize
1168 * configuration variables, such as fudgetimes, fudgevalues, reference
1169 * ID and stratum.
1170 */
1171void
1172refclock_control(
1173	sockaddr_u *srcadr,
1174	const struct refclockstat *in,
1175	struct refclockstat *out
1176	)
1177{
1178	struct peer *peer;
1179	struct refclockproc *pp;
1180	u_char clktype;
1181	int unit;
1182
1183	/*
1184	 * Check for valid address and running peer
1185	 */
1186	if (!ISREFCLOCKADR(srcadr))
1187		return;
1188
1189	clktype = (u_char)REFCLOCKTYPE(srcadr);
1190	unit = REFCLOCKUNIT(srcadr);
1191
1192	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);
1193
1194	if (NULL == peer)
1195		return;
1196
1197	INSIST(peer->procptr != NULL);
1198	pp = peer->procptr;
1199
1200	/*
1201	 * Initialize requested data
1202	 */
1203	if (in != NULL) {
1204		if (in->haveflags & CLK_HAVETIME1)
1205			pp->fudgetime1 = in->fudgetime1;
1206		if (in->haveflags & CLK_HAVETIME2)
1207			pp->fudgetime2 = in->fudgetime2;
1208		if (in->haveflags & CLK_HAVEVAL1)
1209			peer->stratum = pp->stratum = (u_char)in->fudgeval1;
1210		if (in->haveflags & CLK_HAVEVAL2)
1211			peer->refid = pp->refid = in->fudgeval2;
1212		if (in->haveflags & CLK_HAVEFLAG1) {
1213			pp->sloppyclockflag &= ~CLK_FLAG1;
1214			pp->sloppyclockflag |= in->flags & CLK_FLAG1;
1215		}
1216		if (in->haveflags & CLK_HAVEFLAG2) {
1217			pp->sloppyclockflag &= ~CLK_FLAG2;
1218			pp->sloppyclockflag |= in->flags & CLK_FLAG2;
1219		}
1220		if (in->haveflags & CLK_HAVEFLAG3) {
1221			pp->sloppyclockflag &= ~CLK_FLAG3;
1222			pp->sloppyclockflag |= in->flags & CLK_FLAG3;
1223		}
1224		if (in->haveflags & CLK_HAVEFLAG4) {
1225			pp->sloppyclockflag &= ~CLK_FLAG4;
1226			pp->sloppyclockflag |= in->flags & CLK_FLAG4;
1227		}
1228		if (in->haveflags & CLK_HAVEMINJIT)
1229			pp->fudgeminjitter = in->fudgeminjitter;
1230	}
1231
1232	/*
1233	 * Readback requested data
1234	 */
1235	if (out != NULL) {
1236		out->fudgeval1 = pp->stratum;
1237		out->fudgeval2 = pp->refid;
1238		out->haveflags = CLK_HAVEVAL1 | CLK_HAVEVAL2;
1239		out->fudgetime1 = pp->fudgetime1;
1240		if (0.0 != out->fudgetime1)
1241			out->haveflags |= CLK_HAVETIME1;
1242		out->fudgetime2 = pp->fudgetime2;
1243		if (0.0 != out->fudgetime2)
1244			out->haveflags |= CLK_HAVETIME2;
1245		out->flags = (u_char) pp->sloppyclockflag;
1246		if (CLK_FLAG1 & out->flags)
1247			out->haveflags |= CLK_HAVEFLAG1;
1248		if (CLK_FLAG2 & out->flags)
1249			out->haveflags |= CLK_HAVEFLAG2;
1250		if (CLK_FLAG3 & out->flags)
1251			out->haveflags |= CLK_HAVEFLAG3;
1252		if (CLK_FLAG4 & out->flags)
1253			out->haveflags |= CLK_HAVEFLAG4;
1254		out->fudgeminjitter = pp->fudgeminjitter;
1255		if (0.0 != out->fudgeminjitter)
1256			out->haveflags |= CLK_HAVEMINJIT;
1257
1258		out->timereset = current_time - pp->timestarted;
1259		out->polls = pp->polls;
1260		out->noresponse = pp->noreply;
1261		out->badformat = pp->badformat;
1262		out->baddata = pp->baddata;
1263
1264		out->lastevent = pp->lastevent;
1265		out->currentstatus = pp->currentstatus;
1266		out->type = pp->type;
1267		out->clockdesc = pp->clockdesc;
1268		out->lencode = (u_short)pp->lencode;
1269		out->p_lastcode = pp->a_lastcode;
1270	}
1271
1272	/*
1273	 * Give the stuff to the clock
1274	 */
1275	if (refclock_conf[clktype]->clock_control != noentry)
1276		(refclock_conf[clktype]->clock_control)(unit, in, out, peer);
1277}
1278
1279
1280/*
1281 * refclock_buginfo - return debugging info
1282 *
1283 * This routine is used mainly for debugging. It returns designated
1284 * values from the interface structure that can be displayed using
1285 * ntpdc and the clkbug command.
1286 */
1287void
1288refclock_buginfo(
1289	sockaddr_u *srcadr,	/* clock address */
1290	struct refclockbug *bug /* output structure */
1291	)
1292{
1293	struct peer *peer;
1294	struct refclockproc *pp;
1295	int clktype;
1296	int unit;
1297	unsigned u;
1298
1299	/*
1300	 * Check for valid address and peer structure
1301	 */
1302	if (!ISREFCLOCKADR(srcadr))
1303		return;
1304
1305	clktype = (u_char) REFCLOCKTYPE(srcadr);
1306	unit = REFCLOCKUNIT(srcadr);
1307
1308	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);
1309
1310	if (NULL == peer || NULL == peer->procptr)
1311		return;
1312
1313	pp = peer->procptr;
1314
1315	/*
1316	 * Copy structure values
1317	 */
1318	bug->nvalues = 8;
1319	bug->svalues = 0x0000003f;
1320	bug->values[0] = pp->year;
1321	bug->values[1] = pp->day;
1322	bug->values[2] = pp->hour;
1323	bug->values[3] = pp->minute;
1324	bug->values[4] = pp->second;
1325	bug->values[5] = pp->nsec;
1326	bug->values[6] = pp->yearstart;
1327	bug->values[7] = pp->coderecv;
1328	bug->stimes = 0xfffffffc;
1329	bug->times[0] = pp->lastref;
1330	bug->times[1] = pp->lastrec;
1331	for (u = 2; u < bug->ntimes; u++)
1332		DTOLFP(pp->filter[u - 2], &bug->times[u]);
1333
1334	/*
1335	 * Give the stuff to the clock
1336	 */
1337	if (refclock_conf[clktype]->clock_buginfo != noentry)
1338		(refclock_conf[clktype]->clock_buginfo)(unit, bug, peer);
1339}
1340
1341
1342#ifdef HAVE_PPSAPI
1343/*
1344 * refclock_ppsapi - initialize/update ppsapi
1345 *
 * This routine is called after the fudge command to open the PPSAPI
 * interface so that parameters can be set later.
1348 */
1349int
1350refclock_ppsapi(
1351	int	fddev,			/* fd device */
1352	struct refclock_atom *ap	/* atom structure pointer */
1353	)
1354{
1355	if (ap->handle == 0) {
1356		if (time_pps_create(fddev, &ap->handle) < 0) {
1357			msyslog(LOG_ERR,
1358			    "refclock_ppsapi: time_pps_create: %m");
1359			return (0);
1360		}
1361		ZERO(ap->ts); /* [Bug 2689] defined INIT state */
1362	}
1363	return (1);
1364}
1365
1366
1367/*
1368 * refclock_params - set ppsapi parameters
1369 *
1370 * This routine is called to set the PPSAPI parameters after the fudge
1371 * command.
1372 */
1373int
1374refclock_params(
1375	int	mode,			/* mode bits */
1376	struct refclock_atom *ap	/* atom structure pointer */
1377	)
1378{
1379	ZERO(ap->pps_params);
1380	ap->pps_params.api_version = PPS_API_VERS_1;
1381
1382	/*
1383	 * Solaris serial ports provide PPS pulse capture only on the
1384	 * assert edge. FreeBSD serial ports provide capture on the
1385	 * clear edge, while FreeBSD parallel ports provide capture
1386	 * on the assert edge. Your mileage may vary.
1387	 */
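	/* Illustrative ntp.conf usage (assuming the PPS/ATOM driver,
	 * type 22, unit 0):
	 *
	 *   fudge 127.127.22.0 flag2 1	# capture on the clear edge
	 *   fudge 127.127.22.0 flag3 1	# bind the kernel PPS (hardpps)
	 */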
1388	if (mode & CLK_FLAG2)
1389		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTURECLEAR;
1390	else
1391		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTUREASSERT;
1392	if (time_pps_setparams(ap->handle, &ap->pps_params) < 0) {
1393		msyslog(LOG_ERR,
1394		    "refclock_params: time_pps_setparams: %m");
1395		return (0);
1396	}
1397
1398	/*
1399	 * If flag3 is lit, select the kernel PPS if we can.
1400	 *
1401	 * Note: EOPNOTSUPP is the only 'legal' error code we deal with;
1402	 * it is part of the 'if we can' strategy.  Any other error
1403	 * indicates something more sinister and makes this function fail.
1404	 */
1405	if (mode & CLK_FLAG3) {
1406		if (time_pps_kcbind(ap->handle, PPS_KC_HARDPPS,
1407		    ap->pps_params.mode & ~PPS_TSFMT_TSPEC,
1408		    PPS_TSFMT_TSPEC) < 0)
1409		{
1410			if (errno != EOPNOTSUPP) {
1411				msyslog(LOG_ERR,
1412					"refclock_params: time_pps_kcbind: %m");
1413				return (0);
1414			}
1415		} else {
1416			hardpps_enable = 1;
1417		}
1418	}
1419	return (1);
1420}
1421
1422
1423/*
1424 * refclock_pps - called once per second
1425 *
1426 * This routine is called once per second. It snatches the PPS
1427 * timestamp from the kernel and saves the sign-extended fraction in
1428 * a circular buffer for processing at the next poll event.
1429 */
1430int
1431refclock_pps(
1432	struct peer *peer,		/* peer structure pointer */
1433	struct refclock_atom *ap,	/* atom structure pointer */
1434	int	mode			/* mode bits */
1435	)
1436{
1437	struct refclockproc *pp;
1438	pps_info_t pps_info;
1439	struct timespec timeout;
1440	double	dtemp, dcorr, trash;
1441
1442	/*
1443	 * We require the clock to be synchronized before setting the
1444	 * parameters. When the parameters have been set, fetch the
1445	 * most recent PPS timestamp.
1446	 */
1447	pp = peer->procptr;
1448	if (ap->handle == 0)
1449		return (0);
1450
1451	if (ap->pps_params.mode == 0 && sys_leap != LEAP_NOTINSYNC) {
1452		if (refclock_params(pp->sloppyclockflag, ap) < 1)
1453			return (0);
1454	}
1455	ZERO(timeout);
1456	ZERO(pps_info);
1457	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC, &pps_info,
1458	    &timeout) < 0) {
1459		refclock_report(peer, CEVNT_FAULT);
1460		return (0);
1461	}
1462	timeout = ap->ts;	/* save old timestamp for check */
1463	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
1464		ap->ts = pps_info.assert_timestamp;
1465	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
1466		ap->ts = pps_info.clear_timestamp;
1467	else
1468		return (0);
1469
1470	/* [Bug 2689] Discard the first sample we read -- if the PPS
1471	 * source is currently down / disconnected, we have read a
	 * potentially *very* stale value here. So if our old TS value
	 * is all-zero, we consider this sample unreliable and drop it.
1474	 *
1475	 * Note 1: a better check would compare the PPS time stamp to
1476	 * the current system time and drop it if it's more than say 3s
1477	 * away.
1478	 *
	 * Note 2: If we ever again get an all-zero PPS sample, the next
	 * one will be discarded. This can happen only once every 136
	 * years and is unlikely to ever be observed.
1482	 */
1483	if (0 == (timeout.tv_sec | timeout.tv_nsec))
1484		return (0);
1485
1486	/* If the PPS source fails to deliver a new sample between
1487	 * polls, it regurgitates the last sample. We do not want to
1488	 * process the same sample multiple times.
1489	 */
1490	if (0 == memcmp(&timeout, &ap->ts, sizeof(timeout)))
1491		return (0);
1492
1493	/*
1494	 * Convert to signed fraction offset, apply fudge and properly
1495	 * fold the correction into the [-0.5s,0.5s] range. Handle
1496	 * excessive fudge times, too.
1497	 */
1498	dtemp = ap->ts.tv_nsec / 1e9;
1499	dcorr = modf((pp->fudgetime1 - dtemp), &trash);
1500	if (dcorr > 0.5)
1501		dcorr -= 1.0;
1502	else if (dcorr < -0.5)
1503		dcorr += 1.0;
1504
1505	/* phase gate check: avoid wobbling by +/-1s when too close to
1506	 * the switch-over point. We allow +/-400ms max phase deviation.
1507	 * The trade-off is clear: The smaller the limit, the less
1508	 * sensitive to sampling noise the clock becomes. OTOH the
1509	 * system must get into phase gate range by other means for the
1510	 * PPS clock to lock in.
1511	 */
1512	if (fabs(dcorr) > 0.4)
1513		return (0);
1514
1515	/*
1516	 * record this time stamp and stuff in median filter
1517	 */
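	/* NTP seconds = POSIX seconds + JAN_1970 (the 1900-to-1970 epoch
	 * offset); the fraction is the PPS nanoseconds scaled by 2^32.
	 */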
1518	pp->lastrec.l_ui = (u_int32)ap->ts.tv_sec + JAN_1970;
1519	pp->lastrec.l_uf = (u_int32)(dtemp * FRAC);
1520	clk_add_sample(pp, dcorr);
1521	refclock_checkburst(peer, pp);
1522
1523#ifdef DEBUG
1524	if (debug > 1)
1525		printf("refclock_pps: %lu %f %f\n", current_time,
1526		    dcorr, pp->fudgetime1);
1527#endif
1528	return (1);
1529}
1530#endif /* HAVE_PPSAPI */
1531
1532
1533/*
1534 * -------------------------------------------------------------------
1535 * refclock_ppsaugment(...) -- correlate with PPS edge
1536 *
1537 * This function is used to correlate a receive time stamp with a PPS
1538 * edge time stamp. It applies the necessary fudges and then tries to
 * move the receive time stamp to the corresponding edge. This can warp
 * into the future if a transmission delay of more than 500ms is not
 * compensated with a corresponding fudge time2 value, because then the
 * next PPS edge is nearer than the last. (Similar to what the PPS ATOM
 * driver does, but we deal with full time stamps here, not just phase
 * shift information.) Likewise, a negative fudge time2 value must be
 * used if the reference time stamp correlates with the *following* PPS
 * pulse.
1547 *
1548 * Note that the receive time fudge value only needs to move the receive
1549 * stamp near a PPS edge but that close proximity is not required;
1550 * +/-100ms precision should be enough. But since the fudge value will
1551 * probably also be used to compensate the transmission delay when no
1552 * PPS edge can be related to the time stamp, it's best to get it as
1553 * close as possible.
1554 *
 * It should also be noted that the typical use case is matching to the
 * preceding edge, as most units relate their sentences to the current
 * second.
1558 *
 * The function returns FALSE if no correlation is possible, TRUE
 * otherwise.  Reasons for failure are:
 *
 *  - no PPS/ATOM unit given
 *  - the PPS stamp is stale (that is, the difference between the PPS
 *    stamp and the corrected time stamp would exceed two seconds)
 *  - the phase difference is too close to 0.5, and the decision whether
 *    to move up or down is too sensitive to noise.
1567 *
1568 * On output, the receive time stamp is updated with the 'fixed' receive
1569 * time.
1570 * -------------------------------------------------------------------
1571 */
1572
1573int/*BOOL*/
1574refclock_ppsaugment(
1575	const struct refclock_atom * ap	    ,	/* for PPS io	  */
1576	l_fp 			   * rcvtime ,
1577	double			     rcvfudge,	/* i/o read fudge */
1578	double			     ppsfudge	/* pps fudge	  */
1579	)
1580{
1581	l_fp		delta[1];
1582
1583#ifdef HAVE_PPSAPI
1584
1585	pps_info_t	pps_info;
1586	struct timespec timeout;
1587	l_fp		stamp[1];
1588	uint32_t	phase;
1589
1590	static const uint32_t s_plim_hi = UINT32_C(1932735284);
1591	static const uint32_t s_plim_lo = UINT32_C(2362232013);
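	/* The two limits above are 0.45 and 0.55 of a second expressed
	 * in l_fp fraction units (0.45 * 2^32 and 0.55 * 2^32); together
	 * they define the noise lock gap around a phase of 0.5.
	 */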
1592
1593	/* fixup receive time in case we have to bail out early */
1594	DTOLFP(rcvfudge, delta);
1595	L_SUB(rcvtime, delta);
1596
1597	if (NULL == ap)
1598		return FALSE;
1599
1600	ZERO(timeout);
1601	ZERO(pps_info);
1602
1603	/* fetch PPS stamp from ATOM block */
1604	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC,
1605			   &pps_info, &timeout) < 0)
1606		return FALSE; /* can't get time stamps */
1607
1608	/* get last active PPS edge before receive */
1609	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
1610		timeout = pps_info.assert_timestamp;
1611	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
1612		timeout = pps_info.clear_timestamp;
1613	else
1614		return FALSE; /* WHICH edge, please?!? */
1615
1616	/* convert PPS stamp to l_fp and apply fudge */
1617	*stamp = tspec_stamp_to_lfp(timeout);
1618	DTOLFP(ppsfudge, delta);
1619	L_SUB(stamp, delta);
1620
1621	/* Get difference between PPS stamp (--> yield) and receive time
1622	 * (--> base)
1623	 */
1624	*delta = *stamp;
1625	L_SUB(delta, rcvtime);
1626
	/* Check whether the PPS stamp and the receive stamp are stale
	 * relative to each other. Bail out if so.
	 */
1630	phase = delta->l_ui;
1631	if (phase >= 2 && phase < (uint32_t)-2)
1632		return FALSE; /* PPS is stale, don't use it */
1633
1634	/* If the phase is too close to 0.5, the decision whether to
1635	 * move up or down is becoming noise sensitive. That is, we
1636	 * might amplify usec noise between samples into seconds with a
1637	 * simple threshold. This can be solved by a Schmitt Trigger
1638	 * characteristic, but that would also require additional state
1639	 * where we could remember previous decisions.  Easier to play
1640	 * dead duck and wait for the conditions to become clear.
1641	 */
1642	phase = delta->l_uf;
1643	if (phase > s_plim_hi && phase < s_plim_lo)
1644		return FALSE; /* we're in the noise lock gap */
1645
1646	/* sign-extend fraction into seconds */
1647	delta->l_ui = UINT32_C(0) - ((phase >> 31) & 1);
1648	/* add it up now */
1649	L_ADD(rcvtime, delta);
1650	return TRUE;
1651
1652#   else /* have no PPS support at all */
1653
1654	/* just fixup receive time and fail */
1655	UNUSED_ARG(ap);
1656	UNUSED_ARG(ppsfudge);
1657
1658	DTOLFP(rcvfudge, delta);
1659	L_SUB(rcvtime, delta);
1660	return FALSE;
1661
1662#   endif
1663}
1664
1665/*
1666 * -------------------------------------------------------------------
 * check if it makes sense to schedule an 'early' poll to get the clock
 * up fast after startup or a longer signal dropout.
1669 */
1670static void
1671refclock_checkburst(
1672	struct peer *         peer,
1673	struct refclockproc * pp
1674	)
1675{
1676	uint32_t	limit;	/* when we should poll */
1677	u_int		needs;	/* needed number of samples */
1678
1679	/* Paranoia: stop here if peer and clockproc don't match up.
1680	 * And when a poll is actually pending, we don't have to do
1681	 * anything, either. Likewise if the reach mask is full, of
1682	 * course, and if the filter has stabilized.
1683	 */
1684	if (pp->inpoll || (peer->procptr != pp) ||
1685	    ((peer->reach == 0xFF) && (peer->disp <= MAXDISTANCE)))
1686		return;
1687
1688	/* If the next poll is soon enough, bail out, too: */
1689	limit = current_time + 1;
1690	if (peer->nextdate <= limit)
1691		return;
1692
	/* Derive the number of samples needed from the popcount of the
	 * reach mask.  With fewer samples available, we break away.
	 */
1696	needs  = peer->reach;
1697	needs -= (needs >> 1) & 0x55;
1698	needs  = (needs & 0x33) + ((needs >> 2) & 0x33);
1699	needs  = (needs + (needs >> 4)) & 0x0F;
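	/* 'needs' now holds the number of set bits in the 8-bit reach
	 * register (a SWAR popcount); it is clamped to [3, 6] below.
	 */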
1700	if (needs > 6)
1701		needs = 6;
1702	else if (needs < 3)
1703		needs = 3;
1704	if (clk_cnt_sample(pp) < needs)
1705		return;
1706
	/* Get serious. Reduce the poll to minimum and schedule early.
	 * (Changing the peer poll is probably in vain, as it will be
	 * re-adjusted, but maybe sometime the hint will work...)
	 */
1711	peer->hpoll = peer->minpoll;
1712	peer->nextdate = limit;
1713}
1714
1715/*
1716 * -------------------------------------------------------------------
1717 * Save the last timecode string, making sure it's properly truncated
1718 * if necessary and NUL terminated in any case.
1719 */
1720void
1721refclock_save_lcode(
1722	struct refclockproc *	pp,
1723	char const *		tc,
1724	size_t			len
1725	)
1726{
1727	if (len == (size_t)-1)
1728		len = strnlen(tc,  sizeof(pp->a_lastcode) - 1);
1729	else if (len >= sizeof(pp->a_lastcode))
1730		len = sizeof(pp->a_lastcode) - 1;
1731
1732	pp->lencode = (u_short)len;
1733	memcpy(pp->a_lastcode, tc, len);
1734	pp->a_lastcode[len] = '\0';
1735}
1736
1737/* format data into a_lastcode */
1738void
1739refclock_vformat_lcode(
1740	struct refclockproc *	pp,
1741	char const *		fmt,
1742	va_list			va
1743	)
1744{
1745	long len;
1746
1747	len = vsnprintf(pp->a_lastcode, sizeof(pp->a_lastcode), fmt, va);
1748	if (len <= 0)
1749		len = 0;
1750	else if (len >= sizeof(pp->a_lastcode))
1751		len = sizeof(pp->a_lastcode) - 1;
1752
1753	pp->lencode = (u_short)len;
1754	pp->a_lastcode[len] = '\0';
1755	/* !note! the NUL byte is needed in case vsnprintf() really fails */
1756}
1757
1758void
1759refclock_format_lcode(
1760	struct refclockproc *	pp,
1761	char const *		fmt,
1762	...
1763	)
1764{
1765	va_list va;
1766
1767	va_start(va, fmt);
1768	refclock_vformat_lcode(pp, fmt, va);
1769	va_end(va);
1770}
1771
1772#endif /* REFCLOCK */
1773