#include <config.h>

#include <event2/util.h>
#include <event2/event.h>

#include "ntp_workimpl.h"
#ifdef WORK_THREAD
# include <event2/thread.h>
#endif

#include "main.h"
#include "ntp_libopts.h"
#include "kod_management.h"
#include "networking.h"
#include "utilities.h"
#include "log.h"
#include "libntp.h"


int shutting_down;
int time_derived;
int time_adjusted;
int n_pending_dns = 0;
int n_pending_ntp = 0;
int ai_fam_pref = AF_UNSPEC;
int ntpver = 4;
double steplimit = -1;
SOCKET sock4 = -1;		/* Socket for IPv4 */
SOCKET sock6 = -1;		/* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;		/* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;		/* Broadcast Socket for IPv6 */
struct event_base *base;
struct event *ev_sock4;
struct event *ev_sock6;
struct event *ev_worker_timeout;
struct event *ev_xmt_timer;

struct dns_ctx {
	const char *	name;
	int		flags;
#define CTX_BCST	0x0001
#define CTX_UCST	0x0002
#define CTX_xCST	0x0003
#define CTX_CONC	0x0004
#define CTX_unused	0xfffd
	int		key_id;
	struct timeval	timeout;
	struct key *	key;
};

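/*
** One sent_pkt is allocated per server address; it lives on the
** per-family list in fam_listheads until a response arrives or the
** query times out.
*/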
typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
	sent_pkt *		link;
	struct dns_ctx *	dctx;
	sockaddr_u		addr;
	time_t			stime;
	int			done;
	struct pkt		x_pkt;
};

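/*
** xmt_ctx entries form the transmit queue: which socket to use, when
** (in whole seconds) to send, and the sent_pkt to stamp and record.
*/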
typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
	xmt_ctx *		link;
	SOCKET			sock;
	time_t			sched;
	sent_pkt *		spkt;
};

struct timeval	gap;
xmt_ctx *	xmt_q;
struct key *	keys = NULL;
int		response_timeout;
struct timeval	response_tv;
struct timeval	start_tv;
/* check the timeout at least once per second */
struct timeval	wakeup_tv = { 0, 888888 };

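/* lists of sent packets awaiting replies: index 0 for IPv4, index 1 for IPv6 */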
sent_pkt *	fam_listheads[2];
#define v4_pkts_list	(fam_listheads[0])
#define v6_pkts_list	(fam_listheads[1])

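/* receive buffer, large enough for a packet with maximal extensions and MAC */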
static union {
	struct pkt pkt;
	char   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
} rbuf;

#define r_pkt  rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;			/* intres imports these */
int root_dropped;
#endif
u_long current_time;		/* libntp/authkeys.c */

void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
			const struct addrinfo *,
			const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
	       u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);


/*
 * The actual main function.
 */
int
sntp_main (
	int argc,
	char **argv,
	const char *sntpVersion
	)
{
	int			i;
	int			exitcode;
	int			optct;
	struct event_config *	evcfg;

	/* Initialize logging system - sets up progname */
	sntp_init_logging(argv[0]);

	if (!libevent_version_ok())
		exit(EX_SOFTWARE);

	init_lib();
	init_auth();

	optct = ntpOptionProcess(&sntpOptions, argc, argv);
	argc -= optct;
	argv += optct;


	debug = OPT_VALUE_SET_DEBUG_LEVEL;

	TRACE(2, ("init_lib() done, %s%s\n",
		  (ipv4_works)
		      ? "ipv4_works "
		      : "",
		  (ipv6_works)
		      ? "ipv6_works "
		      : ""));
	ntpver = OPT_VALUE_NTPVERSION;
	steplimit = OPT_VALUE_STEPLIMIT / 1e3;
	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
	gap.tv_usec = min(gap.tv_usec, 999999);

	if (HAVE_OPT(LOGFILE))
		open_logfile(OPT_ARG(LOGFILE));

	msyslog(LOG_INFO, "%s", sntpVersion);

	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
		       progname);
		exit(EX_USAGE);
	}


	/*
	** Eventually, we probably want:
	** - separate bcst and ucst timeouts (why?)
	** - multiple --timeout values in the commandline
	*/

	response_timeout = OPT_VALUE_TIMEOUT;
	response_tv.tv_sec = response_timeout;
	response_tv.tv_usec = 0;

	/* IPv6 available? */
	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
		ai_fam_pref = AF_INET;
		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
	} else {
		/* Check for options -4 and -6 */
		if (HAVE_OPT(IPV4))
			ai_fam_pref = AF_INET;
		else if (HAVE_OPT(IPV6))
			ai_fam_pref = AF_INET6;
	}

	/* TODO: Parse config file if declared */

	/*
	** Init the KOD system.
	** For embedded systems with no writable filesystem,
	** -K /dev/null can be used to disable KoD storage.
	*/
	kod_init_kod_db(OPT_ARG(KOD), FALSE);

	/* HMS: Check and see what happens if KEYFILE doesn't exist */
	auth_init(OPT_ARG(KEYFILE), &keys);

	/*
	** Consider employing a variable that prevents functions from doing
	** anything until everything is initialized properly.
	**
	** HMS: What exactly does the above mean?
	*/
	event_set_log_callback(&sntp_libevent_log_cb);
	if (debug > 0)
		event_enable_debug_mode();
#ifdef WORK_THREAD
	evthread_use_pthreads();
	/* we use libevent from main thread only, locks should be academic */
	if (debug > 0)
		evthread_enable_lock_debuging();
#endif
	evcfg = event_config_new();
	if (NULL == evcfg) {
		printf("%s: event_config_new() failed!\n", progname);
		return -1;
	}
#ifndef HAVE_SOCKETPAIR
	event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
	/* all libevent calls are from main thread */
	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
	base = event_base_new_with_config(evcfg);
	event_config_free(evcfg);
	if (NULL == base) {
		printf("%s: event_base_new() failed!\n", progname);
		return -1;
	}

	/* wire into intres resolver */
	worker_per_query = TRUE;
	addremove_io_fd = &sntp_addremove_fd;

	open_sockets();

	if (HAVE_OPT(BROADCAST)) {
		int		cn = STACKCT_OPT(  BROADCAST );
		const char **	cp = STACKLST_OPT( BROADCAST );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_BCST);
			cp++;
		}
	}

	if (HAVE_OPT(CONCURRENT)) {
		int		cn = STACKCT_OPT( CONCURRENT );
		const char **	cp = STACKLST_OPT( CONCURRENT );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_UCST | CTX_CONC);
			cp++;
		}
	}

	for (i = 0; i < argc; ++i)
		handle_lookup(argv[i], CTX_UCST);

	gettimeofday_cached(base, &start_tv);
	event_base_dispatch(base);
	event_base_free(base);

	if (!time_adjusted &&
	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
		exitcode = 1;
	else
		exitcode = 0;

	return exitcode;
}


/*
** open sockets and make them non-blocking
*/
void
open_sockets(
	void
	)
{
	sockaddr_u	name;

	if (-1 == sock4) {
		sock4 = socket(PF_INET, SOCK_DGRAM, 0);
		if (-1 == sock4) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock4);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET;
		SET_ADDR4N(&name, INADDR_ANY);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock4, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
			exit(1);
		}

		/* Register an NTP callback for recv/timeout */
		ev_sock4 = event_new(base, sock4,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock4) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock4) failed!");
		} else {
			event_add(ev_sock4, &wakeup_tv);
		}
	}

	/* We may not always have IPv6... */
	if (-1 == sock6 && ipv6_works) {
		sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
		if (-1 == sock6 && ipv6_works) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock6);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET6;
		SET_ADDR6N(&name, in6addr_any);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock6, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
			exit(1);
		}
		/* Register an NTP callback for recv/timeout */
		ev_sock6 = event_new(base, sock6,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock6) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock6) failed!");
		} else {
			event_add(ev_sock6, &wakeup_tv);
		}
	}

	return;
}


/*
** handle_lookup
*/
void
handle_lookup(
	const char *name,
	int flags
	)
{
	struct addrinfo	hints;	/* Local copy is OK */
	struct dns_ctx *ctx;
	char *		name_copy;
	size_t		name_sz;
	size_t		octets;

	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

	ZERO(hints);
	hints.ai_family = ai_fam_pref;
	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
	/*
	** Unless we specify a socktype, we'll get at least two
	** entries for each address: one for TCP and one for
	** UDP. That's not what we want.
	*/
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;

	name_sz = 1 + strlen(name);
	octets = sizeof(*ctx) + name_sz;	// Space for a ctx and the name
	ctx = emalloc_zero(octets);		// ctx at ctx[0]
	name_copy = (char *)(ctx + 1);		// Put the name at ctx[1]
	memcpy(name_copy, name, name_sz);	// copy the name to ctx[1]
	ctx->name = name_copy;			// point to it...
	ctx->flags = flags;
	ctx->timeout = response_tv;
	ctx->key = NULL;

	/* The following should arguably be passed in... */
	if (ENABLED_OPT(AUTHENTICATION)) {
		ctx->key_id = OPT_VALUE_AUTHENTICATION;
		get_key(ctx->key_id, &ctx->key);
		if (NULL == ctx->key) {
			fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
				progname, ctx->key_id, OPT_ARG(KEYFILE));
			exit(1);
		}
	} else {
		ctx->key_id = -1;
	}

	++n_pending_dns;
	getaddrinfo_sometime(name, "123", &hints, 0,
			     &sntp_name_resolved, ctx);
}


/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
*/
void
sntp_name_resolved(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	addr
	)
{
	struct dns_ctx *	dctx;
	sent_pkt *		spkt;
	const struct addrinfo *	ai;
	SOCKET			sock;
	u_int			xmt_delay_v4;
	u_int			xmt_delay_v6;
	u_int			xmt_delay;
	size_t			octets;

	xmt_delay_v4 = 0;
	xmt_delay_v6 = 0;
	dctx = context;
	if (rescode) {
#ifdef EAI_SYSTEM
		if (EAI_SYSTEM == rescode) {
			errno = gai_errno;
			mfprintf(stderr, "%s lookup error %m\n",
				 dctx->name);
		} else
#endif
			fprintf(stderr, "%s lookup error %s\n",
				dctx->name, gai_strerror(rescode));
	} else {
		TRACE(3, ("%s [%s]\n", dctx->name,
			  (addr->ai_canonname != NULL)
			      ? addr->ai_canonname
			      : ""));

		for (ai = addr; ai != NULL; ai = ai->ai_next) {

			if (check_kod(ai))
				continue;

			switch (ai->ai_family) {

			case AF_INET:
				sock = sock4;
				xmt_delay = xmt_delay_v4;
				xmt_delay_v4++;
				break;

			case AF_INET6:
				if (!ipv6_works)
					continue;

				sock = sock6;
				xmt_delay = xmt_delay_v6;
				xmt_delay_v6++;
				break;

			default:
				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
					ai->ai_family);
				exit(1);
				break;
			}

			/*
			** We're waiting for a response for either unicast
			** or broadcast, so...
			*/
			++n_pending_ntp;

			/* If this is for a unicast IP, queue a request */
			if (dctx->flags & CTX_UCST) {
				spkt = emalloc_zero(sizeof(*spkt));
				spkt->dctx = dctx;
				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
				memcpy(&spkt->addr, ai->ai_addr, octets);
				queue_xmt(sock, dctx, spkt, xmt_delay);
			}
		}
	}
	/* n_pending_dns really should be >0 here... */
	--n_pending_dns;
	check_exit_conditions();
}


/*
** queue_xmt
*/
void
queue_xmt(
	SOCKET			sock,
	struct dns_ctx *	dctx,
	sent_pkt *		spkt,
	u_int			xmt_delay
	)
{
	sockaddr_u *	dest;
	sent_pkt **	pkt_listp;
	sent_pkt *	match;
	xmt_ctx *	xctx;
	struct timeval	start_cb;
	struct timeval	delay;

	dest = &spkt->addr;
	if (IS_IPV6(dest))
		pkt_listp = &v6_pkts_list;
	else
		pkt_listp = &v4_pkts_list;

	/* reject attempts to add address already listed */
	for (match = *pkt_listp; match != NULL; match = match->link) {
		if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
			if (strcasecmp(spkt->dctx->name,
				       match->dctx->name))
				printf("%s %s duplicate address from %s ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name,
				       spkt->dctx->name);
			else
				printf("%s %s, duplicate address ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name);
			dec_pending_ntp(spkt->dctx->name, &spkt->addr);
			free(spkt);
			return;
		}
	}

	LINK_SLIST(*pkt_listp, spkt, link);

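	/*
	** Schedule this transmission 2 * xmt_delay seconds after the
	** cached callback time, spacing multiple addresses of the same
	** family two seconds apart.
	*/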
	xctx = emalloc_zero(sizeof(*xctx));
	xctx->sock = sock;
	xctx->spkt = spkt;
	gettimeofday_cached(base, &start_cb);
	xctx->sched = start_cb.tv_sec + (2 * xmt_delay);

	LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
			link, xmt_ctx);
	if (xmt_q == xctx) {
		/*
		 * The new entry is the first scheduled.  The timer is
		 * either not active or is set for the second xmt
		 * context in xmt_q.
		 */
		if (NULL == ev_xmt_timer)
			ev_xmt_timer = event_new(base, INVALID_SOCKET,
						 EV_TIMEOUT,
						 &xmt_timer_cb, NULL);
		if (NULL == ev_xmt_timer) {
			msyslog(LOG_ERR,
				"queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
			exit(1);
		}
		ZERO(delay);
		if (xctx->sched > start_cb.tv_sec)
			delay.tv_sec = xctx->sched - start_cb.tv_sec;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("queue_xmt: xmt timer for %u sec\n",
			  (u_int)delay.tv_sec));
	}
}


/*
** xmt_timer_cb
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
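	/*
	** If the next queued transmission is already due, re-arm the
	** timer for the inter-packet gap; otherwise sleep until its
	** scheduled second.
	*/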
	if (xmt_q->sched <= start_cb.tv_sec) {
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}


/*
** xmt()
*/
void
xmt(
	xmt_ctx *	xctx
	)
{
	SOCKET		sock = xctx->sock;
	struct dns_ctx *dctx = xctx->spkt->dctx;
	sent_pkt *	spkt = xctx->spkt;
	sockaddr_u *	dst = &spkt->addr;
	struct timeval	tv_xmt;
	struct pkt	x_pkt;
	size_t		pkt_len;
	int		sent;

	if (0 != gettimeofday(&tv_xmt, NULL)) {
		msyslog(LOG_ERR,
			"xmt: gettimeofday() failed: %m");
		exit(1);
	}
	tv_xmt.tv_sec += JAN_1970;

	pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
			       dctx->key);

	sent = sendpkt(sock, dst, &x_pkt, pkt_len);
	if (sent) {
		/* Save the packet we sent... */
		memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
		       pkt_len));
		spkt->stime = tv_xmt.tv_sec - JAN_1970;

		TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
			  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
	} else {
		dec_pending_ntp(dctx->name, dst);
	}

	return;
}


/*
 * timeout_queries() -- give up on unrequited NTP queries
 */
void
timeout_queries(void)
{
	struct timeval	start_cb;
	u_int		idx;
	sent_pkt *	head;
	sent_pkt *	spkt;
	sent_pkt *	spkt_next;
	long		age;
	int didsomething = 0;

	TRACE(3, ("timeout_queries: called to check %u items\n",
		  (unsigned)COUNTOF(fam_listheads)));

	gettimeofday_cached(base, &start_cb);
	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
		head = fam_listheads[idx];
		for (spkt = head; spkt != NULL; spkt = spkt_next) {
			char xcst;

			didsomething = 1;
			switch (spkt->dctx->flags & CTX_xCST) {
			    case CTX_BCST:
				xcst = 'B';
				break;

			    case CTX_UCST:
				xcst = 'U';
				break;

			    default:
				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
				break;
			}

			spkt_next = spkt->link;
			if (0 == spkt->stime || spkt->done)
				continue;
			age = start_cb.tv_sec - spkt->stime;
			TRACE(3, ("%s %s %cCST age %ld\n",
				  stoa(&spkt->addr),
				  spkt->dctx->name, xcst, age));
			if (age > response_timeout)
				timeout_query(spkt);
		}
	}
	// Do we care about didsomething?
	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
		  didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
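	/* give up entirely once the overall run time exceeds the response timeout */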
	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
		TRACE(3, ("timeout_queries: bail!\n"));
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	}
}


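/*
** Decrement the count of pending NTP responses and re-check the exit
** conditions; log (rather than go negative) if the count is already zero.
*/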
void dec_pending_ntp(
	const char *	name,
	sockaddr_u *	server
	)
{
	if (n_pending_ntp > 0) {
		--n_pending_ntp;
		check_exit_conditions();
	} else {
		INSIST(0 == n_pending_ntp);
		TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
			  hostnameaddr(name, server)));
	}
}


void timeout_query(
	sent_pkt *	spkt
	)
{
	sockaddr_u *	server;
	char		xcst;


	switch (spkt->dctx->flags & CTX_xCST) {
	    case CTX_BCST:
		xcst = 'B';
		break;

	    case CTX_UCST:
		xcst = 'U';
		break;

	    default:
		INSIST(!"spkt->dctx->flags neither UCST nor BCST");
		break;
	}
	spkt->done = TRUE;
	server = &spkt->addr;
	msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
		hostnameaddr(spkt->dctx->name, server), xcst,
		response_timeout);
	dec_pending_ntp(spkt->dctx->name, server);
	return;
}


/*
** check_kod
*/
int
check_kod(
	const struct addrinfo *	ai
	)
{
	char *hostname;
	struct kod_entry *reason;

	/* Is there a KoD on file for this address? */
	hostname = addrinfo_to_str(ai);
	TRACE(2, ("check_kod: checking <%s>\n", hostname));
	if (search_entry(hostname, &reason)) {
		printf("prior KoD for %s, skipping.\n",
			hostname);
		free(reason);
		free(hostname);

		return 1;
	}
	free(hostname);

	return 0;
}


/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
*/
void
sock_cb(
	evutil_socket_t fd,
	short what,
	void *ptr
	)
{
	sockaddr_u	sender;
	sockaddr_u *	psau;
	sent_pkt **	p_pktlist;
	sent_pkt *	spkt;
	int		rpktl;
	int		rc;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6)
		      ? "6"
		      : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ)    ? " read" : "",
		  (what & EV_WRITE)   ? " write" : "",
		  (what & EV_SIGNAL)  ? " signal" : ""));

	if (!(EV_READ & what)) {
		if (EV_TIMEOUT & what)
			timeout_queries();

		return;
	}

	/* Read in the packet */
	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
	if (rpktl < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	if (sock6 == fd)
		p_pktlist = &v6_pkts_list;
	else
		p_pktlist = &v4_pkts_list;

	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
		psau = &spkt->addr;
		if (SOCK_EQ(&sender, psau))
			break;
	}
	if (NULL == spkt) {
		msyslog(LOG_WARNING,
			"Packet from unexpected source %s dropped",
			sptoa(&sender));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
		  sptoa(&sender)));

	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
			    &spkt->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

	/* If this is a Unicast packet, one down ... */
	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
		spkt->done = TRUE;
	}


	/* If the packet is good, set the time and we're all done */
	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
	if (0 != rc)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
	check_exit_conditions();
}


/*
 * check_exit_conditions()
 *
 * If sntp has a reply, ask the event loop to stop after this round of
 * callbacks, unless --wait was used.
 */
void
check_exit_conditions(void)
{
	if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
	    (time_derived && !HAVE_OPT(WAIT))) {
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	} else {
		TRACE(2, ("%d NTP and %d name queries pending\n",
			  n_pending_ntp, n_pending_dns));
	}
}


/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 */
void sntp_addremove_fd(
	int	fd,
	int	is_pipe,
	int	remove_it
	)
{
	u_int		idx;
	blocking_child *c;
	struct event *	ev;

#ifdef HAVE_SOCKETPAIR
	if (is_pipe) {
		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
		exit(1);
	}
#endif

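	/* find the blocking child whose response pipe matches fd */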
	c = NULL;
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		if (fd == c->resp_read_pipe)
			break;
	}
	if (idx == blocking_children_alloc)
		return;

	if (remove_it) {
		ev = c->resp_read_ctx;
		c->resp_read_ctx = NULL;
		event_del(ev);
		event_free(ev);

		return;
	}

	ev = event_new(base, fd, EV_READ | EV_PERSIST,
		       &worker_resp_cb, c);
	if (NULL == ev) {
		msyslog(LOG_ERR,
			"sntp_addremove_fd: event_new(base, fd) failed!");
		return;
	}
	c->resp_read_ctx = ev;
	event_add(ev, NULL);
}


/* called by forked intres child to close open descriptors */
#ifdef WORK_FORK
void
kill_asyncio(
	int	startfd
	)
{
	if (INVALID_SOCKET != sock4) {
		closesocket(sock4);
		sock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != sock6) {
		closesocket(sock6);
		sock6 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock4) {
		closesocket(bsock4);
		bsock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock6) {
		closesocket(bsock6);
		bsock6 = INVALID_SOCKET;
	}
}
#endif


/*
 * worker_resp_cb() is invoked when resp_read_pipe is readable.
 */
void
worker_resp_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx	/* blocking_child * */
	)
{
	blocking_child *	c;

	DEBUG_INSIST(EV_READ & what);
	c = ctx;
	DEBUG_INSIST(fd == c->resp_read_pipe);
	process_blocking_resp(c);
}


/*
 * intres_timeout_req(s) is invoked in the parent to schedule an idle
 * timeout to fire in s seconds, if not reset earlier by a call to
 * intres_timeout_req(0), which clears any pending timeout.  When the
 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
 * parent).
 *
 * sntp and ntpd each provide implementations adapted to their timers.
 */
void
intres_timeout_req(
	u_int	seconds		/* 0 cancels */
	)
{
	struct timeval	tv_to;

	if (NULL == ev_worker_timeout) {
		ev_worker_timeout = event_new(base, -1,
					      EV_TIMEOUT | EV_PERSIST,
					      &worker_timeout, NULL);
		DEBUG_INSIST(NULL != ev_worker_timeout);
	} else {
		event_del(ev_worker_timeout);
	}
	if (0 == seconds)
		return;
	tv_to.tv_sec = seconds;
	tv_to.tv_usec = 0;
	event_add(ev_worker_timeout, &tv_to);
}


void
worker_timeout(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);

	DEBUG_REQUIRE(EV_TIMEOUT & what);
	worker_idle_timer_fired();
}


void
sntp_libevent_log_cb(
	int		severity,
	const char *	msg
	)
{
	int		level;

	switch (severity) {

	default:
	case _EVENT_LOG_DEBUG:
		level = LOG_DEBUG;
		break;

	case _EVENT_LOG_MSG:
		level = LOG_NOTICE;
		break;

	case _EVENT_LOG_WARN:
		level = LOG_WARNING;
		break;

	case _EVENT_LOG_ERR:
		level = LOG_ERR;
		break;
	}

	msyslog(level, "%s", msg);
}


int
generate_pkt (
	struct pkt *x_pkt,
	const struct timeval *tv_xmt,
	int key_id,
	struct key *pkt_key
	)
{
	l_fp	xmt_fp;
	int	pkt_len;
	int	mac_size;

	pkt_len = LEN_PKT_NOMAC;
	ZERO(*x_pkt);
	TVTOTS(tv_xmt, &xmt_fp);
	HTONL_FP(&xmt_fp, &x_pkt->xmt);
	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
	x_pkt->ppoll = 8;
	/* FIXME! broadcast mode + address check -> broadcast packet */
	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
	if (debug > 0) {
		printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
	}
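	/*
	** When authenticating, append the key ID and MAC after the
	** fixed-length header.
	*/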
	if (pkt_key != NULL) {
		x_pkt->exten[0] = htonl(key_id);
		mac_size = make_mac(x_pkt, pkt_len, MAX_MDG_LEN,
				    pkt_key, (char *)&x_pkt->exten[1]);
		if (mac_size > 0)
			pkt_len += mac_size + KEY_MAC_LEN;
#ifdef DEBUG
		if (debug > 0) {
			printf("generate_pkt: mac_size is %d\n", mac_size);
		}
#endif

	}
	return pkt_len;
}


int
handle_pkt(
	int		rpktl,
	struct pkt *	rpkt,
	sockaddr_u *	host,
	const char *	hostname
	)
{
	char		disptxt[32];
	const char *	addrtxt;
	struct timeval	tv_dst;
	int		cnt;
	int		sw_case;
	int		digits;
	int		stratum;
	char *		ref;
	char *		ts_str;
	const char *	leaptxt;
	double		offset;
	double		precision;
	double		synch_distance;
	char *		p_SNTP_PRETEND_TIME;
	time_t		pretend_time;
#if SIZEOF_TIME_T == 8
	long long	ll;
#else
	long		l;
#endif

	ts_str = NULL;

	if (rpktl > 0)
		sw_case = 1;
	else
		sw_case = rpktl;

	switch (sw_case) {

	case SERVER_UNUSEABLE:
		return -1;
		break;

	case PACKET_UNUSEABLE:
		break;

	case SERVER_AUTH_FAIL:
		break;

	case KOD_DEMOBILIZE:
		/* Received a DENY or RESTR KOD packet */
		addrtxt = stoa(host);
		ref = (char *)&rpkt->refid;
		add_entry(addrtxt, ref);
		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
		break;

	case KOD_RATE:
		/*
		** Hmm...
		** We should probably call add_entry() with an
		** expiration timestamp of several seconds in the future,
		** and back-off even more if we get more RATE responses.
		*/
		break;

	case 1:
		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
			  rpktl, stoa(host), hostname));

		gettimeofday_cached(base, &tv_dst);

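		/*
		** SNTP_PRETEND_TIME, if set, overrides the destination
		** (local receive) timestamp seconds.
		*/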
		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
		if (p_SNTP_PRETEND_TIME) {
			pretend_time = 0;
#if SIZEOF_TIME_T == 4
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
				pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
				pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
			if (0 != pretend_time)
				tv_dst.tv_sec = pretend_time;
		}

		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
				   &precision, &synch_distance);
		time_derived = TRUE;

		for (digits = 0; (precision *= 10.) < 1.; ++digits)
			/* empty */ ;
		if (digits > 6)
			digits = 6;

		ts_str = tv_to_str(&tv_dst);
		stratum = rpkt->stratum;
		if (0 == stratum)
				stratum = 16;

		if (synch_distance > 0.) {
			cnt = snprintf(disptxt, sizeof(disptxt),
				       " +/- %f", synch_distance);
			if ((size_t)cnt >= sizeof(disptxt))
				snprintf(disptxt, sizeof(disptxt),
					 "ERROR %d >= %d", cnt,
					 (int)sizeof(disptxt));
		} else {
			disptxt[0] = '\0';
		}

		switch (PKT_LEAP(rpkt->li_vn_mode)) {
		    case LEAP_NOWARNING:
			leaptxt = "no-leap";
			break;
		    case LEAP_ADDSECOND:
			leaptxt = "add-leap";
			break;
		    case LEAP_DELSECOND:
			leaptxt = "del-leap";
			break;
		    case LEAP_NOTINSYNC:
			leaptxt = "unsync";
			break;
		    default:
			leaptxt = "LEAP-ERROR";
			break;
		}

		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
			digits, offset, disptxt,
			hostnameaddr(hostname, host), stratum,
			leaptxt,
			(time_adjusted)
			    ? " [excess]"
			    : "");
		free(ts_str);

		if (p_SNTP_PRETEND_TIME)
			return 0;

		if (!time_adjusted &&
		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
			return set_time(offset);

		return EX_OK;
	}

	return 1;
}


void
offset_calculation(
	struct pkt *rpkt,
	int rpktl,
	struct timeval *tv_dst,
	double *offset,
	double *precision,
	double *synch_distance
	)
{
	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
	u_fp p_rdly, p_rdsp;
	double t21, t34, delta;

	/* Convert timestamps from network to host byte order */
	p_rdly = NTOHS_FP(rpkt->rootdelay);
	p_rdsp = NTOHS_FP(rpkt->rootdisp);
	NTOHL_FP(&rpkt->reftime, &p_ref);
	NTOHL_FP(&rpkt->org, &p_org);
	NTOHL_FP(&rpkt->rec, &p_rec);
	NTOHL_FP(&rpkt->xmt, &p_xmt);

	*precision = LOGTOD(rpkt->precision);

	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

	/* Compute offset etc. */
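	/*
	** t21 = rec - org (server receive minus our originate time)
	** t34 = xmt - dst (server transmit minus our destination time)
	** offset = (t21 + t34) / 2, delta = t21 - t34, per the usual
	** NTP on-wire calculation.
	*/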
	tmp = p_rec;
	L_SUB(&tmp, &p_org);
	LFPTOD(&tmp, t21);
	TVTOTS(tv_dst, &dst);
	dst.l_ui += JAN_1970;
	tmp = p_xmt;
	L_SUB(&tmp, &dst);
	LFPTOD(&tmp, t34);
	*offset = (t21 + t34) / 2.;
	delta = t21 - t34;

	// synch_distance is:
	// (peer->delay + peer->rootdelay) / 2 + peer->disp
	// + peer->rootdisp + clock_phi * (current_time - peer->update)
	// + peer->jitter;
	//
	// and peer->delay = fabs(peer->offset - p_offset) * 2;
	// and peer->offset needs history, so we're left with
	// p_offset = (t21 + t34) / 2.;
	// peer->disp = 0; (we have no history to augment this)
	// clock_phi = 15e-6;
	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
	//
	// so our answer seems to be:
	//
	// (fabs(t21 + t34) + peer->rootdelay) / 3.
	// + 0 (peer->disp)
	// + peer->rootdisp
	// + 15e-6 (clock_phi)
	// + LOGTOD(sys_precision)

	INSIST( FPTOD(p_rdly) >= 0. );
#if 1
	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
		+ 0.
		+ FPTOD(p_rdsp)
		+ 15e-6
		+ 0.	/* LOGTOD(sys_precision) when we can get it */
		;
	INSIST( *synch_distance >= 0. );
#else
	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
#endif

#ifdef DEBUG
	if (debug > 3) {
		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
		printf("sntp syncdist: %f\n", *synch_distance);

		pkt_output(rpkt, rpktl, stdout);

		printf("sntp offset_calculation: rpkt->reftime:\n");
		l_fp_output(&p_ref, stdout);
		printf("sntp offset_calculation: rpkt->org:\n");
		l_fp_output(&p_org, stdout);
		printf("sntp offset_calculation: rpkt->rec:\n");
		l_fp_output(&p_rec, stdout);
		printf("sntp offset_calculation: rpkt->xmt:\n");
		l_fp_output(&p_xmt, stdout);
	}
#endif

	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
		  t21, t34, delta, *offset));

	return;
}



/* Compute the 8 bits for li_vn_mode */
void
set_li_vn_mode (
	struct pkt *spkt,
	char leap,
	char version,
	char mode
	)
{
	if (leap > 3) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
		leap = 3;
	}

	if ((unsigned char)version > 7) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
		version = 4;
	}

	if (mode > 7) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
		mode = 3;
	}

	spkt->li_vn_mode  = leap << 6;
	spkt->li_vn_mode |= version << 3;
	spkt->li_vn_mode |= mode;
}


/*
** set_time applies 'offset' to the local clock.
*/
int
set_time(
	double offset
	)
{
	int rc;

	if (time_adjusted)
		return EX_OK;

	/*
	** If we can step but we cannot slew, then step.
	** If we can step or slew and |offset| > steplimit, then step.
	*/
	if (ENABLED_OPT(STEP) &&
	    (   !ENABLED_OPT(SLEW)
	     || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
	    )) {
		rc = step_systime(offset);

		/* If there was a problem, can we rely on errno? */
		if (1 == rc)
			time_adjusted = TRUE;
		return (time_adjusted)
			   ? EX_OK
			   : 1;
		/*
		** In case of error, what should we use?
		** EX_UNAVAILABLE?
		** EX_OSERR?
		** EX_NOPERM?
		*/
	}

	if (ENABLED_OPT(SLEW)) {
		rc = adj_systime(offset);

		/* If there was a problem, can we rely on errno? */
		if (1 == rc)
			time_adjusted = TRUE;
		return (time_adjusted)
			   ? EX_OK
			   : 1;
		/*
		** In case of error, what should we use?
		** EX_UNAVAILABLE?
		** EX_OSERR?
		** EX_NOPERM?
		*/
	}

	return EX_SOFTWARE;
}


int
libevent_version_ok(void)
{
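	/* compare only the major/minor portion (upper 16 bits) of the version numbers */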
	ev_uint32_t v_compile_maj;
	ev_uint32_t v_run_maj;

	v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
	v_run_maj = event_get_version_number() & 0xffff0000;
	if (v_compile_maj != v_run_maj) {
		fprintf(stderr,
			"Incompatible libevent versions: have %s, built with %s\n",
			event_get_version(),
			LIBEVENT_VERSION);
		return 0;
	}
	return 1;
}

/*
 * gettimeofday_cached()
 *
 * Clones the event_base_gettimeofday_cached() interface but ensures the
 * times are always on the gettimeofday() 1970 scale.  Older libevent 2
 * sometimes used gettimeofday(), sometimes the since-system-start
 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
 *
 * It is not cleanly possible to tell which timescale older libevent is
 * using.
 *
 * The strategy involves 1 hour thresholds chosen to be far longer than
 * the duration of a round of libevent callbacks, which share a cached
 * start-of-round time.  First compare the last cached time with the
 * current gettimeofday() time.  If they are within one hour, libevent
 * is using the proper timescale so leave the offset 0.  Otherwise,
 * compare libevent's cached time and the current time on the monotonic
 * scale.  If they are within an hour, libevent is using the monotonic
 * scale so calculate the offset to add to such times to bring them to
 * gettimeofday()'s scale.
 */
int
gettimeofday_cached(
	struct event_base *	b,
	struct timeval *	caller_tv
	)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	static struct event_base *	cached_b;
	static struct timeval		cached;
	static struct timeval		adj_cached;
	static struct timeval		offset;
	static int			offset_ready;
	struct timeval			latest;
	struct timeval			systemt;
	struct timespec			ts;
	struct timeval			mono;
	struct timeval			diff;
	int				cgt_rc;
	int				gtod_rc;

	event_base_gettimeofday_cached(b, &latest);
	if (b == cached_b &&
	    !memcmp(&latest, &cached, sizeof(latest))) {
		*caller_tv = adj_cached;
		return 0;
	}
	cached = latest;
	cached_b = b;
	if (!offset_ready) {
		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
		gtod_rc = gettimeofday(&systemt, NULL);
		if (0 != gtod_rc) {
			msyslog(LOG_ERR,
				"%s: gettimeofday() error %m",
				progname);
			exit(1);
		}
		diff = sub_tval(systemt, latest);
		if (debug > 1)
			printf("system minus cached %+ld.%06ld\n",
			       (long)diff.tv_sec, (long)diff.tv_usec);
		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
			/*
			 * Either use_monotonic == 0, or this libevent
			 * has been repaired.  Leave offset at zero.
			 */
		} else {
			mono.tv_sec = ts.tv_sec;
			mono.tv_usec = ts.tv_nsec / 1000;
			diff = sub_tval(latest, mono);
			if (debug > 1)
				printf("cached minus monotonic %+ld.%06ld\n",
				       (long)diff.tv_sec, (long)diff.tv_usec);
			if (labs((long)diff.tv_sec) < 3600) {
				/* older libevent2 using monotonic */
				offset = sub_tval(systemt, mono);
				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
					 "gettimeofday_cached",
					 (long)offset.tv_sec,
					 (long)offset.tv_usec));
			}
		}
		offset_ready = TRUE;
	}
	adj_cached = add_tval(cached, offset);
	*caller_tv = adj_cached;

	return 0;
#else
	return event_base_gettimeofday_cached(b, caller_tv);
#endif
}

/* Dummy function to satisfy libntp/work_fork.c */
int
set_user_group_ids(void)
{
	return 1;
}