#include <config.h>

#include <event2/util.h>
#include <event2/event.h>

#include "ntp_workimpl.h"
#ifdef WORK_THREAD
# include <event2/thread.h>
#endif

#ifdef HAVE_SYSEXITS_H
# include <sysexits.h>
#endif

#include "main.h"
#include "ntp_libopts.h"
#include "kod_management.h"
#include "networking.h"
#include "utilities.h"
#include "log.h"
#include "libntp.h"


int shutting_down;
int time_derived;
int time_adjusted;
int n_pending_dns = 0;
int n_pending_ntp = 0;
int ai_fam_pref = AF_UNSPEC;
int ntpver = 4;
double steplimit = -1;
SOCKET sock4 = -1;		/* Socket for IPv4 */
SOCKET sock6 = -1;		/* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;		/* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;		/* Broadcast Socket for IPv6 */
struct event_base *base;
struct event *ev_sock4;
struct event *ev_sock6;
struct event *ev_worker_timeout;
struct event *ev_xmt_timer;

struct dns_ctx {
	const char *	name;
	int		flags;
#define CTX_BCST	0x0001
#define CTX_UCST	0x0002
#define CTX_xCST	0x0003
#define CTX_CONC	0x0004
#define CTX_unused	0xfff8
	int		key_id;
	struct timeval	timeout;
	struct key *	key;
};

typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
	sent_pkt *		link;
	struct dns_ctx *	dctx;
	sockaddr_u		addr;
	time_t			stime;
	int			done;
	struct pkt		x_pkt;
};

typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
	xmt_ctx *		link;
	SOCKET			sock;
	time_t			sched;
	sent_pkt *		spkt;
};

struct timeval	gap;
xmt_ctx *	xmt_q;
struct key *	keys = NULL;
int		response_timeout;
struct timeval	response_tv;
struct timeval	start_tv;
/* check the timeout at least once per second */
struct timeval	wakeup_tv = { 0, 888888 };

sent_pkt *	fam_listheads[2];
#define v4_pkts_list	(fam_listheads[0])
#define v6_pkts_list	(fam_listheads[1])

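/*
** Receive buffer, sized to hold the largest packet we can accept:
** the base header plus extension fields and a MAC.
*/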
static union {
	struct pkt pkt;
	char   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
} rbuf;

#define r_pkt  rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;			/* intres imports these */
int root_dropped;
#endif
u_long current_time;		/* libntp/authkeys.c */

void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
			const struct addrinfo *,
			const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
	       u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);


/*
 * The actual main function.
 */
int
sntp_main (
	int argc,
	char **argv,
	const char *sntpVersion
	)
{
	int			i;
	int			exitcode;
	int			optct;
	struct event_config *	evcfg;

	/* Initialize logging system - sets up progname */
	sntp_init_logging(argv[0]);

	if (!libevent_version_ok())
		exit(EX_SOFTWARE);

	init_lib();
	init_auth();

	optct = ntpOptionProcess(&sntpOptions, argc, argv);
	argc -= optct;
	argv += optct;


	debug = OPT_VALUE_SET_DEBUG_LEVEL;

	TRACE(2, ("init_lib() done, %s%s\n",
		  (ipv4_works)
		      ? "ipv4_works "
		      : "",
		  (ipv6_works)
		      ? "ipv6_works "
		      : ""));
	ntpver = OPT_VALUE_NTPVERSION;
	steplimit = OPT_VALUE_STEPLIMIT / 1e3;
	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
	gap.tv_usec = min(gap.tv_usec, 999999);

	if (HAVE_OPT(LOGFILE))
		open_logfile(OPT_ARG(LOGFILE));

	msyslog(LOG_INFO, "%s", sntpVersion);

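	/*
	** At least one target must be given via -b, -c, or as a plain
	** hostname argument, e.g. (hypothetical server names):
	**   sntp time.example.com
	**   sntp -c time.example.com ntp1.example.org
	*/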
	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
		       progname);
		exit(EX_USAGE);
	}


	/*
	** Eventually, we probably want:
	** - separate bcst and ucst timeouts (why?)
	** - multiple --timeout values in the commandline
	*/

	response_timeout = OPT_VALUE_TIMEOUT;
	response_tv.tv_sec = response_timeout;
	response_tv.tv_usec = 0;

	/* IPv6 available? */
	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
		ai_fam_pref = AF_INET;
		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
	} else {
		/* Check for options -4 and -6 */
		if (HAVE_OPT(IPV4))
			ai_fam_pref = AF_INET;
		else if (HAVE_OPT(IPV6))
			ai_fam_pref = AF_INET6;
	}

	/* TODO: Parse config file if declared */

	/*
	** Init the KOD system.
	** For embedded systems with no writable filesystem,
	** -K /dev/null can be used to disable KoD storage.
	*/
	kod_init_kod_db(OPT_ARG(KOD), FALSE);

	/* HMS: Check and see what happens if KEYFILE doesn't exist */
	auth_init(OPT_ARG(KEYFILE), &keys);

	/*
	** Consider employing a variable that prevents functions from
	** doing anything until everything is initialized properly.
	**
	** HMS: What exactly does the above mean?
	*/
	event_set_log_callback(&sntp_libevent_log_cb);
	if (debug > 0)
		event_enable_debug_mode();
#ifdef WORK_THREAD
	evthread_use_pthreads();
	/* we use libevent from main thread only, locks should be academic */
	if (debug > 0)
		evthread_enable_lock_debuging();
#endif
	evcfg = event_config_new();
	if (NULL == evcfg) {
		printf("%s: event_config_new() failed!\n", progname);
		return -1;
	}
#ifndef HAVE_SOCKETPAIR
	event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
	/* all libevent calls are from main thread */
	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
	base = event_base_new_with_config(evcfg);
	event_config_free(evcfg);
	if (NULL == base) {
		printf("%s: event_base_new() failed!\n", progname);
		return -1;
	}

	/* wire into intres resolver */
	worker_per_query = TRUE;
	addremove_io_fd = &sntp_addremove_fd;

	open_sockets();

	if (HAVE_OPT(BROADCAST)) {
		int		cn = STACKCT_OPT(  BROADCAST );
		const char **	cp = STACKLST_OPT( BROADCAST );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_BCST);
			cp++;
		}
	}

	if (HAVE_OPT(CONCURRENT)) {
		int		cn = STACKCT_OPT( CONCURRENT );
		const char **	cp = STACKLST_OPT( CONCURRENT );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_UCST | CTX_CONC);
			cp++;
		}
	}

	for (i = 0; i < argc; ++i)
		handle_lookup(argv[i], CTX_UCST);

	gettimeofday_cached(base, &start_tv);
	event_base_dispatch(base);
	event_base_free(base);

	if (!time_adjusted &&
	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
		exitcode = 1;
	else
		exitcode = 0;

	return exitcode;
}


/*
** open sockets and make them non-blocking
*/
void
open_sockets(
	void
	)
{
	sockaddr_u	name;

	if (-1 == sock4) {
		sock4 = socket(PF_INET, SOCK_DGRAM, 0);
		if (-1 == sock4) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock4);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET;
		SET_ADDR4N(&name, INADDR_ANY);
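		/*
		** Bind the reserved NTP port (123) if requested;
		** otherwise let the OS pick an ephemeral source port.
		*/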
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock4, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
			exit(1);
		}

		/* Register an NTP callback for recv/timeout */
		ev_sock4 = event_new(base, sock4,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock4) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock4) failed!");
		} else {
			event_add(ev_sock4, &wakeup_tv);
		}
	}

	/* We may not always have IPv6... */
	if (-1 == sock6 && ipv6_works) {
		sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
		if (-1 == sock6) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock6);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET6;
		SET_ADDR6N(&name, in6addr_any);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock6, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
			exit(1);
		}
		/* Register an NTP callback for recv/timeout */
		ev_sock6 = event_new(base, sock6,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock6) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock6) failed!");
		} else {
			event_add(ev_sock6, &wakeup_tv);
		}
	}

	return;
}


/*
** handle_lookup
*/
void
handle_lookup(
	const char *name,
	int flags
	)
{
	struct addrinfo	hints;	/* Local copy is OK */
	struct dns_ctx *ctx;
	char *		name_copy;
	size_t		name_sz;
	size_t		octets;

	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

	ZERO(hints);
	hints.ai_family = ai_fam_pref;
	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
	/*
	** Unless we specify a socktype, we'll get at least two
	** entries for each address: one for TCP and one for
	** UDP. That's not what we want.
	*/
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;

	name_sz = 1 + strlen(name);
	octets = sizeof(*ctx) + name_sz;	// Space for a ctx and the name
	ctx = emalloc_zero(octets);		// ctx at ctx[0]
	name_copy = (char *)(ctx + 1);		// Put the name at ctx[1]
	memcpy(name_copy, name, name_sz);	// copy the name to ctx[1]
	ctx->name = name_copy;			// point to it...
	ctx->flags = flags;
	ctx->timeout = response_tv;
	ctx->key = NULL;

	/* The following should arguably be passed in... */
	if (ENABLED_OPT(AUTHENTICATION)) {
		ctx->key_id = OPT_VALUE_AUTHENTICATION;
		get_key(ctx->key_id, &ctx->key);
		if (NULL == ctx->key) {
			fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
				progname, ctx->key_id, OPT_ARG(KEYFILE));
			exit(1);
		}
	} else {
		ctx->key_id = -1;
	}

	++n_pending_dns;
	getaddrinfo_sometime(name, "123", &hints, 0,
			     &sntp_name_resolved, ctx);
}


/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
*/
void
sntp_name_resolved(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	addr
	)
{
	struct dns_ctx *	dctx;
	sent_pkt *		spkt;
	const struct addrinfo *	ai;
	SOCKET			sock;
	u_int			xmt_delay_v4;
	u_int			xmt_delay_v6;
	u_int			xmt_delay;
	size_t			octets;

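	/* Transmit scheduling is staggered separately for each address family. */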
	xmt_delay_v4 = 0;
	xmt_delay_v6 = 0;
	dctx = context;
	if (rescode) {
#ifdef EAI_SYSTEM
		if (EAI_SYSTEM == rescode) {
			errno = gai_errno;
			mfprintf(stderr, "%s lookup error %m\n",
				 dctx->name);
		} else
#endif
			fprintf(stderr, "%s lookup error %s\n",
				dctx->name, gai_strerror(rescode));
	} else {
		TRACE(3, ("%s [%s]\n", dctx->name,
			  (addr->ai_canonname != NULL)
			      ? addr->ai_canonname
			      : ""));

		for (ai = addr; ai != NULL; ai = ai->ai_next) {

			if (check_kod(ai))
				continue;

			switch (ai->ai_family) {

			case AF_INET:
				sock = sock4;
				xmt_delay = xmt_delay_v4;
				xmt_delay_v4++;
				break;

			case AF_INET6:
				if (!ipv6_works)
					continue;

				sock = sock6;
				xmt_delay = xmt_delay_v6;
				xmt_delay_v6++;
				break;

			default:
				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
					ai->ai_family);
				exit(1);
				break;
			}

			/*
			** We're waiting for a response for either unicast
			** or broadcast, so...
			*/
			++n_pending_ntp;

			/* If this is for a unicast IP, queue a request */
			if (dctx->flags & CTX_UCST) {
				spkt = emalloc_zero(sizeof(*spkt));
				spkt->dctx = dctx;
				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
				memcpy(&spkt->addr, ai->ai_addr, octets);
				queue_xmt(sock, dctx, spkt, xmt_delay);
			}
		}
	}
	/* n_pending_dns really should be >0 here... */
	--n_pending_dns;
	check_exit_conditions();
}


/*
** queue_xmt
*/
void
queue_xmt(
	SOCKET			sock,
	struct dns_ctx *	dctx,
	sent_pkt *		spkt,
	u_int			xmt_delay
	)
{
	sockaddr_u *	dest;
	sent_pkt **	pkt_listp;
	sent_pkt *	match;
	xmt_ctx *	xctx;
	struct timeval	start_cb;
	struct timeval	delay;

	dest = &spkt->addr;
	if (IS_IPV6(dest))
		pkt_listp = &v6_pkts_list;
	else
		pkt_listp = &v4_pkts_list;

	/* reject attempts to add address already listed */
	for (match = *pkt_listp; match != NULL; match = match->link) {
		if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
			if (strcasecmp(spkt->dctx->name,
				       match->dctx->name))
				printf("%s %s duplicate address from %s ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name,
				       spkt->dctx->name);
			else
				printf("%s %s, duplicate address ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name);
			dec_pending_ntp(spkt->dctx->name, &spkt->addr);
			free(spkt);
			return;
		}
	}

	LINK_SLIST(*pkt_listp, spkt, link);

	xctx = emalloc_zero(sizeof(*xctx));
	xctx->sock = sock;
	xctx->spkt = spkt;
	gettimeofday_cached(base, &start_cb);
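	/* Stagger transmits to servers in the same address family two seconds apart. */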
	xctx->sched = start_cb.tv_sec + (2 * xmt_delay);

	LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
			link, xmt_ctx);
	if (xmt_q == xctx) {
		/*
		 * The new entry is the first scheduled.  The timer is
		 * either not active or is set for the second xmt
		 * context in xmt_q.
		 */
		if (NULL == ev_xmt_timer)
			ev_xmt_timer = event_new(base, INVALID_SOCKET,
						 EV_TIMEOUT,
						 &xmt_timer_cb, NULL);
		if (NULL == ev_xmt_timer) {
			msyslog(LOG_ERR,
				"queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
			exit(1);
		}
		ZERO(delay);
		if (xctx->sched > start_cb.tv_sec)
			delay.tv_sec = xctx->sched - start_cb.tv_sec;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
			  (u_int)delay.tv_usec));
	}
}


/*
** xmt_timer_cb
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
	if (xmt_q->sched <= start_cb.tv_sec) {
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}


/*
** xmt()
*/
void
xmt(
	xmt_ctx *	xctx
	)
{
	SOCKET		sock = xctx->sock;
	struct dns_ctx *dctx = xctx->spkt->dctx;
	sent_pkt *	spkt = xctx->spkt;
	sockaddr_u *	dst = &spkt->addr;
	struct timeval	tv_xmt;
	struct pkt	x_pkt;
	size_t		pkt_len;
	int		sent;

	if (0 != gettimeofday(&tv_xmt, NULL)) {
		msyslog(LOG_ERR,
			"xmt: gettimeofday() failed: %m");
		exit(1);
	}
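	/* Shift the transmit timestamp to the NTP epoch (1900). */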
	tv_xmt.tv_sec += JAN_1970;

	pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
			       dctx->key);

	sent = sendpkt(sock, dst, &x_pkt, pkt_len);
	if (sent) {
		/* Save the packet we sent... */
		memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
		       pkt_len));
		spkt->stime = tv_xmt.tv_sec - JAN_1970;

		TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
			  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
	} else {
		dec_pending_ntp(dctx->name, dst);
	}

	return;
}


/*
 * timeout_queries() -- give up on unrequited NTP queries
 */
void
timeout_queries(void)
{
	struct timeval	start_cb;
	u_int		idx;
	sent_pkt *	head;
	sent_pkt *	spkt;
	sent_pkt *	spkt_next;
	long		age;
	int didsomething = 0;

	TRACE(3, ("timeout_queries: called to check %u items\n",
		  (unsigned)COUNTOF(fam_listheads)));

	gettimeofday_cached(base, &start_cb);
	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
		head = fam_listheads[idx];
		for (spkt = head; spkt != NULL; spkt = spkt_next) {
			char xcst;

			didsomething = 1;
			switch (spkt->dctx->flags & CTX_xCST) {
			    case CTX_BCST:
				xcst = 'B';
				break;

			    case CTX_UCST:
				xcst = 'U';
				break;

			    default:
				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
				break;
			}

			spkt_next = spkt->link;
			if (0 == spkt->stime || spkt->done)
				continue;
			age = start_cb.tv_sec - spkt->stime;
			TRACE(3, ("%s %s %cCST age %ld\n",
				  stoa(&spkt->addr),
				  spkt->dctx->name, xcst, age));
			if (age > response_timeout)
				timeout_query(spkt);
		}
	}
	// Do we care about didsomething?
	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
		  didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
		TRACE(3, ("timeout_queries: bail!\n"));
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	}
}


void dec_pending_ntp(
	const char *	name,
	sockaddr_u *	server
	)
{
	if (n_pending_ntp > 0) {
		--n_pending_ntp;
		check_exit_conditions();
	} else {
		INSIST(0 == n_pending_ntp);
		TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
			  hostnameaddr(name, server)));
	}
}


void timeout_query(
	sent_pkt *	spkt
	)
{
	sockaddr_u *	server;
	char		xcst;


	switch (spkt->dctx->flags & CTX_xCST) {
	    case CTX_BCST:
		xcst = 'B';
		break;

	    case CTX_UCST:
		xcst = 'U';
		break;

	    default:
		INSIST(!"spkt->dctx->flags neither UCST nor BCST");
		break;
	}
	spkt->done = TRUE;
	server = &spkt->addr;
	msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
		hostnameaddr(spkt->dctx->name, server), xcst,
		response_timeout);
	dec_pending_ntp(spkt->dctx->name, server);
	return;
}


/*
** check_kod
*/
int
check_kod(
	const struct addrinfo *	ai
	)
{
	char *hostname;
	struct kod_entry *reason;

	/* Is there a KoD on file for this address? */
	hostname = addrinfo_to_str(ai);
	TRACE(2, ("check_kod: checking <%s>\n", hostname));
	if (search_entry(hostname, &reason)) {
		printf("prior KoD for %s, skipping.\n",
			hostname);
		free(reason);
		free(hostname);

		return 1;
	}
	free(hostname);

	return 0;
}


/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
*/
void
sock_cb(
	evutil_socket_t fd,
	short what,
	void *ptr
	)
{
	sockaddr_u	sender;
	sockaddr_u *	psau;
	sent_pkt **	p_pktlist;
	sent_pkt *	spkt;
	int		rpktl;
	int		rc;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6)
		      ? "6"
		      : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ)    ? " read" : "",
		  (what & EV_WRITE)   ? " write" : "",
		  (what & EV_SIGNAL)  ? " signal" : ""));

	if (!(EV_READ & what)) {
		if (EV_TIMEOUT & what)
			timeout_queries();

		return;
	}

	/* Read in the packet */
	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
	if (rpktl < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	if (sock6 == fd)
		p_pktlist = &v6_pkts_list;
	else
		p_pktlist = &v4_pkts_list;

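	/* Find the request we sent to this responder, if any. */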
	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
		psau = &spkt->addr;
		if (SOCK_EQ(&sender, psau))
			break;
	}
	if (NULL == spkt) {
		msyslog(LOG_WARNING,
			"Packet from unexpected source %s dropped",
			sptoa(&sender));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
		  sptoa(&sender)));

	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
			    &spkt->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

	/* If this is a Unicast packet, one down ... */
	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
		spkt->done = TRUE;
	}


	/* If the packet is good, set the time and we're all done */
	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
	if (0 != rc)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
	check_exit_conditions();
}


/*
 * check_exit_conditions()
 *
 * If sntp has a reply, ask the event loop to stop after this round of
 * callbacks, unless --wait was used.
 */
void
check_exit_conditions(void)
{
	if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
	    (time_derived && !HAVE_OPT(WAIT))) {
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	} else {
		TRACE(2, ("%d NTP and %d name queries pending\n",
			  n_pending_ntp, n_pending_dns));
	}
}


/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 */
void sntp_addremove_fd(
	int	fd,
	int	is_pipe,
	int	remove_it
	)
{
	u_int		idx;
	blocking_child *c;
	struct event *	ev;

#ifdef HAVE_SOCKETPAIR
	if (is_pipe) {
		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
		exit(1);
	}
#endif

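	/* Find the blocking child whose response pipe matches fd. */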
	c = NULL;
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		if (fd == c->resp_read_pipe)
			break;
	}
	if (idx == blocking_children_alloc)
		return;

	if (remove_it) {
		ev = c->resp_read_ctx;
		c->resp_read_ctx = NULL;
		event_del(ev);
		event_free(ev);

		return;
	}

	ev = event_new(base, fd, EV_READ | EV_PERSIST,
		       &worker_resp_cb, c);
	if (NULL == ev) {
		msyslog(LOG_ERR,
			"sntp_addremove_fd: event_new(base, fd) failed!");
		return;
	}
	c->resp_read_ctx = ev;
	event_add(ev, NULL);
}


/* called by forked intres child to close open descriptors */
#ifdef WORK_FORK
void
kill_asyncio(
	int	startfd
	)
{
	if (INVALID_SOCKET != sock4) {
		closesocket(sock4);
		sock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != sock6) {
		closesocket(sock6);
		sock6 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock4) {
		closesocket(bsock4);
		bsock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock6) {
		closesocket(bsock6);
		bsock6 = INVALID_SOCKET;
	}
}
#endif


/*
 * worker_resp_cb() is invoked when resp_read_pipe is readable.
 */
void
worker_resp_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx	/* blocking_child * */
	)
{
	blocking_child *	c;

	DEBUG_INSIST(EV_READ & what);
	c = ctx;
	DEBUG_INSIST(fd == c->resp_read_pipe);
	process_blocking_resp(c);
}


/*
 * intres_timeout_req(s) is invoked in the parent to schedule an idle
 * timeout to fire in s seconds, if not reset earlier by a call to
 * intres_timeout_req(0), which clears any pending timeout.  When the
 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
 * parent).
 *
 * sntp and ntpd each provide implementations adapted to their timers.
 */
void
intres_timeout_req(
	u_int	seconds		/* 0 cancels */
	)
{
	struct timeval	tv_to;

	if (NULL == ev_worker_timeout) {
		ev_worker_timeout = event_new(base, -1,
					      EV_TIMEOUT | EV_PERSIST,
					      &worker_timeout, NULL);
		DEBUG_INSIST(NULL != ev_worker_timeout);
	} else {
		event_del(ev_worker_timeout);
	}
	if (0 == seconds)
		return;
	tv_to.tv_sec = seconds;
	tv_to.tv_usec = 0;
	event_add(ev_worker_timeout, &tv_to);
}


void
worker_timeout(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);

	DEBUG_REQUIRE(EV_TIMEOUT & what);
	worker_idle_timer_fired();
}


void
sntp_libevent_log_cb(
	int		severity,
	const char *	msg
	)
{
	int		level;

	switch (severity) {

	default:
	case _EVENT_LOG_DEBUG:
		level = LOG_DEBUG;
		break;

	case _EVENT_LOG_MSG:
		level = LOG_NOTICE;
		break;

	case _EVENT_LOG_WARN:
		level = LOG_WARNING;
		break;

	case _EVENT_LOG_ERR:
		level = LOG_ERR;
		break;
	}

	msyslog(level, "%s", msg);
}


int
generate_pkt (
	struct pkt *x_pkt,
	const struct timeval *tv_xmt,
	int key_id,
	struct key *pkt_key
	)
{
	l_fp	xmt_fp;
	int	pkt_len;
	int	mac_size;

	pkt_len = LEN_PKT_NOMAC;
	ZERO(*x_pkt);
	TVTOTS(tv_xmt, &xmt_fp);
	HTONL_FP(&xmt_fp, &x_pkt->xmt);
	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
	x_pkt->ppoll = 8;
	/* FIXME! Modus broadcast + adr. check -> bdr. pkt */
	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
	if (debug > 0) {
		printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
	}
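	/*
	** If authenticating, append the key ID and the MAC computed
	** over the header to the end of the packet.
	*/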
	if (pkt_key != NULL) {
		x_pkt->exten[0] = htonl(key_id);
		mac_size = make_mac(x_pkt, pkt_len, MAX_MDG_LEN,
				    pkt_key, (char *)&x_pkt->exten[1]);
		if (mac_size > 0)
			pkt_len += mac_size + KEY_MAC_LEN;
#ifdef DEBUG
		if (debug > 0) {
			printf("generate_pkt: mac_size is %d\n", mac_size);
		}
#endif

	}
	return pkt_len;
}


int
handle_pkt(
	int		rpktl,
	struct pkt *	rpkt,
	sockaddr_u *	host,
	const char *	hostname
	)
{
	char		disptxt[32];
	const char *	addrtxt;
	struct timeval	tv_dst;
	int		cnt;
	int		sw_case;
	int		digits;
	int		stratum;
	char *		ref;
	char *		ts_str;
	const char *	leaptxt;
	double		offset;
	double		precision;
	double		synch_distance;
	char *		p_SNTP_PRETEND_TIME;
	time_t		pretend_time;
#if SIZEOF_TIME_T == 8
	long long	ll;
#else
	long		l;
#endif

	ts_str = NULL;

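	/*
	** Any positive length is a usable packet; non-positive values
	** are the error codes returned by process_pkt().
	*/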
	if (rpktl > 0)
		sw_case = 1;
	else
		sw_case = rpktl;

	switch (sw_case) {

	case SERVER_UNUSEABLE:
		return -1;
		break;

	case PACKET_UNUSEABLE:
		break;

	case SERVER_AUTH_FAIL:
		break;

	case KOD_DEMOBILIZE:
		/* Received a DENY or RESTR KOD packet */
		addrtxt = stoa(host);
		ref = (char *)&rpkt->refid;
		add_entry(addrtxt, ref);
		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
		break;

	case KOD_RATE:
		/*
		** Hmm...
		** We should probably call add_entry() with an
		** expiration timestamp of several seconds in the future,
		** and back-off even more if we get more RATE responses.
		*/
		break;

	case 1:
		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
			  rpktl, stoa(host), hostname));

		gettimeofday_cached(base, &tv_dst);

		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
		if (p_SNTP_PRETEND_TIME) {
			pretend_time = 0;
#if SIZEOF_TIME_T == 4
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
				pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
				pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
			if (0 != pretend_time)
				tv_dst.tv_sec = pretend_time;
		}

		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
				   &precision, &synch_distance);
		time_derived = TRUE;

		for (digits = 0; (precision *= 10.) < 1.; ++digits)
			/* empty */ ;
		if (digits > 6)
			digits = 6;

		ts_str = tv_to_str(&tv_dst);
		stratum = rpkt->stratum;
		if (0 == stratum)
			stratum = 16;

		if (synch_distance > 0.) {
			cnt = snprintf(disptxt, sizeof(disptxt),
				       " +/- %f", synch_distance);
			if ((size_t)cnt >= sizeof(disptxt))
				snprintf(disptxt, sizeof(disptxt),
					 "ERROR %d >= %d", cnt,
					 (int)sizeof(disptxt));
		} else {
			disptxt[0] = '\0';
		}

		switch (PKT_LEAP(rpkt->li_vn_mode)) {
		    case LEAP_NOWARNING:
			leaptxt = "no-leap";
			break;
		    case LEAP_ADDSECOND:
			leaptxt = "add-leap";
			break;
		    case LEAP_DELSECOND:
			leaptxt = "del-leap";
			break;
		    case LEAP_NOTINSYNC:
			leaptxt = "unsync";
			break;
		    default:
			leaptxt = "LEAP-ERROR";
			break;
		}

		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
			digits, offset, disptxt,
			hostnameaddr(hostname, host), stratum,
			leaptxt,
			(time_adjusted)
			    ? " [excess]"
			    : "");
		free(ts_str);

		if (p_SNTP_PRETEND_TIME)
			return 0;

		if (!time_adjusted &&
		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
			return set_time(offset);

		return EX_OK;
	}

	return 1;
}


void
offset_calculation(
	struct pkt *rpkt,
	int rpktl,
	struct timeval *tv_dst,
	double *offset,
	double *precision,
	double *synch_distance
	)
{
	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
	u_fp p_rdly, p_rdsp;
	double t21, t34, delta;

	/* Convert timestamps from network to host byte order */
	p_rdly = NTOHS_FP(rpkt->rootdelay);
	p_rdsp = NTOHS_FP(rpkt->rootdisp);
	NTOHL_FP(&rpkt->reftime, &p_ref);
	NTOHL_FP(&rpkt->org, &p_org);
	NTOHL_FP(&rpkt->rec, &p_rec);
	NTOHL_FP(&rpkt->xmt, &p_xmt);

	*precision = LOGTOD(rpkt->precision);

	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

	/* Compute offset etc. */
	tmp = p_rec;
	L_SUB(&tmp, &p_org);
	LFPTOD(&tmp, t21);
	TVTOTS(tv_dst, &dst);
	dst.l_ui += JAN_1970;
	tmp = p_xmt;
	L_SUB(&tmp, &dst);
	LFPTOD(&tmp, t34);
	*offset = (t21 + t34) / 2.;
	delta = t21 - t34;
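	/* delta (t21 - t34) is the measured round-trip delay. */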

	// synch_distance is:
	// (peer->delay + peer->rootdelay) / 2 + peer->disp
	// + peer->rootdisp + clock_phi * (current_time - peer->update)
	// + peer->jitter;
	//
	// and peer->delay = fabs(peer->offset - p_offset) * 2;
	// and peer->offset needs history, so we're left with
	// p_offset = (t21 + t34) / 2.;
	// peer->disp = 0; (we have no history to augment this)
	// clock_phi = 15e-6;
	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
	//
	// so our answer seems to be:
	//
	// (fabs(t21 + t34) + peer->rootdelay) / 3.
	// + 0 (peer->disp)
	// + peer->rootdisp
	// + 15e-6 (clock_phi)
	// + LOGTOD(sys_precision)

	INSIST( FPTOD(p_rdly) >= 0. );
#if 1
	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
		+ 0.
		+ FPTOD(p_rdsp)
		+ 15e-6
		+ 0.	/* LOGTOD(sys_precision) when we can get it */
		;
	INSIST( *synch_distance >= 0. );
#else
	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
#endif

#ifdef DEBUG
	if (debug > 3) {
		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
		printf("sntp syncdist: %f\n", *synch_distance);

		pkt_output(rpkt, rpktl, stdout);

		printf("sntp offset_calculation: rpkt->reftime:\n");
		l_fp_output(&p_ref, stdout);
		printf("sntp offset_calculation: rpkt->org:\n");
		l_fp_output(&p_org, stdout);
		printf("sntp offset_calculation: rpkt->rec:\n");
		l_fp_output(&p_rec, stdout);
		printf("sntp offset_calculation: rpkt->xmt:\n");
		l_fp_output(&p_xmt, stdout);
	}
#endif

	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
		  t21, t34, delta, *offset));

	return;
}



/* Compute the 8 bits for li_vn_mode */
void
set_li_vn_mode (
	struct pkt *spkt,
	char leap,
	char version,
	char mode
	)
{
	if (leap > 3) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
		leap = 3;
	}

	if ((unsigned char)version > 7) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
		version = 4;
	}

	if (mode > 7) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
		mode = 3;
	}

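	/* Pack LI (2 bits), VN (3 bits), and Mode (3 bits) into one octet. */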
	spkt->li_vn_mode  = leap << 6;
	spkt->li_vn_mode |= version << 3;
	spkt->li_vn_mode |= mode;
}


/*
** set_time applies 'offset' to the local clock.
*/
int
set_time(
	double offset
	)
{
	int rc;

	if (time_adjusted)
		return EX_OK;

	/*
	** If we can step but we cannot slew, then step.
	** If we can step or slew and |offset| > steplimit, then step.
	*/
	if (ENABLED_OPT(STEP) &&
	    (   !ENABLED_OPT(SLEW)
	     || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
	    )) {
		rc = step_systime(offset);

		/* If there was a problem, can we rely on errno? */
		if (1 == rc)
			time_adjusted = TRUE;
		return (time_adjusted)
			   ? EX_OK
			   : 1;
		/*
		** In case of error, what should we use?
		** EX_UNAVAILABLE?
		** EX_OSERR?
		** EX_NOPERM?
		*/
	}

	if (ENABLED_OPT(SLEW)) {
		rc = adj_systime(offset);

		/* If there was a problem, can we rely on errno? */
		if (1 == rc)
			time_adjusted = TRUE;
		return (time_adjusted)
			   ? EX_OK
			   : 1;
		/*
		** In case of error, what should we use?
		** EX_UNAVAILABLE?
		** EX_OSERR?
		** EX_NOPERM?
		*/
	}

	return EX_SOFTWARE;
}


int
libevent_version_ok(void)
{
	ev_uint32_t v_compile_maj;
	ev_uint32_t v_run_maj;

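	/* Compare only the major and minor fields of the version numbers. */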
	v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
	v_run_maj = event_get_version_number() & 0xffff0000;
	if (v_compile_maj != v_run_maj) {
		fprintf(stderr,
			"Incompatible libevent versions: have %s, built with %s\n",
			event_get_version(),
			LIBEVENT_VERSION);
		return 0;
	}
	return 1;
}

/*
 * gettimeofday_cached()
 *
 * Clones the event_base_gettimeofday_cached() interface but ensures the
 * times are always on the gettimeofday() 1970 scale.  Older libevent 2
 * sometimes used gettimeofday(), sometimes the since-system-start
 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
 *
 * It is not cleanly possible to tell which timescale older libevent is
 * using.
 *
 * The strategy involves 1 hour thresholds chosen to be far longer than
 * the duration of a round of libevent callbacks, which share a cached
 * start-of-round time.  First compare the last cached time with the
 * current gettimeofday() time.  If they are within one hour, libevent
 * is using the proper timescale so leave the offset 0.  Otherwise,
 * compare libevent's cached time and the current time on the monotonic
 * scale.  If they are within an hour, libevent is using the monotonic
 * scale so calculate the offset to add to such times to bring them to
 * gettimeofday()'s scale.
 */
int
gettimeofday_cached(
	struct event_base *	b,
	struct timeval *	caller_tv
	)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	static struct event_base *	cached_b;
	static struct timeval		cached;
	static struct timeval		adj_cached;
	static struct timeval		offset;
	static int			offset_ready;
	struct timeval			latest;
	struct timeval			systemt;
	struct timespec			ts;
	struct timeval			mono;
	struct timeval			diff;
	int				cgt_rc;
	int				gtod_rc;

	event_base_gettimeofday_cached(b, &latest);
	if (b == cached_b &&
	    !memcmp(&latest, &cached, sizeof(latest))) {
		*caller_tv = adj_cached;
		return 0;
	}
	cached = latest;
	cached_b = b;
	if (!offset_ready) {
		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
		gtod_rc = gettimeofday(&systemt, NULL);
		if (0 != gtod_rc) {
			msyslog(LOG_ERR,
				"%s: gettimeofday() error %m",
				progname);
			exit(1);
		}
		diff = sub_tval(systemt, latest);
		if (debug > 1)
			printf("system minus cached %+ld.%06ld\n",
			       (long)diff.tv_sec, (long)diff.tv_usec);
		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
			/*
			 * Either use_monotonic == 0, or this libevent
			 * has been repaired.  Leave offset at zero.
			 */
		} else {
			mono.tv_sec = ts.tv_sec;
			mono.tv_usec = ts.tv_nsec / 1000;
			diff = sub_tval(latest, mono);
			if (debug > 1)
				printf("cached minus monotonic %+ld.%06ld\n",
				       (long)diff.tv_sec, (long)diff.tv_usec);
			if (labs((long)diff.tv_sec) < 3600) {
				/* older libevent2 using monotonic */
				offset = sub_tval(systemt, mono);
				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
					 "gettimeofday_cached",
					 (long)offset.tv_sec,
					 (long)offset.tv_usec));
			}
		}
		offset_ready = TRUE;
	}
	adj_cached = add_tval(cached, offset);
	*caller_tv = adj_cached;

	return 0;
#else
	return event_base_gettimeofday_cached(b, caller_tv);
#endif
}

/* Dummy function to satisfy libntp/work_fork.c */
extern int set_user_group_ids(void);
int set_user_group_ids(void)
{
	return 1;
}