1/*	$NetBSD: ntp_request.c,v 1.18 2022/10/09 21:41:03 christos Exp $	*/
2
3/*
4 * ntp_request.c - respond to information requests
5 */
6
7#ifdef HAVE_CONFIG_H
8# include <config.h>
9#endif
10
11#include "ntpd.h"
12#include "ntp_io.h"
13#include "ntp_request.h"
14#include "ntp_control.h"
15#include "ntp_refclock.h"
16#include "ntp_if.h"
17#include "ntp_stdlib.h"
18#include "ntp_assert.h"
19
20#include <stdio.h>
21#include <stddef.h>
22#include <signal.h>
23#ifdef HAVE_NETINET_IN_H
24#include <netinet/in.h>
25#endif
26#include <arpa/inet.h>
27
28#include "recvbuff.h"
29
30#ifdef KERNEL_PLL
31#include "ntp_syscall.h"
32#endif /* KERNEL_PLL */
33
34/*
35 * Structure to hold request procedure information
36 */
37#define	NOAUTH	0
38#define	AUTH	1
39
40#define	NO_REQUEST	(-1)
41/*
42 * Because we now have v6 addresses in the messages, we need to compensate
43 * for the larger size.  Therefore, we introduce the alternate size to
44 * keep us friendly with older implementations.  A little ugly.
45 */
46static int client_v6_capable = 0;   /* the client can handle longer messages */
47
48#define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
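
/*
 * Example: for struct info_peer_list a v6-capable client is sent
 * items of the full sizeof(struct info_peer_list), while an older
 * client gets the shorter v4sizeof() layout, which ends before the
 * IPv6-specific tail fields (v6_flag and the v6 address).
 */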
49
50struct req_proc {
51	short request_code;	/* defined request code */
52	short needs_auth;	/* true when authentication needed */
53	short sizeofitem;	/* size of request data item (older size)*/
54	short v6_sizeofitem;	/* size of request data item (new size)*/
55	void (*handler) (sockaddr_u *, endpt *,
56			   struct req_pkt *);	/* routine to handle request */
57};
58
59/*
60 * Universal request codes
61 */
62static const struct req_proc univ_codes[] = {
63	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
64};
65
66static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
67static	void *	prepare_pkt	(sockaddr_u *, endpt *,
68				 struct req_pkt *, size_t);
69static	void *	more_pkt	(void);
70static	void	flush_pkt	(void);
71static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
72static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
73static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
74static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
76static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
79static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
80static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
81static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
82static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
83static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
84static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
85static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
86static	void	list_restrict4	(const restrict_u *, struct info_restrict **);
87static	void	list_restrict6	(const restrict_u *, struct info_restrict **);
88static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
89static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
90static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
91static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
92static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, restrict_op);
93static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
94static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
95static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
96static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
97static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
98static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
99static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
100static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
101static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
102static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
103static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
104static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
105static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
106static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
107static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
108static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
109static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
110#ifdef KERNEL_PLL
111static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
112#endif /* KERNEL_PLL */
113#ifdef REFCLOCK
114static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
115static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
116#endif	/* REFCLOCK */
117#ifdef REFCLOCK
118static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
119#endif	/* REFCLOCK */
120
121/*
122 * ntpd request codes
123 */
124static const struct req_proc ntp_codes[] = {
125	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
126	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
127	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
128				sizeof(struct info_peer_list), peer_info},
129	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
130				sizeof(struct info_peer_list), peer_stats},
131	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
132	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
133	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
134	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
135	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
136	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
137	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
138				sizeof(struct conf_peer), do_conf },
139	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
140				sizeof(struct conf_unpeer), do_unconf },
141	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142				sizeof(struct conf_sys_flags), set_sys_flag },
143	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
144				sizeof(struct conf_sys_flags),  clr_sys_flag },
145	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
146	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
147				sizeof(struct conf_restrict), do_resaddflags },
148	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
149				sizeof(struct conf_restrict), do_ressubflags },
150	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
151				sizeof(struct conf_restrict), do_unrestrict },
152	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
153	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
154	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
155	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
156				sizeof(struct conf_unpeer), reset_peer },
157	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
158	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
159	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
160	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
161	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
162	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
163				sizeof(struct conf_trap), req_set_trap },
164	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
165				sizeof(struct conf_trap), req_clr_trap },
166	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167				set_request_keyid },
168	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
169				set_control_keyid },
170	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
171#ifdef KERNEL_PLL
172	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
173#endif
174#ifdef REFCLOCK
175	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
176				get_clock_info },
177	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
178				sizeof(struct conf_fudge), set_clock_fudge },
179	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
180				get_clkbug_info },
181#endif
182	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
183	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
184
185	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
186};
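
/*
 * process_private() scans this table linearly; the NO_REQUEST
 * sentinel terminates the search, so new entries must be added
 * before it.
 */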
187
188
189/*
190 * Authentication keyid used to authenticate requests.  Zero means we
191 * don't allow writing anything.
192 */
193keyid_t info_auth_keyid;
194
195/*
196 * Statistic counters to keep track of requests and responses.
197 */
198u_long numrequests;		/* number of requests we've received */
199u_long numresppkts;		/* number of resp packets sent with data */
200
201/*
202 * lazy way to count errors, indexed by the error code
203 */
204u_long errorcounter[MAX_INFO_ERR + 1];
205
/*
 * A hack.  To keep the authentication module clear of ntp-isms, we
 * include a time reset variable for its stats here.
 */
210u_long auth_timereset;
211
212/*
213 * Response packet used by these routines.  Also some state information
214 * so that we can handle packet formatting within a common set of
215 * subroutines.  Note we try to enter data in place whenever possible,
216 * but the need to set the more bit correctly means we occasionally
217 * use the extra buffer and copy.
218 */
219static struct resp_pkt rpkt;
220static int reqver;
221static int seqno;
222static int nitems;
223static int itemsize;
224static int databytes;
225static char exbuf[RESP_DATA_SIZE];
226static int usingexbuf;
227static sockaddr_u *toaddr;
228static endpt *frominter;
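
/*
 * Typical calling pattern for the formatting helpers below (a sketch
 * only; the item name and loop condition are illustrative):
 *
 *	ip = prepare_pkt(srcadr, inter, inpkt, sizeof(*ip));
 *	while (more_items && ip != NULL) {
 *		... fill in *ip ...
 *		ip = more_pkt();	(next slot; may emit a packet)
 *	}
 *	flush_pkt();
 *
 * prepare_pkt() resets the state kept here, more_pkt() transmits a
 * full response with the MORE bit set whenever the data area fills,
 * and flush_pkt() sends whatever remains (or an INFO_ERR_NODATA
 * error if no items were ever added).
 */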
229
230/*
231 * init_request - initialize request data
232 */
233void
234init_request (void)
235{
236	size_t i;
237
238	numrequests = 0;
239	numresppkts = 0;
240	auth_timereset = 0;
241	info_auth_keyid = 0;	/* by default, can't do this */
242
243	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
244	    errorcounter[i] = 0;
245}
246
247
248/*
249 * req_ack - acknowledge request with no data
250 */
251static void
252req_ack(
253	sockaddr_u *srcadr,
254	endpt *inter,
255	struct req_pkt *inpkt,
256	int errcode
257	)
258{
259	/*
260	 * fill in the fields
261	 */
262	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
263	rpkt.auth_seq = AUTH_SEQ(0, 0);
264	rpkt.implementation = inpkt->implementation;
265	rpkt.request = inpkt->request;
266	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
267	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
268
269	/*
270	 * send packet and bump counters
271	 */
272	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
273	errorcounter[errcode]++;
274}
275
276
277/*
278 * prepare_pkt - prepare response packet for transmission, return pointer
279 *		 to storage for data item.
280 */
281static void *
282prepare_pkt(
283	sockaddr_u *srcadr,
284	endpt *inter,
285	struct req_pkt *pkt,
286	size_t structsize
287	)
288{
289	DPRINTF(4, ("request: preparing pkt\n"));
290
291	/*
292	 * Fill in the implementation, request and itemsize fields
293	 * since these won't change.
294	 */
295	rpkt.implementation = pkt->implementation;
296	rpkt.request = pkt->request;
297	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
298
299	/*
300	 * Compute the static data needed to carry on.
301	 */
302	toaddr = srcadr;
303	frominter = inter;
304	seqno = 0;
305	nitems = 0;
306	itemsize = structsize;
307	databytes = 0;
308	usingexbuf = 0;
309
310	/*
311	 * return the beginning of the packet buffer.
312	 */
313	return &rpkt.u;
314}
315
316
317/*
318 * more_pkt - return a data pointer for a new item.
319 */
320static void *
321more_pkt(void)
322{
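	/*
	 * Items are written directly into rpkt.u.data until the next
	 * item would no longer fit; that last item is staged in exbuf
	 * instead.  On the next call the now-full packet is sent with
	 * the MORE bit set and the staged item is copied back to the
	 * start of the data area for the next packet.
	 */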
323	/*
324	 * If we were using the extra buffer, send the packet.
325	 */
326	if (usingexbuf) {
327		DPRINTF(3, ("request: sending pkt\n"));
328		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
329		rpkt.auth_seq = AUTH_SEQ(0, seqno);
330		rpkt.err_nitems = htons((u_short)nitems);
331		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
332			RESP_HEADER_SIZE + databytes);
333		numresppkts++;
334
335		/*
336		 * Copy data out of exbuf into the packet.
337		 */
338		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
339		seqno++;
340		databytes = 0;
341		nitems = 0;
342		usingexbuf = 0;
343	}
344
345	databytes += itemsize;
346	nitems++;
347	if (databytes + itemsize <= RESP_DATA_SIZE) {
348		DPRINTF(4, ("request: giving him more data\n"));
349		/*
350		 * More room in packet.  Give him the
351		 * next address.
352		 */
353		return &rpkt.u.data[databytes];
354	} else {
355		/*
356		 * No room in packet.  Give him the extra
357		 * buffer unless this was the last in the sequence.
358		 */
359		DPRINTF(4, ("request: into extra buffer\n"));
360		if (seqno == MAXSEQ)
361			return NULL;
362		else {
363			usingexbuf = 1;
364			return exbuf;
365		}
366	}
367}
368
369
370/*
371 * flush_pkt - we're done, return remaining information.
372 */
373static void
374flush_pkt(void)
375{
376	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
377	/*
378	 * Must send the last packet.  If nothing in here and nothing
379	 * has been sent, send an error saying no data to be found.
380	 */
381	if (seqno == 0 && nitems == 0)
382		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
383			INFO_ERR_NODATA);
384	else {
385		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
386		rpkt.auth_seq = AUTH_SEQ(0, seqno);
387		rpkt.err_nitems = htons((u_short)nitems);
388		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
389			RESP_HEADER_SIZE+databytes);
390		numresppkts++;
391	}
392}
393
394
395
396/*
397 * Given a buffer, return the packet mode
398 */
399int
400get_packet_mode(struct recvbuf *rbufp)
401{
402	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
403	return (INFO_MODE(inpkt->rm_vn_mode));
404}
405
406
407/*
408 * process_private - process private mode (7) packets
409 */
410void
411process_private(
412	struct recvbuf *rbufp,
413	int mod_okay
414	)
415{
416	static u_long quiet_until;
417	struct req_pkt *inpkt;
418	struct req_pkt_tail *tailinpkt;
419	sockaddr_u *srcadr;
420	endpt *inter;
421	const struct req_proc *proc;
422	int ec;
423	short temp_size;
424	l_fp ftmp;
425	double dtemp;
426	size_t recv_len;
427	size_t noslop_len;
428	size_t mac_len;
429
430	/*
431	 * Initialize pointers, for convenience
432	 */
433	recv_len = rbufp->recv_length;
434	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
435	srcadr = &rbufp->recv_srcadr;
436	inter = rbufp->dstadr;
437
438	DPRINTF(3, ("process_private: impl %d req %d\n",
439		    inpkt->implementation, inpkt->request));
440
441	/*
442	 * Do some sanity checks on the packet.  Return a format
443	 * error if it fails.
444	 */
445	ec = 0;
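	/*
	 * Each test below increments ec before it is evaluated, so the
	 * value logged on failure identifies which sanity check the
	 * packet tripped over.
	 */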
446	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
447	    || (++ec, ISMORE(inpkt->rm_vn_mode))
448	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
449	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
450	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
451	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
452	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
453	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
454		) {
455		NLOG(NLOG_SYSEVENT)
456			if (current_time >= quiet_until) {
457				msyslog(LOG_ERR,
458					"process_private: drop test %d"
459					" failed, pkt from %s",
460					ec, stoa(srcadr));
461				quiet_until = current_time + 60;
462			}
463		return;
464	}
465
466	reqver = INFO_VERSION(inpkt->rm_vn_mode);
467
468	/*
469	 * Get the appropriate procedure list to search.
470	 */
471	if (inpkt->implementation == IMPL_UNIV)
472		proc = univ_codes;
473	else if ((inpkt->implementation == IMPL_XNTPD) ||
474		 (inpkt->implementation == IMPL_XNTPD_OLD))
475		proc = ntp_codes;
476	else {
477		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
478		return;
479	}
480
481	/*
482	 * Search the list for the request codes.  If it isn't one
483	 * we know, return an error.
484	 */
485	while (proc->request_code != NO_REQUEST) {
486		if (proc->request_code == (short) inpkt->request)
487			break;
488		proc++;
489	}
490	if (proc->request_code == NO_REQUEST) {
491		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
492		return;
493	}
494
495	DPRINTF(4, ("found request in tables\n"));
496
497	/*
498	 * If we need data, check to see if we have some.  If we
499	 * don't, check to see that there is none (picky, picky).
500	 */
501
	/* This part is a bit tricky: we want to be sure that the item
	 * size supplied is either the old or the new size.  We can also
	 * find out this way whether the client can accept both types of
	 * messages.
	 *
	 * Handle the exception of REQ_CONFIG, which accepts two data sizes.
	 */
508	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
509	if ((temp_size != proc->sizeofitem &&
510	     temp_size != proc->v6_sizeofitem) &&
511	    !(inpkt->implementation == IMPL_XNTPD &&
512	      inpkt->request == REQ_CONFIG &&
513	      temp_size == sizeof(struct old_conf_peer))) {
514		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
515			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
516		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
517		return;
518	}
519	if ((proc->sizeofitem != 0) &&
520	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
521	     (recv_len - REQ_LEN_HDR))) {
522		DPRINTF(3, ("process_private: not enough data\n"));
523		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
524		return;
525	}
526
527	switch (inpkt->implementation) {
528	case IMPL_XNTPD:
529		client_v6_capable = 1;
530		break;
531	case IMPL_XNTPD_OLD:
532		client_v6_capable = 0;
533		break;
534	default:
535		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
536		return;
537	}
538
539	/*
540	 * If we need to authenticate, do so.  Note that an
541	 * authenticatable packet must include a mac field, must
542	 * have used key info_auth_keyid and must have included
543	 * a time stamp in the appropriate field.  The time stamp
544	 * must be within INFO_TS_MAXSKEW of the receive
545	 * time stamp.
546	 */
547	if (proc->needs_auth && sys_authenticate) {
548
549		if (recv_len < (REQ_LEN_HDR +
550		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
551		    INFO_NITEMS(inpkt->err_nitems)) +
552		    REQ_TAIL_MIN)) {
553			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
554			return;
555		}
556
557		/*
558		 * For 16-octet digests, regardless of itemsize and
559		 * nitems, authenticated requests are a fixed size
560		 * with the timestamp, key ID, and digest located
561		 * at the end of the packet.  Because the key ID
562		 * determining the digest size precedes the digest,
563		 * for larger digests the fixed size request scheme
564		 * is abandoned and the timestamp, key ID, and digest
565		 * are located relative to the start of the packet,
566		 * with the digest size determined by the packet size.
567		 */
568		noslop_len = REQ_LEN_HDR
569			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
570			       INFO_NITEMS(inpkt->err_nitems)
571			     + sizeof(inpkt->tstamp);
572		/* 32-bit alignment */
573		noslop_len = (noslop_len + 3) & ~3;
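		/*
		 * A 20-octet MAC (4-octet key ID plus a 16-octet,
		 * MD5-length digest) corresponds to the fixed-size
		 * request layout described above; otherwise the MAC
		 * length is whatever remains of the packet beyond the
		 * items and the timestamp.
		 */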
574		if (recv_len > (noslop_len + MAX_MAC_LEN))
575			mac_len = 20;
576		else
577			mac_len = recv_len - noslop_len;
578
579		tailinpkt = (void *)((char *)inpkt + recv_len -
580			    (mac_len + sizeof(inpkt->tstamp)));
581
		/*
		 * If this guy is restricted from doing this, don't let
		 * him.  If the wrong key was used, or the packet doesn't
		 * have a MAC, return.
		 */
587		/* XXX: Use authistrustedip(), or equivalent. */
588		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
589		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
590			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
591				    INFO_IS_AUTH(inpkt->auth_seq),
592				    info_auth_keyid,
593				    ntohl(tailinpkt->keyid), (u_long)mac_len));
594#ifdef DEBUG
595			msyslog(LOG_DEBUG,
596				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
597				INFO_IS_AUTH(inpkt->auth_seq),
598				info_auth_keyid,
599				ntohl(tailinpkt->keyid), (u_long)mac_len);
600#endif
601			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
602			return;
603		}
604		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
605			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
606			msyslog(LOG_ERR,
607				"process_private: bad pkt length %zu",
608				recv_len);
609			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
610			return;
611		}
612		if (!mod_okay || !authhavekey(info_auth_keyid)) {
613			DPRINTF(5, ("failed auth mod_okay %d\n",
614				    mod_okay));
615#ifdef DEBUG
616			msyslog(LOG_DEBUG,
617				"process_private: failed auth mod_okay %d\n",
618				mod_okay);
619#endif
620			if (!mod_okay) {
621				sys_restricted++;
622			}
623			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
624			return;
625		}
626
627		/*
628		 * calculate absolute time difference between xmit time stamp
629		 * and receive time stamp.  If too large, too bad.
630		 */
631		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
632		L_SUB(&ftmp, &rbufp->recv_time);
633		LFPTOD(&ftmp, dtemp);
634		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
635			/*
636			 * He's a loser.  Tell him.
637			 */
638			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
639				    dtemp, INFO_TS_MAXSKEW));
640			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
641			return;
642		}
643
644		/*
645		 * So far so good.  See if decryption works out okay.
646		 */
647		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
648				 recv_len - mac_len, mac_len)) {
649			DPRINTF(5, ("authdecrypt failed\n"));
650			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
651			return;
652		}
653	}
654
655	DPRINTF(3, ("process_private: all okay, into handler\n"));
656	/*
657	 * Packet is okay.  Call the handler to send him data.
658	 */
659	(proc->handler)(srcadr, inter, inpkt);
660}
661
662
663/*
664 * list_peers - send a list of the peers
665 */
666static void
667list_peers(
668	sockaddr_u *srcadr,
669	endpt *inter,
670	struct req_pkt *inpkt
671	)
672{
673	struct info_peer_list *	ip;
674	const struct peer *	pp;
675
676	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
677	    v6sizeof(struct info_peer_list));
678	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
679		if (IS_IPV6(&pp->srcadr)) {
680			if (!client_v6_capable)
681				continue;
682			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
683			ip->v6_flag = 1;
684		} else {
685			ip->addr = NSRCADR(&pp->srcadr);
686			if (client_v6_capable)
687				ip->v6_flag = 0;
688		}
689
690		ip->port = NSRCPORT(&pp->srcadr);
691		ip->hmode = pp->hmode;
692		ip->flags = 0;
693		if (pp->flags & FLAG_CONFIG)
694			ip->flags |= INFO_FLAG_CONFIG;
695		if (pp == sys_peer)
696			ip->flags |= INFO_FLAG_SYSPEER;
697		if (pp->status == CTL_PST_SEL_SYNCCAND)
698			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
699		if (pp->status >= CTL_PST_SEL_SYSPEER)
700			ip->flags |= INFO_FLAG_SHORTLIST;
701		ip = (struct info_peer_list *)more_pkt();
702	}	/* for pp */
703
704	flush_pkt();
705}
706
707
708/*
709 * list_peers_sum - return extended peer list
710 */
711static void
712list_peers_sum(
713	sockaddr_u *srcadr,
714	endpt *inter,
715	struct req_pkt *inpkt
716	)
717{
718	struct info_peer_summary *	ips;
719	const struct peer *		pp;
720	l_fp 				ltmp;
721
722	DPRINTF(3, ("wants peer list summary\n"));
723
724	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
725	    v6sizeof(struct info_peer_summary));
726	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
727		DPRINTF(4, ("sum: got one\n"));
728		/*
729		 * Be careful here not to return v6 peers when we
730		 * want only v4.
731		 */
732		if (IS_IPV6(&pp->srcadr)) {
733			if (!client_v6_capable)
734				continue;
735			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
736			ips->v6_flag = 1;
737			if (pp->dstadr)
738				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
739			else
740				ZERO(ips->dstadr6);
741		} else {
742			ips->srcadr = NSRCADR(&pp->srcadr);
743			if (client_v6_capable)
744				ips->v6_flag = 0;
745
746			if (pp->dstadr) {
747				if (!pp->processed)
748					ips->dstadr = NSRCADR(&pp->dstadr->sin);
749				else {
750					if (MDF_BCAST == pp->cast_flags)
751						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
752					else if (pp->cast_flags) {
753						ips->dstadr = NSRCADR(&pp->dstadr->sin);
754						if (!ips->dstadr)
755							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
756					}
757				}
758			} else {
759				ips->dstadr = 0;
760			}
761		}
762
763		ips->srcport = NSRCPORT(&pp->srcadr);
764		ips->stratum = pp->stratum;
765		ips->hpoll = pp->hpoll;
766		ips->ppoll = pp->ppoll;
767		ips->reach = pp->reach;
768		ips->flags = 0;
769		if (pp == sys_peer)
770			ips->flags |= INFO_FLAG_SYSPEER;
771		if (pp->flags & FLAG_CONFIG)
772			ips->flags |= INFO_FLAG_CONFIG;
773		if (pp->flags & FLAG_REFCLOCK)
774			ips->flags |= INFO_FLAG_REFCLOCK;
775		if (pp->flags & FLAG_PREFER)
776			ips->flags |= INFO_FLAG_PREFER;
777		if (pp->flags & FLAG_BURST)
778			ips->flags |= INFO_FLAG_BURST;
779		if (pp->status == CTL_PST_SEL_SYNCCAND)
780			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
781		if (pp->status >= CTL_PST_SEL_SYSPEER)
782			ips->flags |= INFO_FLAG_SHORTLIST;
783		ips->hmode = pp->hmode;
784		ips->delay = HTONS_FP(DTOFP(pp->delay));
785		DTOLFP(pp->offset, &ltmp);
786		HTONL_FP(&ltmp, &ips->offset);
787		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
788
789		ips = (struct info_peer_summary *)more_pkt();
790	}	/* for pp */
791
792	flush_pkt();
793}
794
795
796/*
797 * peer_info - send information for one or more peers
798 */
799static void
800peer_info (
801	sockaddr_u *srcadr,
802	endpt *inter,
803	struct req_pkt *inpkt
804	)
805{
806	u_short			items;
807	size_t			item_sz;
808	char *			datap;
809	struct info_peer_list	ipl;
810	struct peer *		pp;
811	struct info_peer *	ip;
812	int			i;
813	int			j;
814	sockaddr_u		addr;
815	l_fp			ltmp;
816
817	items = INFO_NITEMS(inpkt->err_nitems);
818	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
819	datap = inpkt->u.data;
820	if (item_sz != sizeof(ipl)) {
821		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
822		return;
823	}
824	ip = prepare_pkt(srcadr, inter, inpkt,
825			 v6sizeof(struct info_peer));
826	while (items-- > 0 && ip != NULL) {
827		ZERO(ipl);
828		memcpy(&ipl, datap, item_sz);
829		ZERO_SOCK(&addr);
830		NSRCPORT(&addr) = ipl.port;
831		if (client_v6_capable && ipl.v6_flag) {
832			AF(&addr) = AF_INET6;
833			SOCK_ADDR6(&addr) = ipl.addr6;
834		} else {
835			AF(&addr) = AF_INET;
836			NSRCADR(&addr) = ipl.addr;
837		}
838#ifdef ISC_PLATFORM_HAVESALEN
839		addr.sa.sa_len = SOCKLEN(&addr);
840#endif
841		datap += item_sz;
842
843		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
844		if (NULL == pp)
845			continue;
846		if (IS_IPV6(&pp->srcadr)) {
847			if (pp->dstadr)
848				ip->dstadr6 =
849				    (MDF_BCAST == pp->cast_flags)
850					? SOCK_ADDR6(&pp->dstadr->bcast)
851					: SOCK_ADDR6(&pp->dstadr->sin);
852			else
853				ZERO(ip->dstadr6);
854
855			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
856			ip->v6_flag = 1;
857		} else {
858			if (pp->dstadr) {
859				if (!pp->processed)
860					ip->dstadr = NSRCADR(&pp->dstadr->sin);
861				else {
862					if (MDF_BCAST == pp->cast_flags)
863						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
864					else if (pp->cast_flags) {
865						ip->dstadr = NSRCADR(&pp->dstadr->sin);
866						if (!ip->dstadr)
867							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
868					}
869				}
870			} else
871				ip->dstadr = 0;
872
873			ip->srcadr = NSRCADR(&pp->srcadr);
874			if (client_v6_capable)
875				ip->v6_flag = 0;
876		}
877		ip->srcport = NSRCPORT(&pp->srcadr);
878		ip->flags = 0;
879		if (pp == sys_peer)
880			ip->flags |= INFO_FLAG_SYSPEER;
881		if (pp->flags & FLAG_CONFIG)
882			ip->flags |= INFO_FLAG_CONFIG;
883		if (pp->flags & FLAG_REFCLOCK)
884			ip->flags |= INFO_FLAG_REFCLOCK;
885		if (pp->flags & FLAG_PREFER)
886			ip->flags |= INFO_FLAG_PREFER;
887		if (pp->flags & FLAG_BURST)
888			ip->flags |= INFO_FLAG_BURST;
889		if (pp->status == CTL_PST_SEL_SYNCCAND)
890			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
891		if (pp->status >= CTL_PST_SEL_SYSPEER)
892			ip->flags |= INFO_FLAG_SHORTLIST;
893		ip->leap = pp->leap;
894		ip->hmode = pp->hmode;
895		ip->pmode = pp->pmode;
896		ip->keyid = pp->keyid;
897		ip->stratum = pp->stratum;
898		ip->ppoll = pp->ppoll;
899		ip->hpoll = pp->hpoll;
900		ip->precision = pp->precision;
901		ip->version = pp->version;
902		ip->reach = pp->reach;
903		ip->unreach = (u_char)pp->unreach;
904		ip->flash = (u_char)pp->flash;
905		ip->flash2 = (u_short)pp->flash;
906		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
907		ip->ttl = (u_char)pp->ttl;
908		ip->associd = htons(pp->associd);
909		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
910		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
911		ip->refid = pp->refid;
912		HTONL_FP(&pp->reftime, &ip->reftime);
913		HTONL_FP(&pp->aorg, &ip->org);
914		HTONL_FP(&pp->rec, &ip->rec);
915		HTONL_FP(&pp->xmt, &ip->xmt);
916		j = pp->filter_nextpt - 1;
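		/*
		 * Walk the clock filter from the newest sample
		 * backwards.  order[i] converts the shift-register
		 * index stored in filter_order[] into an age relative
		 * to the newest sample (0 = most recent), modulo
		 * NTP_SHIFT.
		 */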
917		for (i = 0; i < NTP_SHIFT; i++, j--) {
918			if (j < 0)
919				j = NTP_SHIFT-1;
920			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
921			DTOLFP(pp->filter_offset[j], &ltmp);
922			HTONL_FP(&ltmp, &ip->filtoffset[i]);
923			ip->order[i] = (u_char)((pp->filter_nextpt +
924						 NTP_SHIFT - 1) -
925						pp->filter_order[i]);
926			if (ip->order[i] >= NTP_SHIFT)
927				ip->order[i] -= NTP_SHIFT;
928		}
929		DTOLFP(pp->offset, &ltmp);
930		HTONL_FP(&ltmp, &ip->offset);
931		ip->delay = HTONS_FP(DTOFP(pp->delay));
932		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
933		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
934		ip = more_pkt();
935	}
936	flush_pkt();
937}
938
939
940/*
941 * peer_stats - send statistics for one or more peers
942 */
943static void
944peer_stats (
945	sockaddr_u *srcadr,
946	endpt *inter,
947	struct req_pkt *inpkt
948	)
949{
950	u_short			items;
951	size_t			item_sz;
952	char *			datap;
953	struct info_peer_list	ipl;
954	struct peer *		pp;
955	struct info_peer_stats *ip;
956	sockaddr_u addr;
957
958	DPRINTF(1, ("peer_stats: called\n"));
959	items = INFO_NITEMS(inpkt->err_nitems);
960	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
961	datap = inpkt->u.data;
962	if (item_sz > sizeof(ipl)) {
963		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
964		return;
965	}
966	ip = prepare_pkt(srcadr, inter, inpkt,
967			 v6sizeof(struct info_peer_stats));
968	while (items-- > 0 && ip != NULL) {
969		ZERO(ipl);
970		memcpy(&ipl, datap, item_sz);
971		ZERO(addr);
972		NSRCPORT(&addr) = ipl.port;
973		if (client_v6_capable && ipl.v6_flag) {
974			AF(&addr) = AF_INET6;
975			SOCK_ADDR6(&addr) = ipl.addr6;
976		} else {
977			AF(&addr) = AF_INET;
978			NSRCADR(&addr) = ipl.addr;
979		}
980#ifdef ISC_PLATFORM_HAVESALEN
981		addr.sa.sa_len = SOCKLEN(&addr);
982#endif
983		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
984			    stoa(&addr), ipl.port, NSRCPORT(&addr)));
985
986		datap += item_sz;
987
988		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
989		if (NULL == pp)
990			continue;
991
992		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
993
994		if (IS_IPV4(&pp->srcadr)) {
995			if (pp->dstadr) {
996				if (!pp->processed)
997					ip->dstadr = NSRCADR(&pp->dstadr->sin);
998				else {
999					if (MDF_BCAST == pp->cast_flags)
1000						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1001					else if (pp->cast_flags) {
1002						ip->dstadr = NSRCADR(&pp->dstadr->sin);
1003						if (!ip->dstadr)
1004							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1005					}
1006				}
1007			} else
1008				ip->dstadr = 0;
1009
1010			ip->srcadr = NSRCADR(&pp->srcadr);
1011			if (client_v6_capable)
1012				ip->v6_flag = 0;
1013		} else {
1014			if (pp->dstadr)
1015				ip->dstadr6 =
1016				    (MDF_BCAST == pp->cast_flags)
1017					? SOCK_ADDR6(&pp->dstadr->bcast)
1018					: SOCK_ADDR6(&pp->dstadr->sin);
1019			else
1020				ZERO(ip->dstadr6);
1021
1022			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1023			ip->v6_flag = 1;
1024		}
1025		ip->srcport = NSRCPORT(&pp->srcadr);
1026		ip->flags = 0;
1027		if (pp == sys_peer)
1028		    ip->flags |= INFO_FLAG_SYSPEER;
1029		if (pp->flags & FLAG_CONFIG)
1030		    ip->flags |= INFO_FLAG_CONFIG;
1031		if (pp->flags & FLAG_REFCLOCK)
1032		    ip->flags |= INFO_FLAG_REFCLOCK;
1033		if (pp->flags & FLAG_PREFER)
1034		    ip->flags |= INFO_FLAG_PREFER;
1035		if (pp->flags & FLAG_BURST)
1036		    ip->flags |= INFO_FLAG_BURST;
1037		if (pp->flags & FLAG_IBURST)
1038		    ip->flags |= INFO_FLAG_IBURST;
1039		if (pp->status == CTL_PST_SEL_SYNCCAND)
1040		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1041		if (pp->status >= CTL_PST_SEL_SYSPEER)
1042		    ip->flags |= INFO_FLAG_SHORTLIST;
1043		ip->flags = htons(ip->flags);
1044		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1045		ip->timetosend = htonl(pp->nextdate - current_time);
1046		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1047		ip->sent = htonl((u_int32)(pp->sent));
1048		ip->processed = htonl((u_int32)(pp->processed));
1049		ip->badauth = htonl((u_int32)(pp->badauth));
1050		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1051		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1052		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1053		ip->selbroken = htonl((u_int32)(pp->selbroken));
1054		ip->candidate = pp->status;
1055		ip = (struct info_peer_stats *)more_pkt();
1056	}
1057	flush_pkt();
1058}
1059
1060
1061/*
1062 * sys_info - return system info
1063 */
1064static void
1065sys_info(
1066	sockaddr_u *srcadr,
1067	endpt *inter,
1068	struct req_pkt *inpkt
1069	)
1070{
1071	register struct info_sys *is;
1072
1073	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1074	    v6sizeof(struct info_sys));
1075
1076	if (sys_peer) {
1077		if (IS_IPV4(&sys_peer->srcadr)) {
1078			is->peer = NSRCADR(&sys_peer->srcadr);
1079			if (client_v6_capable)
1080				is->v6_flag = 0;
1081		} else if (client_v6_capable) {
1082			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1083			is->v6_flag = 1;
1084		}
1085		is->peer_mode = sys_peer->hmode;
1086	} else {
1087		is->peer = 0;
1088		if (client_v6_capable) {
1089			is->v6_flag = 0;
1090		}
1091		is->peer_mode = 0;
1092	}
1093
1094	is->leap = sys_leap;
1095	is->stratum = sys_stratum;
1096	is->precision = sys_precision;
1097	is->rootdelay = htonl(DTOFP(sys_rootdelay));
1098	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1099	is->frequency = htonl(DTOFP(sys_jitter));
1100	is->stability = htonl(DTOUFP(clock_stability * 1e6));
1101	is->refid = sys_refid;
1102	HTONL_FP(&sys_reftime, &is->reftime);
1103
1104	is->poll = sys_poll;
1105
1106	is->flags = 0;
1107	if (sys_authenticate)
1108		is->flags |= INFO_FLAG_AUTHENTICATE;
1109	if (sys_bclient)
1110		is->flags |= INFO_FLAG_BCLIENT;
1111#ifdef REFCLOCK
1112	if (cal_enable)
1113		is->flags |= INFO_FLAG_CAL;
1114#endif /* REFCLOCK */
1115	if (kern_enable)
1116		is->flags |= INFO_FLAG_KERNEL;
1117	if (mon_enabled != MON_OFF)
1118		is->flags |= INFO_FLAG_MONITOR;
1119	if (ntp_enable)
1120		is->flags |= INFO_FLAG_NTP;
1121	if (hardpps_enable)
1122		is->flags |= INFO_FLAG_PPS_SYNC;
1123	if (stats_control)
1124		is->flags |= INFO_FLAG_FILEGEN;
1125	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1126	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1127	(void) more_pkt();
1128	flush_pkt();
1129}
1130
1131
1132/*
1133 * sys_stats - return system statistics
1134 */
1135static void
1136sys_stats(
1137	sockaddr_u *srcadr,
1138	endpt *inter,
1139	struct req_pkt *inpkt
1140	)
1141{
1142	register struct info_sys_stats *ss;
1143
1144	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1145		sizeof(struct info_sys_stats));
1146	ss->timeup = htonl((u_int32)current_time);
1147	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1148	ss->denied = htonl((u_int32)sys_restricted);
1149	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1150	ss->newversionpkt = htonl((u_int32)sys_newversion);
1151	ss->unknownversion = htonl((u_int32)sys_declined);
1152	ss->badlength = htonl((u_int32)sys_badlength);
1153	ss->processed = htonl((u_int32)sys_processed);
1154	ss->badauth = htonl((u_int32)sys_badauth);
1155	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1156	ss->received = htonl((u_int32)sys_received);
1157	ss->lamport = htonl((u_int32)sys_lamport);
1158	ss->tsrounding = htonl((u_int32)sys_tsrounding);
1159	(void) more_pkt();
1160	flush_pkt();
1161}
1162
1163
1164/*
1165 * mem_stats - return memory statistics
1166 */
1167static void
1168mem_stats(
1169	sockaddr_u *srcadr,
1170	endpt *inter,
1171	struct req_pkt *inpkt
1172	)
1173{
1174	register struct info_mem_stats *ms;
1175	register int i;
1176
1177	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1178						  sizeof(struct info_mem_stats));
1179
1180	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1181	ms->totalpeermem = htons((u_short)total_peer_structs);
1182	ms->freepeermem = htons((u_short)peer_free_count);
1183	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1184	ms->allocations = htonl((u_int32)peer_allocations);
1185	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1186
1187	for (i = 0; i < NTP_HASH_SIZE; i++)
1188		ms->hashcount[i] = (u_char)
1189		    min((u_int)peer_hash_count[i], UCHAR_MAX);
1190
1191	(void) more_pkt();
1192	flush_pkt();
1193}
1194
1195
1196/*
1197 * io_stats - return io statistics
1198 */
1199static void
1200io_stats(
1201	sockaddr_u *srcadr,
1202	endpt *inter,
1203	struct req_pkt *inpkt
1204	)
1205{
1206	struct info_io_stats *io;
1207
1208	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1209						 sizeof(struct info_io_stats));
1210
1211	io->timereset = htonl((u_int32)(current_time - io_timereset));
1212	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1213	io->freerecvbufs = htons((u_short) free_recvbuffs());
1214	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1215	io->lowwater = htons((u_short) lowater_additions());
1216	io->dropped = htonl((u_int32)packets_dropped);
1217	io->ignored = htonl((u_int32)packets_ignored);
1218	io->received = htonl((u_int32)packets_received);
1219	io->sent = htonl((u_int32)packets_sent);
1220	io->notsent = htonl((u_int32)packets_notsent);
1221	io->interrupts = htonl((u_int32)handler_calls);
1222	io->int_received = htonl((u_int32)handler_pkts);
1223
1224	(void) more_pkt();
1225	flush_pkt();
1226}
1227
1228
1229/*
1230 * timer_stats - return timer statistics
1231 */
1232static void
1233timer_stats(
1234	sockaddr_u *		srcadr,
1235	endpt *			inter,
1236	struct req_pkt *	inpkt
1237	)
1238{
1239	struct info_timer_stats *	ts;
1240	u_long				sincereset;
1241
1242	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1243						    inpkt, sizeof(*ts));
1244
1245	sincereset = current_time - timer_timereset;
1246	ts->timereset = htonl((u_int32)sincereset);
1247	ts->alarms = ts->timereset;
1248	ts->overflows = htonl((u_int32)alarm_overflow);
1249	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1250
1251	(void) more_pkt();
1252	flush_pkt();
1253}
1254
1255
1256/*
1257 * loop_info - return the current state of the loop filter
1258 */
1259static void
1260loop_info(
1261	sockaddr_u *srcadr,
1262	endpt *inter,
1263	struct req_pkt *inpkt
1264	)
1265{
1266	struct info_loop *li;
1267	l_fp ltmp;
1268
1269	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1270	    sizeof(struct info_loop));
1271
1272	DTOLFP(last_offset, &ltmp);
1273	HTONL_FP(&ltmp, &li->last_offset);
1274	DTOLFP(drift_comp * 1e6, &ltmp);
1275	HTONL_FP(&ltmp, &li->drift_comp);
1276	li->compliance = htonl((u_int32)(tc_counter));
1277	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1278
1279	(void) more_pkt();
1280	flush_pkt();
1281}
1282
1283
1284/*
1285 * do_conf - add a peer to the configuration list
1286 */
1287static void
1288do_conf(
1289	sockaddr_u *srcadr,
1290	endpt *inter,
1291	struct req_pkt *inpkt
1292	)
1293{
1294	u_short			items;
1295	size_t			item_sz;
1296	u_int			fl;
1297	char *			datap;
1298	struct conf_peer	temp_cp;
1299	sockaddr_u		peeraddr;
1300
1301	/*
1302	 * Do a check of everything to see that it looks
1303	 * okay.  If not, complain about it.  Note we are
1304	 * very picky here.
1305	 */
1306	items = INFO_NITEMS(inpkt->err_nitems);
1307	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1308	datap = inpkt->u.data;
1309	if (item_sz > sizeof(temp_cp)) {
1310		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1311		return;
1312	}
1313
1314	while (items-- > 0) {
1315		ZERO(temp_cp);
1316		memcpy(&temp_cp, datap, item_sz);
1317		ZERO_SOCK(&peeraddr);
1318
1319		fl = 0;
1320		if (temp_cp.flags & CONF_FLAG_PREFER)
1321			fl |= FLAG_PREFER;
1322		if (temp_cp.flags & CONF_FLAG_BURST)
1323			fl |= FLAG_BURST;
1324		if (temp_cp.flags & CONF_FLAG_IBURST)
1325			fl |= FLAG_IBURST;
1326#ifdef AUTOKEY
1327		if (temp_cp.flags & CONF_FLAG_SKEY)
1328			fl |= FLAG_SKEY;
1329#endif	/* AUTOKEY */
1330		if (client_v6_capable && temp_cp.v6_flag) {
1331			AF(&peeraddr) = AF_INET6;
1332			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1333		} else {
1334			AF(&peeraddr) = AF_INET;
1335			NSRCADR(&peeraddr) = temp_cp.peeraddr;
1336			/*
1337			 * Make sure the address is valid
1338			 */
1339			if (!ISREFCLOCKADR(&peeraddr) &&
1340			    ISBADADR(&peeraddr)) {
1341				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1342				return;
1343			}
1344
1345		}
1346		NSRCPORT(&peeraddr) = htons(NTP_PORT);
1347#ifdef ISC_PLATFORM_HAVESALEN
1348		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1349#endif
1350
1351		/* check mode value: 0 <= hmode <= 6
1352		 *
1353		 * There's no good global define for that limit, and
1354		 * using a magic define is as good (or bad, actually) as
1355		 * a magic number. So we use the highest possible peer
1356		 * mode, and that is MODE_BCLIENT.
1357		 *
1358		 * [Bug 3009] claims that a problem occurs for hmode > 7,
1359		 * but the code in ntp_peer.c indicates trouble for any
1360		 * hmode > 6 ( --> MODE_BCLIENT).
1361		 */
1362		if (temp_cp.hmode > MODE_BCLIENT) {
1363			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1364			return;
1365		}
1366
1367		/* Any more checks on the values? Unchecked at this
1368		 * point:
1369		 *   - version
1370		 *   - ttl
1371		 *   - keyid
1372		 *
1373		 *   - minpoll/maxpoll, but they are treated properly
1374		 *     for all cases internally. Checking not necessary.
1375		 *
1376		 * Note that we ignore any previously-specified ippeerlimit.
1377		 * If we're told to create the peer, we create the peer.
1378		 */
1379
1380		/* finally create the peer */
1381		if (peer_config(&peeraddr, NULL, NULL, -1,
1382		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1383		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1384		    NULL) == 0)
1385		{
1386			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1387			return;
1388		}
1389
1390		datap += item_sz;
1391	}
1392	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1393}
1394
1395
1396/*
1397 * do_unconf - remove a peer from the configuration list
1398 */
1399static void
1400do_unconf(
1401	sockaddr_u *	srcadr,
1402	endpt *		inter,
1403	struct req_pkt *inpkt
1404	)
1405{
1406	u_short			items;
1407	size_t			item_sz;
1408	char *			datap;
1409	struct conf_unpeer	temp_cp;
1410	struct peer *		p;
1411	sockaddr_u		peeraddr;
1412	int			loops;
1413
1414	/*
1415	 * This is a bit unstructured, but I like to be careful.
1416	 * We check to see that every peer exists and is actually
1417	 * configured.  If so, we remove them.  If not, we return
1418	 * an error.
1419	 *
1420	 * [Bug 3011] Even if we checked all peers given in the request
1421	 * in a dry run, there's still a chance that the caller played
1422	 * unfair and gave the same peer multiple times. So we still
1423	 * have to be prepared for nasty surprises in the second run ;)
1424	 */
1425
1426	/* basic consistency checks */
1427	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1428	if (item_sz > sizeof(temp_cp)) {
1429		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1430		return;
1431	}
1432
1433	/* now do two runs: first a dry run, then a busy one */
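	/*
	 * The dry run validates every requested peer before anything
	 * is removed, so a bogus entry cannot leave the configuration
	 * only partially unpeered.
	 */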
1434	for (loops = 0; loops != 2; ++loops) {
1435		items = INFO_NITEMS(inpkt->err_nitems);
1436		datap = inpkt->u.data;
1437		while (items-- > 0) {
1438			/* copy from request to local */
1439			ZERO(temp_cp);
1440			memcpy(&temp_cp, datap, item_sz);
1441			/* get address structure */
1442			ZERO_SOCK(&peeraddr);
1443			if (client_v6_capable && temp_cp.v6_flag) {
1444				AF(&peeraddr) = AF_INET6;
1445				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1446			} else {
1447				AF(&peeraddr) = AF_INET;
1448				NSRCADR(&peeraddr) = temp_cp.peeraddr;
1449			}
1450			SET_PORT(&peeraddr, NTP_PORT);
1451#ifdef ISC_PLATFORM_HAVESALEN
1452			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1453#endif
1454			DPRINTF(1, ("searching for %s\n",
1455				    stoa(&peeraddr)));
1456
			/* search for a matching configured(!) peer */
1458			p = NULL;
1459			do {
1460				p = findexistingpeer(
1461					&peeraddr, NULL, p, -1, 0, NULL);
1462			} while (p && !(FLAG_CONFIG & p->flags));
1463
1464			if (!loops && !p) {
1465				/* Item not found in dry run -- bail! */
1466				req_ack(srcadr, inter, inpkt,
1467					INFO_ERR_NODATA);
1468				return;
1469			} else if (loops && p) {
1470				/* Item found in busy run -- remove! */
1471				peer_clear(p, "GONE");
1472				unpeer(p);
1473			}
1474			datap += item_sz;
1475		}
1476	}
1477
1478	/* report success */
1479	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1480}
1481
1482
1483/*
1484 * set_sys_flag - set system flags
1485 */
1486static void
1487set_sys_flag(
1488	sockaddr_u *srcadr,
1489	endpt *inter,
1490	struct req_pkt *inpkt
1491	)
1492{
1493	setclr_flags(srcadr, inter, inpkt, 1);
1494}
1495
1496
1497/*
1498 * clr_sys_flag - clear system flags
1499 */
1500static void
1501clr_sys_flag(
1502	sockaddr_u *srcadr,
1503	endpt *inter,
1504	struct req_pkt *inpkt
1505	)
1506{
1507	setclr_flags(srcadr, inter, inpkt, 0);
1508}
1509
1510
1511/*
1512 * setclr_flags - do the grunge work of flag setting/clearing
1513 */
1514static void
1515setclr_flags(
1516	sockaddr_u *srcadr,
1517	endpt *inter,
1518	struct req_pkt *inpkt,
1519	u_long set
1520	)
1521{
1522	struct conf_sys_flags *sf;
1523	u_int32 flags;
1524
1525	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1526		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1527		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1528		return;
1529	}
1530
1531	sf = (struct conf_sys_flags *)&inpkt->u;
1532	flags = ntohl(sf->flags);
1533
1534	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1535		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1536		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1537		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1538			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1539				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1540				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1541				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1542		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1543		return;
1544	}
1545
1546	if (flags & SYS_FLAG_BCLIENT)
1547		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1548	if (flags & SYS_FLAG_PPS)
1549		proto_config(PROTO_PPS, set, 0., NULL);
1550	if (flags & SYS_FLAG_NTP)
1551		proto_config(PROTO_NTP, set, 0., NULL);
1552	if (flags & SYS_FLAG_KERNEL)
1553		proto_config(PROTO_KERNEL, set, 0., NULL);
1554	if (flags & SYS_FLAG_MONITOR)
1555		proto_config(PROTO_MONITOR, set, 0., NULL);
1556	if (flags & SYS_FLAG_FILEGEN)
1557		proto_config(PROTO_FILEGEN, set, 0., NULL);
1558	if (flags & SYS_FLAG_AUTH)
1559		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1560	if (flags & SYS_FLAG_CAL)
1561		proto_config(PROTO_CAL, set, 0., NULL);
1562	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1563}
1564
/* There have been some issues with the restrict list processing,
 * ranging from deep recursion (resulting in stack overflows) to
 * overfull reply buffers.
 *
 * To avoid this trouble the list reversal is done iteratively using a
 * scratch pad.
 */
1572typedef struct RestrictStack RestrictStackT;
1573struct RestrictStack {
1574	RestrictStackT   *link;
1575	size_t            fcnt;
1576	const restrict_u *pres[63];
1577};
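
/*
 * Each sheet holds up to 63 entries; fcnt counts the free slots, so
 * pushes store at pres[--fcnt] and pops read pres[fcnt++], giving
 * LIFO order.  Pushing a restriction list front-to-back and then
 * popping it therefore yields the entries back-to-front, which is
 * the reversal the dump helpers below rely on.
 */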
1578
1579static size_t
1580getStackSheetSize(
1581	RestrictStackT *sp
1582	)
1583{
1584	if (sp)
1585		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1586	return 0u;
1587}
1588
1589static int/*BOOL*/
1590pushRestriction(
1591	RestrictStackT  **spp,
1592	const restrict_u *ptr
1593	)
1594{
1595	RestrictStackT *sp;
1596
1597	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1598		/* need another sheet in the scratch pad */
1599		sp = emalloc(sizeof(*sp));
1600		sp->link = *spp;
1601		sp->fcnt = getStackSheetSize(sp);
1602		*spp = sp;
1603	}
1604	sp->pres[--sp->fcnt] = ptr;
1605	return TRUE;
1606}
1607
1608static int/*BOOL*/
1609popRestriction(
1610	RestrictStackT   **spp,
1611	const restrict_u **opp
1612	)
1613{
1614	RestrictStackT *sp;
1615
1616	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1617		return FALSE;
1618
1619	*opp = sp->pres[sp->fcnt++];
1620	if (sp->fcnt >= getStackSheetSize(sp)) {
1621		/* discard sheet from scratch pad */
1622		*spp = sp->link;
1623		free(sp);
1624	}
1625	return TRUE;
1626}
1627
1628static void
1629flushRestrictionStack(
1630	RestrictStackT **spp
1631	)
1632{
1633	RestrictStackT *sp;
1634
1635	while (NULL != (sp = *spp)) {
1636		*spp = sp->link;
1637		free(sp);
1638	}
1639}
1640
1641/*
1642 * list_restrict4 - iterative helper for list_restrict dumps IPv4
1643 *		    restriction list in reverse order.
1644 */
1645static void
1646list_restrict4(
1647	const restrict_u *	res,
1648	struct info_restrict **	ppir
1649	)
1650{
1651	RestrictStackT *	rpad;
1652	struct info_restrict *	pir;
1653
1654	pir = *ppir;
1655	for (rpad = NULL; res; res = res->link)
1656		if (!pushRestriction(&rpad, res))
1657			break;
1658
1659	while (pir && popRestriction(&rpad, &res)) {
1660		pir->addr = htonl(res->u.v4.addr);
1661		if (client_v6_capable)
1662			pir->v6_flag = 0;
1663		pir->mask = htonl(res->u.v4.mask);
1664		pir->count = htonl(res->count);
1665		pir->rflags = htons(res->rflags);
1666		pir->mflags = htons(res->mflags);
1667		pir = (struct info_restrict *)more_pkt();
1668	}
1669	flushRestrictionStack(&rpad);
1670	*ppir = pir;
1671}
1672
1673/*
1674 * list_restrict6 - iterative helper for list_restrict dumps IPv6
1675 *		    restriction list in reverse order.
1676 */
1677static void
1678list_restrict6(
1679	const restrict_u *	res,
1680	struct info_restrict **	ppir
1681	)
1682{
1683	RestrictStackT *	rpad;
1684	struct info_restrict *	pir;
1685
1686	pir = *ppir;
1687	for (rpad = NULL; res; res = res->link)
1688		if (!pushRestriction(&rpad, res))
1689			break;
1690
1691	while (pir && popRestriction(&rpad, &res)) {
1692		pir->addr6 = res->u.v6.addr;
1693		pir->mask6 = res->u.v6.mask;
1694		pir->v6_flag = 1;
1695		pir->count = htonl(res->count);
1696		pir->rflags = htons(res->rflags);
1697		pir->mflags = htons(res->mflags);
1698		pir = (struct info_restrict *)more_pkt();
1699	}
1700	flushRestrictionStack(&rpad);
1701	*ppir = pir;
1702}
1703
1704
1705/*
1706 * list_restrict - return the restrict list
1707 */
1708static void
1709list_restrict(
1710	sockaddr_u *srcadr,
1711	endpt *inter,
1712	struct req_pkt *inpkt
1713	)
1714{
1715	struct info_restrict *ir;
1716
1717	DPRINTF(3, ("wants restrict list summary\n"));
1718
1719	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1720	    v6sizeof(struct info_restrict));
1721
	/*
	 * The restriction lists are kept sorted in the reverse of
	 * their original order.  To preserve the output semantics,
	 * dump each list in reverse order; the helper routines above
	 * take care of that.
	 */
1727	list_restrict4(restrictlist4, &ir);
1728	if (client_v6_capable)
1729		list_restrict6(restrictlist6, &ir);
1730	flush_pkt();
1731}
1732
1733
1734/*
1735 * do_resaddflags - add flags to a restrict entry (or create one)
1736 */
1737static void
1738do_resaddflags(
1739	sockaddr_u *srcadr,
1740	endpt *inter,
1741	struct req_pkt *inpkt
1742	)
1743{
1744	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
1745}
1746
1747
1748
1749/*
1750 * do_ressubflags - remove flags from a restrict entry
1751 */
1752static void
1753do_ressubflags(
1754	sockaddr_u *srcadr,
1755	endpt *inter,
1756	struct req_pkt *inpkt
1757	)
1758{
1759	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
1760}
1761
1762
1763/*
1764 * do_unrestrict - remove a restrict entry from the list
1765 */
1766static void
1767do_unrestrict(
1768	sockaddr_u *srcadr,
1769	endpt *inter,
1770	struct req_pkt *inpkt
1771	)
1772{
1773	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
1774}
1775
1776
1777/*
1778 * do_restrict - do the dirty stuff of dealing with restrictions
1779 */
1780static void
1781do_restrict(
1782	sockaddr_u *srcadr,
1783	endpt *inter,
1784	struct req_pkt *inpkt,
1785	restrict_op op
1786	)
1787{
1788	char *			datap;
1789	struct conf_restrict	cr;
1790	u_short			items;
1791	size_t			item_sz;
1792	sockaddr_u		matchaddr;
1793	sockaddr_u		matchmask;
1794	int			bad;
1795
1796	switch(op) {
1797	    case RESTRICT_FLAGS:
1798	    case RESTRICT_UNFLAG:
1799	    case RESTRICT_REMOVE:
1800	    case RESTRICT_REMOVEIF:
1801	    	break;
1802
1803	    default:
1804		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1805		return;
1806	}
1807
1808	/*
1809	 * Do a check of the flags to make sure that only
1810	 * the NTPPORT flag is set, if any.  If not, complain
1811	 * about it.  Note we are very picky here.
1812	 */
1813	items = INFO_NITEMS(inpkt->err_nitems);
1814	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1815	datap = inpkt->u.data;
1816	if (item_sz > sizeof(cr)) {
1817		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1818		return;
1819	}
1820
1821	bad = 0;
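	/*
	 * 'bad' is a bit mask recording which check failed:
	 * 1 - unexpected match flags, 2 - unexpected restrict flags,
	 * 4 - unspecified IPv6 address, 8 - IPv4 address of
	 * INADDR_ANY (the address checks apply only when the mask is
	 * non-empty).
	 */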
1822	while (items-- > 0 && !bad) {
1823		memcpy(&cr, datap, item_sz);
1824		cr.flags = ntohs(cr.flags);	/* XXX */
1825		cr.mflags = ntohs(cr.mflags);
1826		if (~RESM_NTPONLY & cr.mflags)
1827			bad |= 1;
1828		if (~RES_ALLFLAGS & cr.flags)
1829			bad |= 2;
1830		if (INADDR_ANY != cr.mask) {
1831			if (client_v6_capable && cr.v6_flag) {
1832				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1833					bad |= 4;
1834			} else {
1835				if (INADDR_ANY == cr.addr)
1836					bad |= 8;
1837			}
1838		}
1839		datap += item_sz;
1840	}
1841
1842	if (bad) {
1843		msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
1844		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1845		return;
1846	}
1847
1848	/*
1849	 * Looks okay, try it out.  Needs to reload data pointer and
1850	 * item counter. (Talos-CAN-0052)
1851	 */
1852	ZERO_SOCK(&matchaddr);
1853	ZERO_SOCK(&matchmask);
1854	items = INFO_NITEMS(inpkt->err_nitems);
1855	datap = inpkt->u.data;
1856
1857	while (items-- > 0) {
1858		memcpy(&cr, datap, item_sz);
1859		cr.flags = ntohs(cr.flags);	/* XXX: size */
1860		cr.mflags = ntohs(cr.mflags);
1861		cr.ippeerlimit = ntohs(cr.ippeerlimit);
1862		if (client_v6_capable && cr.v6_flag) {
1863			AF(&matchaddr) = AF_INET6;
1864			AF(&matchmask) = AF_INET6;
1865			SOCK_ADDR6(&matchaddr) = cr.addr6;
1866			SOCK_ADDR6(&matchmask) = cr.mask6;
1867		} else {
1868			AF(&matchaddr) = AF_INET;
1869			AF(&matchmask) = AF_INET;
1870			NSRCADR(&matchaddr) = cr.addr;
1871			NSRCADR(&matchmask) = cr.mask;
1872		}
1873		hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
1874			      cr.ippeerlimit, cr.flags, 0);
1875		datap += item_sz;
1876	}
1877
1878	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1879}
1880
1881
1882/*
1883 * mon_getlist - return monitor data
1884 */
1885static void
1886mon_getlist(
1887	sockaddr_u *srcadr,
1888	endpt *inter,
1889	struct req_pkt *inpkt
1890	)
1891{
1892	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1893}
1894
1895
1896/*
1897 * Module entry points and the flags they correspond with
1898 */
1899struct reset_entry {
1900	int flag;		/* flag this corresponds to */
1901	void (*handler)(void);	/* routine to handle request */
1902};
1903
1904struct reset_entry reset_entries[] = {
1905	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
1906	{ RESET_FLAG_IO,	io_clr_stats },
1907	{ RESET_FLAG_SYS,	proto_clr_stats },
1908	{ RESET_FLAG_MEM,	peer_clr_stats },
1909	{ RESET_FLAG_TIMER,	timer_clr_stats },
1910	{ RESET_FLAG_AUTH,	reset_auth_stats },
1911	{ RESET_FLAG_CTL,	ctl_clr_stats },
1912	{ 0,			0 }
1913};
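
/*
 * reset_stats() below walks this table and invokes the handler for
 * every flag that is set in the request.
 */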
1914
1915/*
1916 * reset_stats - reset statistic counters here and there
1917 */
1918static void
1919reset_stats(
1920	sockaddr_u *srcadr,
1921	endpt *inter,
1922	struct req_pkt *inpkt
1923	)
1924{
1925	struct reset_flags *rflags;
1926	u_long flags;
1927	struct reset_entry *rent;
1928
1929	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1930		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1931		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1932		return;
1933	}
1934
1935	rflags = (struct reset_flags *)&inpkt->u;
1936	flags = ntohl(rflags->flags);
1937
	if (flags & ~RESET_ALLFLAGS) {
		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
			flags & ~RESET_ALLFLAGS);
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	for (rent = reset_entries; rent->flag != 0; rent++) {
		if (flags & rent->flag)
			(*rent->handler)();
	}
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * reset_peer - clear a peer's statistics
 */
static void
reset_peer(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short			items;
	size_t			item_sz;
	char *			datap;
	struct conf_unpeer	cp;
	struct peer *		p;
	sockaddr_u		peeraddr;
	int			bad;

	/*
	 * We check first to see that every peer exists.  If not,
	 * we return an error.
	 */

	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	if (item_sz > sizeof(cp)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	bad = FALSE;
	while (items-- > 0 && !bad) {
		ZERO(cp);
		memcpy(&cp, datap, item_sz);
		ZERO_SOCK(&peeraddr);
		if (client_v6_capable && cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = cp.peeraddr;
		}

#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif
		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
		if (NULL == p)
			bad++;
		datap += item_sz;
	}

	if (bad) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	/*
	 * Now do it in earnest. Needs to reload data pointer and item
	 * counter. (Talos-CAN-0052)
	 */

	items = INFO_NITEMS(inpkt->err_nitems);
	datap = inpkt->u.data;
	while (items-- > 0) {
		ZERO(cp);
		memcpy(&cp, datap, item_sz);
		ZERO_SOCK(&peeraddr);
		if (client_v6_capable && cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = cp.peeraddr;
		}
		SET_PORT(&peeraddr, NTP_PORT);
#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif
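		/* reset every association that matches this address;
		 * handing the previous match back to findexistingpeer()
		 * continues the walk */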
		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
		while (p != NULL) {
			peer_reset(p);
			p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL);
		}
		datap += item_sz;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * do_key_reread - reread the encryption key file
 */
static void
do_key_reread(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	rereadkeys();
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * trust_key - make one or more keys trusted
 */
static void
trust_key(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_trustkey(srcadr, inter, inpkt, 1);
}


/*
 * untrust_key - make one or more keys untrusted
 */
static void
untrust_key(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_trustkey(srcadr, inter, inpkt, 0);
}


/*
 * do_trustkey - make keys either trustable or untrustable
 */
static void
do_trustkey(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	u_long trust
	)
{
	register uint32_t *kp;
	register int items;

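	/* the request data is simply an array of 32-bit key ids */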
	items = INFO_NITEMS(inpkt->err_nitems);
	kp = (uint32_t *)&inpkt->u;
	while (items-- > 0) {
		authtrust(*kp, trust);
		kp++;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}


/*
 * get_auth_info - return some stats concerning the authentication module
 */
static void
get_auth_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_auth *ia;

	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
					     sizeof(struct info_auth));

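	/* counters go out in network byte order; timereset is the number
	 * of seconds since the statistics were last cleared */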
	ia->numkeys = htonl((u_int32)authnumkeys);
	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
	ia->keylookups = htonl((u_int32)authkeylookups);
	ia->keynotfound = htonl((u_int32)authkeynotfound);
	ia->encryptions = htonl((u_int32)authencryptions);
	ia->decryptions = htonl((u_int32)authdecryptions);
	ia->keyuncached = htonl((u_int32)authkeyuncached);
	ia->expired = htonl((u_int32)authkeyexpired);
	ia->timereset = htonl((u_int32)(current_time - auth_timereset));

	(void) more_pkt();
	flush_pkt();
}



/*
 * reset_auth_stats - reset the authentication stat counters.  Done here
 *		      to keep ntp-isms out of the authentication module
 */
void
reset_auth_stats(void)
{
	authkeylookups = 0;
	authkeynotfound = 0;
	authencryptions = 0;
	authdecryptions = 0;
	authkeyuncached = 0;
	auth_timereset = current_time;
}


/*
 * req_get_traps - return information about current trap holders
 */
static void
req_get_traps(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_trap *it;
	struct ctl_trap *tr;
	size_t i;

	if (num_ctl_traps == 0) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_trap));

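	/* walk the whole trap table; only slots marked TRAP_INUSE are
	 * reported, and IPv6 entries are skipped for clients that cannot
	 * handle the longer v6 layout */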
	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
		if (tr->tr_flags & TRAP_INUSE) {
			if (IS_IPV4(&tr->tr_addr)) {
				if (tr->tr_localaddr == any_interface)
					it->local_address = 0;
				else
					it->local_address
					    = NSRCADR(&tr->tr_localaddr->sin);
				it->trap_address = NSRCADR(&tr->tr_addr);
				if (client_v6_capable)
					it->v6_flag = 0;
			} else {
				if (!client_v6_capable)
					continue;
				it->local_address6
				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
				it->v6_flag = 1;
			}
			it->trap_port = NSRCPORT(&tr->tr_addr);
			it->sequence = htons(tr->tr_sequence);
			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
			it->resets = htonl((u_int32)tr->tr_resets);
			it->flags = htonl((u_int32)tr->tr_flags);
			it = (struct info_trap *)more_pkt();
		}
	}
	flush_pkt();
}


/*
 * req_set_trap - configure a trap
 */
static void
req_set_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_setclr_trap(srcadr, inter, inpkt, 1);
}



/*
 * req_clr_trap - unconfigure a trap
 */
static void
req_clr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	do_setclr_trap(srcadr, inter, inpkt, 0);
}



/*
 * do_setclr_trap - do the grunge work of (un)configuring a trap
 */
static void
do_setclr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	int set
	)
{
	register struct conf_trap *ct;
	register endpt *linter;
	int res;
	sockaddr_u laddr;

	/*
	 * Prepare sockaddr
	 */
	ZERO_SOCK(&laddr);
	AF(&laddr) = AF(srcadr);
	SET_PORT(&laddr, NTP_PORT);

	/*
	 * Restrict ourselves to one item only.  This eliminates
	 * the error reporting problem.
	 */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	ct = (struct conf_trap *)&inpkt->u;

	/*
	 * Look for the local interface.  If none, use the default.
	 */
	if (ct->local_address == 0) {
		linter = any_interface;
	} else {
		if (IS_IPV4(&laddr))
			NSRCADR(&laddr) = ct->local_address;
		else
			SOCK_ADDR6(&laddr) = ct->local_address6;
		linter = findinterface(&laddr);
		if (NULL == linter) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}
	}

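	/*
	 * Build the trap address; if the request does not name a port,
	 * fall back to the standard trap port.
	 */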
	if (IS_IPV4(&laddr))
		NSRCADR(&laddr) = ct->trap_address;
	else
		SOCK_ADDR6(&laddr) = ct->trap_address6;
	if (ct->trap_port)
		NSRCPORT(&laddr) = ct->trap_port;
	else
		SET_PORT(&laddr, TRAPPORT);

	if (set) {
		res = ctlsettrap(&laddr, linter, 0,
				 INFO_VERSION(inpkt->rm_vn_mode));
	} else {
		res = ctlclrtrap(&laddr, linter, 0);
	}

	if (!res) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
	} else {
		req_ack(srcadr, inter, inpkt, INFO_OKAY);
	}
	return;
}

/*
 * Validate a request packet for a new request or control key:
 *  - only one item allowed
 *  - key must be valid (that is, known, and not in the autokey range)
 */
static void
set_keyid_checked(
	keyid_t        *into,
	const char     *what,
	sockaddr_u     *srcadr,
	endpt          *inter,
	struct req_pkt *inpkt
	)
{
	keyid_t *pkeyid;
	keyid_t  tmpkey;

	/* restrict ourselves to one item only */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
			what);
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/* pull the new key id from the packet */
	pkeyid = (keyid_t *)&inpkt->u;
	tmpkey = ntohl(*pkeyid);

	/* validate the new key id, claim data error on failure */
	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
			what, (long)tmpkey);
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	/* if we arrive here, the key is good -- use it */
	*into = tmpkey;
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}

/*
 * set_request_keyid - set the keyid used to authenticate requests
 */
static void
set_request_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	set_keyid_checked(&info_auth_keyid, "request",
			  srcadr, inter, inpkt);
}



/*
 * set_control_keyid - set the keyid used to authenticate control messages
 */
static void
set_control_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	set_keyid_checked(&ctl_auth_keyid, "control",
			  srcadr, inter, inpkt);
}



/*
 * get_ctl_stats - return some stats concerning the control message module
 */
static void
get_ctl_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_control *ic;

	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
						sizeof(struct info_control));

	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
	ic->numctlreq = htonl((u_int32)numctlreq);
	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
	ic->numctlresponses = htonl((u_int32)numctlresponses);
	ic->numctlfrags = htonl((u_int32)numctlfrags);
	ic->numctlerrors = htonl((u_int32)numctlerrors);
	ic->numctltooshort = htonl((u_int32)numctltooshort);
	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
	ic->numctlbadop = htonl((u_int32)numctlbadop);
	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);

	(void) more_pkt();
	flush_pkt();
}


#ifdef KERNEL_PLL
/*
 * get_kernel_info - get kernel pll/pps information
 */
static void
get_kernel_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_kernel *ik;
	struct timex ntx;

	if (!pll_control) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	ZERO(ntx);
	if (ntp_adjtime(&ntx) < 0)
		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
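	/* even if ntp_adjtime() failed we answer, using the zeroed ntx,
	 * so the caller still gets a well-formed reply */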
	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
	    sizeof(struct info_kernel));

	/*
	 * pll variables
	 */
	ik->offset = htonl((u_int32)ntx.offset);
	ik->freq = htonl((u_int32)ntx.freq);
	ik->maxerror = htonl((u_int32)ntx.maxerror);
	ik->esterror = htonl((u_int32)ntx.esterror);
	ik->status = htons(ntx.status);
	ik->constant = htonl((u_int32)ntx.constant);
	ik->precision = htonl((u_int32)ntx.precision);
	ik->tolerance = htonl((u_int32)ntx.tolerance);

	/*
	 * pps variables
	 */
	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
	ik->jitter = htonl((u_int32)ntx.jitter);
	ik->shift = htons(ntx.shift);
	ik->stabil = htonl((u_int32)ntx.stabil);
	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
	ik->calcnt = htonl((u_int32)ntx.calcnt);
	ik->errcnt = htonl((u_int32)ntx.errcnt);
	ik->stbcnt = htonl((u_int32)ntx.stbcnt);

	(void) more_pkt();
	flush_pkt();
}
#endif /* KERNEL_PLL */


#ifdef REFCLOCK
/*
 * get_clock_info - get info about a clock
 */
static void
get_clock_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_clock *ic;
	register u_int32 *clkaddr;
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = &inpkt->u.u32[0];

	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
					      sizeof(struct info_clock));

	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

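		/* refclock_control() fills in clock_stat for this clock and
		 * allocates kv_list, which is released with free_varlist()
		 * once the interesting fields have been copied out */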
		clock_stat.kv_list = (struct ctl_var *)0;

		refclock_control(&addr, NULL, &clock_stat);

		ic->clockadr = NSRCADR(&addr);
		ic->type = clock_stat.type;
		ic->flags = clock_stat.flags;
		ic->lastevent = clock_stat.lastevent;
		ic->currentstatus = clock_stat.currentstatus;
		ic->polls = htonl((u_int32)clock_stat.polls);
		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
		ic->badformat = htonl((u_int32)clock_stat.badformat);
		ic->baddata = htonl((u_int32)clock_stat.baddata);
		ic->timestarted = htonl((u_int32)clock_stat.timereset);
		DTOLFP(clock_stat.fudgetime1, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime1);
		DTOLFP(clock_stat.fudgetime2, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime2);
		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
		/* [Bug3527] Backward Incompatible: ic->fudgeval2 is
		 * a string, instantiated via memcpy() so there is no
		 * endian issue to correct.
		 */
#ifdef DISABLE_BUG3527_FIX
		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
#else
		ic->fudgeval2 = clock_stat.fudgeval2;
#endif

		free_varlist(clock_stat.kv_list);

		ic = (struct info_clock *)more_pkt();
	}
	flush_pkt();
}



/*
 * set_clock_fudge - set a clock's fudge factors
 */
static void
set_clock_fudge(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct conf_fudge *cf;
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO(addr);
	ZERO(clock_stat);
	items = INFO_NITEMS(inpkt->err_nitems);
	cf = (struct conf_fudge *)&inpkt->u;

	while (items-- > 0) {
		AF(&addr) = AF_INET;
		NSRCADR(&addr) = cf->clockadr;
#ifdef ISC_PLATFORM_HAVESALEN
		addr.sa.sa_len = SOCKLEN(&addr);
#endif
		SET_PORT(&addr, NTP_PORT);
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		switch(ntohl(cf->which)) {
		    case FUDGE_TIME1:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime1);
			clock_stat.haveflags = CLK_HAVETIME1;
			break;
		    case FUDGE_TIME2:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime2);
			clock_stat.haveflags = CLK_HAVETIME2;
			break;
		    case FUDGE_VAL1:
			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL1;
			break;
		    case FUDGE_VAL2:
			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL2;
			break;
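		    /* only the low four driver flag bits can be changed
		     * through this request */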
		    case FUDGE_FLAGS:
			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
			clock_stat.haveflags =
				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
			break;
		    default:
			msyslog(LOG_ERR, "set_clock_fudge: default!");
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}

		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
#endif

#ifdef REFCLOCK
/*
 * get_clkbug_info - get debugging info about a clock
 */
static void
get_clkbug_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register int i;
	register struct info_clkbug *ic;
	register u_int32 *clkaddr;
	register int items;
	struct refclockbug bug;
	sockaddr_u addr;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = (u_int32 *)&inpkt->u;

	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
					       sizeof(struct info_clkbug));

	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ZERO(bug);
		refclock_buginfo(&addr, &bug);
		if (bug.nvalues == 0 && bug.ntimes == 0) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ic->clockadr = NSRCADR(&addr);
		i = bug.nvalues;
		if (i > NUMCBUGVALUES)
		    i = NUMCBUGVALUES;
		ic->nvalues = (u_char)i;
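		/* keep only the status bits for the values actually reported */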
		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
		while (--i >= 0)
		    ic->values[i] = htonl(bug.values[i]);

		i = bug.ntimes;
		if (i > NUMCBUGTIMES)
		    i = NUMCBUGTIMES;
		ic->ntimes = (u_char)i;
		ic->stimes = htonl(bug.stimes);
		while (--i >= 0) {
			HTONL_FP(&bug.times[i], &ic->times[i]);
		}

		ic = (struct info_clkbug *)more_pkt();
	}
	flush_pkt();
}
#endif

/*
 * fill_info_if_stats - per-interface callback: receives one endpt at a
 *			time and fills in the corresponding info_if_stats
 */
static void
fill_info_if_stats(void *data, interface_info_t *interface_info)
{
	struct info_if_stats **ifsp = (struct info_if_stats **)data;
	struct info_if_stats *ifs = *ifsp;
	endpt *ep = interface_info->ep;

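	/* a NULL slot means more_pkt() could not make room for another
	 * entry, so there is nothing left to fill in */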
	if (NULL == ifs)
		return;

	ZERO(*ifs);

	if (IS_IPV6(&ep->sin)) {
		if (!client_v6_capable)
			return;
		ifs->v6_flag = 1;
		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
	} else {
		ifs->v6_flag = 0;
		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
	}
	ifs->v6_flag = htonl(ifs->v6_flag);
	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
	ifs->family = htons(ep->family);
	ifs->flags = htonl(ep->flags);
	ifs->last_ttl = htonl(ep->last_ttl);
	ifs->num_mcast = htonl(ep->num_mcast);
	ifs->received = htonl(ep->received);
	ifs->sent = htonl(ep->sent);
	ifs->notsent = htonl(ep->notsent);
	ifs->ifindex = htonl(ep->ifindex);
	/* scope no longer in endpt, in in6_addr typically */
	ifs->scopeid = ifs->ifindex;
	ifs->ifnum = htonl(ep->ifnum);
	ifs->uptime = htonl(current_time - ep->starttime);
	ifs->ignore_packets = ep->ignore_packets;
	ifs->peercnt = htonl(ep->peercnt);
	ifs->action = interface_info->action;

	*ifsp = (struct info_if_stats *)more_pkt();
}

/*
 * get_if_stats - get interface statistics
 */
static void
get_if_stats(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_if_stats *ifs;

	DPRINTF(3, ("wants interface statistics\n"));

	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_if_stats));

	interface_enumerate(fill_info_if_stats, &ifs);

	flush_pkt();
}

static void
do_if_reload(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_if_stats *ifs;

	DPRINTF(3, ("wants interface reload\n"));

	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_if_stats));

	interface_update(fill_info_if_stats, &ifs);

	flush_pkt();
}

