ntp_request.c revision 1.14
1/*	$NetBSD: ntp_request.c,v 1.14 2016/05/01 23:32:01 christos Exp $	*/
2
3/*
4 * ntp_request.c - respond to information requests
5 */
6
7#ifdef HAVE_CONFIG_H
8# include <config.h>
9#endif
10
11#include "ntpd.h"
12#include "ntp_io.h"
13#include "ntp_request.h"
14#include "ntp_control.h"
15#include "ntp_refclock.h"
16#include "ntp_if.h"
17#include "ntp_stdlib.h"
18#include "ntp_assert.h"
19
20#include <stdio.h>
21#include <stddef.h>
22#include <signal.h>
23#ifdef HAVE_NETINET_IN_H
24#include <netinet/in.h>
25#endif
26#include <arpa/inet.h>
27
28#include "recvbuff.h"
29
30#ifdef KERNEL_PLL
31#include "ntp_syscall.h"
32#endif /* KERNEL_PLL */
33
34/*
35 * Structure to hold request procedure information
36 */
37#define	NOAUTH	0
38#define	AUTH	1
39
40#define	NO_REQUEST	(-1)
41/*
42 * Because we now have v6 addresses in the messages, we need to compensate
43 * for the larger size.  Therefore, we introduce the alternate size to
44 * keep us friendly with older implementations.  A little ugly.
45 */
46static int client_v6_capable = 0;   /* the client can handle longer messages */
47
48#define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
49
50struct req_proc {
51	short request_code;	/* defined request code */
52	short needs_auth;	/* true when authentication needed */
53	short sizeofitem;	/* size of request data item (older size)*/
54	short v6_sizeofitem;	/* size of request data item (new size)*/
55	void (*handler) (sockaddr_u *, endpt *,
56			   struct req_pkt *);	/* routine to handle request */
57};
58
59/*
60 * Universal request codes
61 */
62static const struct req_proc univ_codes[] = {
63	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
64};
65
66static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
67static	void *	prepare_pkt	(sockaddr_u *, endpt *,
68				 struct req_pkt *, size_t);
69static	void *	more_pkt	(void);
70static	void	flush_pkt	(void);
71static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
72static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
73static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
74static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
76static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
79static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
80static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
81static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
82static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
83static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
84static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
85static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
86static	void	list_restrict4	(const restrict_u *, struct info_restrict **);
87static	void	list_restrict6	(const restrict_u *, struct info_restrict **);
88static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
89static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
90static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
91static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
92static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, int);
93static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
94static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
95static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
96static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
97static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
98static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
99static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
100static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
101static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
102static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
103static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
104static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
105static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
106static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
107static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
108static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
109static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
110#ifdef KERNEL_PLL
111static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
112#endif /* KERNEL_PLL */
113#ifdef REFCLOCK
114static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
115static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
116#endif	/* REFCLOCK */
117#ifdef REFCLOCK
118static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
119#endif	/* REFCLOCK */
120
121/*
122 * ntpd request codes
123 */
124static const struct req_proc ntp_codes[] = {
125	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
126	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
127	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
128				sizeof(struct info_peer_list), peer_info},
129	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
130				sizeof(struct info_peer_list), peer_stats},
131	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
132	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
133	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
134	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
135	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
136	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
137	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
138				sizeof(struct conf_peer), do_conf },
139	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
140				sizeof(struct conf_unpeer), do_unconf },
141	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142				sizeof(struct conf_sys_flags), set_sys_flag },
143	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
144				sizeof(struct conf_sys_flags),  clr_sys_flag },
145	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
146	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
147				sizeof(struct conf_restrict), do_resaddflags },
148	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
149				sizeof(struct conf_restrict), do_ressubflags },
150	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
151				sizeof(struct conf_restrict), do_unrestrict },
152	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
153	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
154	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
155	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
156				sizeof(struct conf_unpeer), reset_peer },
157	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
158	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
159	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
160	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
161	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
162	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
163				sizeof(struct conf_trap), req_set_trap },
164	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
165				sizeof(struct conf_trap), req_clr_trap },
166	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167				set_request_keyid },
168	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
169				set_control_keyid },
170	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
171#ifdef KERNEL_PLL
172	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
173#endif
174#ifdef REFCLOCK
175	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
176				get_clock_info },
177	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
178				sizeof(struct conf_fudge), set_clock_fudge },
179	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
180				get_clkbug_info },
181#endif
182	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
183	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
184
185	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
186};
187
188
189/*
190 * Authentication keyid used to authenticate requests.  Zero means we
191 * don't allow writing anything.
192 */
193keyid_t info_auth_keyid;
194
195/*
196 * Statistic counters to keep track of requests and responses.
197 */
198u_long numrequests;		/* number of requests we've received */
199u_long numresppkts;		/* number of resp packets sent with data */
200
201/*
202 * lazy way to count errors, indexed by the error code
203 */
204u_long errorcounter[MAX_INFO_ERR + 1];
205
206/*
207 * A hack.  To keep the authentication module clear of ntp-ism's, we
208 * include a time reset variable for its stats here.
209 */
210u_long auth_timereset;
211
212/*
213 * Response packet used by these routines.  Also some state information
214 * so that we can handle packet formatting within a common set of
215 * subroutines.  Note we try to enter data in place whenever possible,
216 * but the need to set the more bit correctly means we occasionally
217 * use the extra buffer and copy.
218 */
219static struct resp_pkt rpkt;
220static int reqver;
221static int seqno;
222static int nitems;
223static int itemsize;
224static int databytes;
225static char exbuf[RESP_DATA_SIZE];
226static int usingexbuf;
227static sockaddr_u *toaddr;
228static endpt *frominter;
229
230/*
231 * init_request - initialize request data
232 */
233void
234init_request (void)
235{
236	size_t i;
237
238	numrequests = 0;
239	numresppkts = 0;
240	auth_timereset = 0;
241	info_auth_keyid = 0;	/* by default, can't do this */
242
243	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
244	    errorcounter[i] = 0;
245}
246
247
248/*
249 * req_ack - acknowledge request with no data
250 */
251static void
252req_ack(
253	sockaddr_u *srcadr,
254	endpt *inter,
255	struct req_pkt *inpkt,
256	int errcode
257	)
258{
259	/*
260	 * fill in the fields
261	 */
262	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
263	rpkt.auth_seq = AUTH_SEQ(0, 0);
264	rpkt.implementation = inpkt->implementation;
265	rpkt.request = inpkt->request;
266	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
267	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
268
269	/*
270	 * send packet and bump counters
271	 */
272	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
273	errorcounter[errcode]++;
274}
275
276
277/*
278 * prepare_pkt - prepare response packet for transmission, return pointer
279 *		 to storage for data item.
280 */
281static void *
282prepare_pkt(
283	sockaddr_u *srcadr,
284	endpt *inter,
285	struct req_pkt *pkt,
286	size_t structsize
287	)
288{
289	DPRINTF(4, ("request: preparing pkt\n"));
290
291	/*
292	 * Fill in the implementation, request and itemsize fields
293	 * since these won't change.
294	 */
295	rpkt.implementation = pkt->implementation;
296	rpkt.request = pkt->request;
297	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
298
299	/*
300	 * Compute the static data needed to carry on.
301	 */
302	toaddr = srcadr;
303	frominter = inter;
304	seqno = 0;
305	nitems = 0;
306	itemsize = structsize;
307	databytes = 0;
308	usingexbuf = 0;
309
310	/*
311	 * return the beginning of the packet buffer.
312	 */
313	return &rpkt.u;
314}
315
316
317/*
318 * more_pkt - return a data pointer for a new item.
319 */
320static void *
321more_pkt(void)
322{
323	/*
324	 * If we were using the extra buffer, send the packet.
325	 */
326	if (usingexbuf) {
327		DPRINTF(3, ("request: sending pkt\n"));
328		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
329		rpkt.auth_seq = AUTH_SEQ(0, seqno);
330		rpkt.err_nitems = htons((u_short)nitems);
331		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
332			RESP_HEADER_SIZE + databytes);
333		numresppkts++;
334
335		/*
336		 * Copy data out of exbuf into the packet.
337		 */
338		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
339		seqno++;
340		databytes = 0;
341		nitems = 0;
342		usingexbuf = 0;
343	}
344
345	databytes += itemsize;
346	nitems++;
347	if (databytes + itemsize <= RESP_DATA_SIZE) {
348		DPRINTF(4, ("request: giving him more data\n"));
349		/*
350		 * More room in packet.  Give him the
351		 * next address.
352		 */
353		return &rpkt.u.data[databytes];
354	} else {
355		/*
356		 * No room in packet.  Give him the extra
357		 * buffer unless this was the last in the sequence.
358		 */
359		DPRINTF(4, ("request: into extra buffer\n"));
360		if (seqno == MAXSEQ)
361			return NULL;
362		else {
363			usingexbuf = 1;
364			return exbuf;
365		}
366	}
367}
368
369
370/*
371 * flush_pkt - we're done, return remaining information.
372 */
373static void
374flush_pkt(void)
375{
376	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
377	/*
378	 * Must send the last packet.  If nothing in here and nothing
379	 * has been sent, send an error saying no data to be found.
380	 */
381	if (seqno == 0 && nitems == 0)
382		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
383			INFO_ERR_NODATA);
384	else {
385		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
386		rpkt.auth_seq = AUTH_SEQ(0, seqno);
387		rpkt.err_nitems = htons((u_short)nitems);
388		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
389			RESP_HEADER_SIZE+databytes);
390		numresppkts++;
391	}
392}
393
394
395
396/*
397 * Given a buffer, return the packet mode
398 */
399int
400get_packet_mode(struct recvbuf *rbufp)
401{
402	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
403	return (INFO_MODE(inpkt->rm_vn_mode));
404}
405
406
407/*
408 * process_private - process private mode (7) packets
409 */
410void
411process_private(
412	struct recvbuf *rbufp,
413	int mod_okay
414	)
415{
416	static u_long quiet_until;
417	struct req_pkt *inpkt;
418	struct req_pkt_tail *tailinpkt;
419	sockaddr_u *srcadr;
420	endpt *inter;
421	const struct req_proc *proc;
422	int ec;
423	short temp_size;
424	l_fp ftmp;
425	double dtemp;
426	size_t recv_len;
427	size_t noslop_len;
428	size_t mac_len;
429
430	/*
431	 * Initialize pointers, for convenience
432	 */
433	recv_len = rbufp->recv_length;
434	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
435	srcadr = &rbufp->recv_srcadr;
436	inter = rbufp->dstadr;
437
438	DPRINTF(3, ("process_private: impl %d req %d\n",
439		    inpkt->implementation, inpkt->request));
440
441	/*
442	 * Do some sanity checks on the packet.  Return a format
443	 * error if it fails.
444	 */
445	ec = 0;
446	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
447	    || (++ec, ISMORE(inpkt->rm_vn_mode))
448	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
449	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
450	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
451	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
452	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
453	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
454		) {
455		NLOG(NLOG_SYSEVENT)
456			if (current_time >= quiet_until) {
457				msyslog(LOG_ERR,
458					"process_private: drop test %d"
459					" failed, pkt from %s",
460					ec, stoa(srcadr));
461				quiet_until = current_time + 60;
462			}
463		return;
464	}
465
466	reqver = INFO_VERSION(inpkt->rm_vn_mode);
467
468	/*
469	 * Get the appropriate procedure list to search.
470	 */
471	if (inpkt->implementation == IMPL_UNIV)
472		proc = univ_codes;
473	else if ((inpkt->implementation == IMPL_XNTPD) ||
474		 (inpkt->implementation == IMPL_XNTPD_OLD))
475		proc = ntp_codes;
476	else {
477		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
478		return;
479	}
480
481	/*
482	 * Search the list for the request codes.  If it isn't one
483	 * we know, return an error.
484	 */
485	while (proc->request_code != NO_REQUEST) {
486		if (proc->request_code == (short) inpkt->request)
487			break;
488		proc++;
489	}
490	if (proc->request_code == NO_REQUEST) {
491		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
492		return;
493	}
494
495	DPRINTF(4, ("found request in tables\n"));
496
497	/*
498	 * If we need data, check to see if we have some.  If we
499	 * don't, check to see that there is none (picky, picky).
500	 */
501
502	/* This part is a bit tricky, we want to be sure that the size
503	 * returned is either the old or the new size.  We also can find
504	 * out if the client can accept both types of messages this way.
505	 *
506	 * Handle the exception of REQ_CONFIG. It can have two data sizes.
507	 */
508	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
509	if ((temp_size != proc->sizeofitem &&
510	     temp_size != proc->v6_sizeofitem) &&
511	    !(inpkt->implementation == IMPL_XNTPD &&
512	      inpkt->request == REQ_CONFIG &&
513	      temp_size == sizeof(struct old_conf_peer))) {
514		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
515			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
516		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
517		return;
518	}
519	if ((proc->sizeofitem != 0) &&
520	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
521	     (recv_len - REQ_LEN_HDR))) {
522		DPRINTF(3, ("process_private: not enough data\n"));
523		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
524		return;
525	}
526
527	switch (inpkt->implementation) {
528	case IMPL_XNTPD:
529		client_v6_capable = 1;
530		break;
531	case IMPL_XNTPD_OLD:
532		client_v6_capable = 0;
533		break;
534	default:
535		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
536		return;
537	}
538
539	/*
540	 * If we need to authenticate, do so.  Note that an
541	 * authenticatable packet must include a mac field, must
542	 * have used key info_auth_keyid and must have included
543	 * a time stamp in the appropriate field.  The time stamp
544	 * must be within INFO_TS_MAXSKEW of the receive
545	 * time stamp.
546	 */
547	if (proc->needs_auth && sys_authenticate) {
548
549		if (recv_len < (REQ_LEN_HDR +
550		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
551		    INFO_NITEMS(inpkt->err_nitems)) +
552		    REQ_TAIL_MIN)) {
553			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
554			return;
555		}
556
557		/*
558		 * For 16-octet digests, regardless of itemsize and
559		 * nitems, authenticated requests are a fixed size
560		 * with the timestamp, key ID, and digest located
561		 * at the end of the packet.  Because the key ID
562		 * determining the digest size precedes the digest,
563		 * for larger digests the fixed size request scheme
564		 * is abandoned and the timestamp, key ID, and digest
565		 * are located relative to the start of the packet,
566		 * with the digest size determined by the packet size.
567		 */
568		noslop_len = REQ_LEN_HDR
569			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
570			       INFO_NITEMS(inpkt->err_nitems)
571			     + sizeof(inpkt->tstamp);
572		/* 32-bit alignment */
573		noslop_len = (noslop_len + 3) & ~3;
574		if (recv_len > (noslop_len + MAX_MAC_LEN))
575			mac_len = 20;
576		else
577			mac_len = recv_len - noslop_len;
578
579		tailinpkt = (void *)((char *)inpkt + recv_len -
580			    (mac_len + sizeof(inpkt->tstamp)));
581
582		/*
583		 * If this guy is restricted from doing this, don't let
584		 * him.  If the wrong key was used, or packet doesn't
585		 * have mac, return.
586		 */
587		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
588		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
589			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
590				    INFO_IS_AUTH(inpkt->auth_seq),
591				    info_auth_keyid,
592				    ntohl(tailinpkt->keyid), (u_long)mac_len));
593#ifdef DEBUG
594			msyslog(LOG_DEBUG,
595				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
596				INFO_IS_AUTH(inpkt->auth_seq),
597				info_auth_keyid,
598				ntohl(tailinpkt->keyid), (u_long)mac_len);
599#endif
600			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
601			return;
602		}
603		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
604			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
605			msyslog(LOG_ERR,
606				"process_private: bad pkt length %zu",
607				recv_len);
608			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
609			return;
610		}
611		if (!mod_okay || !authhavekey(info_auth_keyid)) {
612			DPRINTF(5, ("failed auth mod_okay %d\n",
613				    mod_okay));
614#ifdef DEBUG
615			msyslog(LOG_DEBUG,
616				"process_private: failed auth mod_okay %d\n",
617				mod_okay);
618#endif
619			if (!mod_okay) {
620				sys_restricted++;
621			}
622			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
623			return;
624		}
625
626		/*
627		 * calculate absolute time difference between xmit time stamp
628		 * and receive time stamp.  If too large, too bad.
629		 */
630		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
631		L_SUB(&ftmp, &rbufp->recv_time);
632		LFPTOD(&ftmp, dtemp);
633		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
634			/*
635			 * He's a loser.  Tell him.
636			 */
637			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
638				    dtemp, INFO_TS_MAXSKEW));
639			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
640			return;
641		}
642
643		/*
644		 * So far so good.  See if decryption works out okay.
645		 */
646		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
647				 recv_len - mac_len, mac_len)) {
648			DPRINTF(5, ("authdecrypt failed\n"));
649			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
650			return;
651		}
652	}
653
654	DPRINTF(3, ("process_private: all okay, into handler\n"));
655	/*
656	 * Packet is okay.  Call the handler to send him data.
657	 */
658	(proc->handler)(srcadr, inter, inpkt);
659}
660
661
662/*
663 * list_peers - send a list of the peers
664 */
665static void
666list_peers(
667	sockaddr_u *srcadr,
668	endpt *inter,
669	struct req_pkt *inpkt
670	)
671{
672	struct info_peer_list *	ip;
673	const struct peer *	pp;
674
675	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
676	    v6sizeof(struct info_peer_list));
677	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
678		if (IS_IPV6(&pp->srcadr)) {
679			if (!client_v6_capable)
680				continue;
681			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
682			ip->v6_flag = 1;
683		} else {
684			ip->addr = NSRCADR(&pp->srcadr);
685			if (client_v6_capable)
686				ip->v6_flag = 0;
687		}
688
689		ip->port = NSRCPORT(&pp->srcadr);
690		ip->hmode = pp->hmode;
691		ip->flags = 0;
692		if (pp->flags & FLAG_CONFIG)
693			ip->flags |= INFO_FLAG_CONFIG;
694		if (pp == sys_peer)
695			ip->flags |= INFO_FLAG_SYSPEER;
696		if (pp->status == CTL_PST_SEL_SYNCCAND)
697			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
698		if (pp->status >= CTL_PST_SEL_SYSPEER)
699			ip->flags |= INFO_FLAG_SHORTLIST;
700		ip = (struct info_peer_list *)more_pkt();
701	}	/* for pp */
702
703	flush_pkt();
704}
705
706
707/*
708 * list_peers_sum - return extended peer list
709 */
710static void
711list_peers_sum(
712	sockaddr_u *srcadr,
713	endpt *inter,
714	struct req_pkt *inpkt
715	)
716{
717	struct info_peer_summary *	ips;
718	const struct peer *		pp;
719	l_fp 				ltmp;
720
721	DPRINTF(3, ("wants peer list summary\n"));
722
723	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
724	    v6sizeof(struct info_peer_summary));
725	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
726		DPRINTF(4, ("sum: got one\n"));
727		/*
728		 * Be careful here not to return v6 peers when we
729		 * want only v4.
730		 */
731		if (IS_IPV6(&pp->srcadr)) {
732			if (!client_v6_capable)
733				continue;
734			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
735			ips->v6_flag = 1;
736			if (pp->dstadr)
737				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
738			else
739				ZERO(ips->dstadr6);
740		} else {
741			ips->srcadr = NSRCADR(&pp->srcadr);
742			if (client_v6_capable)
743				ips->v6_flag = 0;
744
745			if (pp->dstadr) {
746				if (!pp->processed)
747					ips->dstadr = NSRCADR(&pp->dstadr->sin);
748				else {
749					if (MDF_BCAST == pp->cast_flags)
750						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
751					else if (pp->cast_flags) {
752						ips->dstadr = NSRCADR(&pp->dstadr->sin);
753						if (!ips->dstadr)
754							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
755					}
756				}
757			} else {
758				ips->dstadr = 0;
759			}
760		}
761
762		ips->srcport = NSRCPORT(&pp->srcadr);
763		ips->stratum = pp->stratum;
764		ips->hpoll = pp->hpoll;
765		ips->ppoll = pp->ppoll;
766		ips->reach = pp->reach;
767		ips->flags = 0;
768		if (pp == sys_peer)
769			ips->flags |= INFO_FLAG_SYSPEER;
770		if (pp->flags & FLAG_CONFIG)
771			ips->flags |= INFO_FLAG_CONFIG;
772		if (pp->flags & FLAG_REFCLOCK)
773			ips->flags |= INFO_FLAG_REFCLOCK;
774		if (pp->flags & FLAG_PREFER)
775			ips->flags |= INFO_FLAG_PREFER;
776		if (pp->flags & FLAG_BURST)
777			ips->flags |= INFO_FLAG_BURST;
778		if (pp->status == CTL_PST_SEL_SYNCCAND)
779			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
780		if (pp->status >= CTL_PST_SEL_SYSPEER)
781			ips->flags |= INFO_FLAG_SHORTLIST;
782		ips->hmode = pp->hmode;
783		ips->delay = HTONS_FP(DTOFP(pp->delay));
784		DTOLFP(pp->offset, &ltmp);
785		HTONL_FP(&ltmp, &ips->offset);
786		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
787
788		ips = (struct info_peer_summary *)more_pkt();
789	}	/* for pp */
790
791	flush_pkt();
792}
793
794
795/*
796 * peer_info - send information for one or more peers
797 */
798static void
799peer_info (
800	sockaddr_u *srcadr,
801	endpt *inter,
802	struct req_pkt *inpkt
803	)
804{
805	u_short			items;
806	size_t			item_sz;
807	char *			datap;
808	struct info_peer_list	ipl;
809	struct peer *		pp;
810	struct info_peer *	ip;
811	int			i;
812	int			j;
813	sockaddr_u		addr;
814	l_fp			ltmp;
815
816	items = INFO_NITEMS(inpkt->err_nitems);
817	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
818	datap = inpkt->u.data;
819	if (item_sz != sizeof(ipl)) {
820		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
821		return;
822	}
823	ip = prepare_pkt(srcadr, inter, inpkt,
824			 v6sizeof(struct info_peer));
825	while (items-- > 0 && ip != NULL) {
826		ZERO(ipl);
827		memcpy(&ipl, datap, item_sz);
828		ZERO_SOCK(&addr);
829		NSRCPORT(&addr) = ipl.port;
830		if (client_v6_capable && ipl.v6_flag) {
831			AF(&addr) = AF_INET6;
832			SOCK_ADDR6(&addr) = ipl.addr6;
833		} else {
834			AF(&addr) = AF_INET;
835			NSRCADR(&addr) = ipl.addr;
836		}
837#ifdef ISC_PLATFORM_HAVESALEN
838		addr.sa.sa_len = SOCKLEN(&addr);
839#endif
840		datap += item_sz;
841
842		pp = findexistingpeer(&addr, NULL, NULL, -1, 0);
843		if (NULL == pp)
844			continue;
845		if (IS_IPV6(srcadr)) {
846			if (pp->dstadr)
847				ip->dstadr6 =
848				    (MDF_BCAST == pp->cast_flags)
849					? SOCK_ADDR6(&pp->dstadr->bcast)
850					: SOCK_ADDR6(&pp->dstadr->sin);
851			else
852				ZERO(ip->dstadr6);
853
854			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
855			ip->v6_flag = 1;
856		} else {
857			if (pp->dstadr) {
858				if (!pp->processed)
859					ip->dstadr = NSRCADR(&pp->dstadr->sin);
860				else {
861					if (MDF_BCAST == pp->cast_flags)
862						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
863					else if (pp->cast_flags) {
864						ip->dstadr = NSRCADR(&pp->dstadr->sin);
865						if (!ip->dstadr)
866							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
867					}
868				}
869			} else
870				ip->dstadr = 0;
871
872			ip->srcadr = NSRCADR(&pp->srcadr);
873			if (client_v6_capable)
874				ip->v6_flag = 0;
875		}
876		ip->srcport = NSRCPORT(&pp->srcadr);
877		ip->flags = 0;
878		if (pp == sys_peer)
879			ip->flags |= INFO_FLAG_SYSPEER;
880		if (pp->flags & FLAG_CONFIG)
881			ip->flags |= INFO_FLAG_CONFIG;
882		if (pp->flags & FLAG_REFCLOCK)
883			ip->flags |= INFO_FLAG_REFCLOCK;
884		if (pp->flags & FLAG_PREFER)
885			ip->flags |= INFO_FLAG_PREFER;
886		if (pp->flags & FLAG_BURST)
887			ip->flags |= INFO_FLAG_BURST;
888		if (pp->status == CTL_PST_SEL_SYNCCAND)
889			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
890		if (pp->status >= CTL_PST_SEL_SYSPEER)
891			ip->flags |= INFO_FLAG_SHORTLIST;
892		ip->leap = pp->leap;
893		ip->hmode = pp->hmode;
894		ip->keyid = pp->keyid;
895		ip->stratum = pp->stratum;
896		ip->ppoll = pp->ppoll;
897		ip->hpoll = pp->hpoll;
898		ip->precision = pp->precision;
899		ip->version = pp->version;
900		ip->reach = pp->reach;
901		ip->unreach = (u_char)pp->unreach;
902		ip->flash = (u_char)pp->flash;
903		ip->flash2 = (u_short)pp->flash;
904		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
905		ip->ttl = (u_char)pp->ttl;
906		ip->associd = htons(pp->associd);
907		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
908		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
909		ip->refid = pp->refid;
910		HTONL_FP(&pp->reftime, &ip->reftime);
911		HTONL_FP(&pp->aorg, &ip->org);
912		HTONL_FP(&pp->rec, &ip->rec);
913		HTONL_FP(&pp->xmt, &ip->xmt);
914		j = pp->filter_nextpt - 1;
915		for (i = 0; i < NTP_SHIFT; i++, j--) {
916			if (j < 0)
917				j = NTP_SHIFT-1;
918			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
919			DTOLFP(pp->filter_offset[j], &ltmp);
920			HTONL_FP(&ltmp, &ip->filtoffset[i]);
921			ip->order[i] = (u_char)((pp->filter_nextpt +
922						 NTP_SHIFT - 1) -
923						pp->filter_order[i]);
924			if (ip->order[i] >= NTP_SHIFT)
925				ip->order[i] -= NTP_SHIFT;
926		}
927		DTOLFP(pp->offset, &ltmp);
928		HTONL_FP(&ltmp, &ip->offset);
929		ip->delay = HTONS_FP(DTOFP(pp->delay));
930		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
931		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
932		ip = more_pkt();
933	}
934	flush_pkt();
935}
936
937
938/*
939 * peer_stats - send statistics for one or more peers
940 */
941static void
942peer_stats (
943	sockaddr_u *srcadr,
944	endpt *inter,
945	struct req_pkt *inpkt
946	)
947{
948	u_short			items;
949	size_t			item_sz;
950	char *			datap;
951	struct info_peer_list	ipl;
952	struct peer *		pp;
953	struct info_peer_stats *ip;
954	sockaddr_u addr;
955
956	DPRINTF(1, ("peer_stats: called\n"));
957	items = INFO_NITEMS(inpkt->err_nitems);
958	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
959	datap = inpkt->u.data;
960	if (item_sz > sizeof(ipl)) {
961		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
962		return;
963	}
964	ip = prepare_pkt(srcadr, inter, inpkt,
965			 v6sizeof(struct info_peer_stats));
966	while (items-- > 0 && ip != NULL) {
967		ZERO(ipl);
968		memcpy(&ipl, datap, item_sz);
969		ZERO(addr);
970		NSRCPORT(&addr) = ipl.port;
971		if (client_v6_capable && ipl.v6_flag) {
972			AF(&addr) = AF_INET6;
973			SOCK_ADDR6(&addr) = ipl.addr6;
974		} else {
975			AF(&addr) = AF_INET;
976			NSRCADR(&addr) = ipl.addr;
977		}
978#ifdef ISC_PLATFORM_HAVESALEN
979		addr.sa.sa_len = SOCKLEN(&addr);
980#endif
981		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
982			    stoa(&addr), ipl.port, NSRCPORT(&addr)));
983
984		datap += item_sz;
985
986		pp = findexistingpeer(&addr, NULL, NULL, -1, 0);
987		if (NULL == pp)
988			continue;
989
990		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
991
992		if (IS_IPV4(&pp->srcadr)) {
993			if (pp->dstadr) {
994				if (!pp->processed)
995					ip->dstadr = NSRCADR(&pp->dstadr->sin);
996				else {
997					if (MDF_BCAST == pp->cast_flags)
998						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
999					else if (pp->cast_flags) {
1000						ip->dstadr = NSRCADR(&pp->dstadr->sin);
1001						if (!ip->dstadr)
1002							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1003					}
1004				}
1005			} else
1006				ip->dstadr = 0;
1007
1008			ip->srcadr = NSRCADR(&pp->srcadr);
1009			if (client_v6_capable)
1010				ip->v6_flag = 0;
1011		} else {
1012			if (pp->dstadr)
1013				ip->dstadr6 =
1014				    (MDF_BCAST == pp->cast_flags)
1015					? SOCK_ADDR6(&pp->dstadr->bcast)
1016					: SOCK_ADDR6(&pp->dstadr->sin);
1017			else
1018				ZERO(ip->dstadr6);
1019
1020			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1021			ip->v6_flag = 1;
1022		}
1023		ip->srcport = NSRCPORT(&pp->srcadr);
1024		ip->flags = 0;
1025		if (pp == sys_peer)
1026		    ip->flags |= INFO_FLAG_SYSPEER;
1027		if (pp->flags & FLAG_CONFIG)
1028		    ip->flags |= INFO_FLAG_CONFIG;
1029		if (pp->flags & FLAG_REFCLOCK)
1030		    ip->flags |= INFO_FLAG_REFCLOCK;
1031		if (pp->flags & FLAG_PREFER)
1032		    ip->flags |= INFO_FLAG_PREFER;
1033		if (pp->flags & FLAG_BURST)
1034		    ip->flags |= INFO_FLAG_BURST;
1035		if (pp->flags & FLAG_IBURST)
1036		    ip->flags |= INFO_FLAG_IBURST;
1037		if (pp->status == CTL_PST_SEL_SYNCCAND)
1038		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1039		if (pp->status >= CTL_PST_SEL_SYSPEER)
1040		    ip->flags |= INFO_FLAG_SHORTLIST;
1041		ip->flags = htons(ip->flags);
1042		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1043		ip->timetosend = htonl(pp->nextdate - current_time);
1044		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1045		ip->sent = htonl((u_int32)(pp->sent));
1046		ip->processed = htonl((u_int32)(pp->processed));
1047		ip->badauth = htonl((u_int32)(pp->badauth));
1048		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1049		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1050		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1051		ip->selbroken = htonl((u_int32)(pp->selbroken));
1052		ip->candidate = pp->status;
1053		ip = (struct info_peer_stats *)more_pkt();
1054	}
1055	flush_pkt();
1056}
1057
1058
1059/*
1060 * sys_info - return system info
1061 */
1062static void
1063sys_info(
1064	sockaddr_u *srcadr,
1065	endpt *inter,
1066	struct req_pkt *inpkt
1067	)
1068{
1069	register struct info_sys *is;
1070
1071	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1072	    v6sizeof(struct info_sys));
1073
1074	if (sys_peer) {
1075		if (IS_IPV4(&sys_peer->srcadr)) {
1076			is->peer = NSRCADR(&sys_peer->srcadr);
1077			if (client_v6_capable)
1078				is->v6_flag = 0;
1079		} else if (client_v6_capable) {
1080			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1081			is->v6_flag = 1;
1082		}
1083		is->peer_mode = sys_peer->hmode;
1084	} else {
1085		is->peer = 0;
1086		if (client_v6_capable) {
1087			is->v6_flag = 0;
1088		}
1089		is->peer_mode = 0;
1090	}
1091
1092	is->leap = sys_leap;
1093	is->stratum = sys_stratum;
1094	is->precision = sys_precision;
1095	is->rootdelay = htonl(DTOFP(sys_rootdelay));
1096	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1097	is->frequency = htonl(DTOFP(sys_jitter));
1098	is->stability = htonl(DTOUFP(clock_stability * 1e6));
1099	is->refid = sys_refid;
1100	HTONL_FP(&sys_reftime, &is->reftime);
1101
1102	is->poll = sys_poll;
1103
1104	is->flags = 0;
1105	if (sys_authenticate)
1106		is->flags |= INFO_FLAG_AUTHENTICATE;
1107	if (sys_bclient)
1108		is->flags |= INFO_FLAG_BCLIENT;
1109#ifdef REFCLOCK
1110	if (cal_enable)
1111		is->flags |= INFO_FLAG_CAL;
1112#endif /* REFCLOCK */
1113	if (kern_enable)
1114		is->flags |= INFO_FLAG_KERNEL;
1115	if (mon_enabled != MON_OFF)
1116		is->flags |= INFO_FLAG_MONITOR;
1117	if (ntp_enable)
1118		is->flags |= INFO_FLAG_NTP;
1119	if (hardpps_enable)
1120		is->flags |= INFO_FLAG_PPS_SYNC;
1121	if (stats_control)
1122		is->flags |= INFO_FLAG_FILEGEN;
1123	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1124	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1125	(void) more_pkt();
1126	flush_pkt();
1127}
1128
1129
1130/*
1131 * sys_stats - return system statistics
1132 */
1133static void
1134sys_stats(
1135	sockaddr_u *srcadr,
1136	endpt *inter,
1137	struct req_pkt *inpkt
1138	)
1139{
1140	register struct info_sys_stats *ss;
1141
1142	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1143		sizeof(struct info_sys_stats));
1144	ss->timeup = htonl((u_int32)current_time);
1145	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1146	ss->denied = htonl((u_int32)sys_restricted);
1147	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1148	ss->newversionpkt = htonl((u_int32)sys_newversion);
1149	ss->unknownversion = htonl((u_int32)sys_declined);
1150	ss->badlength = htonl((u_int32)sys_badlength);
1151	ss->processed = htonl((u_int32)sys_processed);
1152	ss->badauth = htonl((u_int32)sys_badauth);
1153	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1154	ss->received = htonl((u_int32)sys_received);
1155	(void) more_pkt();
1156	flush_pkt();
1157}
1158
1159
1160/*
1161 * mem_stats - return memory statistics
1162 */
1163static void
1164mem_stats(
1165	sockaddr_u *srcadr,
1166	endpt *inter,
1167	struct req_pkt *inpkt
1168	)
1169{
1170	register struct info_mem_stats *ms;
1171	register int i;
1172
1173	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1174						  sizeof(struct info_mem_stats));
1175
1176	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1177	ms->totalpeermem = htons((u_short)total_peer_structs);
1178	ms->freepeermem = htons((u_short)peer_free_count);
1179	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1180	ms->allocations = htonl((u_int32)peer_allocations);
1181	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1182
1183	for (i = 0; i < NTP_HASH_SIZE; i++)
1184		ms->hashcount[i] = (u_char)
1185		    max((u_int)peer_hash_count[i], UCHAR_MAX);
1186
1187	(void) more_pkt();
1188	flush_pkt();
1189}
1190
1191
1192/*
1193 * io_stats - return io statistics
1194 */
1195static void
1196io_stats(
1197	sockaddr_u *srcadr,
1198	endpt *inter,
1199	struct req_pkt *inpkt
1200	)
1201{
1202	struct info_io_stats *io;
1203
1204	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1205						 sizeof(struct info_io_stats));
1206
1207	io->timereset = htonl((u_int32)(current_time - io_timereset));
1208	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1209	io->freerecvbufs = htons((u_short) free_recvbuffs());
1210	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1211	io->lowwater = htons((u_short) lowater_additions());
1212	io->dropped = htonl((u_int32)packets_dropped);
1213	io->ignored = htonl((u_int32)packets_ignored);
1214	io->received = htonl((u_int32)packets_received);
1215	io->sent = htonl((u_int32)packets_sent);
1216	io->notsent = htonl((u_int32)packets_notsent);
1217	io->interrupts = htonl((u_int32)handler_calls);
1218	io->int_received = htonl((u_int32)handler_pkts);
1219
1220	(void) more_pkt();
1221	flush_pkt();
1222}
1223
1224
1225/*
1226 * timer_stats - return timer statistics
1227 */
1228static void
1229timer_stats(
1230	sockaddr_u *		srcadr,
1231	endpt *			inter,
1232	struct req_pkt *	inpkt
1233	)
1234{
1235	struct info_timer_stats *	ts;
1236	u_long				sincereset;
1237
1238	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1239						    inpkt, sizeof(*ts));
1240
1241	sincereset = current_time - timer_timereset;
1242	ts->timereset = htonl((u_int32)sincereset);
1243	ts->alarms = ts->timereset;
1244	ts->overflows = htonl((u_int32)alarm_overflow);
1245	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1246
1247	(void) more_pkt();
1248	flush_pkt();
1249}
1250
1251
1252/*
1253 * loop_info - return the current state of the loop filter
1254 */
1255static void
1256loop_info(
1257	sockaddr_u *srcadr,
1258	endpt *inter,
1259	struct req_pkt *inpkt
1260	)
1261{
1262	struct info_loop *li;
1263	l_fp ltmp;
1264
1265	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1266	    sizeof(struct info_loop));
1267
1268	DTOLFP(last_offset, &ltmp);
1269	HTONL_FP(&ltmp, &li->last_offset);
1270	DTOLFP(drift_comp * 1e6, &ltmp);
1271	HTONL_FP(&ltmp, &li->drift_comp);
1272	li->compliance = htonl((u_int32)(tc_counter));
1273	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1274
1275	(void) more_pkt();
1276	flush_pkt();
1277}
1278
1279
1280/*
1281 * do_conf - add a peer to the configuration list
1282 */
1283static void
1284do_conf(
1285	sockaddr_u *srcadr,
1286	endpt *inter,
1287	struct req_pkt *inpkt
1288	)
1289{
1290	u_short			items;
1291	size_t			item_sz;
1292	u_int			fl;
1293	char *			datap;
1294	struct conf_peer	temp_cp;
1295	sockaddr_u		peeraddr;
1296
1297	/*
1298	 * Do a check of everything to see that it looks
1299	 * okay.  If not, complain about it.  Note we are
1300	 * very picky here.
1301	 */
1302	items = INFO_NITEMS(inpkt->err_nitems);
1303	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1304	datap = inpkt->u.data;
1305	if (item_sz > sizeof(temp_cp)) {
1306		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1307		return;
1308	}
1309
1310	while (items-- > 0) {
1311		ZERO(temp_cp);
1312		memcpy(&temp_cp, datap, item_sz);
1313		ZERO_SOCK(&peeraddr);
1314
1315		fl = 0;
1316		if (temp_cp.flags & CONF_FLAG_PREFER)
1317			fl |= FLAG_PREFER;
1318		if (temp_cp.flags & CONF_FLAG_BURST)
1319			fl |= FLAG_BURST;
1320		if (temp_cp.flags & CONF_FLAG_IBURST)
1321			fl |= FLAG_IBURST;
1322#ifdef AUTOKEY
1323		if (temp_cp.flags & CONF_FLAG_SKEY)
1324			fl |= FLAG_SKEY;
1325#endif	/* AUTOKEY */
1326		if (client_v6_capable && temp_cp.v6_flag) {
1327			AF(&peeraddr) = AF_INET6;
1328			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1329		} else {
1330			AF(&peeraddr) = AF_INET;
1331			NSRCADR(&peeraddr) = temp_cp.peeraddr;
1332			/*
1333			 * Make sure the address is valid
1334			 */
1335			if (!ISREFCLOCKADR(&peeraddr) &&
1336			    ISBADADR(&peeraddr)) {
1337				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1338				return;
1339			}
1340
1341		}
1342		NSRCPORT(&peeraddr) = htons(NTP_PORT);
1343#ifdef ISC_PLATFORM_HAVESALEN
1344		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1345#endif
1346
1347		/* check mode value: 0 <= hmode <= 6
1348		 *
1349		 * There's no good global define for that limit, and
1350		 * using a magic define is as good (or bad, actually) as
1351		 * a magic number. So we use the highest possible peer
1352		 * mode, and that is MODE_BCLIENT.
1353		 *
1354		 * [Bug 3009] claims that a problem occurs for hmode > 7,
1355		 * but the code in ntp_peer.c indicates trouble for any
1356		 * hmode > 6 ( --> MODE_BCLIENT).
1357		 */
1358		if (temp_cp.hmode > MODE_BCLIENT) {
1359			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1360			return;
1361		}
1362
1363		/* Any more checks on the values? Unchecked at this
1364		 * point:
1365		 *   - version
1366		 *   - ttl
1367		 *   - keyid
1368		 *
1369		 *   - minpoll/maxpoll, but they are treated properly
1370		 *     for all cases internally. Checking not necessary.
1371		 */
1372
1373		/* finally create the peer */
1374		if (peer_config(&peeraddr, NULL, NULL,
1375		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1376		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1377		    NULL) == 0)
1378		{
1379			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1380			return;
1381		}
1382
1383		datap += item_sz;
1384	}
1385	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1386}
1387
1388
1389/*
1390 * do_unconf - remove a peer from the configuration list
1391 */
1392static void
1393do_unconf(
1394	sockaddr_u *	srcadr,
1395	endpt *		inter,
1396	struct req_pkt *inpkt
1397	)
1398{
1399	u_short			items;
1400	size_t			item_sz;
1401	char *			datap;
1402	struct conf_unpeer	temp_cp;
1403	struct peer *		p;
1404	sockaddr_u		peeraddr;
1405	int			loops;
1406
1407	/*
1408	 * This is a bit unstructured, but I like to be careful.
1409	 * We check to see that every peer exists and is actually
1410	 * configured.  If so, we remove them.  If not, we return
1411	 * an error.
1412	 *
1413	 * [Bug 3011] Even if we checked all peers given in the request
1414	 * in a dry run, there's still a chance that the caller played
1415	 * unfair and gave the same peer multiple times. So we still
1416	 * have to be prepared for nasty surprises in the second run ;)
1417	 */
1418
1419	/* basic consistency checks */
1420	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1421	if (item_sz > sizeof(temp_cp)) {
1422		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1423		return;
1424	}
1425
1426	/* now do two runs: first a dry run, then a busy one */
1427	for (loops = 0; loops != 2; ++loops) {
1428		items = INFO_NITEMS(inpkt->err_nitems);
1429		datap = inpkt->u.data;
1430		while (items-- > 0) {
1431			/* copy from request to local */
1432			ZERO(temp_cp);
1433			memcpy(&temp_cp, datap, item_sz);
1434			/* get address structure */
1435			ZERO_SOCK(&peeraddr);
1436			if (client_v6_capable && temp_cp.v6_flag) {
1437				AF(&peeraddr) = AF_INET6;
1438				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1439			} else {
1440				AF(&peeraddr) = AF_INET;
1441				NSRCADR(&peeraddr) = temp_cp.peeraddr;
1442			}
1443			SET_PORT(&peeraddr, NTP_PORT);
1444#ifdef ISC_PLATFORM_HAVESALEN
1445			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1446#endif
1447			DPRINTF(1, ("searching for %s\n",
1448				    stoa(&peeraddr)));
1449
1450			/* search for matching configred(!) peer */
1451			p = NULL;
1452			do {
1453				p = findexistingpeer(
1454					&peeraddr, NULL, p, -1, 0);
1455			} while (p && !(FLAG_CONFIG & p->flags));
1456
1457			if (!loops && !p) {
1458				/* Item not found in dry run -- bail! */
1459				req_ack(srcadr, inter, inpkt,
1460					INFO_ERR_NODATA);
1461				return;
1462			} else if (loops && p) {
1463				/* Item found in busy run -- remove! */
1464				peer_clear(p, "GONE");
1465				unpeer(p);
1466			}
1467			datap += item_sz;
1468		}
1469	}
1470
1471	/* report success */
1472	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1473}
1474
1475
1476/*
1477 * set_sys_flag - set system flags
1478 */
1479static void
1480set_sys_flag(
1481	sockaddr_u *srcadr,
1482	endpt *inter,
1483	struct req_pkt *inpkt
1484	)
1485{
1486	setclr_flags(srcadr, inter, inpkt, 1);
1487}
1488
1489
1490/*
1491 * clr_sys_flag - clear system flags
1492 */
1493static void
1494clr_sys_flag(
1495	sockaddr_u *srcadr,
1496	endpt *inter,
1497	struct req_pkt *inpkt
1498	)
1499{
1500	setclr_flags(srcadr, inter, inpkt, 0);
1501}
1502
1503
1504/*
1505 * setclr_flags - do the grunge work of flag setting/clearing
1506 */
1507static void
1508setclr_flags(
1509	sockaddr_u *srcadr,
1510	endpt *inter,
1511	struct req_pkt *inpkt,
1512	u_long set
1513	)
1514{
1515	struct conf_sys_flags *sf;
1516	u_int32 flags;
1517
1518	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1519		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1520		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1521		return;
1522	}
1523
1524	sf = (struct conf_sys_flags *)&inpkt->u;
1525	flags = ntohl(sf->flags);
1526
1527	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1528		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1529		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1530		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1531			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1532				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1533				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1534				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1535		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1536		return;
1537	}
1538
1539	if (flags & SYS_FLAG_BCLIENT)
1540		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1541	if (flags & SYS_FLAG_PPS)
1542		proto_config(PROTO_PPS, set, 0., NULL);
1543	if (flags & SYS_FLAG_NTP)
1544		proto_config(PROTO_NTP, set, 0., NULL);
1545	if (flags & SYS_FLAG_KERNEL)
1546		proto_config(PROTO_KERNEL, set, 0., NULL);
1547	if (flags & SYS_FLAG_MONITOR)
1548		proto_config(PROTO_MONITOR, set, 0., NULL);
1549	if (flags & SYS_FLAG_FILEGEN)
1550		proto_config(PROTO_FILEGEN, set, 0., NULL);
1551	if (flags & SYS_FLAG_AUTH)
1552		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1553	if (flags & SYS_FLAG_CAL)
1554		proto_config(PROTO_CAL, set, 0., NULL);
1555	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1556}
1557
1558/* There have been some issues with the restrict list processing,
1559 * ranging from problems with deep recursion (resulting in stack
1560 * overflows) and overfull reply buffers.
1561 *
1562 * To avoid this trouble the list reversal is done iteratively using a
1563 * scratch pad.
1564 */
1565typedef struct RestrictStack RestrictStackT;
1566struct RestrictStack {
1567	RestrictStackT   *link;
1568	size_t            fcnt;
1569	const restrict_u *pres[63];
1570};
1571
1572static size_t
1573getStackSheetSize(
1574	RestrictStackT *sp
1575	)
1576{
1577	if (sp)
1578		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1579	return 0u;
1580}
1581
1582static int/*BOOL*/
1583pushRestriction(
1584	RestrictStackT  **spp,
1585	const restrict_u *ptr
1586	)
1587{
1588	RestrictStackT *sp;
1589
1590	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1591		/* need another sheet in the scratch pad */
1592		sp = emalloc(sizeof(*sp));
1593		sp->link = *spp;
1594		sp->fcnt = getStackSheetSize(sp);
1595		*spp = sp;
1596	}
1597	sp->pres[--sp->fcnt] = ptr;
1598	return TRUE;
1599}
1600
1601static int/*BOOL*/
1602popRestriction(
1603	RestrictStackT   **spp,
1604	const restrict_u **opp
1605	)
1606{
1607	RestrictStackT *sp;
1608
1609	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1610		return FALSE;
1611
1612	*opp = sp->pres[sp->fcnt++];
1613	if (sp->fcnt >= getStackSheetSize(sp)) {
1614		/* discard sheet from scratch pad */
1615		*spp = sp->link;
1616		free(sp);
1617	}
1618	return TRUE;
1619}
1620
1621static void
1622flushRestrictionStack(
1623	RestrictStackT **spp
1624	)
1625{
1626	RestrictStackT *sp;
1627
1628	while (NULL != (sp = *spp)) {
1629		*spp = sp->link;
1630		free(sp);
1631	}
1632}
1633
1634/*
1635 * list_restrict4 - iterative helper for list_restrict dumps IPv4
1636 *		    restriction list in reverse order.
1637 */
1638static void
1639list_restrict4(
1640	const restrict_u *	res,
1641	struct info_restrict **	ppir
1642	)
1643{
1644	RestrictStackT *	rpad;
1645	struct info_restrict *	pir;
1646
1647	pir = *ppir;
1648	for (rpad = NULL; res; res = res->link)
1649		if (!pushRestriction(&rpad, res))
1650			break;
1651
1652	while (pir && popRestriction(&rpad, &res)) {
1653		pir->addr = htonl(res->u.v4.addr);
1654		if (client_v6_capable)
1655			pir->v6_flag = 0;
1656		pir->mask = htonl(res->u.v4.mask);
1657		pir->count = htonl(res->count);
1658		pir->flags = htons(res->flags);
1659		pir->mflags = htons(res->mflags);
1660		pir = (struct info_restrict *)more_pkt();
1661	}
1662	flushRestrictionStack(&rpad);
1663	*ppir = pir;
1664}
1665
1666/*
1667 * list_restrict6 - iterative helper for list_restrict dumps IPv6
1668 *		    restriction list in reverse order.
1669 */
1670static void
1671list_restrict6(
1672	const restrict_u *	res,
1673	struct info_restrict **	ppir
1674	)
1675{
1676	RestrictStackT *	rpad;
1677	struct info_restrict *	pir;
1678
1679	pir = *ppir;
1680	for (rpad = NULL; res; res = res->link)
1681		if (!pushRestriction(&rpad, res))
1682			break;
1683
1684	while (pir && popRestriction(&rpad, &res)) {
1685		pir->addr6 = res->u.v6.addr;
1686		pir->mask6 = res->u.v6.mask;
1687		pir->v6_flag = 1;
1688		pir->count = htonl(res->count);
1689		pir->flags = htons(res->flags);
1690		pir->mflags = htons(res->mflags);
1691		pir = (struct info_restrict *)more_pkt();
1692	}
1693	flushRestrictionStack(&rpad);
1694	*ppir = pir;
1695}
1696
1697
1698/*
1699 * list_restrict - return the restrict list
1700 */
1701static void
1702list_restrict(
1703	sockaddr_u *srcadr,
1704	endpt *inter,
1705	struct req_pkt *inpkt
1706	)
1707{
1708	struct info_restrict *ir;
1709
1710	DPRINTF(3, ("wants restrict list summary\n"));
1711
1712	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1713	    v6sizeof(struct info_restrict));
1714
1715	/*
1716	 * The restriction lists are kept sorted in the reverse order
1717	 * than they were originally.  To preserve the output semantics,
1718	 * dump each list in reverse order. The workers take care of that.
1719	 */
1720	list_restrict4(restrictlist4, &ir);
1721	if (client_v6_capable)
1722		list_restrict6(restrictlist6, &ir);
1723	flush_pkt();
1724}
1725
1726
1727/*
1728 * do_resaddflags - add flags to a restrict entry (or create one)
1729 */
1730static void
1731do_resaddflags(
1732	sockaddr_u *srcadr,
1733	endpt *inter,
1734	struct req_pkt *inpkt
1735	)
1736{
1737	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
1738}
1739
1740
1741
1742/*
1743 * do_ressubflags - remove flags from a restrict entry
1744 */
1745static void
1746do_ressubflags(
1747	sockaddr_u *srcadr,
1748	endpt *inter,
1749	struct req_pkt *inpkt
1750	)
1751{
1752	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
1753}
1754
1755
1756/*
1757 * do_unrestrict - remove a restrict entry from the list
1758 */
1759static void
1760do_unrestrict(
1761	sockaddr_u *srcadr,
1762	endpt *inter,
1763	struct req_pkt *inpkt
1764	)
1765{
1766	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
1767}
1768
1769
1770/*
1771 * do_restrict - do the dirty stuff of dealing with restrictions
1772 */
1773static void
1774do_restrict(
1775	sockaddr_u *srcadr,
1776	endpt *inter,
1777	struct req_pkt *inpkt,
1778	int op
1779	)
1780{
1781	char *			datap;
1782	struct conf_restrict	cr;
1783	u_short			items;
1784	size_t			item_sz;
1785	sockaddr_u		matchaddr;
1786	sockaddr_u		matchmask;
1787	int			bad;
1788
1789	/*
1790	 * Do a check of the flags to make sure that only
1791	 * the NTPPORT flag is set, if any.  If not, complain
1792	 * about it.  Note we are very picky here.
1793	 */
1794	items = INFO_NITEMS(inpkt->err_nitems);
1795	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1796	datap = inpkt->u.data;
1797	if (item_sz > sizeof(cr)) {
1798		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1799		return;
1800	}
1801
1802	bad = FALSE;
1803	while (items-- > 0 && !bad) {
1804		memcpy(&cr, datap, item_sz);
1805		cr.flags = ntohs(cr.flags);
1806		cr.mflags = ntohs(cr.mflags);
1807		if (~RESM_NTPONLY & cr.mflags)
1808			bad |= 1;
1809		if (~RES_ALLFLAGS & cr.flags)
1810			bad |= 2;
1811		if (INADDR_ANY != cr.mask) {
1812			if (client_v6_capable && cr.v6_flag) {
1813				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1814					bad |= 4;
1815			} else {
1816				if (INADDR_ANY == cr.addr)
1817					bad |= 8;
1818			}
1819		}
1820		datap += item_sz;
1821	}
1822
1823	if (bad) {
1824		msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
1825		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1826		return;
1827	}
1828
1829	/*
1830	 * Looks okay, try it out.  Needs to reload data pointer and
1831	 * item counter. (Talos-CAN-0052)
1832	 */
1833	ZERO_SOCK(&matchaddr);
1834	ZERO_SOCK(&matchmask);
1835	items = INFO_NITEMS(inpkt->err_nitems);
1836	datap = inpkt->u.data;
1837
1838	while (items-- > 0) {
1839		memcpy(&cr, datap, item_sz);
1840		cr.flags = ntohs(cr.flags);
1841		cr.mflags = ntohs(cr.mflags);
1842		if (client_v6_capable && cr.v6_flag) {
1843			AF(&matchaddr) = AF_INET6;
1844			AF(&matchmask) = AF_INET6;
1845			SOCK_ADDR6(&matchaddr) = cr.addr6;
1846			SOCK_ADDR6(&matchmask) = cr.mask6;
1847		} else {
1848			AF(&matchaddr) = AF_INET;
1849			AF(&matchmask) = AF_INET;
1850			NSRCADR(&matchaddr) = cr.addr;
1851			NSRCADR(&matchmask) = cr.mask;
1852		}
1853		hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
1854			      cr.flags, 0);
1855		datap += item_sz;
1856	}
1857
1858	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1859}
1860
1861
1862/*
1863 * mon_getlist - return monitor data
1864 */
1865static void
1866mon_getlist(
1867	sockaddr_u *srcadr,
1868	endpt *inter,
1869	struct req_pkt *inpkt
1870	)
1871{
1872	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1873}
1874
1875
1876/*
1877 * Module entry points and the flags they correspond with
1878 */
1879struct reset_entry {
1880	int flag;		/* flag this corresponds to */
1881	void (*handler)(void);	/* routine to handle request */
1882};
1883
1884struct reset_entry reset_entries[] = {
1885	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
1886	{ RESET_FLAG_IO,	io_clr_stats },
1887	{ RESET_FLAG_SYS,	proto_clr_stats },
1888	{ RESET_FLAG_MEM,	peer_clr_stats },
1889	{ RESET_FLAG_TIMER,	timer_clr_stats },
1890	{ RESET_FLAG_AUTH,	reset_auth_stats },
1891	{ RESET_FLAG_CTL,	ctl_clr_stats },
1892	{ 0,			0 }
1893};
1894
1895/*
1896 * reset_stats - reset statistic counters here and there
1897 */
1898static void
1899reset_stats(
1900	sockaddr_u *srcadr,
1901	endpt *inter,
1902	struct req_pkt *inpkt
1903	)
1904{
1905	struct reset_flags *rflags;
1906	u_long flags;
1907	struct reset_entry *rent;
1908
1909	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1910		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1911		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1912		return;
1913	}
1914
1915	rflags = (struct reset_flags *)&inpkt->u;
1916	flags = ntohl(rflags->flags);
1917
1918	if (flags & ~RESET_ALLFLAGS) {
1919		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
1920			flags & ~RESET_ALLFLAGS);
1921		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1922		return;
1923	}
1924
1925	for (rent = reset_entries; rent->flag != 0; rent++) {
1926		if (flags & rent->flag)
1927			(*rent->handler)();
1928	}
1929	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1930}
1931
1932
1933/*
1934 * reset_peer - clear a peer's statistics
1935 */
1936static void
1937reset_peer(
1938	sockaddr_u *srcadr,
1939	endpt *inter,
1940	struct req_pkt *inpkt
1941	)
1942{
1943	u_short			items;
1944	size_t			item_sz;
1945	char *			datap;
1946	struct conf_unpeer	cp;
1947	struct peer *		p;
1948	sockaddr_u		peeraddr;
1949	int			bad;
1950
1951	/*
1952	 * We check first to see that every peer exists.  If not,
1953	 * we return an error.
1954	 */
1955
1956	items = INFO_NITEMS(inpkt->err_nitems);
1957	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1958	datap = inpkt->u.data;
1959	if (item_sz > sizeof(cp)) {
1960		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1961		return;
1962	}
1963
1964	bad = FALSE;
1965	while (items-- > 0 && !bad) {
1966		ZERO(cp);
1967		memcpy(&cp, datap, item_sz);
1968		ZERO_SOCK(&peeraddr);
1969		if (client_v6_capable && cp.v6_flag) {
1970			AF(&peeraddr) = AF_INET6;
1971			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
1972		} else {
1973			AF(&peeraddr) = AF_INET;
1974			NSRCADR(&peeraddr) = cp.peeraddr;
1975		}
1976
1977#ifdef ISC_PLATFORM_HAVESALEN
1978		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1979#endif
1980		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0);
1981		if (NULL == p)
1982			bad++;
1983		datap += item_sz;
1984	}
1985
1986	if (bad) {
1987		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1988		return;
1989	}
1990
1991	/*
1992	 * Now do it in earnest. Needs to reload data pointer and item
1993	 * counter. (Talos-CAN-0052)
1994	 */
1995
1996	items = INFO_NITEMS(inpkt->err_nitems);
1997	datap = inpkt->u.data;
1998	while (items-- > 0) {
1999		ZERO(cp);
2000		memcpy(&cp, datap, item_sz);
2001		ZERO_SOCK(&peeraddr);
2002		if (client_v6_capable && cp.v6_flag) {
2003			AF(&peeraddr) = AF_INET6;
2004			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2005		} else {
2006			AF(&peeraddr) = AF_INET;
2007			NSRCADR(&peeraddr) = cp.peeraddr;
2008		}
2009		SET_PORT(&peeraddr, 123);
2010#ifdef ISC_PLATFORM_HAVESALEN
2011		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2012#endif
2013		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0);
2014		while (p != NULL) {
2015			peer_reset(p);
2016			p = findexistingpeer(&peeraddr, NULL, p, -1, 0);
2017		}
2018		datap += item_sz;
2019	}
2020
2021	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2022}
2023
2024
2025/*
2026 * do_key_reread - reread the encryption key file
2027 */
2028static void
2029do_key_reread(
2030	sockaddr_u *srcadr,
2031	endpt *inter,
2032	struct req_pkt *inpkt
2033	)
2034{
2035	rereadkeys();
2036	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2037}
2038
2039
2040/*
2041 * trust_key - make one or more keys trusted
2042 */
2043static void
2044trust_key(
2045	sockaddr_u *srcadr,
2046	endpt *inter,
2047	struct req_pkt *inpkt
2048	)
2049{
2050	do_trustkey(srcadr, inter, inpkt, 1);
2051}
2052
2053
2054/*
2055 * untrust_key - make one or more keys untrusted
2056 */
2057static void
2058untrust_key(
2059	sockaddr_u *srcadr,
2060	endpt *inter,
2061	struct req_pkt *inpkt
2062	)
2063{
2064	do_trustkey(srcadr, inter, inpkt, 0);
2065}
2066
2067
2068/*
2069 * do_trustkey - make keys either trustable or untrustable
2070 */
2071static void
2072do_trustkey(
2073	sockaddr_u *srcadr,
2074	endpt *inter,
2075	struct req_pkt *inpkt,
2076	u_long trust
2077	)
2078{
2079	register uint32_t *kp;
2080	register int items;
2081
2082	items = INFO_NITEMS(inpkt->err_nitems);
2083	kp = (uint32_t *)&inpkt->u;
2084	while (items-- > 0) {
2085		authtrust(*kp, trust);
2086		kp++;
2087	}
2088
2089	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2090}
2091
2092
2093/*
2094 * get_auth_info - return some stats concerning the authentication module
2095 */
2096static void
2097get_auth_info(
2098	sockaddr_u *srcadr,
2099	endpt *inter,
2100	struct req_pkt *inpkt
2101	)
2102{
2103	register struct info_auth *ia;
2104
2105	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2106					     sizeof(struct info_auth));
2107
2108	ia->numkeys = htonl((u_int32)authnumkeys);
2109	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2110	ia->keylookups = htonl((u_int32)authkeylookups);
2111	ia->keynotfound = htonl((u_int32)authkeynotfound);
2112	ia->encryptions = htonl((u_int32)authencryptions);
2113	ia->decryptions = htonl((u_int32)authdecryptions);
2114	ia->keyuncached = htonl((u_int32)authkeyuncached);
2115	ia->expired = htonl((u_int32)authkeyexpired);
2116	ia->timereset = htonl((u_int32)(current_time - auth_timereset));
2117
2118	(void) more_pkt();
2119	flush_pkt();
2120}
2121
2122
2123
2124/*
2125 * reset_auth_stats - reset the authentication stat counters.  Done here
2126 *		      to keep ntp-isms out of the authentication module
2127 */
2128void
2129reset_auth_stats(void)
2130{
2131	authkeylookups = 0;
2132	authkeynotfound = 0;
2133	authencryptions = 0;
2134	authdecryptions = 0;
2135	authkeyuncached = 0;
2136	auth_timereset = current_time;
2137}
2138
2139
2140/*
2141 * req_get_traps - return information about current trap holders
2142 */
2143static void
2144req_get_traps(
2145	sockaddr_u *srcadr,
2146	endpt *inter,
2147	struct req_pkt *inpkt
2148	)
2149{
2150	struct info_trap *it;
2151	struct ctl_trap *tr;
2152	size_t i;
2153
2154	if (num_ctl_traps == 0) {
2155		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2156		return;
2157	}
2158
2159	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
2160	    v6sizeof(struct info_trap));
2161
2162	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
2163		if (tr->tr_flags & TRAP_INUSE) {
2164			if (IS_IPV4(&tr->tr_addr)) {
2165				if (tr->tr_localaddr == any_interface)
2166					it->local_address = 0;
2167				else
2168					it->local_address
2169					    = NSRCADR(&tr->tr_localaddr->sin);
2170				it->trap_address = NSRCADR(&tr->tr_addr);
2171				if (client_v6_capable)
2172					it->v6_flag = 0;
2173			} else {
2174				if (!client_v6_capable)
2175					continue;
2176				it->local_address6
2177				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
2178				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
2179				it->v6_flag = 1;
2180			}
2181			it->trap_port = NSRCPORT(&tr->tr_addr);
2182			it->sequence = htons(tr->tr_sequence);
2183			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
2184			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
2185			it->resets = htonl((u_int32)tr->tr_resets);
2186			it->flags = htonl((u_int32)tr->tr_flags);
2187			it = (struct info_trap *)more_pkt();
2188		}
2189	}
2190	flush_pkt();
2191}
2192
2193
2194/*
2195 * req_set_trap - configure a trap
2196 */
2197static void
2198req_set_trap(
2199	sockaddr_u *srcadr,
2200	endpt *inter,
2201	struct req_pkt *inpkt
2202	)
2203{
2204	do_setclr_trap(srcadr, inter, inpkt, 1);
2205}
2206
2207
2208
2209/*
2210 * req_clr_trap - unconfigure a trap
2211 */
2212static void
2213req_clr_trap(
2214	sockaddr_u *srcadr,
2215	endpt *inter,
2216	struct req_pkt *inpkt
2217	)
2218{
2219	do_setclr_trap(srcadr, inter, inpkt, 0);
2220}
2221
2222
2223
2224/*
2225 * do_setclr_trap - do the grunge work of (un)configuring a trap
2226 */
2227static void
2228do_setclr_trap(
2229	sockaddr_u *srcadr,
2230	endpt *inter,
2231	struct req_pkt *inpkt,
2232	int set
2233	)
2234{
2235	register struct conf_trap *ct;
2236	register endpt *linter;
2237	int res;
2238	sockaddr_u laddr;
2239
2240	/*
2241	 * Prepare sockaddr
2242	 */
2243	ZERO_SOCK(&laddr);
2244	AF(&laddr) = AF(srcadr);
2245	SET_PORT(&laddr, NTP_PORT);
2246
2247	/*
2248	 * Restrict ourselves to one item only.  This eliminates
2249	 * the error reporting problem.
2250	 */
2251	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2252		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
2253		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2254		return;
2255	}
2256	ct = (struct conf_trap *)&inpkt->u;
2257
2258	/*
2259	 * Look for the local interface.  If none, use the default.
2260	 */
2261	if (ct->local_address == 0) {
2262		linter = any_interface;
2263	} else {
2264		if (IS_IPV4(&laddr))
2265			NSRCADR(&laddr) = ct->local_address;
2266		else
2267			SOCK_ADDR6(&laddr) = ct->local_address6;
2268		linter = findinterface(&laddr);
2269		if (NULL == linter) {
2270			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2271			return;
2272		}
2273	}
2274
2275	if (IS_IPV4(&laddr))
2276		NSRCADR(&laddr) = ct->trap_address;
2277	else
2278		SOCK_ADDR6(&laddr) = ct->trap_address6;
2279	if (ct->trap_port)
2280		NSRCPORT(&laddr) = ct->trap_port;
2281	else
2282		SET_PORT(&laddr, TRAPPORT);
2283
2284	if (set) {
2285		res = ctlsettrap(&laddr, linter, 0,
2286				 INFO_VERSION(inpkt->rm_vn_mode));
2287	} else {
2288		res = ctlclrtrap(&laddr, linter, 0);
2289	}
2290
2291	if (!res) {
2292		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2293	} else {
2294		req_ack(srcadr, inter, inpkt, INFO_OKAY);
2295	}
2296	return;
2297}
2298
2299/*
2300 * Validate a request packet for a new request or control key:
2301 *  - only one item allowed
2302 *  - key must be valid (that is, known, and not in the autokey range)
2303 */
2304static void
2305set_keyid_checked(
2306	keyid_t        *into,
2307	const char     *what,
2308	sockaddr_u     *srcadr,
2309	endpt          *inter,
2310	struct req_pkt *inpkt
2311	)
2312{
2313	keyid_t *pkeyid;
2314	keyid_t  tmpkey;
2315
2316	/* restrict ourselves to one item only */
2317	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2318		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2319			what);
2320		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2321		return;
2322	}
2323
2324	/* plug the new key from the packet */
2325	pkeyid = (keyid_t *)&inpkt->u;
2326	tmpkey = ntohl(*pkeyid);
2327
2328	/* validate the new key id, claim data error on failure */
2329	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
2330		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2331			what, (long)tmpkey);
2332		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2333		return;
2334	}
2335
2336	/* if we arrive here, the key is good -- use it */
2337	*into = tmpkey;
2338	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2339}
2340
2341/*
2342 * set_request_keyid - set the keyid used to authenticate requests
2343 */
2344static void
2345set_request_keyid(
2346	sockaddr_u *srcadr,
2347	endpt *inter,
2348	struct req_pkt *inpkt
2349	)
2350{
2351	set_keyid_checked(&info_auth_keyid, "request",
2352			  srcadr, inter, inpkt);
2353}
2354
2355
2356
2357/*
2358 * set_control_keyid - set the keyid used to authenticate requests
2359 */
2360static void
2361set_control_keyid(
2362	sockaddr_u *srcadr,
2363	endpt *inter,
2364	struct req_pkt *inpkt
2365	)
2366{
2367	set_keyid_checked(&ctl_auth_keyid, "control",
2368			  srcadr, inter, inpkt);
2369}
2370
2371
2372
2373/*
2374 * get_ctl_stats - return some stats concerning the control message module
2375 */
2376static void
2377get_ctl_stats(
2378	sockaddr_u *srcadr,
2379	endpt *inter,
2380	struct req_pkt *inpkt
2381	)
2382{
2383	register struct info_control *ic;
2384
2385	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2386						sizeof(struct info_control));
2387
2388	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2389	ic->numctlreq = htonl((u_int32)numctlreq);
2390	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2391	ic->numctlresponses = htonl((u_int32)numctlresponses);
2392	ic->numctlfrags = htonl((u_int32)numctlfrags);
2393	ic->numctlerrors = htonl((u_int32)numctlerrors);
2394	ic->numctltooshort = htonl((u_int32)numctltooshort);
2395	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2396	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2397	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2398	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2399	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2400	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2401	ic->numctlbadop = htonl((u_int32)numctlbadop);
2402	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
2403
2404	(void) more_pkt();
2405	flush_pkt();
2406}
2407
2408
2409#ifdef KERNEL_PLL
2410/*
2411 * get_kernel_info - get kernel pll/pps information
2412 */
2413static void
2414get_kernel_info(
2415	sockaddr_u *srcadr,
2416	endpt *inter,
2417	struct req_pkt *inpkt
2418	)
2419{
2420	register struct info_kernel *ik;
2421	struct timex ntx;
2422
2423	if (!pll_control) {
2424		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2425		return;
2426	}
2427
2428	ZERO(ntx);
2429	if (ntp_adjtime(&ntx) < 0)
2430		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
2431	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
2432	    sizeof(struct info_kernel));
2433
2434	/*
2435	 * pll variables
2436	 */
2437	ik->offset = htonl((u_int32)ntx.offset);
2438	ik->freq = htonl((u_int32)ntx.freq);
2439	ik->maxerror = htonl((u_int32)ntx.maxerror);
2440	ik->esterror = htonl((u_int32)ntx.esterror);
2441	ik->status = htons(ntx.status);
2442	ik->constant = htonl((u_int32)ntx.constant);
2443	ik->precision = htonl((u_int32)ntx.precision);
2444	ik->tolerance = htonl((u_int32)ntx.tolerance);
2445
2446	/*
2447	 * pps variables
2448	 */
2449	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
2450	ik->jitter = htonl((u_int32)ntx.jitter);
2451	ik->shift = htons(ntx.shift);
2452	ik->stabil = htonl((u_int32)ntx.stabil);
2453	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
2454	ik->calcnt = htonl((u_int32)ntx.calcnt);
2455	ik->errcnt = htonl((u_int32)ntx.errcnt);
2456	ik->stbcnt = htonl((u_int32)ntx.stbcnt);
2457
2458	(void) more_pkt();
2459	flush_pkt();
2460}
2461#endif /* KERNEL_PLL */
2462
2463
2464#ifdef REFCLOCK
2465/*
2466 * get_clock_info - get info about a clock
2467 */
2468static void
2469get_clock_info(
2470	sockaddr_u *srcadr,
2471	endpt *inter,
2472	struct req_pkt *inpkt
2473	)
2474{
2475	register struct info_clock *ic;
2476	register u_int32 *clkaddr;
2477	register int items;
2478	struct refclockstat clock_stat;
2479	sockaddr_u addr;
2480	l_fp ltmp;
2481
2482	ZERO_SOCK(&addr);
2483	AF(&addr) = AF_INET;
2484#ifdef ISC_PLATFORM_HAVESALEN
2485	addr.sa.sa_len = SOCKLEN(&addr);
2486#endif
2487	SET_PORT(&addr, NTP_PORT);
2488	items = INFO_NITEMS(inpkt->err_nitems);
2489	clkaddr = &inpkt->u.u32[0];
2490
2491	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
2492					      sizeof(struct info_clock));
2493
2494	while (items-- > 0 && ic) {
2495		NSRCADR(&addr) = *clkaddr++;
2496		if (!ISREFCLOCKADR(&addr) || NULL ==
2497		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
2498			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2499			return;
2500		}
2501
2502		clock_stat.kv_list = (struct ctl_var *)0;
2503
2504		refclock_control(&addr, NULL, &clock_stat);
2505
2506		ic->clockadr = NSRCADR(&addr);
2507		ic->type = clock_stat.type;
2508		ic->flags = clock_stat.flags;
2509		ic->lastevent = clock_stat.lastevent;
2510		ic->currentstatus = clock_stat.currentstatus;
2511		ic->polls = htonl((u_int32)clock_stat.polls);
2512		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
2513		ic->badformat = htonl((u_int32)clock_stat.badformat);
2514		ic->baddata = htonl((u_int32)clock_stat.baddata);
2515		ic->timestarted = htonl((u_int32)clock_stat.timereset);
2516		DTOLFP(clock_stat.fudgetime1, &ltmp);
2517		HTONL_FP(&ltmp, &ic->fudgetime1);
2518		DTOLFP(clock_stat.fudgetime2, &ltmp);
2519		HTONL_FP(&ltmp, &ic->fudgetime2);
2520		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
2521		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
2522
2523		free_varlist(clock_stat.kv_list);
2524
2525		ic = (struct info_clock *)more_pkt();
2526	}
2527	flush_pkt();
2528}
2529
2530
2531
2532/*
2533 * set_clock_fudge - get a clock's fudge factors
2534 */
2535static void
2536set_clock_fudge(
2537	sockaddr_u *srcadr,
2538	endpt *inter,
2539	struct req_pkt *inpkt
2540	)
2541{
2542	register struct conf_fudge *cf;
2543	register int items;
2544	struct refclockstat clock_stat;
2545	sockaddr_u addr;
2546	l_fp ltmp;
2547
2548	ZERO(addr);
2549	ZERO(clock_stat);
2550	items = INFO_NITEMS(inpkt->err_nitems);
2551	cf = (struct conf_fudge *)&inpkt->u;
2552
2553	while (items-- > 0) {
2554		AF(&addr) = AF_INET;
2555		NSRCADR(&addr) = cf->clockadr;
2556#ifdef ISC_PLATFORM_HAVESALEN
2557		addr.sa.sa_len = SOCKLEN(&addr);
2558#endif
2559		SET_PORT(&addr, NTP_PORT);
2560		if (!ISREFCLOCKADR(&addr) || NULL ==
2561		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
2562			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2563			return;
2564		}
2565
2566		switch(ntohl(cf->which)) {
2567		    case FUDGE_TIME1:
2568			NTOHL_FP(&cf->fudgetime, &ltmp);
2569			LFPTOD(&ltmp, clock_stat.fudgetime1);
2570			clock_stat.haveflags = CLK_HAVETIME1;
2571			break;
2572		    case FUDGE_TIME2:
2573			NTOHL_FP(&cf->fudgetime, &ltmp);
2574			LFPTOD(&ltmp, clock_stat.fudgetime2);
2575			clock_stat.haveflags = CLK_HAVETIME2;
2576			break;
2577		    case FUDGE_VAL1:
2578			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
2579			clock_stat.haveflags = CLK_HAVEVAL1;
2580			break;
2581		    case FUDGE_VAL2:
2582			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
2583			clock_stat.haveflags = CLK_HAVEVAL2;
2584			break;
2585		    case FUDGE_FLAGS:
2586			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
2587			clock_stat.haveflags =
2588				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
2589			break;
2590		    default:
2591			msyslog(LOG_ERR, "set_clock_fudge: default!");
2592			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2593			return;
2594		}
2595
2596		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
2597	}
2598
2599	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2600}
2601#endif
2602
2603#ifdef REFCLOCK
2604/*
2605 * get_clkbug_info - get debugging info about a clock
2606 */
2607static void
2608get_clkbug_info(
2609	sockaddr_u *srcadr,
2610	endpt *inter,
2611	struct req_pkt *inpkt
2612	)
2613{
2614	register int i;
2615	register struct info_clkbug *ic;
2616	register u_int32 *clkaddr;
2617	register int items;
2618	struct refclockbug bug;
2619	sockaddr_u addr;
2620
2621	ZERO_SOCK(&addr);
2622	AF(&addr) = AF_INET;
2623#ifdef ISC_PLATFORM_HAVESALEN
2624	addr.sa.sa_len = SOCKLEN(&addr);
2625#endif
2626	SET_PORT(&addr, NTP_PORT);
2627	items = INFO_NITEMS(inpkt->err_nitems);
2628	clkaddr = (u_int32 *)&inpkt->u;
2629
2630	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
2631					       sizeof(struct info_clkbug));
2632
2633	while (items-- > 0 && ic) {
2634		NSRCADR(&addr) = *clkaddr++;
2635		if (!ISREFCLOCKADR(&addr) || NULL ==
2636		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
2637			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2638			return;
2639		}
2640
2641		ZERO(bug);
2642		refclock_buginfo(&addr, &bug);
2643		if (bug.nvalues == 0 && bug.ntimes == 0) {
2644			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2645			return;
2646		}
2647
2648		ic->clockadr = NSRCADR(&addr);
2649		i = bug.nvalues;
2650		if (i > NUMCBUGVALUES)
2651		    i = NUMCBUGVALUES;
2652		ic->nvalues = (u_char)i;
2653		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
2654		while (--i >= 0)
2655		    ic->values[i] = htonl(bug.values[i]);
2656
2657		i = bug.ntimes;
2658		if (i > NUMCBUGTIMES)
2659		    i = NUMCBUGTIMES;
2660		ic->ntimes = (u_char)i;
2661		ic->stimes = htonl(bug.stimes);
2662		while (--i >= 0) {
2663			HTONL_FP(&bug.times[i], &ic->times[i]);
2664		}
2665
2666		ic = (struct info_clkbug *)more_pkt();
2667	}
2668	flush_pkt();
2669}
2670#endif
2671
2672/*
2673 * receiver of interface structures
2674 */
2675static void
2676fill_info_if_stats(void *data, interface_info_t *interface_info)
2677{
2678	struct info_if_stats **ifsp = (struct info_if_stats **)data;
2679	struct info_if_stats *ifs = *ifsp;
2680	endpt *ep = interface_info->ep;
2681
2682	if (NULL == ifs)
2683		return;
2684
2685	ZERO(*ifs);
2686
2687	if (IS_IPV6(&ep->sin)) {
2688		if (!client_v6_capable)
2689			return;
2690		ifs->v6_flag = 1;
2691		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
2692		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
2693		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
2694	} else {
2695		ifs->v6_flag = 0;
2696		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
2697		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
2698		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
2699	}
2700	ifs->v6_flag = htonl(ifs->v6_flag);
2701	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
2702	ifs->family = htons(ep->family);
2703	ifs->flags = htonl(ep->flags);
2704	ifs->last_ttl = htonl(ep->last_ttl);
2705	ifs->num_mcast = htonl(ep->num_mcast);
2706	ifs->received = htonl(ep->received);
2707	ifs->sent = htonl(ep->sent);
2708	ifs->notsent = htonl(ep->notsent);
2709	ifs->ifindex = htonl(ep->ifindex);
2710	/* scope no longer in endpt, in in6_addr typically */
2711	ifs->scopeid = ifs->ifindex;
2712	ifs->ifnum = htonl(ep->ifnum);
2713	ifs->uptime = htonl(current_time - ep->starttime);
2714	ifs->ignore_packets = ep->ignore_packets;
2715	ifs->peercnt = htonl(ep->peercnt);
2716	ifs->action = interface_info->action;
2717
2718	*ifsp = (struct info_if_stats *)more_pkt();
2719}
2720
2721/*
2722 * get_if_stats - get interface statistics
2723 */
2724static void
2725get_if_stats(
2726	sockaddr_u *srcadr,
2727	endpt *inter,
2728	struct req_pkt *inpkt
2729	)
2730{
2731	struct info_if_stats *ifs;
2732
2733	DPRINTF(3, ("wants interface statistics\n"));
2734
2735	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2736	    v6sizeof(struct info_if_stats));
2737
2738	interface_enumerate(fill_info_if_stats, &ifs);
2739
2740	flush_pkt();
2741}
2742
2743static void
2744do_if_reload(
2745	sockaddr_u *srcadr,
2746	endpt *inter,
2747	struct req_pkt *inpkt
2748	)
2749{
2750	struct info_if_stats *ifs;
2751
2752	DPRINTF(3, ("wants interface reload\n"));
2753
2754	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2755	    v6sizeof(struct info_if_stats));
2756
2757	interface_update(fill_info_if_stats, &ifs);
2758
2759	flush_pkt();
2760}
2761
2762