1/*
2 * ntp_request.c - respond to information requests
3 */
4
5#ifdef HAVE_CONFIG_H
6# include <config.h>
7#endif
8
9#include "ntpd.h"
10#include "ntp_io.h"
11#include "ntp_request.h"
12#include "ntp_control.h"
13#include "ntp_refclock.h"
14#include "ntp_if.h"
15#include "ntp_stdlib.h"
16#include "ntp_assert.h"
17
18#include <stdio.h>
19#include <stddef.h>
20#include <signal.h>
21#ifdef HAVE_NETINET_IN_H
22#include <netinet/in.h>
23#endif
24#include <arpa/inet.h>
25
26#include "recvbuff.h"
27
28#ifdef KERNEL_PLL
29#include "ntp_syscall.h"
30#endif /* KERNEL_PLL */
31
32/*
33 * Structure to hold request procedure information
34 */
35#define	NOAUTH	0
36#define	AUTH	1
37
38#define	NO_REQUEST	(-1)
39/*
40 * Because we now have v6 addresses in the messages, we need to compensate
41 * for the larger size.  Therefore, we introduce the alternate size to
42 * keep us friendly with older implementations.  A little ugly.
43 */
44static int client_v6_capable = 0;   /* the client can handle longer messages */
45
46#define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
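
/*
 * Illustration (assuming the usual ntp_request.h definition,
 * v4sizeof(type) == offsetof(type, v6_flag)): for a response item such
 * as struct info_peer_list,
 *
 *	v4sizeof(struct info_peer_list) == offsetof(struct info_peer_list, v6_flag)
 *
 * so v6sizeof() yields the shorter, v4-only layout unless the client
 * has announced that it can handle the longer v6 messages.
 */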
47
48struct req_proc {
49	short request_code;	/* defined request code */
50	short needs_auth;	/* true when authentication needed */
51	short sizeofitem;	/* size of request data item (older size)*/
52	short v6_sizeofitem;	/* size of request data item (new size)*/
53	void (*handler) (sockaddr_u *, endpt *,
54			   struct req_pkt *);	/* routine to handle request */
55};
56
57/*
58 * Universal request codes
59 */
60static const struct req_proc univ_codes[] = {
61	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
62};
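
/*
 * Note: no universal request codes are implemented at present; the
 * table above holds only the NO_REQUEST terminator that stops the
 * lookup loop in process_private().
 */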
63
64static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
65static	void *	prepare_pkt	(sockaddr_u *, endpt *,
66				 struct req_pkt *, size_t);
67static	void *	more_pkt	(void);
68static	void	flush_pkt	(void);
69static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
70static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
71static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
72static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
73static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
74static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
76static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
79static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
80static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
81static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
82static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
83static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
84static	void	list_restrict4	(const restrict_u *, struct info_restrict **);
85static	void	list_restrict6	(const restrict_u *, struct info_restrict **);
86static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
87static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
88static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
89static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
90static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, restrict_op);
91static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
92static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
93static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
94static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
95static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
96static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
97static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
98static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
99static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
100static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
101static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
102static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
103static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
104static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
105static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
106static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
107static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
108#ifdef KERNEL_PLL
109static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
110#endif /* KERNEL_PLL */
111#ifdef REFCLOCK
112static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
113static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
114#endif	/* REFCLOCK */
115#ifdef REFCLOCK
116static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
117#endif	/* REFCLOCK */
118
119/*
120 * ntpd request codes
121 */
122static const struct req_proc ntp_codes[] = {
123	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
124	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
125	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
126				sizeof(struct info_peer_list), peer_info},
127	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
128				sizeof(struct info_peer_list), peer_stats},
129	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
130	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
131	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
132	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
133	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
134	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
135	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
136				sizeof(struct conf_peer), do_conf },
137	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
138				sizeof(struct conf_unpeer), do_unconf },
139	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
140				sizeof(struct conf_sys_flags), set_sys_flag },
141	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142				sizeof(struct conf_sys_flags),  clr_sys_flag },
143	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
144	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
145				sizeof(struct conf_restrict), do_resaddflags },
146	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
147				sizeof(struct conf_restrict), do_ressubflags },
148	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
149				sizeof(struct conf_restrict), do_unrestrict },
150	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
151	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
152	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
153	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
154				sizeof(struct conf_unpeer), reset_peer },
155	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
156	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
157	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
158	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
159	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
160	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
161				sizeof(struct conf_trap), req_set_trap },
162	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
163				sizeof(struct conf_trap), req_clr_trap },
164	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
165				set_request_keyid },
166	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167				set_control_keyid },
168	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
169#ifdef KERNEL_PLL
170	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
171#endif
172#ifdef REFCLOCK
173	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
174				get_clock_info },
175	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
176				sizeof(struct conf_fudge), set_clock_fudge },
177	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
178				get_clkbug_info },
179#endif
180	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
181	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
182
183	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
184};
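
/*
 * Illustrative sketch only (not compiled): registering an additional
 * request would mean inserting an entry ahead of the NO_REQUEST
 * terminator above and providing a handler with the usual signature.
 * REQ_EXAMPLE, struct info_example and example_handler are hypothetical
 * names used purely for illustration.
 */
#if 0
	{ REQ_EXAMPLE,	NOAUTH, v4sizeof(struct info_example),
				sizeof(struct info_example), example_handler },
#endif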
185
186
187/*
188 * Authentication keyid used to authenticate requests.  Zero means we
189 * don't allow writing anything.
190 */
191keyid_t info_auth_keyid;
192
193/*
194 * Statistic counters to keep track of requests and responses.
195 */
196u_long numrequests;		/* number of requests we've received */
197u_long numresppkts;		/* number of resp packets sent with data */
198
199/*
200 * lazy way to count errors, indexed by the error code
201 */
202u_long errorcounter[MAX_INFO_ERR + 1];
203
204/*
205 * A hack.  To keep the authentication module clear of ntp-isms, we
206 * include a time reset variable for its stats here.
207 */
208u_long auth_timereset;
209
210/*
211 * Response packet used by these routines.  Also some state information
212 * so that we can handle packet formatting within a common set of
213 * subroutines.  Note we try to enter data in place whenever possible,
214 * but the need to set the more bit correctly means we occasionally
215 * use the extra buffer and copy.
216 */
217static struct resp_pkt rpkt;
218static int reqver;
219static int seqno;
220static int nitems;
221static int itemsize;
222static int databytes;
223static char exbuf[RESP_DATA_SIZE];
224static int usingexbuf;
225static sockaddr_u *toaddr;
226static endpt *frominter;
227
228/*
229 * init_request - initialize request data
230 */
231void
232init_request (void)
233{
234	size_t i;
235
236	numrequests = 0;
237	numresppkts = 0;
238	auth_timereset = 0;
239	info_auth_keyid = 0;	/* by default, can't do this */
240
241	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
242	    errorcounter[i] = 0;
243}
244
245
246/*
247 * req_ack - acknowledge request with no data
248 */
249static void
250req_ack(
251	sockaddr_u *srcadr,
252	endpt *inter,
253	struct req_pkt *inpkt,
254	int errcode
255	)
256{
257	/*
258	 * fill in the fields
259	 */
260	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
261	rpkt.auth_seq = AUTH_SEQ(0, 0);
262	rpkt.implementation = inpkt->implementation;
263	rpkt.request = inpkt->request;
264	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
265	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
266
267	/*
268	 * send packet and bump counters
269	 */
270	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
271	errorcounter[errcode]++;
272}
273
274
275/*
276 * prepare_pkt - prepare response packet for transmission, return pointer
277 *		 to storage for data item.
278 */
279static void *
280prepare_pkt(
281	sockaddr_u *srcadr,
282	endpt *inter,
283	struct req_pkt *pkt,
284	size_t structsize
285	)
286{
287	DPRINTF(4, ("request: preparing pkt\n"));
288
289	/*
290	 * Fill in the implementation, request and itemsize fields
291	 * since these won't change.
292	 */
293	rpkt.implementation = pkt->implementation;
294	rpkt.request = pkt->request;
295	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
296
297	/*
298	 * Compute the static data needed to carry on.
299	 */
300	toaddr = srcadr;
301	frominter = inter;
302	seqno = 0;
303	nitems = 0;
304	itemsize = structsize;
305	databytes = 0;
306	usingexbuf = 0;
307
308	/*
309	 * return the beginning of the packet buffer.
310	 */
311	return &rpkt.u;
312}
313
314
315/*
316 * more_pkt - return a data pointer for a new item.
317 */
318static void *
319more_pkt(void)
320{
321	/*
322	 * If we were using the extra buffer, send the packet.
323	 */
324	if (usingexbuf) {
325		DPRINTF(3, ("request: sending pkt\n"));
326		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
327		rpkt.auth_seq = AUTH_SEQ(0, seqno);
328		rpkt.err_nitems = htons((u_short)nitems);
329		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
330			RESP_HEADER_SIZE + databytes);
331		numresppkts++;
332
333		/*
334		 * Copy data out of exbuf into the packet.
335		 */
336		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
337		seqno++;
338		databytes = 0;
339		nitems = 0;
340		usingexbuf = 0;
341	}
342
343	databytes += itemsize;
344	nitems++;
345	if (databytes + itemsize <= RESP_DATA_SIZE) {
346		DPRINTF(4, ("request: giving him more data\n"));
347		/*
348		 * More room in packet.  Give him the
349		 * next address.
350		 */
351		return &rpkt.u.data[databytes];
352	} else {
353		/*
354		 * No room in packet.  Give him the extra
355		 * buffer unless this was the last in the sequence.
356		 */
357		DPRINTF(4, ("request: into extra buffer\n"));
358		if (seqno == MAXSEQ)
359			return NULL;
360		else {
361			usingexbuf = 1;
362			return exbuf;
363		}
364	}
365}
366
367
368/*
369 * flush_pkt - we're done, return remaining information.
370 */
371static void
372flush_pkt(void)
373{
374	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
375	/*
376	 * Must send the last packet.  If nothing in here and nothing
377	 * has been sent, send an error saying no data to be found.
378	 */
379	if (seqno == 0 && nitems == 0)
380		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
381			INFO_ERR_NODATA);
382	else {
383		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
384		rpkt.auth_seq = AUTH_SEQ(0, seqno);
385		rpkt.err_nitems = htons((u_short)nitems);
386		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
387			RESP_HEADER_SIZE+databytes);
388		numresppkts++;
389	}
390}
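
/*
 * Usage sketch (not compiled in): the reply-building pattern used by
 * the handlers below.  A handler reserves room for the first item with
 * prepare_pkt(), fills items in place, asks more_pkt() for each further
 * slot, and finishes with flush_pkt().  example_handler, struct
 * info_example, have_more_items() and fill_one_item() are hypothetical
 * names used for illustration only.
 */
#if 0
static void
example_handler(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_example *ie;

	ie = (struct info_example *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_example));
	while (ie != NULL && have_more_items()) {
		fill_one_item(ie);	/* write the item directly in place */
		ie = (struct info_example *)more_pkt();
	}
	flush_pkt();	/* send the final packet, or a NODATA error */
}
#endif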
391
392
393
394/*
395 * Given a buffer, return the packet mode
396 */
397int
398get_packet_mode(struct recvbuf *rbufp)
399{
400	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
401	return (INFO_MODE(inpkt->rm_vn_mode));
402}
403
404
405/*
406 * process_private - process private mode (7) packets
407 */
408void
409process_private(
410	struct recvbuf *rbufp,
411	int mod_okay
412	)
413{
414	static u_long quiet_until;
415	struct req_pkt *inpkt;
416	struct req_pkt_tail *tailinpkt;
417	sockaddr_u *srcadr;
418	endpt *inter;
419	const struct req_proc *proc;
420	int ec;
421	short temp_size;
422	l_fp ftmp;
423	double dtemp;
424	size_t recv_len;
425	size_t noslop_len;
426	size_t mac_len;
427
428	/*
429	 * Initialize pointers, for convenience
430	 */
431	recv_len = rbufp->recv_length;
432	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
433	srcadr = &rbufp->recv_srcadr;
434	inter = rbufp->dstadr;
435
436	DPRINTF(3, ("process_private: impl %d req %d\n",
437		    inpkt->implementation, inpkt->request));
438
439	/*
440	 * Do some sanity checks on the packet.  Return a format
441	 * error if it fails.
442	 */
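	/*
	 * The (++ec, condition) idiom below means that when one of the
	 * tests fires, ec holds its 1-based index, which is what the
	 * "drop test %d failed" message reports.
	 */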
443	ec = 0;
444	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
445	    || (++ec, ISMORE(inpkt->rm_vn_mode))
446	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
447	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
448	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
449	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
450	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
451	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
452		) {
453		NLOG(NLOG_SYSEVENT)
454			if (current_time >= quiet_until) {
455				msyslog(LOG_ERR,
456					"process_private: drop test %d"
457					" failed, pkt from %s",
458					ec, stoa(srcadr));
459				quiet_until = current_time + 60;
460			}
461		return;
462	}
463
464	reqver = INFO_VERSION(inpkt->rm_vn_mode);
465
466	/*
467	 * Get the appropriate procedure list to search.
468	 */
469	if (inpkt->implementation == IMPL_UNIV)
470		proc = univ_codes;
471	else if ((inpkt->implementation == IMPL_XNTPD) ||
472		 (inpkt->implementation == IMPL_XNTPD_OLD))
473		proc = ntp_codes;
474	else {
475		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
476		return;
477	}
478
479	/*
480	 * Search the list for the request codes.  If it isn't one
481	 * we know, return an error.
482	 */
483	while (proc->request_code != NO_REQUEST) {
484		if (proc->request_code == (short) inpkt->request)
485			break;
486		proc++;
487	}
488	if (proc->request_code == NO_REQUEST) {
489		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
490		return;
491	}
492
493	DPRINTF(4, ("found request in tables\n"));
494
495	/*
496	 * If we need data, check to see if we have some.  If we
497	 * don't, check to see that there is none (picky, picky).
498	 */
499
500	/* This part is a bit tricky: we want to be sure that the item size
501	 * in the request is either the old or the new size.  It also lets us
502	 * find out whether the client can handle both types of messages.
503	 *
504	 * Handle the exception of REQ_CONFIG. It can have two data sizes.
505	 */
506	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
507	if ((temp_size != proc->sizeofitem &&
508	     temp_size != proc->v6_sizeofitem) &&
509	    !(inpkt->implementation == IMPL_XNTPD &&
510	      inpkt->request == REQ_CONFIG &&
511	      temp_size == sizeof(struct old_conf_peer))) {
512		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
513			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
514		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
515		return;
516	}
517	if ((proc->sizeofitem != 0) &&
518	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
519	     (recv_len - REQ_LEN_HDR))) {
520		DPRINTF(3, ("process_private: not enough data\n"));
521		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
522		return;
523	}
524
525	switch (inpkt->implementation) {
526	case IMPL_XNTPD:
527		client_v6_capable = 1;
528		break;
529	case IMPL_XNTPD_OLD:
530		client_v6_capable = 0;
531		break;
532	default:
533		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
534		return;
535	}
536
537	/*
538	 * If we need to authenticate, do so.  Note that an
539	 * authenticatable packet must include a mac field, must
540	 * have used key info_auth_keyid and must have included
541	 * a time stamp in the appropriate field.  The time stamp
542	 * must be within INFO_TS_MAXSKEW of the receive
543	 * time stamp.
544	 */
545	if (proc->needs_auth && sys_authenticate) {
546
547		if (recv_len < (REQ_LEN_HDR +
548		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
549		    INFO_NITEMS(inpkt->err_nitems)) +
550		    REQ_TAIL_MIN)) {
551			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
552			return;
553		}
554
555		/*
556		 * For 16-octet digests, regardless of itemsize and
557		 * nitems, authenticated requests are a fixed size
558		 * with the timestamp, key ID, and digest located
559		 * at the end of the packet.  Because the key ID
560		 * determining the digest size precedes the digest,
561		 * for larger digests the fixed size request scheme
562		 * is abandoned and the timestamp, key ID, and digest
563		 * are located relative to the start of the packet,
564		 * with the digest size determined by the packet size.
565		 */
566		noslop_len = REQ_LEN_HDR
567			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
568			       INFO_NITEMS(inpkt->err_nitems)
569			     + sizeof(inpkt->tstamp);
570		/* 32-bit alignment */
571		noslop_len = (noslop_len + 3) & ~3;
572		if (recv_len > (noslop_len + MAX_MAC_LEN))
573			mac_len = 20;
574		else
575			mac_len = recv_len - noslop_len;
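		/*
		 * mac_len now counts the key ID plus digest octets that
		 * follow the timestamp: 20 octets matches the classic
		 * fixed-size layout (presumably a 4-octet key ID plus a
		 * 16-octet digest); otherwise the length is taken from
		 * the packet itself.
		 */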
576
577		tailinpkt = (void *)((char *)inpkt + recv_len -
578			    (mac_len + sizeof(inpkt->tstamp)));
579
580		/*
581		 * If this guy is restricted from doing this, don't let
582		 * him.  If the wrong key was used, or the packet doesn't
583		 * have a MAC, return.
584		 */
585		/* XXX: Use authistrustedip(), or equivalent. */
586		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
587		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
588			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
589				    INFO_IS_AUTH(inpkt->auth_seq),
590				    info_auth_keyid,
591				    ntohl(tailinpkt->keyid), (u_long)mac_len));
592#ifdef DEBUG
593			msyslog(LOG_DEBUG,
594				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
595				INFO_IS_AUTH(inpkt->auth_seq),
596				info_auth_keyid,
597				ntohl(tailinpkt->keyid), (u_long)mac_len);
598#endif
599			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
600			return;
601		}
602		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
603			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
604			msyslog(LOG_ERR,
605				"process_private: bad pkt length %zu",
606				recv_len);
607			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
608			return;
609		}
610		if (!mod_okay || !authhavekey(info_auth_keyid)) {
611			DPRINTF(5, ("failed auth mod_okay %d\n",
612				    mod_okay));
613#ifdef DEBUG
614			msyslog(LOG_DEBUG,
615				"process_private: failed auth mod_okay %d\n",
616				mod_okay);
617#endif
618			if (!mod_okay) {
619				sys_restricted++;
620			}
621			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
622			return;
623		}
624
625		/*
626		 * calculate absolute time difference between xmit time stamp
627		 * and receive time stamp.  If too large, too bad.
628		 */
629		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
630		L_SUB(&ftmp, &rbufp->recv_time);
631		LFPTOD(&ftmp, dtemp);
632		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
633			/*
634			 * He's a loser.  Tell him.
635			 */
636			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
637				    dtemp, INFO_TS_MAXSKEW));
638			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
639			return;
640		}
641
642		/*
643		 * So far so good.  See if decryption works out okay.
644		 */
645		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
646				 recv_len - mac_len, mac_len)) {
647			DPRINTF(5, ("authdecrypt failed\n"));
648			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
649			return;
650		}
651	}
652
653	DPRINTF(3, ("process_private: all okay, into handler\n"));
654	/*
655	 * Packet is okay.  Call the handler to send him data.
656	 */
657	(proc->handler)(srcadr, inter, inpkt);
658}
659
660
661/*
662 * list_peers - send a list of the peers
663 */
664static void
665list_peers(
666	sockaddr_u *srcadr,
667	endpt *inter,
668	struct req_pkt *inpkt
669	)
670{
671	struct info_peer_list *	ip;
672	const struct peer *	pp;
673
674	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
675	    v6sizeof(struct info_peer_list));
676	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
677		if (IS_IPV6(&pp->srcadr)) {
678			if (!client_v6_capable)
679				continue;
680			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
681			ip->v6_flag = 1;
682		} else {
683			ip->addr = NSRCADR(&pp->srcadr);
684			if (client_v6_capable)
685				ip->v6_flag = 0;
686		}
687
688		ip->port = NSRCPORT(&pp->srcadr);
689		ip->hmode = pp->hmode;
690		ip->flags = 0;
691		if (pp->flags & FLAG_CONFIG)
692			ip->flags |= INFO_FLAG_CONFIG;
693		if (pp == sys_peer)
694			ip->flags |= INFO_FLAG_SYSPEER;
695		if (pp->status == CTL_PST_SEL_SYNCCAND)
696			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
697		if (pp->status >= CTL_PST_SEL_SYSPEER)
698			ip->flags |= INFO_FLAG_SHORTLIST;
699		ip = (struct info_peer_list *)more_pkt();
700	}	/* for pp */
701
702	flush_pkt();
703}
704
705
706/*
707 * list_peers_sum - return extended peer list
708 */
709static void
710list_peers_sum(
711	sockaddr_u *srcadr,
712	endpt *inter,
713	struct req_pkt *inpkt
714	)
715{
716	struct info_peer_summary *	ips;
717	const struct peer *		pp;
718	l_fp 				ltmp;
719
720	DPRINTF(3, ("wants peer list summary\n"));
721
722	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
723	    v6sizeof(struct info_peer_summary));
724	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
725		DPRINTF(4, ("sum: got one\n"));
726		/*
727		 * Be careful here not to return v6 peers when we
728		 * want only v4.
729		 */
730		if (IS_IPV6(&pp->srcadr)) {
731			if (!client_v6_capable)
732				continue;
733			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
734			ips->v6_flag = 1;
735			if (pp->dstadr)
736				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
737			else
738				ZERO(ips->dstadr6);
739		} else {
740			ips->srcadr = NSRCADR(&pp->srcadr);
741			if (client_v6_capable)
742				ips->v6_flag = 0;
743
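			/*
			 * Select the local address used for this peer:
			 * until the first packet has been processed, use
			 * the unicast address; afterwards prefer the
			 * broadcast address for pure broadcast
			 * associations, and fall back to it only when the
			 * unicast address is unset.
			 */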
744			if (pp->dstadr) {
745				if (!pp->processed)
746					ips->dstadr = NSRCADR(&pp->dstadr->sin);
747				else {
748					if (MDF_BCAST == pp->cast_flags)
749						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
750					else if (pp->cast_flags) {
751						ips->dstadr = NSRCADR(&pp->dstadr->sin);
752						if (!ips->dstadr)
753							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
754					}
755				}
756			} else {
757				ips->dstadr = 0;
758			}
759		}
760
761		ips->srcport = NSRCPORT(&pp->srcadr);
762		ips->stratum = pp->stratum;
763		ips->hpoll = pp->hpoll;
764		ips->ppoll = pp->ppoll;
765		ips->reach = pp->reach;
766		ips->flags = 0;
767		if (pp == sys_peer)
768			ips->flags |= INFO_FLAG_SYSPEER;
769		if (pp->flags & FLAG_CONFIG)
770			ips->flags |= INFO_FLAG_CONFIG;
771		if (pp->flags & FLAG_REFCLOCK)
772			ips->flags |= INFO_FLAG_REFCLOCK;
773		if (pp->flags & FLAG_PREFER)
774			ips->flags |= INFO_FLAG_PREFER;
775		if (pp->flags & FLAG_BURST)
776			ips->flags |= INFO_FLAG_BURST;
777		if (pp->status == CTL_PST_SEL_SYNCCAND)
778			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
779		if (pp->status >= CTL_PST_SEL_SYSPEER)
780			ips->flags |= INFO_FLAG_SHORTLIST;
781		ips->hmode = pp->hmode;
782		ips->delay = HTONS_FP(DTOFP(pp->delay));
783		DTOLFP(pp->offset, &ltmp);
784		HTONL_FP(&ltmp, &ips->offset);
785		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
786
787		ips = (struct info_peer_summary *)more_pkt();
788	}	/* for pp */
789
790	flush_pkt();
791}
792
793
794/*
795 * peer_info - send information for one or more peers
796 */
797static void
798peer_info (
799	sockaddr_u *srcadr,
800	endpt *inter,
801	struct req_pkt *inpkt
802	)
803{
804	u_short			items;
805	size_t			item_sz;
806	char *			datap;
807	struct info_peer_list	ipl;
808	struct peer *		pp;
809	struct info_peer *	ip;
810	int			i;
811	int			j;
812	sockaddr_u		addr;
813	l_fp			ltmp;
814
815	items = INFO_NITEMS(inpkt->err_nitems);
816	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
817	datap = inpkt->u.data;
818	if (item_sz != sizeof(ipl)) {
819		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
820		return;
821	}
822	ip = prepare_pkt(srcadr, inter, inpkt,
823			 v6sizeof(struct info_peer));
824	while (items-- > 0 && ip != NULL) {
825		ZERO(ipl);
826		memcpy(&ipl, datap, item_sz);
827		ZERO_SOCK(&addr);
828		NSRCPORT(&addr) = ipl.port;
829		if (client_v6_capable && ipl.v6_flag) {
830			AF(&addr) = AF_INET6;
831			SOCK_ADDR6(&addr) = ipl.addr6;
832		} else {
833			AF(&addr) = AF_INET;
834			NSRCADR(&addr) = ipl.addr;
835		}
836#ifdef ISC_PLATFORM_HAVESALEN
837		addr.sa.sa_len = SOCKLEN(&addr);
838#endif
839		datap += item_sz;
840
841		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
842		if (NULL == pp)
843			continue;
844		if (IS_IPV6(&pp->srcadr)) {
845			if (pp->dstadr)
846				ip->dstadr6 =
847				    (MDF_BCAST == pp->cast_flags)
848					? SOCK_ADDR6(&pp->dstadr->bcast)
849					: SOCK_ADDR6(&pp->dstadr->sin);
850			else
851				ZERO(ip->dstadr6);
852
853			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
854			ip->v6_flag = 1;
855		} else {
856			if (pp->dstadr) {
857				if (!pp->processed)
858					ip->dstadr = NSRCADR(&pp->dstadr->sin);
859				else {
860					if (MDF_BCAST == pp->cast_flags)
861						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
862					else if (pp->cast_flags) {
863						ip->dstadr = NSRCADR(&pp->dstadr->sin);
864						if (!ip->dstadr)
865							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
866					}
867				}
868			} else
869				ip->dstadr = 0;
870
871			ip->srcadr = NSRCADR(&pp->srcadr);
872			if (client_v6_capable)
873				ip->v6_flag = 0;
874		}
875		ip->srcport = NSRCPORT(&pp->srcadr);
876		ip->flags = 0;
877		if (pp == sys_peer)
878			ip->flags |= INFO_FLAG_SYSPEER;
879		if (pp->flags & FLAG_CONFIG)
880			ip->flags |= INFO_FLAG_CONFIG;
881		if (pp->flags & FLAG_REFCLOCK)
882			ip->flags |= INFO_FLAG_REFCLOCK;
883		if (pp->flags & FLAG_PREFER)
884			ip->flags |= INFO_FLAG_PREFER;
885		if (pp->flags & FLAG_BURST)
886			ip->flags |= INFO_FLAG_BURST;
887		if (pp->status == CTL_PST_SEL_SYNCCAND)
888			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
889		if (pp->status >= CTL_PST_SEL_SYSPEER)
890			ip->flags |= INFO_FLAG_SHORTLIST;
891		ip->leap = pp->leap;
892		ip->hmode = pp->hmode;
893		ip->pmode = pp->pmode;
894		ip->keyid = pp->keyid;
895		ip->stratum = pp->stratum;
896		ip->ppoll = pp->ppoll;
897		ip->hpoll = pp->hpoll;
898		ip->precision = pp->precision;
899		ip->version = pp->version;
900		ip->reach = pp->reach;
901		ip->unreach = (u_char)pp->unreach;
902		ip->flash = (u_char)pp->flash;
903		ip->flash2 = (u_short)pp->flash;
904		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
905		ip->ttl = (u_char)pp->ttl;
906		ip->associd = htons(pp->associd);
907		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
908		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
909		ip->refid = pp->refid;
910		HTONL_FP(&pp->reftime, &ip->reftime);
911		HTONL_FP(&pp->aorg, &ip->org);
912		HTONL_FP(&pp->rec, &ip->rec);
913		HTONL_FP(&pp->xmt, &ip->xmt);
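		/*
		 * Walk the clock filter shift register from the most
		 * recent sample backwards; order[] converts each entry of
		 * filter_order[] (an index into the register) into an age
		 * rank relative to filter_nextpt, wrapped modulo NTP_SHIFT.
		 */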
914		j = pp->filter_nextpt - 1;
915		for (i = 0; i < NTP_SHIFT; i++, j--) {
916			if (j < 0)
917				j = NTP_SHIFT-1;
918			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
919			DTOLFP(pp->filter_offset[j], &ltmp);
920			HTONL_FP(&ltmp, &ip->filtoffset[i]);
921			ip->order[i] = (u_char)((pp->filter_nextpt +
922						 NTP_SHIFT - 1) -
923						pp->filter_order[i]);
924			if (ip->order[i] >= NTP_SHIFT)
925				ip->order[i] -= NTP_SHIFT;
926		}
927		DTOLFP(pp->offset, &ltmp);
928		HTONL_FP(&ltmp, &ip->offset);
929		ip->delay = HTONS_FP(DTOFP(pp->delay));
930		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
931		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
932		ip = more_pkt();
933	}
934	flush_pkt();
935}
936
937
938/*
939 * peer_stats - send statistics for one or more peers
940 */
941static void
942peer_stats (
943	sockaddr_u *srcadr,
944	endpt *inter,
945	struct req_pkt *inpkt
946	)
947{
948	u_short			items;
949	size_t			item_sz;
950	char *			datap;
951	struct info_peer_list	ipl;
952	struct peer *		pp;
953	struct info_peer_stats *ip;
954	sockaddr_u addr;
955
956	DPRINTF(1, ("peer_stats: called\n"));
957	items = INFO_NITEMS(inpkt->err_nitems);
958	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
959	datap = inpkt->u.data;
960	if (item_sz > sizeof(ipl)) {
961		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
962		return;
963	}
964	ip = prepare_pkt(srcadr, inter, inpkt,
965			 v6sizeof(struct info_peer_stats));
966	while (items-- > 0 && ip != NULL) {
967		ZERO(ipl);
968		memcpy(&ipl, datap, item_sz);
969		ZERO(addr);
970		NSRCPORT(&addr) = ipl.port;
971		if (client_v6_capable && ipl.v6_flag) {
972			AF(&addr) = AF_INET6;
973			SOCK_ADDR6(&addr) = ipl.addr6;
974		} else {
975			AF(&addr) = AF_INET;
976			NSRCADR(&addr) = ipl.addr;
977		}
978#ifdef ISC_PLATFORM_HAVESALEN
979		addr.sa.sa_len = SOCKLEN(&addr);
980#endif
981		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
982			    stoa(&addr), ipl.port, NSRCPORT(&addr)));
983
984		datap += item_sz;
985
986		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
987		if (NULL == pp)
988			continue;
989
990		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
991
992		if (IS_IPV4(&pp->srcadr)) {
993			if (pp->dstadr) {
994				if (!pp->processed)
995					ip->dstadr = NSRCADR(&pp->dstadr->sin);
996				else {
997					if (MDF_BCAST == pp->cast_flags)
998						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
999					else if (pp->cast_flags) {
1000						ip->dstadr = NSRCADR(&pp->dstadr->sin);
1001						if (!ip->dstadr)
1002							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1003					}
1004				}
1005			} else
1006				ip->dstadr = 0;
1007
1008			ip->srcadr = NSRCADR(&pp->srcadr);
1009			if (client_v6_capable)
1010				ip->v6_flag = 0;
1011		} else {
1012			if (pp->dstadr)
1013				ip->dstadr6 =
1014				    (MDF_BCAST == pp->cast_flags)
1015					? SOCK_ADDR6(&pp->dstadr->bcast)
1016					: SOCK_ADDR6(&pp->dstadr->sin);
1017			else
1018				ZERO(ip->dstadr6);
1019
1020			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1021			ip->v6_flag = 1;
1022		}
1023		ip->srcport = NSRCPORT(&pp->srcadr);
1024		ip->flags = 0;
1025		if (pp == sys_peer)
1026		    ip->flags |= INFO_FLAG_SYSPEER;
1027		if (pp->flags & FLAG_CONFIG)
1028		    ip->flags |= INFO_FLAG_CONFIG;
1029		if (pp->flags & FLAG_REFCLOCK)
1030		    ip->flags |= INFO_FLAG_REFCLOCK;
1031		if (pp->flags & FLAG_PREFER)
1032		    ip->flags |= INFO_FLAG_PREFER;
1033		if (pp->flags & FLAG_BURST)
1034		    ip->flags |= INFO_FLAG_BURST;
1035		if (pp->flags & FLAG_IBURST)
1036		    ip->flags |= INFO_FLAG_IBURST;
1037		if (pp->status == CTL_PST_SEL_SYNCCAND)
1038		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1039		if (pp->status >= CTL_PST_SEL_SYSPEER)
1040		    ip->flags |= INFO_FLAG_SHORTLIST;
1041		ip->flags = htons(ip->flags);
1042		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1043		ip->timetosend = htonl(pp->nextdate - current_time);
1044		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1045		ip->sent = htonl((u_int32)(pp->sent));
1046		ip->processed = htonl((u_int32)(pp->processed));
1047		ip->badauth = htonl((u_int32)(pp->badauth));
1048		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1049		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1050		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1051		ip->selbroken = htonl((u_int32)(pp->selbroken));
1052		ip->candidate = pp->status;
1053		ip = (struct info_peer_stats *)more_pkt();
1054	}
1055	flush_pkt();
1056}
1057
1058
1059/*
1060 * sys_info - return system info
1061 */
1062static void
1063sys_info(
1064	sockaddr_u *srcadr,
1065	endpt *inter,
1066	struct req_pkt *inpkt
1067	)
1068{
1069	register struct info_sys *is;
1070
1071	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1072	    v6sizeof(struct info_sys));
1073
1074	if (sys_peer) {
1075		if (IS_IPV4(&sys_peer->srcadr)) {
1076			is->peer = NSRCADR(&sys_peer->srcadr);
1077			if (client_v6_capable)
1078				is->v6_flag = 0;
1079		} else if (client_v6_capable) {
1080			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1081			is->v6_flag = 1;
1082		}
1083		is->peer_mode = sys_peer->hmode;
1084	} else {
1085		is->peer = 0;
1086		if (client_v6_capable) {
1087			is->v6_flag = 0;
1088		}
1089		is->peer_mode = 0;
1090	}
1091
1092	is->leap = sys_leap;
1093	is->stratum = sys_stratum;
1094	is->precision = sys_precision;
1095	is->rootdelay = htonl(DTOFP(sys_rootdelay));
1096	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1097	is->frequency = htonl(DTOFP(sys_jitter));
1098	is->stability = htonl(DTOUFP(clock_stability * 1e6));
1099	is->refid = sys_refid;
1100	HTONL_FP(&sys_reftime, &is->reftime);
1101
1102	is->poll = sys_poll;
1103
1104	is->flags = 0;
1105	if (sys_authenticate)
1106		is->flags |= INFO_FLAG_AUTHENTICATE;
1107	if (sys_bclient)
1108		is->flags |= INFO_FLAG_BCLIENT;
1109#ifdef REFCLOCK
1110	if (cal_enable)
1111		is->flags |= INFO_FLAG_CAL;
1112#endif /* REFCLOCK */
1113	if (kern_enable)
1114		is->flags |= INFO_FLAG_KERNEL;
1115	if (mon_enabled != MON_OFF)
1116		is->flags |= INFO_FLAG_MONITOR;
1117	if (ntp_enable)
1118		is->flags |= INFO_FLAG_NTP;
1119	if (hardpps_enable)
1120		is->flags |= INFO_FLAG_PPS_SYNC;
1121	if (stats_control)
1122		is->flags |= INFO_FLAG_FILEGEN;
1123	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1124	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1125	(void) more_pkt();
1126	flush_pkt();
1127}
1128
1129
1130/*
1131 * sys_stats - return system statistics
1132 */
1133static void
1134sys_stats(
1135	sockaddr_u *srcadr,
1136	endpt *inter,
1137	struct req_pkt *inpkt
1138	)
1139{
1140	register struct info_sys_stats *ss;
1141
1142	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1143		sizeof(struct info_sys_stats));
1144	ss->timeup = htonl((u_int32)current_time);
1145	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1146	ss->denied = htonl((u_int32)sys_restricted);
1147	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1148	ss->newversionpkt = htonl((u_int32)sys_newversion);
1149	ss->unknownversion = htonl((u_int32)sys_declined);
1150	ss->badlength = htonl((u_int32)sys_badlength);
1151	ss->processed = htonl((u_int32)sys_processed);
1152	ss->badauth = htonl((u_int32)sys_badauth);
1153	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1154	ss->received = htonl((u_int32)sys_received);
1155	ss->lamport = htonl((u_int32)sys_lamport);
1156	ss->tsrounding = htonl((u_int32)sys_tsrounding);
1157	(void) more_pkt();
1158	flush_pkt();
1159}
1160
1161
1162/*
1163 * mem_stats - return memory statistics
1164 */
1165static void
1166mem_stats(
1167	sockaddr_u *srcadr,
1168	endpt *inter,
1169	struct req_pkt *inpkt
1170	)
1171{
1172	register struct info_mem_stats *ms;
1173	register int i;
1174
1175	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1176						  sizeof(struct info_mem_stats));
1177
1178	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1179	ms->totalpeermem = htons((u_short)total_peer_structs);
1180	ms->freepeermem = htons((u_short)peer_free_count);
1181	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1182	ms->allocations = htonl((u_int32)peer_allocations);
1183	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1184
1185	for (i = 0; i < NTP_HASH_SIZE; i++)
1186		ms->hashcount[i] = (u_char)
1187		    min((u_int)peer_hash_count[i], UCHAR_MAX);
1188
1189	(void) more_pkt();
1190	flush_pkt();
1191}
1192
1193
1194/*
1195 * io_stats - return io statistics
1196 */
1197static void
1198io_stats(
1199	sockaddr_u *srcadr,
1200	endpt *inter,
1201	struct req_pkt *inpkt
1202	)
1203{
1204	struct info_io_stats *io;
1205
1206	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1207						 sizeof(struct info_io_stats));
1208
1209	io->timereset = htonl((u_int32)(current_time - io_timereset));
1210	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1211	io->freerecvbufs = htons((u_short) free_recvbuffs());
1212	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1213	io->lowwater = htons((u_short) lowater_additions());
1214	io->dropped = htonl((u_int32)packets_dropped);
1215	io->ignored = htonl((u_int32)packets_ignored);
1216	io->received = htonl((u_int32)packets_received);
1217	io->sent = htonl((u_int32)packets_sent);
1218	io->notsent = htonl((u_int32)packets_notsent);
1219	io->interrupts = htonl((u_int32)handler_calls);
1220	io->int_received = htonl((u_int32)handler_pkts);
1221
1222	(void) more_pkt();
1223	flush_pkt();
1224}
1225
1226
1227/*
1228 * timer_stats - return timer statistics
1229 */
1230static void
1231timer_stats(
1232	sockaddr_u *		srcadr,
1233	endpt *			inter,
1234	struct req_pkt *	inpkt
1235	)
1236{
1237	struct info_timer_stats *	ts;
1238	u_long				sincereset;
1239
1240	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1241						    inpkt, sizeof(*ts));
1242
1243	sincereset = current_time - timer_timereset;
1244	ts->timereset = htonl((u_int32)sincereset);
1245	ts->alarms = ts->timereset;
1246	ts->overflows = htonl((u_int32)alarm_overflow);
1247	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1248
1249	(void) more_pkt();
1250	flush_pkt();
1251}
1252
1253
1254/*
1255 * loop_info - return the current state of the loop filter
1256 */
1257static void
1258loop_info(
1259	sockaddr_u *srcadr,
1260	endpt *inter,
1261	struct req_pkt *inpkt
1262	)
1263{
1264	struct info_loop *li;
1265	l_fp ltmp;
1266
1267	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1268	    sizeof(struct info_loop));
1269
1270	DTOLFP(last_offset, &ltmp);
1271	HTONL_FP(&ltmp, &li->last_offset);
1272	DTOLFP(drift_comp * 1e6, &ltmp);
1273	HTONL_FP(&ltmp, &li->drift_comp);
1274	li->compliance = htonl((u_int32)(tc_counter));
1275	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1276
1277	(void) more_pkt();
1278	flush_pkt();
1279}
1280
1281
1282/*
1283 * do_conf - add a peer to the configuration list
1284 */
1285static void
1286do_conf(
1287	sockaddr_u *srcadr,
1288	endpt *inter,
1289	struct req_pkt *inpkt
1290	)
1291{
1292	u_short			items;
1293	size_t			item_sz;
1294	u_int			fl;
1295	char *			datap;
1296	struct conf_peer	temp_cp;
1297	sockaddr_u		peeraddr;
1298
1299	/*
1300	 * Do a check of everything to see that it looks
1301	 * okay.  If not, complain about it.  Note we are
1302	 * very picky here.
1303	 */
1304	items = INFO_NITEMS(inpkt->err_nitems);
1305	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1306	datap = inpkt->u.data;
1307	if (item_sz > sizeof(temp_cp)) {
1308		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1309		return;
1310	}
1311
1312	while (items-- > 0) {
1313		ZERO(temp_cp);
1314		memcpy(&temp_cp, datap, item_sz);
1315		ZERO_SOCK(&peeraddr);
1316
1317		fl = 0;
1318		if (temp_cp.flags & CONF_FLAG_PREFER)
1319			fl |= FLAG_PREFER;
1320		if (temp_cp.flags & CONF_FLAG_BURST)
1321			fl |= FLAG_BURST;
1322		if (temp_cp.flags & CONF_FLAG_IBURST)
1323			fl |= FLAG_IBURST;
1324#ifdef AUTOKEY
1325		if (temp_cp.flags & CONF_FLAG_SKEY)
1326			fl |= FLAG_SKEY;
1327#endif	/* AUTOKEY */
1328		if (client_v6_capable && temp_cp.v6_flag) {
1329			AF(&peeraddr) = AF_INET6;
1330			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1331		} else {
1332			AF(&peeraddr) = AF_INET;
1333			NSRCADR(&peeraddr) = temp_cp.peeraddr;
1334			/*
1335			 * Make sure the address is valid
1336			 */
1337			if (!ISREFCLOCKADR(&peeraddr) &&
1338			    ISBADADR(&peeraddr)) {
1339				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1340				return;
1341			}
1342
1343		}
1344		NSRCPORT(&peeraddr) = htons(NTP_PORT);
1345#ifdef ISC_PLATFORM_HAVESALEN
1346		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1347#endif
1348
1349		/* check mode value: 0 <= hmode <= 6
1350		 *
1351		 * There's no good global define for that limit, and
1352		 * using a magic define is as good (or bad, actually) as
1353		 * a magic number. So we use the highest possible peer
1354		 * mode, and that is MODE_BCLIENT.
1355		 *
1356		 * [Bug 3009] claims that a problem occurs for hmode > 7,
1357		 * but the code in ntp_peer.c indicates trouble for any
1358		 * hmode > 6 ( --> MODE_BCLIENT).
1359		 */
1360		if (temp_cp.hmode > MODE_BCLIENT) {
1361			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1362			return;
1363		}
1364
1365		/* Any more checks on the values? Unchecked at this
1366		 * point:
1367		 *   - version
1368		 *   - ttl
1369		 *   - keyid
1370		 *
1371		 *   - minpoll/maxpoll, but they are treated properly
1372		 *     for all cases internally. Checking not necessary.
1373		 *
1374		 * Note that we ignore any previously-specified ippeerlimit.
1375		 * If we're told to create the peer, we create the peer.
1376		 */
1377
1378		/* finally create the peer */
1379		if (peer_config(&peeraddr, NULL, NULL, -1,
1380		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1381		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1382		    NULL) == 0)
1383		{
1384			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1385			return;
1386		}
1387
1388		datap += item_sz;
1389	}
1390	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1391}
1392
1393
1394/*
1395 * do_unconf - remove a peer from the configuration list
1396 */
1397static void
1398do_unconf(
1399	sockaddr_u *	srcadr,
1400	endpt *		inter,
1401	struct req_pkt *inpkt
1402	)
1403{
1404	u_short			items;
1405	size_t			item_sz;
1406	char *			datap;
1407	struct conf_unpeer	temp_cp;
1408	struct peer *		p;
1409	sockaddr_u		peeraddr;
1410	int			loops;
1411
1412	/*
1413	 * This is a bit unstructured, but I like to be careful.
1414	 * We check to see that every peer exists and is actually
1415	 * configured.  If so, we remove them.  If not, we return
1416	 * an error.
1417	 *
1418	 * [Bug 3011] Even if we checked all peers given in the request
1419	 * in a dry run, there's still a chance that the caller played
1420	 * unfair and gave the same peer multiple times. So we still
1421	 * have to be prepared for nasty surprises in the second run ;)
1422	 */
1423
1424	/* basic consistency checks */
1425	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1426	if (item_sz > sizeof(temp_cp)) {
1427		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1428		return;
1429	}
1430
1431	/* now do two runs: first a dry run, then a busy one */
1432	for (loops = 0; loops != 2; ++loops) {
1433		items = INFO_NITEMS(inpkt->err_nitems);
1434		datap = inpkt->u.data;
1435		while (items-- > 0) {
1436			/* copy from request to local */
1437			ZERO(temp_cp);
1438			memcpy(&temp_cp, datap, item_sz);
1439			/* get address structure */
1440			ZERO_SOCK(&peeraddr);
1441			if (client_v6_capable && temp_cp.v6_flag) {
1442				AF(&peeraddr) = AF_INET6;
1443				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1444			} else {
1445				AF(&peeraddr) = AF_INET;
1446				NSRCADR(&peeraddr) = temp_cp.peeraddr;
1447			}
1448			SET_PORT(&peeraddr, NTP_PORT);
1449#ifdef ISC_PLATFORM_HAVESALEN
1450			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1451#endif
1452			DPRINTF(1, ("searching for %s\n",
1453				    stoa(&peeraddr)));
1454
1455			/* search for matching configured(!) peer */
1456			p = NULL;
1457			do {
1458				p = findexistingpeer(
1459					&peeraddr, NULL, p, -1, 0, NULL);
1460			} while (p && !(FLAG_CONFIG & p->flags));
1461
1462			if (!loops && !p) {
1463				/* Item not found in dry run -- bail! */
1464				req_ack(srcadr, inter, inpkt,
1465					INFO_ERR_NODATA);
1466				return;
1467			} else if (loops && p) {
1468				/* Item found in busy run -- remove! */
1469				peer_clear(p, "GONE");
1470				unpeer(p);
1471			}
1472			datap += item_sz;
1473		}
1474	}
1475
1476	/* report success */
1477	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1478}
1479
1480
1481/*
1482 * set_sys_flag - set system flags
1483 */
1484static void
1485set_sys_flag(
1486	sockaddr_u *srcadr,
1487	endpt *inter,
1488	struct req_pkt *inpkt
1489	)
1490{
1491	setclr_flags(srcadr, inter, inpkt, 1);
1492}
1493
1494
1495/*
1496 * clr_sys_flag - clear system flags
1497 */
1498static void
1499clr_sys_flag(
1500	sockaddr_u *srcadr,
1501	endpt *inter,
1502	struct req_pkt *inpkt
1503	)
1504{
1505	setclr_flags(srcadr, inter, inpkt, 0);
1506}
1507
1508
1509/*
1510 * setclr_flags - do the grunge work of flag setting/clearing
1511 */
1512static void
1513setclr_flags(
1514	sockaddr_u *srcadr,
1515	endpt *inter,
1516	struct req_pkt *inpkt,
1517	u_long set
1518	)
1519{
1520	struct conf_sys_flags *sf;
1521	u_int32 flags;
1522
1523	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1524		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1525		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1526		return;
1527	}
1528
1529	sf = (struct conf_sys_flags *)&inpkt->u;
1530	flags = ntohl(sf->flags);
1531
1532	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1533		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1534		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1535		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1536			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1537				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1538				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1539				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1540		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1541		return;
1542	}
1543
1544	if (flags & SYS_FLAG_BCLIENT)
1545		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1546	if (flags & SYS_FLAG_PPS)
1547		proto_config(PROTO_PPS, set, 0., NULL);
1548	if (flags & SYS_FLAG_NTP)
1549		proto_config(PROTO_NTP, set, 0., NULL);
1550	if (flags & SYS_FLAG_KERNEL)
1551		proto_config(PROTO_KERNEL, set, 0., NULL);
1552	if (flags & SYS_FLAG_MONITOR)
1553		proto_config(PROTO_MONITOR, set, 0., NULL);
1554	if (flags & SYS_FLAG_FILEGEN)
1555		proto_config(PROTO_FILEGEN, set, 0., NULL);
1556	if (flags & SYS_FLAG_AUTH)
1557		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1558	if (flags & SYS_FLAG_CAL)
1559		proto_config(PROTO_CAL, set, 0., NULL);
1560	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1561}
1562
1563/* There have been some issues with the restrict list processing,
1564 * ranging from problems with deep recursion (resulting in stack
1565 * overflows) to overfull reply buffers.
1566 *
1567 * To avoid this trouble the list reversal is done iteratively using a
1568 * scratch pad.
1569 */
1570typedef struct RestrictStack RestrictStackT;
1571struct RestrictStack {
1572	RestrictStackT   *link;
1573	size_t            fcnt;
1574	const restrict_u *pres[63];
1575};
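
/*
 * Each sheet of the scratch pad holds up to 63 restriction pointers;
 * fcnt counts the free slots remaining in the current sheet.
 * pushRestriction() allocates a fresh sheet when the current one is
 * full, and popRestriction() frees a sheet once it has been drained,
 * so the pad grows and shrinks with the list being reversed.
 */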
1576
1577static size_t
1578getStackSheetSize(
1579	RestrictStackT *sp
1580	)
1581{
1582	if (sp)
1583		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1584	return 0u;
1585}
1586
1587static int/*BOOL*/
1588pushRestriction(
1589	RestrictStackT  **spp,
1590	const restrict_u *ptr
1591	)
1592{
1593	RestrictStackT *sp;
1594
1595	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1596		/* need another sheet in the scratch pad */
1597		sp = emalloc(sizeof(*sp));
1598		sp->link = *spp;
1599		sp->fcnt = getStackSheetSize(sp);
1600		*spp = sp;
1601	}
1602	sp->pres[--sp->fcnt] = ptr;
1603	return TRUE;
1604}
1605
1606static int/*BOOL*/
1607popRestriction(
1608	RestrictStackT   **spp,
1609	const restrict_u **opp
1610	)
1611{
1612	RestrictStackT *sp;
1613
1614	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1615		return FALSE;
1616
1617	*opp = sp->pres[sp->fcnt++];
1618	if (sp->fcnt >= getStackSheetSize(sp)) {
1619		/* discard sheet from scratch pad */
1620		*spp = sp->link;
1621		free(sp);
1622	}
1623	return TRUE;
1624}
1625
1626static void
1627flushRestrictionStack(
1628	RestrictStackT **spp
1629	)
1630{
1631	RestrictStackT *sp;
1632
1633	while (NULL != (sp = *spp)) {
1634		*spp = sp->link;
1635		free(sp);
1636	}
1637}
1638
1639/*
1640 * list_restrict4 - iterative helper for list_restrict dumps IPv4
1641 *		    restriction list in reverse order.
1642 */
1643static void
1644list_restrict4(
1645	const restrict_u *	res,
1646	struct info_restrict **	ppir
1647	)
1648{
1649	RestrictStackT *	rpad;
1650	struct info_restrict *	pir;
1651
1652	pir = *ppir;
1653	for (rpad = NULL; res; res = res->link)
1654		if (!pushRestriction(&rpad, res))
1655			break;
1656
1657	while (pir && popRestriction(&rpad, &res)) {
1658		pir->addr = htonl(res->u.v4.addr);
1659		if (client_v6_capable)
1660			pir->v6_flag = 0;
1661		pir->mask = htonl(res->u.v4.mask);
1662		pir->count = htonl(res->count);
1663		pir->rflags = htons(res->rflags);
1664		pir->mflags = htons(res->mflags);
1665		pir = (struct info_restrict *)more_pkt();
1666	}
1667	flushRestrictionStack(&rpad);
1668	*ppir = pir;
1669}
1670
1671/*
1672 * list_restrict6 - iterative helper for list_restrict dumps IPv6
1673 *		    restriction list in reverse order.
1674 */
1675static void
1676list_restrict6(
1677	const restrict_u *	res,
1678	struct info_restrict **	ppir
1679	)
1680{
1681	RestrictStackT *	rpad;
1682	struct info_restrict *	pir;
1683
1684	pir = *ppir;
1685	for (rpad = NULL; res; res = res->link)
1686		if (!pushRestriction(&rpad, res))
1687			break;
1688
1689	while (pir && popRestriction(&rpad, &res)) {
1690		pir->addr6 = res->u.v6.addr;
1691		pir->mask6 = res->u.v6.mask;
1692		pir->v6_flag = 1;
1693		pir->count = htonl(res->count);
1694		pir->rflags = htons(res->rflags);
1695		pir->mflags = htons(res->mflags);
1696		pir = (struct info_restrict *)more_pkt();
1697	}
1698	flushRestrictionStack(&rpad);
1699	*ppir = pir;
1700}
1701
1702
1703/*
1704 * list_restrict - return the restrict list
1705 */
1706static void
1707list_restrict(
1708	sockaddr_u *srcadr,
1709	endpt *inter,
1710	struct req_pkt *inpkt
1711	)
1712{
1713	struct info_restrict *ir;
1714
1715	DPRINTF(3, ("wants restrict list summary\n"));
1716
1717	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1718	    v6sizeof(struct info_restrict));
1719
1720	/*
1721	 * The restriction lists are kept in the reverse of the order in
1722	 * which they were defined.  To preserve the output semantics,
1723	 * dump each list in reverse order; the helpers above take care of that.
1724	 */
1725	list_restrict4(restrictlist4, &ir);
1726	if (client_v6_capable)
1727		list_restrict6(restrictlist6, &ir);
1728	flush_pkt();
1729}
1730
1731
1732/*
1733 * do_resaddflags - add flags to a restrict entry (or create one)
1734 */
1735static void
1736do_resaddflags(
1737	sockaddr_u *srcadr,
1738	endpt *inter,
1739	struct req_pkt *inpkt
1740	)
1741{
1742	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
1743}
1744
1745
1746
1747/*
1748 * do_ressubflags - remove flags from a restrict entry
1749 */
1750static void
1751do_ressubflags(
1752	sockaddr_u *srcadr,
1753	endpt *inter,
1754	struct req_pkt *inpkt
1755	)
1756{
1757	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
1758}
1759
1760
1761/*
1762 * do_unrestrict - remove a restrict entry from the list
1763 */
1764static void
1765do_unrestrict(
1766	sockaddr_u *srcadr,
1767	endpt *inter,
1768	struct req_pkt *inpkt
1769	)
1770{
1771	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
1772}
1773
1774
1775/*
1776 * do_restrict - do the dirty stuff of dealing with restrictions
1777 */
1778static void
1779do_restrict(
1780	sockaddr_u *srcadr,
1781	endpt *inter,
1782	struct req_pkt *inpkt,
1783	restrict_op op
1784	)
1785{
1786	char *			datap;
1787	struct conf_restrict	cr;
1788	u_short			items;
1789	size_t			item_sz;
1790	sockaddr_u		matchaddr;
1791	sockaddr_u		matchmask;
1792	int			bad;
1793
1794	switch(op) {
1795	    case RESTRICT_FLAGS:
1796	    case RESTRICT_UNFLAG:
1797	    case RESTRICT_REMOVE:
1798	    case RESTRICT_REMOVEIF:
1799	    	break;
1800
1801	    default:
1802		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1803		return;
1804	}
1805
1806	/*
1807	 * Do a check of the flags to make sure that only
1808	 * the NTPPORT flag is set, if any.  If not, complain
1809	 * about it.  Note we are very picky here.
1810	 */
1811	items = INFO_NITEMS(inpkt->err_nitems);
1812	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1813	datap = inpkt->u.data;
1814	if (item_sz > sizeof(cr)) {
1815		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1816		return;
1817	}
1818
1819	bad = 0;
1820	while (items-- > 0 && !bad) {
1821		memcpy(&cr, datap, item_sz);
1822		cr.flags = ntohs(cr.flags);	/* XXX */
1823		cr.mflags = ntohs(cr.mflags);
1824		if (~RESM_NTPONLY & cr.mflags)
1825			bad |= 1;
1826		if (~RES_ALLFLAGS & cr.flags)
1827			bad |= 2;
1828		if (INADDR_ANY != cr.mask) {
1829			if (client_v6_capable && cr.v6_flag) {
1830				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1831					bad |= 4;
1832			} else {
1833				if (INADDR_ANY == cr.addr)
1834					bad |= 8;
1835			}
1836		}
1837		datap += item_sz;
1838	}
1839
1840	if (bad) {
1841		msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
1842		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1843		return;
1844	}
1845
1846	/*
1847	 * Looks okay, try it out.  Needs to reload data pointer and
1848	 * item counter. (Talos-CAN-0052)
1849	 */
1850	ZERO_SOCK(&matchaddr);
1851	ZERO_SOCK(&matchmask);
1852	items = INFO_NITEMS(inpkt->err_nitems);
1853	datap = inpkt->u.data;
1854
1855	while (items-- > 0) {
1856		memcpy(&cr, datap, item_sz);
1857		cr.flags = ntohs(cr.flags);	/* XXX: size */
1858		cr.mflags = ntohs(cr.mflags);
1859		cr.ippeerlimit = ntohs(cr.ippeerlimit);
1860		if (client_v6_capable && cr.v6_flag) {
1861			AF(&matchaddr) = AF_INET6;
1862			AF(&matchmask) = AF_INET6;
1863			SOCK_ADDR6(&matchaddr) = cr.addr6;
1864			SOCK_ADDR6(&matchmask) = cr.mask6;
1865		} else {
1866			AF(&matchaddr) = AF_INET;
1867			AF(&matchmask) = AF_INET;
1868			NSRCADR(&matchaddr) = cr.addr;
1869			NSRCADR(&matchmask) = cr.mask;
1870		}
1871		hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
1872			      cr.ippeerlimit, cr.flags, 0);
1873		datap += item_sz;
1874	}
1875
1876	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1877}
1878
1879
1880/*
1881 * mon_getlist - return monitor data
1882 */
1883static void
1884mon_getlist(
1885	sockaddr_u *srcadr,
1886	endpt *inter,
1887	struct req_pkt *inpkt
1888	)
1889{
1890	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1891}
1892
1893
1894/*
1895 * Module entry points and the flags they correspond with
1896 */
1897struct reset_entry {
1898	int flag;		/* flag this corresponds to */
1899	void (*handler)(void);	/* routine to handle request */
1900};
1901
1902struct reset_entry reset_entries[] = {
1903	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
1904	{ RESET_FLAG_IO,	io_clr_stats },
1905	{ RESET_FLAG_SYS,	proto_clr_stats },
1906	{ RESET_FLAG_MEM,	peer_clr_stats },
1907	{ RESET_FLAG_TIMER,	timer_clr_stats },
1908	{ RESET_FLAG_AUTH,	reset_auth_stats },
1909	{ RESET_FLAG_CTL,	ctl_clr_stats },
1910	{ 0,			0 }
1911};
1912
1913/*
1914 * reset_stats - reset statistic counters here and there
1915 */
1916static void
1917reset_stats(
1918	sockaddr_u *srcadr,
1919	endpt *inter,
1920	struct req_pkt *inpkt
1921	)
1922{
1923	struct reset_flags *rflags;
1924	u_long flags;
1925	struct reset_entry *rent;
1926
1927	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1928		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1929		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1930		return;
1931	}
1932
1933	rflags = (struct reset_flags *)&inpkt->u;
1934	flags = ntohl(rflags->flags);
1935
1936	if (flags & ~RESET_ALLFLAGS) {
1937		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
1938			flags & ~RESET_ALLFLAGS);
1939		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1940		return;
1941	}
1942
1943	for (rent = reset_entries; rent->flag != 0; rent++) {
1944		if (flags & rent->flag)
1945			(*rent->handler)();
1946	}
1947	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1948}
1949
1950
1951/*
1952 * reset_peer - clear statistics for one or more peers
1953 */
1954static void
1955reset_peer(
1956	sockaddr_u *srcadr,
1957	endpt *inter,
1958	struct req_pkt *inpkt
1959	)
1960{
1961	u_short			items;
1962	size_t			item_sz;
1963	char *			datap;
1964	struct conf_unpeer	cp;
1965	struct peer *		p;
1966	sockaddr_u		peeraddr;
1967	int			bad;
1968
1969	/*
1970	 * First verify that every peer named in the request exists; if
1971	 * any does not, return an error without resetting anything.
1972	 */
1973
1974	items = INFO_NITEMS(inpkt->err_nitems);
1975	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1976	datap = inpkt->u.data;
1977	if (item_sz > sizeof(cp)) {
1978		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1979		return;
1980	}
1981
1982	bad = FALSE;
1983	while (items-- > 0 && !bad) {
1984		ZERO(cp);
1985		memcpy(&cp, datap, item_sz);
1986		ZERO_SOCK(&peeraddr);
1987		if (client_v6_capable && cp.v6_flag) {
1988			AF(&peeraddr) = AF_INET6;
1989			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
1990		} else {
1991			AF(&peeraddr) = AF_INET;
1992			NSRCADR(&peeraddr) = cp.peeraddr;
1993		}
1994
1995#ifdef ISC_PLATFORM_HAVESALEN
1996		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1997#endif
1998		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
1999		if (NULL == p)
2000			bad++;
2001		datap += item_sz;
2002	}
2003
2004	if (bad) {
2005		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2006		return;
2007	}
2008
2009	/*
2010	 * Now do it in earnest. Needs to reload data pointer and item
2011	 * counter. (Talos-CAN-0052)
2012	 */
2013
2014	items = INFO_NITEMS(inpkt->err_nitems);
2015	datap = inpkt->u.data;
2016	while (items-- > 0) {
2017		ZERO(cp);
2018		memcpy(&cp, datap, item_sz);
2019		ZERO_SOCK(&peeraddr);
2020		if (client_v6_capable && cp.v6_flag) {
2021			AF(&peeraddr) = AF_INET6;
2022			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2023		} else {
2024			AF(&peeraddr) = AF_INET;
2025			NSRCADR(&peeraddr) = cp.peeraddr;
2026		}
2027		SET_PORT(&peeraddr, NTP_PORT);
2028#ifdef ISC_PLATFORM_HAVESALEN
2029		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2030#endif
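		/* reset every association that matches this address */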
2031		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2032		while (p != NULL) {
2033			peer_reset(p);
2034			p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL);
2035		}
2036		datap += item_sz;
2037	}
2038
2039	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2040}
2041
2042
2043/*
2044 * do_key_reread - reread the encryption key file
2045 */
2046static void
2047do_key_reread(
2048	sockaddr_u *srcadr,
2049	endpt *inter,
2050	struct req_pkt *inpkt
2051	)
2052{
2053	rereadkeys();
2054	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2055}
2056
2057
2058/*
2059 * trust_key - make one or more keys trusted
2060 */
2061static void
2062trust_key(
2063	sockaddr_u *srcadr,
2064	endpt *inter,
2065	struct req_pkt *inpkt
2066	)
2067{
2068	do_trustkey(srcadr, inter, inpkt, 1);
2069}
2070
2071
2072/*
2073 * untrust_key - make one or more keys untrusted
2074 */
2075static void
2076untrust_key(
2077	sockaddr_u *srcadr,
2078	endpt *inter,
2079	struct req_pkt *inpkt
2080	)
2081{
2082	do_trustkey(srcadr, inter, inpkt, 0);
2083}
2084
2085
2086/*
2087 * do_trustkey - mark one or more keys as trusted or untrusted
2088 */
2089static void
2090do_trustkey(
2091	sockaddr_u *srcadr,
2092	endpt *inter,
2093	struct req_pkt *inpkt,
2094	u_long trust
2095	)
2096{
2097	register uint32_t *kp;
2098	register int items;
2099
2100	items = INFO_NITEMS(inpkt->err_nitems);
2101	kp = (uint32_t *)&inpkt->u;
2102	while (items-- > 0) {
2103		authtrust(*kp, trust);
2104		kp++;
2105	}
2106
2107	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2108}
2109
2110
2111/*
2112 * get_auth_info - return some stats concerning the authentication module
2113 */
2114static void
2115get_auth_info(
2116	sockaddr_u *srcadr,
2117	endpt *inter,
2118	struct req_pkt *inpkt
2119	)
2120{
2121	register struct info_auth *ia;
2122
2123	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2124					     sizeof(struct info_auth));
2125
2126	ia->numkeys = htonl((u_int32)authnumkeys);
2127	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2128	ia->keylookups = htonl((u_int32)authkeylookups);
2129	ia->keynotfound = htonl((u_int32)authkeynotfound);
2130	ia->encryptions = htonl((u_int32)authencryptions);
2131	ia->decryptions = htonl((u_int32)authdecryptions);
2132	ia->keyuncached = htonl((u_int32)authkeyuncached);
2133	ia->expired = htonl((u_int32)authkeyexpired);
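	/* seconds since the authentication counters were last reset */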
2134	ia->timereset = htonl((u_int32)(current_time - auth_timereset));
2135
2136	(void) more_pkt();
2137	flush_pkt();
2138}
2139
2140
2141
2142/*
2143 * reset_auth_stats - reset the authentication stat counters.  Done here
2144 *		      to keep ntp-isms out of the authentication module
2145 */
2146void
2147reset_auth_stats(void)
2148{
2149	authkeylookups = 0;
2150	authkeynotfound = 0;
2151	authencryptions = 0;
2152	authdecryptions = 0;
2153	authkeyuncached = 0;
2154	auth_timereset = current_time;
2155}
2156
2157
2158/*
2159 * req_get_traps - return information about current trap holders
2160 */
2161static void
2162req_get_traps(
2163	sockaddr_u *srcadr,
2164	endpt *inter,
2165	struct req_pkt *inpkt
2166	)
2167{
2168	struct info_trap *it;
2169	struct ctl_trap *tr;
2170	size_t i;
2171
2172	if (num_ctl_traps == 0) {
2173		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2174		return;
2175	}
2176
2177	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
2178	    v6sizeof(struct info_trap));
2179
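	/* walk the trap table, reporting every slot that is in use */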
2180	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
2181		if (tr->tr_flags & TRAP_INUSE) {
2182			if (IS_IPV4(&tr->tr_addr)) {
2183				if (tr->tr_localaddr == any_interface)
2184					it->local_address = 0;
2185				else
2186					it->local_address
2187					    = NSRCADR(&tr->tr_localaddr->sin);
2188				it->trap_address = NSRCADR(&tr->tr_addr);
2189				if (client_v6_capable)
2190					it->v6_flag = 0;
2191			} else {
2192				if (!client_v6_capable)
2193					continue;
2194				it->local_address6
2195				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
2196				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
2197				it->v6_flag = 1;
2198			}
2199			it->trap_port = NSRCPORT(&tr->tr_addr);
2200			it->sequence = htons(tr->tr_sequence);
2201			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
2202			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
2203			it->resets = htonl((u_int32)tr->tr_resets);
2204			it->flags = htonl((u_int32)tr->tr_flags);
2205			it = (struct info_trap *)more_pkt();
2206		}
2207	}
2208	flush_pkt();
2209}
2210
2211
2212/*
2213 * req_set_trap - configure a trap
2214 */
2215static void
2216req_set_trap(
2217	sockaddr_u *srcadr,
2218	endpt *inter,
2219	struct req_pkt *inpkt
2220	)
2221{
2222	do_setclr_trap(srcadr, inter, inpkt, 1);
2223}
2224
2225
2226
2227/*
2228 * req_clr_trap - unconfigure a trap
2229 */
2230static void
2231req_clr_trap(
2232	sockaddr_u *srcadr,
2233	endpt *inter,
2234	struct req_pkt *inpkt
2235	)
2236{
2237	do_setclr_trap(srcadr, inter, inpkt, 0);
2238}
2239
2240
2241
2242/*
2243 * do_setclr_trap - do the grunge work of (un)configuring a trap
2244 */
2245static void
2246do_setclr_trap(
2247	sockaddr_u *srcadr,
2248	endpt *inter,
2249	struct req_pkt *inpkt,
2250	int set
2251	)
2252{
2253	register struct conf_trap *ct;
2254	register endpt *linter;
2255	int res;
2256	sockaddr_u laddr;
2257
2258	/*
2259	 * Prepare sockaddr
2260	 */
2261	ZERO_SOCK(&laddr);
2262	AF(&laddr) = AF(srcadr);
2263	SET_PORT(&laddr, NTP_PORT);
2264
2265	/*
2266	 * Restrict ourselves to one item only.  With a single item
2267	 * there is no partial-success case to report back.
2268	 */
2269	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2270		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
2271		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2272		return;
2273	}
2274	ct = (struct conf_trap *)&inpkt->u;
2275
2276	/*
2277	 * Look for the local interface.  If none, use the default.
2278	 */
2279	if (ct->local_address == 0) {
2280		linter = any_interface;
2281	} else {
2282		if (IS_IPV4(&laddr))
2283			NSRCADR(&laddr) = ct->local_address;
2284		else
2285			SOCK_ADDR6(&laddr) = ct->local_address6;
2286		linter = findinterface(&laddr);
2287		if (NULL == linter) {
2288			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2289			return;
2290		}
2291	}
2292
2293	if (IS_IPV4(&laddr))
2294		NSRCADR(&laddr) = ct->trap_address;
2295	else
2296		SOCK_ADDR6(&laddr) = ct->trap_address6;
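	/* use the port given in the request, or the default trap port */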
2297	if (ct->trap_port)
2298		NSRCPORT(&laddr) = ct->trap_port;
2299	else
2300		SET_PORT(&laddr, TRAPPORT);
2301
2302	if (set) {
2303		res = ctlsettrap(&laddr, linter, 0,
2304				 INFO_VERSION(inpkt->rm_vn_mode));
2305	} else {
2306		res = ctlclrtrap(&laddr, linter, 0);
2307	}
2308
2309	if (!res) {
2310		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2311	} else {
2312		req_ack(srcadr, inter, inpkt, INFO_OKAY);
2313	}
2314	return;
2315}
2316
2317/*
2318 * Validate a request packet for a new request or control key:
2319 *  - only one item allowed
2320 *  - key must be valid (that is, known, and not in the autokey range)
2321 */
2322static void
2323set_keyid_checked(
2324	keyid_t        *into,
2325	const char     *what,
2326	sockaddr_u     *srcadr,
2327	endpt          *inter,
2328	struct req_pkt *inpkt
2329	)
2330{
2331	keyid_t *pkeyid;
2332	keyid_t  tmpkey;
2333
2334	/* restrict ourselves to one item only */
2335	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2336		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2337			what);
2338		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2339		return;
2340	}
2341
2342	/* pull the new key id from the packet */
2343	pkeyid = (keyid_t *)&inpkt->u;
2344	tmpkey = ntohl(*pkeyid);
2345
2346	/* validate the new key id, claim data error on failure */
2347	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
2348		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2349			what, (long)tmpkey);
2350		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2351		return;
2352	}
2353
2354	/* if we arrive here, the key is good -- use it */
2355	*into = tmpkey;
2356	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2357}
2358
2359/*
2360 * set_request_keyid - set the keyid used to authenticate requests
2361 */
2362static void
2363set_request_keyid(
2364	sockaddr_u *srcadr,
2365	endpt *inter,
2366	struct req_pkt *inpkt
2367	)
2368{
2369	set_keyid_checked(&info_auth_keyid, "request",
2370			  srcadr, inter, inpkt);
2371}
2372
2373
2374
2375/*
2376 * set_control_keyid - set the keyid used to authenticate control messages
2377 */
2378static void
2379set_control_keyid(
2380	sockaddr_u *srcadr,
2381	endpt *inter,
2382	struct req_pkt *inpkt
2383	)
2384{
2385	set_keyid_checked(&ctl_auth_keyid, "control",
2386			  srcadr, inter, inpkt);
2387}
2388
2389
2390
2391/*
2392 * get_ctl_stats - return some stats concerning the control message module
2393 */
2394static void
2395get_ctl_stats(
2396	sockaddr_u *srcadr,
2397	endpt *inter,
2398	struct req_pkt *inpkt
2399	)
2400{
2401	register struct info_control *ic;
2402
2403	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2404						sizeof(struct info_control));
2405
2406	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2407	ic->numctlreq = htonl((u_int32)numctlreq);
2408	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2409	ic->numctlresponses = htonl((u_int32)numctlresponses);
2410	ic->numctlfrags = htonl((u_int32)numctlfrags);
2411	ic->numctlerrors = htonl((u_int32)numctlerrors);
2412	ic->numctltooshort = htonl((u_int32)numctltooshort);
2413	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2414	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2415	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2416	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2417	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2418	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2419	ic->numctlbadop = htonl((u_int32)numctlbadop);
2420	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
2421
2422	(void) more_pkt();
2423	flush_pkt();
2424}
2425
2426
2427#ifdef KERNEL_PLL
2428/*
2429 * get_kernel_info - get kernel pll/pps information
2430 */
2431static void
2432get_kernel_info(
2433	sockaddr_u *srcadr,
2434	endpt *inter,
2435	struct req_pkt *inpkt
2436	)
2437{
2438	register struct info_kernel *ik;
2439	struct timex ntx;
2440
2441	if (!pll_control) {
2442		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2443		return;
2444	}
2445
2446	ZERO(ntx);
2447	if (ntp_adjtime(&ntx) < 0)
2448		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
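	/* the reply is built from ntx even if ntp_adjtime() failed above */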
2449	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
2450	    sizeof(struct info_kernel));
2451
2452	/*
2453	 * pll variables
2454	 */
2455	ik->offset = htonl((u_int32)ntx.offset);
2456	ik->freq = htonl((u_int32)ntx.freq);
2457	ik->maxerror = htonl((u_int32)ntx.maxerror);
2458	ik->esterror = htonl((u_int32)ntx.esterror);
2459	ik->status = htons(ntx.status);
2460	ik->constant = htonl((u_int32)ntx.constant);
2461	ik->precision = htonl((u_int32)ntx.precision);
2462	ik->tolerance = htonl((u_int32)ntx.tolerance);
2463
2464	/*
2465	 * pps variables
2466	 */
2467	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
2468	ik->jitter = htonl((u_int32)ntx.jitter);
2469	ik->shift = htons(ntx.shift);
2470	ik->stabil = htonl((u_int32)ntx.stabil);
2471	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
2472	ik->calcnt = htonl((u_int32)ntx.calcnt);
2473	ik->errcnt = htonl((u_int32)ntx.errcnt);
2474	ik->stbcnt = htonl((u_int32)ntx.stbcnt);
2475
2476	(void) more_pkt();
2477	flush_pkt();
2478}
2479#endif /* KERNEL_PLL */
2480
2481
2482#ifdef REFCLOCK
2483/*
2484 * get_clock_info - get info about a clock
2485 */
2486static void
2487get_clock_info(
2488	sockaddr_u *srcadr,
2489	endpt *inter,
2490	struct req_pkt *inpkt
2491	)
2492{
2493	register struct info_clock *ic;
2494	register u_int32 *clkaddr;
2495	register int items;
2496	struct refclockstat clock_stat;
2497	sockaddr_u addr;
2498	l_fp ltmp;
2499
2500	ZERO_SOCK(&addr);
2501	AF(&addr) = AF_INET;
2502#ifdef ISC_PLATFORM_HAVESALEN
2503	addr.sa.sa_len = SOCKLEN(&addr);
2504#endif
2505	SET_PORT(&addr, NTP_PORT);
2506	items = INFO_NITEMS(inpkt->err_nitems);
2507	clkaddr = &inpkt->u.u32[0];
2508
2509	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
2510					      sizeof(struct info_clock));
2511
2512	while (items-- > 0 && ic) {
2513		NSRCADR(&addr) = *clkaddr++;
2514		if (!ISREFCLOCKADR(&addr) || NULL ==
2515		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2516			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2517			return;
2518		}
2519
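		/*
		 * refclock_control() fills clock_stat, including a kv_list
		 * that is freed below once the reply entry has been built.
		 */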
2520		clock_stat.kv_list = (struct ctl_var *)0;
2521
2522		refclock_control(&addr, NULL, &clock_stat);
2523
2524		ic->clockadr = NSRCADR(&addr);
2525		ic->type = clock_stat.type;
2526		ic->flags = clock_stat.flags;
2527		ic->lastevent = clock_stat.lastevent;
2528		ic->currentstatus = clock_stat.currentstatus;
2529		ic->polls = htonl((u_int32)clock_stat.polls);
2530		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
2531		ic->badformat = htonl((u_int32)clock_stat.badformat);
2532		ic->baddata = htonl((u_int32)clock_stat.baddata);
2533		ic->timestarted = htonl((u_int32)clock_stat.timereset);
2534		DTOLFP(clock_stat.fudgetime1, &ltmp);
2535		HTONL_FP(&ltmp, &ic->fudgetime1);
2536		DTOLFP(clock_stat.fudgetime2, &ltmp);
2537		HTONL_FP(&ltmp, &ic->fudgetime2);
2538		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
2539		/* [Bug3527] Backward Incompatible: ic->fudgeval2 is
2540		 * a string, instantiated via memcpy() so there is no
2541		 * endian issue to correct.
2542		 */
2543#ifdef DISABLE_BUG3527_FIX
2544		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
2545#else
2546		ic->fudgeval2 = clock_stat.fudgeval2;
2547#endif
2548
2549		free_varlist(clock_stat.kv_list);
2550
2551		ic = (struct info_clock *)more_pkt();
2552	}
2553	flush_pkt();
2554}
2555
2556
2557
2558/*
2559 * set_clock_fudge - set a clock's fudge factors
2560 */
2561static void
2562set_clock_fudge(
2563	sockaddr_u *srcadr,
2564	endpt *inter,
2565	struct req_pkt *inpkt
2566	)
2567{
2568	register struct conf_fudge *cf;
2569	register int items;
2570	struct refclockstat clock_stat;
2571	sockaddr_u addr;
2572	l_fp ltmp;
2573
2574	ZERO(addr);
2575	ZERO(clock_stat);
2576	items = INFO_NITEMS(inpkt->err_nitems);
2577	cf = (struct conf_fudge *)&inpkt->u;
2578
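	/* note: cf is never advanced, so each iteration reads the first item */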
2579	while (items-- > 0) {
2580		AF(&addr) = AF_INET;
2581		NSRCADR(&addr) = cf->clockadr;
2582#ifdef ISC_PLATFORM_HAVESALEN
2583		addr.sa.sa_len = SOCKLEN(&addr);
2584#endif
2585		SET_PORT(&addr, NTP_PORT);
2586		if (!ISREFCLOCKADR(&addr) || NULL ==
2587		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2588			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2589			return;
2590		}
2591
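		/* each item updates exactly one fudge field, chosen by 'which' */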
2592		switch(ntohl(cf->which)) {
2593		    case FUDGE_TIME1:
2594			NTOHL_FP(&cf->fudgetime, &ltmp);
2595			LFPTOD(&ltmp, clock_stat.fudgetime1);
2596			clock_stat.haveflags = CLK_HAVETIME1;
2597			break;
2598		    case FUDGE_TIME2:
2599			NTOHL_FP(&cf->fudgetime, &ltmp);
2600			LFPTOD(&ltmp, clock_stat.fudgetime2);
2601			clock_stat.haveflags = CLK_HAVETIME2;
2602			break;
2603		    case FUDGE_VAL1:
2604			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
2605			clock_stat.haveflags = CLK_HAVEVAL1;
2606			break;
2607		    case FUDGE_VAL2:
2608			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
2609			clock_stat.haveflags = CLK_HAVEVAL2;
2610			break;
2611		    case FUDGE_FLAGS:
2612			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
2613			clock_stat.haveflags =
2614				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
2615			break;
2616		    default:
2617			msyslog(LOG_ERR, "set_clock_fudge: default!");
2618			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2619			return;
2620		}
2621
2622		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
2623	}
2624
2625	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2626}
2627#endif
2628
2629#ifdef REFCLOCK
2630/*
2631 * get_clkbug_info - get debugging info about a clock
2632 */
2633static void
2634get_clkbug_info(
2635	sockaddr_u *srcadr,
2636	endpt *inter,
2637	struct req_pkt *inpkt
2638	)
2639{
2640	register int i;
2641	register struct info_clkbug *ic;
2642	register u_int32 *clkaddr;
2643	register int items;
2644	struct refclockbug bug;
2645	sockaddr_u addr;
2646
2647	ZERO_SOCK(&addr);
2648	AF(&addr) = AF_INET;
2649#ifdef ISC_PLATFORM_HAVESALEN
2650	addr.sa.sa_len = SOCKLEN(&addr);
2651#endif
2652	SET_PORT(&addr, NTP_PORT);
2653	items = INFO_NITEMS(inpkt->err_nitems);
2654	clkaddr = (u_int32 *)&inpkt->u;
2655
2656	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
2657					       sizeof(struct info_clkbug));
2658
2659	while (items-- > 0 && ic) {
2660		NSRCADR(&addr) = *clkaddr++;
2661		if (!ISREFCLOCKADR(&addr) || NULL ==
2662		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2663			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2664			return;
2665		}
2666
2667		ZERO(bug);
2668		refclock_buginfo(&addr, &bug);
2669		if (bug.nvalues == 0 && bug.ntimes == 0) {
2670			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2671			return;
2672		}
2673
2674		ic->clockadr = NSRCADR(&addr);
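		/* clamp the value and time counts to what the reply entry can hold */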
2675		i = bug.nvalues;
2676		if (i > NUMCBUGVALUES)
2677		    i = NUMCBUGVALUES;
2678		ic->nvalues = (u_char)i;
2679		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
2680		while (--i >= 0)
2681		    ic->values[i] = htonl(bug.values[i]);
2682
2683		i = bug.ntimes;
2684		if (i > NUMCBUGTIMES)
2685		    i = NUMCBUGTIMES;
2686		ic->ntimes = (u_char)i;
2687		ic->stimes = htonl(bug.stimes);
2688		while (--i >= 0) {
2689			HTONL_FP(&bug.times[i], &ic->times[i]);
2690		}
2691
2692		ic = (struct info_clkbug *)more_pkt();
2693	}
2694	flush_pkt();
2695}
2696#endif
2697
2698/*
2699 * fill_info_if_stats - per-interface callback that fills one info_if_stats entry
2700 */
2701static void
2702fill_info_if_stats(void *data, interface_info_t *interface_info)
2703{
2704	struct info_if_stats **ifsp = (struct info_if_stats **)data;
2705	struct info_if_stats *ifs = *ifsp;
2706	endpt *ep = interface_info->ep;
2707
2708	if (NULL == ifs)
2709		return;
2710
2711	ZERO(*ifs);
2712
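	/* skip IPv6 endpoints entirely if the client cannot handle them */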
2713	if (IS_IPV6(&ep->sin)) {
2714		if (!client_v6_capable)
2715			return;
2716		ifs->v6_flag = 1;
2717		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
2718		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
2719		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
2720	} else {
2721		ifs->v6_flag = 0;
2722		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
2723		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
2724		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
2725	}
2726	ifs->v6_flag = htonl(ifs->v6_flag);
2727	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
2728	ifs->family = htons(ep->family);
2729	ifs->flags = htonl(ep->flags);
2730	ifs->last_ttl = htonl(ep->last_ttl);
2731	ifs->num_mcast = htonl(ep->num_mcast);
2732	ifs->received = htonl(ep->received);
2733	ifs->sent = htonl(ep->sent);
2734	ifs->notsent = htonl(ep->notsent);
2735	ifs->ifindex = htonl(ep->ifindex);
2736	/* scope is no longer kept in endpt (typically in the in6_addr); use ifindex */
2737	ifs->scopeid = ifs->ifindex;
2738	ifs->ifnum = htonl(ep->ifnum);
2739	ifs->uptime = htonl(current_time - ep->starttime);
2740	ifs->ignore_packets = ep->ignore_packets;
2741	ifs->peercnt = htonl(ep->peercnt);
2742	ifs->action = interface_info->action;
2743
2744	*ifsp = (struct info_if_stats *)more_pkt();
2745}
2746
2747/*
2748 * get_if_stats - get interface statistics
2749 */
2750static void
2751get_if_stats(
2752	sockaddr_u *srcadr,
2753	endpt *inter,
2754	struct req_pkt *inpkt
2755	)
2756{
2757	struct info_if_stats *ifs;
2758
2759	DPRINTF(3, ("wants interface statistics\n"));
2760
2761	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2762	    v6sizeof(struct info_if_stats));
2763
2764	interface_enumerate(fill_info_if_stats, &ifs);
2765
2766	flush_pkt();
2767}
2768
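/*
 * do_if_reload - rescan the interfaces and return the updated statistics
 */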
2769static void
2770do_if_reload(
2771	sockaddr_u *srcadr,
2772	endpt *inter,
2773	struct req_pkt *inpkt
2774	)
2775{
2776	struct info_if_stats *ifs;
2777
2778	DPRINTF(3, ("wants interface reload\n"));
2779
2780	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2781	    v6sizeof(struct info_if_stats));
2782
2783	interface_update(fill_info_if_stats, &ifs);
2784
2785	flush_pkt();
2786}
2787
2788