ntp_request.c revision 330567
1/*
2 * ntp_request.c - respond to information requests
3 */
4
5#ifdef HAVE_CONFIG_H
6# include <config.h>
7#endif
8
9#include "ntpd.h"
10#include "ntp_io.h"
11#include "ntp_request.h"
12#include "ntp_control.h"
13#include "ntp_refclock.h"
14#include "ntp_if.h"
15#include "ntp_stdlib.h"
16#include "ntp_assert.h"
17
18#include <stdio.h>
19#include <stddef.h>
20#include <signal.h>
21#ifdef HAVE_NETINET_IN_H
22#include <netinet/in.h>
23#endif
24#include <arpa/inet.h>
25
26#include "recvbuff.h"
27
28#ifdef KERNEL_PLL
29#include "ntp_syscall.h"
30#endif /* KERNEL_PLL */
31
32/*
33 * Structure to hold request procedure information
34 */
35#define	NOAUTH	0
36#define	AUTH	1
37
38#define	NO_REQUEST	(-1)
39/*
40 * Because we now have v6 addresses in the messages, we need to compensate
41 * for the larger size.  Therefore, we introduce the alternate size to
42 * keep us friendly with older implementations.  A little ugly.
43 */
44static int client_v6_capable = 0;   /* the client can handle longer messages */
45
46#define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
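/*
 * Editor's note: the reply structures keep their IPv6 members (the
 * 128-bit address and the v6_flag) at the end, so v4sizeof() -- defined
 * in ntp_request.h -- yields the shorter layout an IPv6-unaware client
 * expects.  Which of the two sizes is used follows client_v6_capable,
 * which is set per request in process_private() below.
 */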
47
48struct req_proc {
49	short request_code;	/* defined request code */
50	short needs_auth;	/* true when authentication needed */
51	short sizeofitem;	/* size of request data item (older size)*/
52	short v6_sizeofitem;	/* size of request data item (new size)*/
53	void (*handler) (sockaddr_u *, endpt *,
54			   struct req_pkt *);	/* routine to handle request */
55};
56
57/*
58 * Universal request codes
59 */
60static const struct req_proc univ_codes[] = {
61	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
62};
63
64static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
65static	void *	prepare_pkt	(sockaddr_u *, endpt *,
66				 struct req_pkt *, size_t);
67static	void *	more_pkt	(void);
68static	void	flush_pkt	(void);
69static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
70static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
71static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
72static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
73static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
74static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
76static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
79static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
80static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
81static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
82static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
83static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
84static	void	list_restrict4	(const restrict_u *, struct info_restrict **);
85static	void	list_restrict6	(const restrict_u *, struct info_restrict **);
86static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
87static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
88static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
89static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
90static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, restrict_op);
91static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
92static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
93static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
94static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
95static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
96static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
97static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
98static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
99static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
100static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
101static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
102static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
103static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
104static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
105static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
106static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
107static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
108#ifdef KERNEL_PLL
109static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
110#endif /* KERNEL_PLL */
111#ifdef REFCLOCK
112static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
113static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
114#endif	/* REFCLOCK */
115#ifdef REFCLOCK
116static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
117#endif	/* REFCLOCK */
118
119/*
120 * ntpd request codes
121 */
122static const struct req_proc ntp_codes[] = {
123	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
124	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
125	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
126				sizeof(struct info_peer_list), peer_info},
127	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
128				sizeof(struct info_peer_list), peer_stats},
129	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
130	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
131	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
132	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
133	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
134	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
135	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
136				sizeof(struct conf_peer), do_conf },
137	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
138				sizeof(struct conf_unpeer), do_unconf },
139	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
140				sizeof(struct conf_sys_flags), set_sys_flag },
141	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142				sizeof(struct conf_sys_flags),  clr_sys_flag },
143	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
144	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
145				sizeof(struct conf_restrict), do_resaddflags },
146	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
147				sizeof(struct conf_restrict), do_ressubflags },
148	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
149				sizeof(struct conf_restrict), do_unrestrict },
150	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
151	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
152	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
153	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
154				sizeof(struct conf_unpeer), reset_peer },
155	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
156	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
157	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
158	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
159	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
160	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
161				sizeof(struct conf_trap), req_set_trap },
162	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
163				sizeof(struct conf_trap), req_clr_trap },
164	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
165				set_request_keyid },
166	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167				set_control_keyid },
168	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
169#ifdef KERNEL_PLL
170	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
171#endif
172#ifdef REFCLOCK
173	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
174				get_clock_info },
175	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
176				sizeof(struct conf_fudge), set_clock_fudge },
177	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
178				get_clkbug_info },
179#endif
180	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
181	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
182
183	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
184};
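/*
 * Editor's illustration (hypothetical entry, not part of the original
 * table): a new query would add a REQ_ code in ntp_request.h, a handler
 * with the signature used above, and a line before the NO_REQUEST
 * terminator such as
 *
 *	{ REQ_EXAMPLE_STATS,	NOAUTH,	0, 0,	example_stats },
 *
 * where the two sizes are the per-item request payload in the old
 * (IPv4-only) and new (IPv6-capable) layouts, and AUTH marks requests
 * that must carry a valid MAC before they are acted on.
 */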
185
186
187/*
188 * Authentication keyid used to authenticate requests.  Zero means we
189 * don't allow writing anything.
190 */
191keyid_t info_auth_keyid;
192
193/*
194 * Statistic counters to keep track of requests and responses.
195 */
196u_long numrequests;		/* number of requests we've received */
197u_long numresppkts;		/* number of resp packets sent with data */
198
199/*
200 * lazy way to count errors, indexed by the error code
201 */
202u_long errorcounter[MAX_INFO_ERR + 1];
203
204/*
205 * A hack.  To keep the authentication module clear of ntp-isms, we
206 * include a time reset variable for its stats here.
207 */
208u_long auth_timereset;
209
210/*
211 * Response packet used by these routines.  Also some state information
212 * so that we can handle packet formatting within a common set of
213 * subroutines.  Note we try to assemble data in place whenever possible,
214 * but the need to set the more bit correctly means we occasionally
215 * use the extra buffer and copy.
216 */
217static struct resp_pkt rpkt;
218static int reqver;
219static int seqno;
220static int nitems;
221static int itemsize;
222static int databytes;
223static char exbuf[RESP_DATA_SIZE];
224static int usingexbuf;
225static sockaddr_u *toaddr;
226static endpt *frominter;
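/*
 * Editor's note: the variables above form a small output state machine
 * shared by every handler below.  prepare_pkt() latches the destination
 * and item size and returns the start of rpkt's data area; more_pkt()
 * is called once per item and transparently sends a full packet, using
 * exbuf to stage the item that did not fit; flush_pkt() sends whatever
 * remains, or an INFO_ERR_NODATA ack if no items were generated at all.
 */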
227
228/*
229 * init_request - initialize request data
230 */
231void
232init_request (void)
233{
234	size_t i;
235
236	numrequests = 0;
237	numresppkts = 0;
238	auth_timereset = 0;
239	info_auth_keyid = 0;	/* by default, can't do this */
240
241	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
242	    errorcounter[i] = 0;
243}
244
245
246/*
247 * req_ack - acknowledge request with no data
248 */
249static void
250req_ack(
251	sockaddr_u *srcadr,
252	endpt *inter,
253	struct req_pkt *inpkt,
254	int errcode
255	)
256{
257	/*
258	 * fill in the fields
259	 */
260	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
261	rpkt.auth_seq = AUTH_SEQ(0, 0);
262	rpkt.implementation = inpkt->implementation;
263	rpkt.request = inpkt->request;
264	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
265	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
266
267	/*
268	 * send packet and bump counters
269	 */
270	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
271	errorcounter[errcode]++;
272}
273
274
275/*
276 * prepare_pkt - prepare response packet for transmission, return pointer
277 *		 to storage for data item.
278 */
279static void *
280prepare_pkt(
281	sockaddr_u *srcadr,
282	endpt *inter,
283	struct req_pkt *pkt,
284	size_t structsize
285	)
286{
287	DPRINTF(4, ("request: preparing pkt\n"));
288
289	/*
290	 * Fill in the implementation, request and itemsize fields
291	 * since these won't change.
292	 */
293	rpkt.implementation = pkt->implementation;
294	rpkt.request = pkt->request;
295	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
296
297	/*
298	 * Compute the static data needed to carry on.
299	 */
300	toaddr = srcadr;
301	frominter = inter;
302	seqno = 0;
303	nitems = 0;
304	itemsize = structsize;
305	databytes = 0;
306	usingexbuf = 0;
307
308	/*
309	 * return the beginning of the packet buffer.
310	 */
311	return &rpkt.u;
312}
313
314
315/*
316 * more_pkt - return a data pointer for a new item.
317 */
318static void *
319more_pkt(void)
320{
321	/*
322	 * If we were using the extra buffer, send the packet.
323	 */
324	if (usingexbuf) {
325		DPRINTF(3, ("request: sending pkt\n"));
326		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
327		rpkt.auth_seq = AUTH_SEQ(0, seqno);
328		rpkt.err_nitems = htons((u_short)nitems);
329		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
330			RESP_HEADER_SIZE + databytes);
331		numresppkts++;
332
333		/*
334		 * Copy data out of exbuf into the packet.
335		 */
336		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
337		seqno++;
338		databytes = 0;
339		nitems = 0;
340		usingexbuf = 0;
341	}
342
343	databytes += itemsize;
344	nitems++;
345	if (databytes + itemsize <= RESP_DATA_SIZE) {
346		DPRINTF(4, ("request: giving him more data\n"));
347		/*
348		 * More room in packet.  Give him the
349		 * next address.
350		 */
351		return &rpkt.u.data[databytes];
352	} else {
353		/*
354		 * No room in packet.  Give him the extra
355		 * buffer unless this was the last in the sequence.
356		 */
357		DPRINTF(4, ("request: into extra buffer\n"));
358		if (seqno == MAXSEQ)
359			return NULL;
360		else {
361			usingexbuf = 1;
362			return exbuf;
363		}
364	}
365}
366
367
368/*
369 * flush_pkt - we're done, return remaining information.
370 */
371static void
372flush_pkt(void)
373{
374	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
375	/*
376	 * Must send the last packet.  If nothing in here and nothing
377	 * has been sent, send an error saying no data to be found.
378	 */
379	if (seqno == 0 && nitems == 0)
380		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
381			INFO_ERR_NODATA);
382	else {
383		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
384		rpkt.auth_seq = AUTH_SEQ(0, seqno);
385		rpkt.err_nitems = htons((u_short)nitems);
386		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
387			RESP_HEADER_SIZE+databytes);
388		numresppkts++;
389	}
390}
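/*
 * Editor's sketch (hypothetical, excluded from the build): the handlers
 * below all follow the same pattern around the three helpers above;
 * "info_example" and example_next_item() stand in for a real reply
 * structure and data source.
 */
#if 0
static void
example_handler(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_example *ie;

	ie = (struct info_example *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_example));
	while (ie != NULL && example_next_item(ie))
		ie = (struct info_example *)more_pkt();
	flush_pkt();
}
#endif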
391
392
393
394/*
395 * Given a buffer, return the packet mode
396 */
397int
398get_packet_mode(struct recvbuf *rbufp)
399{
400	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
401	return (INFO_MODE(inpkt->rm_vn_mode));
402}
403
404
405/*
406 * process_private - process private mode (7) packets
407 */
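/*
 * Editor's note: as the code below reads, a mode 7 request is the
 * req_pkt header (rm_vn_mode, auth_seq, implementation, request,
 * err_nitems, mbz_itemsize) followed by up to INFO_NITEMS data items of
 * INFO_ITEMSIZE octets each; an authenticated request additionally
 * carries a timestamp, key ID and MAC at the tail (struct
 * req_pkt_tail).  Replies reuse the same framing with the response and
 * more bits set as needed.
 */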
408void
409process_private(
410	struct recvbuf *rbufp,
411	int mod_okay
412	)
413{
414	static u_long quiet_until;
415	struct req_pkt *inpkt;
416	struct req_pkt_tail *tailinpkt;
417	sockaddr_u *srcadr;
418	endpt *inter;
419	const struct req_proc *proc;
420	int ec;
421	short temp_size;
422	l_fp ftmp;
423	double dtemp;
424	size_t recv_len;
425	size_t noslop_len;
426	size_t mac_len;
427
428	/*
429	 * Initialize pointers, for convenience
430	 */
431	recv_len = rbufp->recv_length;
432	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
433	srcadr = &rbufp->recv_srcadr;
434	inter = rbufp->dstadr;
435
436	DPRINTF(3, ("process_private: impl %d req %d\n",
437		    inpkt->implementation, inpkt->request));
438
439	/*
440	 * Do some sanity checks on the packet and return a format error if
441	 * any fail; ec is bumped before each test so the log below says which.
442	 */
443	ec = 0;
444	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
445	    || (++ec, ISMORE(inpkt->rm_vn_mode))
446	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
447	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
448	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
449	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
450	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
451	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
452		) {
453		NLOG(NLOG_SYSEVENT)
454			if (current_time >= quiet_until) {
455				msyslog(LOG_ERR,
456					"process_private: drop test %d"
457					" failed, pkt from %s",
458					ec, stoa(srcadr));
459				quiet_until = current_time + 60;
460			}
461		return;
462	}
463
464	reqver = INFO_VERSION(inpkt->rm_vn_mode);
465
466	/*
467	 * Get the appropriate procedure list to search.
468	 */
469	if (inpkt->implementation == IMPL_UNIV)
470		proc = univ_codes;
471	else if ((inpkt->implementation == IMPL_XNTPD) ||
472		 (inpkt->implementation == IMPL_XNTPD_OLD))
473		proc = ntp_codes;
474	else {
475		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
476		return;
477	}
478
479	/*
480	 * Search the list for the request codes.  If it isn't one
481	 * we know, return an error.
482	 */
483	while (proc->request_code != NO_REQUEST) {
484		if (proc->request_code == (short) inpkt->request)
485			break;
486		proc++;
487	}
488	if (proc->request_code == NO_REQUEST) {
489		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
490		return;
491	}
492
493	DPRINTF(4, ("found request in tables\n"));
494
495	/*
496	 * If we need data, check to see if we have some.  If we
497	 * don't, check to see that there is none (picky, picky).
498	 */
499
500	/* This part is a bit tricky: the item size in the request must be
501	 * either the old (IPv4-only) or the new (IPv6-capable) size, and it
502	 * also tells us whether the client can accept the longer replies.
503	 *
504	 * Handle the exception of REQ_CONFIG. It can have two data sizes.
505	 */
506	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
507	if ((temp_size != proc->sizeofitem &&
508	     temp_size != proc->v6_sizeofitem) &&
509	    !(inpkt->implementation == IMPL_XNTPD &&
510	      inpkt->request == REQ_CONFIG &&
511	      temp_size == sizeof(struct old_conf_peer))) {
512		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
513			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
514		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
515		return;
516	}
517	if ((proc->sizeofitem != 0) &&
518	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
519	     (recv_len - REQ_LEN_HDR))) {
520		DPRINTF(3, ("process_private: not enough data\n"));
521		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
522		return;
523	}
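	/*
	 * Editor's illustration: a REQ_PEER_INFO request carrying two
	 * info_peer_list items, for example, must be at least REQ_LEN_HDR
	 * plus twice the announced item size long, or it is rejected
	 * above with INFO_ERR_FMT.
	 */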
524
525	switch (inpkt->implementation) {
526	case IMPL_XNTPD:
527		client_v6_capable = 1;
528		break;
529	case IMPL_XNTPD_OLD:
530		client_v6_capable = 0;
531		break;
532	default:
533		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
534		return;
535	}
536
537	/*
538	 * If we need to authenticate, do so.  Note that an
539	 * authenticatable packet must include a mac field, must
540	 * have used key info_auth_keyid and must have included
541	 * a time stamp in the appropriate field.  The time stamp
542	 * must be within INFO_TS_MAXSKEW of the receive
543	 * time stamp.
544	 */
545	if (proc->needs_auth && sys_authenticate) {
546
547		if (recv_len < (REQ_LEN_HDR +
548		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
549		    INFO_NITEMS(inpkt->err_nitems)) +
550		    REQ_TAIL_MIN)) {
551			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
552			return;
553		}
554
555		/*
556		 * For 16-octet digests, regardless of itemsize and
557		 * nitems, authenticated requests are a fixed size
558		 * with the timestamp, key ID, and digest located
559		 * at the end of the packet.  Because the key ID
560		 * determining the digest size precedes the digest,
561		 * for larger digests the fixed size request scheme
562		 * is abandoned and the timestamp, key ID, and digest
563		 * are located relative to the start of the packet,
564		 * with the digest size determined by the packet size.
565		 */
566		noslop_len = REQ_LEN_HDR
567			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
568			       INFO_NITEMS(inpkt->err_nitems)
569			     + sizeof(inpkt->tstamp);
570		/* 32-bit alignment */
571		noslop_len = (noslop_len + 3) & ~3;
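		/*
		 * Editor's note: a 4-octet key ID plus a 16-octet MD5 digest
		 * gives the 20-octet MAC assumed below for the fixed-size
		 * legacy layout; otherwise the MAC length is simply whatever
		 * follows the padded data and timestamp.
		 */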
572		if (recv_len > (noslop_len + MAX_MAC_LEN))
573			mac_len = 20;
574		else
575			mac_len = recv_len - noslop_len;
576
577		tailinpkt = (void *)((char *)inpkt + recv_len -
578			    (mac_len + sizeof(inpkt->tstamp)));
579
580		/*
581		 * If this guy is restricted from doing this, don't let
582		 * him.  If the wrong key was used, or the packet doesn't
583		 * carry a MAC, return.
584		 */
585		/* XXX: Use authistrustedip(), or equivalent. */
586		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
587		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
588			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
589				    INFO_IS_AUTH(inpkt->auth_seq),
590				    info_auth_keyid,
591				    ntohl(tailinpkt->keyid), (u_long)mac_len));
592#ifdef DEBUG
593			msyslog(LOG_DEBUG,
594				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
595				INFO_IS_AUTH(inpkt->auth_seq),
596				info_auth_keyid,
597				ntohl(tailinpkt->keyid), (u_long)mac_len);
598#endif
599			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
600			return;
601		}
602		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
603			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
604			msyslog(LOG_ERR,
605				"process_private: bad pkt length %zu",
606				recv_len);
607			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
608			return;
609		}
610		if (!mod_okay || !authhavekey(info_auth_keyid)) {
611			DPRINTF(5, ("failed auth mod_okay %d\n",
612				    mod_okay));
613#ifdef DEBUG
614			msyslog(LOG_DEBUG,
615				"process_private: failed auth mod_okay %d\n",
616				mod_okay);
617#endif
618			if (!mod_okay) {
619				sys_restricted++;
620			}
621			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
622			return;
623		}
624
625		/*
626		 * calculate absolute time difference between xmit time stamp
627		 * and receive time stamp.  If too large, too bad.
628		 */
629		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
630		L_SUB(&ftmp, &rbufp->recv_time);
631		LFPTOD(&ftmp, dtemp);
632		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
633			/*
634			 * He's a loser.  Tell him.
635			 */
636			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
637				    dtemp, INFO_TS_MAXSKEW));
638			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
639			return;
640		}
641
642		/*
643		 * So far so good.  See if decryption works out okay.
644		 */
645		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
646				 recv_len - mac_len, mac_len)) {
647			DPRINTF(5, ("authdecrypt failed\n"));
648			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
649			return;
650		}
651	}
652
653	DPRINTF(3, ("process_private: all okay, into handler\n"));
654	/*
655	 * Packet is okay.  Call the handler to send him data.
656	 */
657	(proc->handler)(srcadr, inter, inpkt);
658}
659
660
661/*
662 * list_peers - send a list of the peers
663 */
664static void
665list_peers(
666	sockaddr_u *srcadr,
667	endpt *inter,
668	struct req_pkt *inpkt
669	)
670{
671	struct info_peer_list *	ip;
672	const struct peer *	pp;
673
674	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
675	    v6sizeof(struct info_peer_list));
676	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
677		if (IS_IPV6(&pp->srcadr)) {
678			if (!client_v6_capable)
679				continue;
680			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
681			ip->v6_flag = 1;
682		} else {
683			ip->addr = NSRCADR(&pp->srcadr);
684			if (client_v6_capable)
685				ip->v6_flag = 0;
686		}
687
688		ip->port = NSRCPORT(&pp->srcadr);
689		ip->hmode = pp->hmode;
690		ip->flags = 0;
691		if (pp->flags & FLAG_CONFIG)
692			ip->flags |= INFO_FLAG_CONFIG;
693		if (pp == sys_peer)
694			ip->flags |= INFO_FLAG_SYSPEER;
695		if (pp->status == CTL_PST_SEL_SYNCCAND)
696			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
697		if (pp->status >= CTL_PST_SEL_SYSPEER)
698			ip->flags |= INFO_FLAG_SHORTLIST;
699		ip = (struct info_peer_list *)more_pkt();
700	}	/* for pp */
701
702	flush_pkt();
703}
704
705
706/*
707 * list_peers_sum - return extended peer list
708 */
709static void
710list_peers_sum(
711	sockaddr_u *srcadr,
712	endpt *inter,
713	struct req_pkt *inpkt
714	)
715{
716	struct info_peer_summary *	ips;
717	const struct peer *		pp;
718	l_fp 				ltmp;
719
720	DPRINTF(3, ("wants peer list summary\n"));
721
722	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
723	    v6sizeof(struct info_peer_summary));
724	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
725		DPRINTF(4, ("sum: got one\n"));
726		/*
727		 * Be careful here not to return v6 peers when we
728		 * want only v4.
729		 */
730		if (IS_IPV6(&pp->srcadr)) {
731			if (!client_v6_capable)
732				continue;
733			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
734			ips->v6_flag = 1;
735			if (pp->dstadr)
736				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
737			else
738				ZERO(ips->dstadr6);
739		} else {
740			ips->srcadr = NSRCADR(&pp->srcadr);
741			if (client_v6_capable)
742				ips->v6_flag = 0;
743
744			if (pp->dstadr) {
745				if (!pp->processed)
746					ips->dstadr = NSRCADR(&pp->dstadr->sin);
747				else {
748					if (MDF_BCAST == pp->cast_flags)
749						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
750					else if (pp->cast_flags) {
751						ips->dstadr = NSRCADR(&pp->dstadr->sin);
752						if (!ips->dstadr)
753							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
754					}
755				}
756			} else {
757				ips->dstadr = 0;
758			}
759		}
760
761		ips->srcport = NSRCPORT(&pp->srcadr);
762		ips->stratum = pp->stratum;
763		ips->hpoll = pp->hpoll;
764		ips->ppoll = pp->ppoll;
765		ips->reach = pp->reach;
766		ips->flags = 0;
767		if (pp == sys_peer)
768			ips->flags |= INFO_FLAG_SYSPEER;
769		if (pp->flags & FLAG_CONFIG)
770			ips->flags |= INFO_FLAG_CONFIG;
771		if (pp->flags & FLAG_REFCLOCK)
772			ips->flags |= INFO_FLAG_REFCLOCK;
773		if (pp->flags & FLAG_PREFER)
774			ips->flags |= INFO_FLAG_PREFER;
775		if (pp->flags & FLAG_BURST)
776			ips->flags |= INFO_FLAG_BURST;
777		if (pp->status == CTL_PST_SEL_SYNCCAND)
778			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
779		if (pp->status >= CTL_PST_SEL_SYSPEER)
780			ips->flags |= INFO_FLAG_SHORTLIST;
781		ips->hmode = pp->hmode;
782		ips->delay = HTONS_FP(DTOFP(pp->delay));
783		DTOLFP(pp->offset, &ltmp);
784		HTONL_FP(&ltmp, &ips->offset);
785		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
786
787		ips = (struct info_peer_summary *)more_pkt();
788	}	/* for pp */
789
790	flush_pkt();
791}
792
793
794/*
795 * peer_info - send information for one or more peers
796 */
797static void
798peer_info (
799	sockaddr_u *srcadr,
800	endpt *inter,
801	struct req_pkt *inpkt
802	)
803{
804	u_short			items;
805	size_t			item_sz;
806	char *			datap;
807	struct info_peer_list	ipl;
808	struct peer *		pp;
809	struct info_peer *	ip;
810	int			i;
811	int			j;
812	sockaddr_u		addr;
813	l_fp			ltmp;
814
815	items = INFO_NITEMS(inpkt->err_nitems);
816	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
817	datap = inpkt->u.data;
818	if (item_sz != sizeof(ipl)) {
819		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
820		return;
821	}
822	ip = prepare_pkt(srcadr, inter, inpkt,
823			 v6sizeof(struct info_peer));
824	while (items-- > 0 && ip != NULL) {
825		ZERO(ipl);
826		memcpy(&ipl, datap, item_sz);
827		ZERO_SOCK(&addr);
828		NSRCPORT(&addr) = ipl.port;
829		if (client_v6_capable && ipl.v6_flag) {
830			AF(&addr) = AF_INET6;
831			SOCK_ADDR6(&addr) = ipl.addr6;
832		} else {
833			AF(&addr) = AF_INET;
834			NSRCADR(&addr) = ipl.addr;
835		}
836#ifdef ISC_PLATFORM_HAVESALEN
837		addr.sa.sa_len = SOCKLEN(&addr);
838#endif
839		datap += item_sz;
840
841		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
842		if (NULL == pp)
843			continue;
844		if (IS_IPV6(srcadr)) {
845			if (pp->dstadr)
846				ip->dstadr6 =
847				    (MDF_BCAST == pp->cast_flags)
848					? SOCK_ADDR6(&pp->dstadr->bcast)
849					: SOCK_ADDR6(&pp->dstadr->sin);
850			else
851				ZERO(ip->dstadr6);
852
853			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
854			ip->v6_flag = 1;
855		} else {
856			if (pp->dstadr) {
857				if (!pp->processed)
858					ip->dstadr = NSRCADR(&pp->dstadr->sin);
859				else {
860					if (MDF_BCAST == pp->cast_flags)
861						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
862					else if (pp->cast_flags) {
863						ip->dstadr = NSRCADR(&pp->dstadr->sin);
864						if (!ip->dstadr)
865							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
866					}
867				}
868			} else
869				ip->dstadr = 0;
870
871			ip->srcadr = NSRCADR(&pp->srcadr);
872			if (client_v6_capable)
873				ip->v6_flag = 0;
874		}
875		ip->srcport = NSRCPORT(&pp->srcadr);
876		ip->flags = 0;
877		if (pp == sys_peer)
878			ip->flags |= INFO_FLAG_SYSPEER;
879		if (pp->flags & FLAG_CONFIG)
880			ip->flags |= INFO_FLAG_CONFIG;
881		if (pp->flags & FLAG_REFCLOCK)
882			ip->flags |= INFO_FLAG_REFCLOCK;
883		if (pp->flags & FLAG_PREFER)
884			ip->flags |= INFO_FLAG_PREFER;
885		if (pp->flags & FLAG_BURST)
886			ip->flags |= INFO_FLAG_BURST;
887		if (pp->status == CTL_PST_SEL_SYNCCAND)
888			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
889		if (pp->status >= CTL_PST_SEL_SYSPEER)
890			ip->flags |= INFO_FLAG_SHORTLIST;
891		ip->leap = pp->leap;
892		ip->hmode = pp->hmode;
893		ip->keyid = pp->keyid;
894		ip->stratum = pp->stratum;
895		ip->ppoll = pp->ppoll;
896		ip->hpoll = pp->hpoll;
897		ip->precision = pp->precision;
898		ip->version = pp->version;
899		ip->reach = pp->reach;
900		ip->unreach = (u_char)pp->unreach;
901		ip->flash = (u_char)pp->flash;
902		ip->flash2 = (u_short)pp->flash;
903		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
904		ip->ttl = (u_char)pp->ttl;
905		ip->associd = htons(pp->associd);
906		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
907		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
908		ip->refid = pp->refid;
909		HTONL_FP(&pp->reftime, &ip->reftime);
910		HTONL_FP(&pp->aorg, &ip->org);
911		HTONL_FP(&pp->rec, &ip->rec);
912		HTONL_FP(&pp->xmt, &ip->xmt);
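		/*
		 * Editor's note: the loop below walks the clock filter shift
		 * register backwards from the most recent sample
		 * (filter_nextpt is the next slot to be written), so entry 0
		 * of filtdelay/filtoffset is the newest sample and order[]
		 * is rebased accordingly.
		 */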
913		j = pp->filter_nextpt - 1;
914		for (i = 0; i < NTP_SHIFT; i++, j--) {
915			if (j < 0)
916				j = NTP_SHIFT-1;
917			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
918			DTOLFP(pp->filter_offset[j], &ltmp);
919			HTONL_FP(&ltmp, &ip->filtoffset[i]);
920			ip->order[i] = (u_char)((pp->filter_nextpt +
921						 NTP_SHIFT - 1) -
922						pp->filter_order[i]);
923			if (ip->order[i] >= NTP_SHIFT)
924				ip->order[i] -= NTP_SHIFT;
925		}
926		DTOLFP(pp->offset, &ltmp);
927		HTONL_FP(&ltmp, &ip->offset);
928		ip->delay = HTONS_FP(DTOFP(pp->delay));
929		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
930		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
931		ip = more_pkt();
932	}
933	flush_pkt();
934}
935
936
937/*
938 * peer_stats - send statistics for one or more peers
939 */
940static void
941peer_stats (
942	sockaddr_u *srcadr,
943	endpt *inter,
944	struct req_pkt *inpkt
945	)
946{
947	u_short			items;
948	size_t			item_sz;
949	char *			datap;
950	struct info_peer_list	ipl;
951	struct peer *		pp;
952	struct info_peer_stats *ip;
953	sockaddr_u addr;
954
955	DPRINTF(1, ("peer_stats: called\n"));
956	items = INFO_NITEMS(inpkt->err_nitems);
957	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
958	datap = inpkt->u.data;
959	if (item_sz > sizeof(ipl)) {
960		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
961		return;
962	}
963	ip = prepare_pkt(srcadr, inter, inpkt,
964			 v6sizeof(struct info_peer_stats));
965	while (items-- > 0 && ip != NULL) {
966		ZERO(ipl);
967		memcpy(&ipl, datap, item_sz);
968		ZERO(addr);
969		NSRCPORT(&addr) = ipl.port;
970		if (client_v6_capable && ipl.v6_flag) {
971			AF(&addr) = AF_INET6;
972			SOCK_ADDR6(&addr) = ipl.addr6;
973		} else {
974			AF(&addr) = AF_INET;
975			NSRCADR(&addr) = ipl.addr;
976		}
977#ifdef ISC_PLATFORM_HAVESALEN
978		addr.sa.sa_len = SOCKLEN(&addr);
979#endif
980		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
981			    stoa(&addr), ipl.port, NSRCPORT(&addr)));
982
983		datap += item_sz;
984
985		pp = findexistingpeer(&addr, NULL, NULL, -1, 0, NULL);
986		if (NULL == pp)
987			continue;
988
989		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));
990
991		if (IS_IPV4(&pp->srcadr)) {
992			if (pp->dstadr) {
993				if (!pp->processed)
994					ip->dstadr = NSRCADR(&pp->dstadr->sin);
995				else {
996					if (MDF_BCAST == pp->cast_flags)
997						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
998					else if (pp->cast_flags) {
999						ip->dstadr = NSRCADR(&pp->dstadr->sin);
1000						if (!ip->dstadr)
1001							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
1002					}
1003				}
1004			} else
1005				ip->dstadr = 0;
1006
1007			ip->srcadr = NSRCADR(&pp->srcadr);
1008			if (client_v6_capable)
1009				ip->v6_flag = 0;
1010		} else {
1011			if (pp->dstadr)
1012				ip->dstadr6 =
1013				    (MDF_BCAST == pp->cast_flags)
1014					? SOCK_ADDR6(&pp->dstadr->bcast)
1015					: SOCK_ADDR6(&pp->dstadr->sin);
1016			else
1017				ZERO(ip->dstadr6);
1018
1019			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
1020			ip->v6_flag = 1;
1021		}
1022		ip->srcport = NSRCPORT(&pp->srcadr);
1023		ip->flags = 0;
1024		if (pp == sys_peer)
1025		    ip->flags |= INFO_FLAG_SYSPEER;
1026		if (pp->flags & FLAG_CONFIG)
1027		    ip->flags |= INFO_FLAG_CONFIG;
1028		if (pp->flags & FLAG_REFCLOCK)
1029		    ip->flags |= INFO_FLAG_REFCLOCK;
1030		if (pp->flags & FLAG_PREFER)
1031		    ip->flags |= INFO_FLAG_PREFER;
1032		if (pp->flags & FLAG_BURST)
1033		    ip->flags |= INFO_FLAG_BURST;
1034		if (pp->flags & FLAG_IBURST)
1035		    ip->flags |= INFO_FLAG_IBURST;
1036		if (pp->status == CTL_PST_SEL_SYNCCAND)
1037		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
1038		if (pp->status >= CTL_PST_SEL_SYSPEER)
1039		    ip->flags |= INFO_FLAG_SHORTLIST;
1040		ip->flags = htons(ip->flags);
1041		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
1042		ip->timetosend = htonl(pp->nextdate - current_time);
1043		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
1044		ip->sent = htonl((u_int32)(pp->sent));
1045		ip->processed = htonl((u_int32)(pp->processed));
1046		ip->badauth = htonl((u_int32)(pp->badauth));
1047		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
1048		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
1049		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
1050		ip->selbroken = htonl((u_int32)(pp->selbroken));
1051		ip->candidate = pp->status;
1052		ip = (struct info_peer_stats *)more_pkt();
1053	}
1054	flush_pkt();
1055}
1056
1057
1058/*
1059 * sys_info - return system info
1060 */
1061static void
1062sys_info(
1063	sockaddr_u *srcadr,
1064	endpt *inter,
1065	struct req_pkt *inpkt
1066	)
1067{
1068	register struct info_sys *is;
1069
1070	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
1071	    v6sizeof(struct info_sys));
1072
1073	if (sys_peer) {
1074		if (IS_IPV4(&sys_peer->srcadr)) {
1075			is->peer = NSRCADR(&sys_peer->srcadr);
1076			if (client_v6_capable)
1077				is->v6_flag = 0;
1078		} else if (client_v6_capable) {
1079			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
1080			is->v6_flag = 1;
1081		}
1082		is->peer_mode = sys_peer->hmode;
1083	} else {
1084		is->peer = 0;
1085		if (client_v6_capable) {
1086			is->v6_flag = 0;
1087		}
1088		is->peer_mode = 0;
1089	}
1090
1091	is->leap = sys_leap;
1092	is->stratum = sys_stratum;
1093	is->precision = sys_precision;
1094	is->rootdelay = htonl(DTOFP(sys_rootdelay));
1095	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
1096	is->frequency = htonl(DTOFP(sys_jitter));
1097	is->stability = htonl(DTOUFP(clock_stability * 1e6));
1098	is->refid = sys_refid;
1099	HTONL_FP(&sys_reftime, &is->reftime);
1100
1101	is->poll = sys_poll;
1102
1103	is->flags = 0;
1104	if (sys_authenticate)
1105		is->flags |= INFO_FLAG_AUTHENTICATE;
1106	if (sys_bclient)
1107		is->flags |= INFO_FLAG_BCLIENT;
1108#ifdef REFCLOCK
1109	if (cal_enable)
1110		is->flags |= INFO_FLAG_CAL;
1111#endif /* REFCLOCK */
1112	if (kern_enable)
1113		is->flags |= INFO_FLAG_KERNEL;
1114	if (mon_enabled != MON_OFF)
1115		is->flags |= INFO_FLAG_MONITOR;
1116	if (ntp_enable)
1117		is->flags |= INFO_FLAG_NTP;
1118	if (hardpps_enable)
1119		is->flags |= INFO_FLAG_PPS_SYNC;
1120	if (stats_control)
1121		is->flags |= INFO_FLAG_FILEGEN;
1122	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
1123	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
1124	(void) more_pkt();
1125	flush_pkt();
1126}
1127
1128
1129/*
1130 * sys_stats - return system statistics
1131 */
1132static void
1133sys_stats(
1134	sockaddr_u *srcadr,
1135	endpt *inter,
1136	struct req_pkt *inpkt
1137	)
1138{
1139	register struct info_sys_stats *ss;
1140
1141	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1142		sizeof(struct info_sys_stats));
1143	ss->timeup = htonl((u_int32)current_time);
1144	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1145	ss->denied = htonl((u_int32)sys_restricted);
1146	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1147	ss->newversionpkt = htonl((u_int32)sys_newversion);
1148	ss->unknownversion = htonl((u_int32)sys_declined);
1149	ss->badlength = htonl((u_int32)sys_badlength);
1150	ss->processed = htonl((u_int32)sys_processed);
1151	ss->badauth = htonl((u_int32)sys_badauth);
1152	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1153	ss->received = htonl((u_int32)sys_received);
1154	ss->lamport = htonl((u_int32)sys_lamport);
1155	ss->tsrounding = htonl((u_int32)sys_tsrounding);
1156	(void) more_pkt();
1157	flush_pkt();
1158}
1159
1160
1161/*
1162 * mem_stats - return memory statistics
1163 */
1164static void
1165mem_stats(
1166	sockaddr_u *srcadr,
1167	endpt *inter,
1168	struct req_pkt *inpkt
1169	)
1170{
1171	register struct info_mem_stats *ms;
1172	register int i;
1173
1174	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1175						  sizeof(struct info_mem_stats));
1176
1177	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1178	ms->totalpeermem = htons((u_short)total_peer_structs);
1179	ms->freepeermem = htons((u_short)peer_free_count);
1180	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1181	ms->allocations = htonl((u_int32)peer_allocations);
1182	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1183
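	/*
	 * Editor's note: hashcount[] entries are single octets, so each
	 * per-bucket peer count is clamped to UCHAR_MAX on the way out.
	 */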
1184	for (i = 0; i < NTP_HASH_SIZE; i++)
1185		ms->hashcount[i] = (u_char)
1186		    min((u_int)peer_hash_count[i], UCHAR_MAX);
1187
1188	(void) more_pkt();
1189	flush_pkt();
1190}
1191
1192
1193/*
1194 * io_stats - return io statistics
1195 */
1196static void
1197io_stats(
1198	sockaddr_u *srcadr,
1199	endpt *inter,
1200	struct req_pkt *inpkt
1201	)
1202{
1203	struct info_io_stats *io;
1204
1205	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1206						 sizeof(struct info_io_stats));
1207
1208	io->timereset = htonl((u_int32)(current_time - io_timereset));
1209	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1210	io->freerecvbufs = htons((u_short) free_recvbuffs());
1211	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1212	io->lowwater = htons((u_short) lowater_additions());
1213	io->dropped = htonl((u_int32)packets_dropped);
1214	io->ignored = htonl((u_int32)packets_ignored);
1215	io->received = htonl((u_int32)packets_received);
1216	io->sent = htonl((u_int32)packets_sent);
1217	io->notsent = htonl((u_int32)packets_notsent);
1218	io->interrupts = htonl((u_int32)handler_calls);
1219	io->int_received = htonl((u_int32)handler_pkts);
1220
1221	(void) more_pkt();
1222	flush_pkt();
1223}
1224
1225
1226/*
1227 * timer_stats - return timer statistics
1228 */
1229static void
1230timer_stats(
1231	sockaddr_u *		srcadr,
1232	endpt *			inter,
1233	struct req_pkt *	inpkt
1234	)
1235{
1236	struct info_timer_stats *	ts;
1237	u_long				sincereset;
1238
1239	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1240						    inpkt, sizeof(*ts));
1241
1242	sincereset = current_time - timer_timereset;
1243	ts->timereset = htonl((u_int32)sincereset);
1244	ts->alarms = ts->timereset;
1245	ts->overflows = htonl((u_int32)alarm_overflow);
1246	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1247
1248	(void) more_pkt();
1249	flush_pkt();
1250}
1251
1252
1253/*
1254 * loop_info - return the current state of the loop filter
1255 */
1256static void
1257loop_info(
1258	sockaddr_u *srcadr,
1259	endpt *inter,
1260	struct req_pkt *inpkt
1261	)
1262{
1263	struct info_loop *li;
1264	l_fp ltmp;
1265
1266	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1267	    sizeof(struct info_loop));
1268
1269	DTOLFP(last_offset, &ltmp);
1270	HTONL_FP(&ltmp, &li->last_offset);
1271	DTOLFP(drift_comp * 1e6, &ltmp);
1272	HTONL_FP(&ltmp, &li->drift_comp);
1273	li->compliance = htonl((u_int32)(tc_counter));
1274	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1275
1276	(void) more_pkt();
1277	flush_pkt();
1278}
1279
1280
1281/*
1282 * do_conf - add a peer to the configuration list
1283 */
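/*
 * Editor's illustration (hypothetical values): an ntpdc-style configure
 * request arrives here as one conf_peer item per peer, carrying roughly
 *
 *	peeraddr / peeraddr6	address of the peer to mobilize
 *	hmode			association mode, e.g. MODE_CLIENT
 *	version, minpoll, maxpoll, ttl, keyid
 *	flags			CONF_FLAG_* bits mapped to FLAG_* below
 *
 * each of which is validated and handed to peer_config().
 */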
1284static void
1285do_conf(
1286	sockaddr_u *srcadr,
1287	endpt *inter,
1288	struct req_pkt *inpkt
1289	)
1290{
1291	u_short			items;
1292	size_t			item_sz;
1293	u_int			fl;
1294	char *			datap;
1295	struct conf_peer	temp_cp;
1296	sockaddr_u		peeraddr;
1297
1298	/*
1299	 * Do a check of everything to see that it looks
1300	 * okay.  If not, complain about it.  Note we are
1301	 * very picky here.
1302	 */
1303	items = INFO_NITEMS(inpkt->err_nitems);
1304	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1305	datap = inpkt->u.data;
1306	if (item_sz > sizeof(temp_cp)) {
1307		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1308		return;
1309	}
1310
1311	while (items-- > 0) {
1312		ZERO(temp_cp);
1313		memcpy(&temp_cp, datap, item_sz);
1314		ZERO_SOCK(&peeraddr);
1315
1316		fl = 0;
1317		if (temp_cp.flags & CONF_FLAG_PREFER)
1318			fl |= FLAG_PREFER;
1319		if (temp_cp.flags & CONF_FLAG_BURST)
1320			fl |= FLAG_BURST;
1321		if (temp_cp.flags & CONF_FLAG_IBURST)
1322			fl |= FLAG_IBURST;
1323#ifdef AUTOKEY
1324		if (temp_cp.flags & CONF_FLAG_SKEY)
1325			fl |= FLAG_SKEY;
1326#endif	/* AUTOKEY */
1327		if (client_v6_capable && temp_cp.v6_flag) {
1328			AF(&peeraddr) = AF_INET6;
1329			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1330		} else {
1331			AF(&peeraddr) = AF_INET;
1332			NSRCADR(&peeraddr) = temp_cp.peeraddr;
1333			/*
1334			 * Make sure the address is valid
1335			 */
1336			if (!ISREFCLOCKADR(&peeraddr) &&
1337			    ISBADADR(&peeraddr)) {
1338				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1339				return;
1340			}
1341
1342		}
1343		NSRCPORT(&peeraddr) = htons(NTP_PORT);
1344#ifdef ISC_PLATFORM_HAVESALEN
1345		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1346#endif
1347
1348		/* check mode value: 0 <= hmode <= 6
1349		 *
1350		 * There's no good global define for that limit, and
1351		 * using a magic define is as good (or bad, actually) as
1352		 * a magic number. So we use the highest possible peer
1353		 * mode, and that is MODE_BCLIENT.
1354		 *
1355		 * [Bug 3009] claims that a problem occurs for hmode > 7,
1356		 * but the code in ntp_peer.c indicates trouble for any
1357		 * hmode > 6 ( --> MODE_BCLIENT).
1358		 */
1359		if (temp_cp.hmode > MODE_BCLIENT) {
1360			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1361			return;
1362		}
1363
1364		/* Any more checks on the values? Unchecked at this
1365		 * point:
1366		 *   - version
1367		 *   - ttl
1368		 *   - keyid
1369		 *
1370		 *   - minpoll/maxpoll, but they are treated properly
1371		 *     for all cases internally. Checking not necessary.
1372		 *
1373		 * Note that we ignore any previously-specified ippeerlimit.
1374		 * If we're told to create the peer, we create the peer.
1375		 */
1376
1377		/* finally create the peer */
1378		if (peer_config(&peeraddr, NULL, NULL, -1,
1379		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
1380		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
1381		    NULL) == 0)
1382		{
1383			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1384			return;
1385		}
1386
1387		datap += item_sz;
1388	}
1389	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1390}
1391
1392
1393/*
1394 * do_unconf - remove a peer from the configuration list
1395 */
1396static void
1397do_unconf(
1398	sockaddr_u *	srcadr,
1399	endpt *		inter,
1400	struct req_pkt *inpkt
1401	)
1402{
1403	u_short			items;
1404	size_t			item_sz;
1405	char *			datap;
1406	struct conf_unpeer	temp_cp;
1407	struct peer *		p;
1408	sockaddr_u		peeraddr;
1409	int			loops;
1410
1411	/*
1412	 * This is a bit unstructured, but I like to be careful.
1413	 * We check to see that every peer exists and is actually
1414	 * configured.  If so, we remove them.  If not, we return
1415	 * an error.
1416	 *
1417	 * [Bug 3011] Even if we checked all peers given in the request
1418	 * in a dry run, there's still a chance that the caller played
1419	 * unfair and gave the same peer multiple times. So we still
1420	 * have to be prepared for nasty surprises in the second run ;)
1421	 */
1422
1423	/* basic consistency checks */
1424	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1425	if (item_sz > sizeof(temp_cp)) {
1426		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1427		return;
1428	}
1429
1430	/* now do two runs: first a dry run, then a busy one */
1431	for (loops = 0; loops != 2; ++loops) {
1432		items = INFO_NITEMS(inpkt->err_nitems);
1433		datap = inpkt->u.data;
1434		while (items-- > 0) {
1435			/* copy from request to local */
1436			ZERO(temp_cp);
1437			memcpy(&temp_cp, datap, item_sz);
1438			/* get address structure */
1439			ZERO_SOCK(&peeraddr);
1440			if (client_v6_capable && temp_cp.v6_flag) {
1441				AF(&peeraddr) = AF_INET6;
1442				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
1443			} else {
1444				AF(&peeraddr) = AF_INET;
1445				NSRCADR(&peeraddr) = temp_cp.peeraddr;
1446			}
1447			SET_PORT(&peeraddr, NTP_PORT);
1448#ifdef ISC_PLATFORM_HAVESALEN
1449			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1450#endif
1451			DPRINTF(1, ("searching for %s\n",
1452				    stoa(&peeraddr)));
1453
1454			/* search for a matching configured(!) peer */
1455			p = NULL;
1456			do {
1457				p = findexistingpeer(
1458					&peeraddr, NULL, p, -1, 0, NULL);
1459			} while (p && !(FLAG_CONFIG & p->flags));
1460
1461			if (!loops && !p) {
1462				/* Item not found in dry run -- bail! */
1463				req_ack(srcadr, inter, inpkt,
1464					INFO_ERR_NODATA);
1465				return;
1466			} else if (loops && p) {
1467				/* Item found in busy run -- remove! */
1468				peer_clear(p, "GONE");
1469				unpeer(p);
1470			}
1471			datap += item_sz;
1472		}
1473	}
1474
1475	/* report success */
1476	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1477}
1478
1479
1480/*
1481 * set_sys_flag - set system flags
1482 */
1483static void
1484set_sys_flag(
1485	sockaddr_u *srcadr,
1486	endpt *inter,
1487	struct req_pkt *inpkt
1488	)
1489{
1490	setclr_flags(srcadr, inter, inpkt, 1);
1491}
1492
1493
1494/*
1495 * clr_sys_flag - clear system flags
1496 */
1497static void
1498clr_sys_flag(
1499	sockaddr_u *srcadr,
1500	endpt *inter,
1501	struct req_pkt *inpkt
1502	)
1503{
1504	setclr_flags(srcadr, inter, inpkt, 0);
1505}
1506
1507
1508/*
1509 * setclr_flags - do the grunge work of flag setting/clearing
1510 */
1511static void
1512setclr_flags(
1513	sockaddr_u *srcadr,
1514	endpt *inter,
1515	struct req_pkt *inpkt,
1516	u_long set
1517	)
1518{
1519	struct conf_sys_flags *sf;
1520	u_int32 flags;
1521
1522	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1523		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1524		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1525		return;
1526	}
1527
1528	sf = (struct conf_sys_flags *)&inpkt->u;
1529	flags = ntohl(sf->flags);
1530
1531	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1532		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1533		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1534		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1535			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1536				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1537				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1538				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1539		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1540		return;
1541	}
1542
1543	if (flags & SYS_FLAG_BCLIENT)
1544		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1545	if (flags & SYS_FLAG_PPS)
1546		proto_config(PROTO_PPS, set, 0., NULL);
1547	if (flags & SYS_FLAG_NTP)
1548		proto_config(PROTO_NTP, set, 0., NULL);
1549	if (flags & SYS_FLAG_KERNEL)
1550		proto_config(PROTO_KERNEL, set, 0., NULL);
1551	if (flags & SYS_FLAG_MONITOR)
1552		proto_config(PROTO_MONITOR, set, 0., NULL);
1553	if (flags & SYS_FLAG_FILEGEN)
1554		proto_config(PROTO_FILEGEN, set, 0., NULL);
1555	if (flags & SYS_FLAG_AUTH)
1556		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1557	if (flags & SYS_FLAG_CAL)
1558		proto_config(PROTO_CAL, set, 0., NULL);
1559	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1560}
1561
1562/* There have been some issues with the restrict list processing,
1563 * ranging from deep recursion (resulting in stack overflows) to
1564 * overfull reply buffers.
1565 *
1566 * To avoid this trouble the list reversal is done iteratively using a
1567 * scratch pad.
1568 */
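/*
 * Editor's note: the scratch pad is a singly linked list of fixed-size
 * sheets of pointers.  list_restrict4/6 below push every entry of a
 * restriction list and then pop them back off, which yields the
 * reversed traversal without recursion and with heap usage bounded by
 * the list length.
 */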
1569typedef struct RestrictStack RestrictStackT;
1570struct RestrictStack {
1571	RestrictStackT   *link;
1572	size_t            fcnt;
1573	const restrict_u *pres[63];
1574};
1575
1576static size_t
1577getStackSheetSize(
1578	RestrictStackT *sp
1579	)
1580{
1581	if (sp)
1582		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1583	return 0u;
1584}
1585
1586static int/*BOOL*/
1587pushRestriction(
1588	RestrictStackT  **spp,
1589	const restrict_u *ptr
1590	)
1591{
1592	RestrictStackT *sp;
1593
1594	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1595		/* need another sheet in the scratch pad */
1596		sp = emalloc(sizeof(*sp));
1597		sp->link = *spp;
1598		sp->fcnt = getStackSheetSize(sp);
1599		*spp = sp;
1600	}
1601	sp->pres[--sp->fcnt] = ptr;
1602	return TRUE;
1603}
1604
1605static int/*BOOL*/
1606popRestriction(
1607	RestrictStackT   **spp,
1608	const restrict_u **opp
1609	)
1610{
1611	RestrictStackT *sp;
1612
1613	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1614		return FALSE;
1615
1616	*opp = sp->pres[sp->fcnt++];
1617	if (sp->fcnt >= getStackSheetSize(sp)) {
1618		/* discard sheet from scratch pad */
1619		*spp = sp->link;
1620		free(sp);
1621	}
1622	return TRUE;
1623}
1624
1625static void
1626flushRestrictionStack(
1627	RestrictStackT **spp
1628	)
1629{
1630	RestrictStackT *sp;
1631
1632	while (NULL != (sp = *spp)) {
1633		*spp = sp->link;
1634		free(sp);
1635	}
1636}
1637
1638/*
1639 * list_restrict4 - iterative helper for list_restrict dumps IPv4
1640 *		    restriction list in reverse order.
1641 */
1642static void
1643list_restrict4(
1644	const restrict_u *	res,
1645	struct info_restrict **	ppir
1646	)
1647{
1648	RestrictStackT *	rpad;
1649	struct info_restrict *	pir;
1650
1651	pir = *ppir;
1652	for (rpad = NULL; res; res = res->link)
1653		if (!pushRestriction(&rpad, res))
1654			break;
1655
1656	while (pir && popRestriction(&rpad, &res)) {
1657		pir->addr = htonl(res->u.v4.addr);
1658		if (client_v6_capable)
1659			pir->v6_flag = 0;
1660		pir->mask = htonl(res->u.v4.mask);
1661		pir->count = htonl(res->count);
1662		pir->rflags = htons(res->rflags);
1663		pir->mflags = htons(res->mflags);
1664		pir = (struct info_restrict *)more_pkt();
1665	}
1666	flushRestrictionStack(&rpad);
1667	*ppir = pir;
1668}
1669
1670/*
1671 * list_restrict6 - iterative helper for list_restrict dumps IPv6
1672 *		    restriction list in reverse order.
1673 */
1674static void
1675list_restrict6(
1676	const restrict_u *	res,
1677	struct info_restrict **	ppir
1678	)
1679{
1680	RestrictStackT *	rpad;
1681	struct info_restrict *	pir;
1682
1683	pir = *ppir;
1684	for (rpad = NULL; res; res = res->link)
1685		if (!pushRestriction(&rpad, res))
1686			break;
1687
1688	while (pir && popRestriction(&rpad, &res)) {
1689		pir->addr6 = res->u.v6.addr;
1690		pir->mask6 = res->u.v6.mask;
1691		pir->v6_flag = 1;
1692		pir->count = htonl(res->count);
1693		pir->rflags = htons(res->rflags);
1694		pir->mflags = htons(res->mflags);
1695		pir = (struct info_restrict *)more_pkt();
1696	}
1697	flushRestrictionStack(&rpad);
1698	*ppir = pir;
1699}
1700
1701
1702/*
1703 * list_restrict - return the restrict list
1704 */
1705static void
1706list_restrict(
1707	sockaddr_u *srcadr,
1708	endpt *inter,
1709	struct req_pkt *inpkt
1710	)
1711{
1712	struct info_restrict *ir;
1713
1714	DPRINTF(3, ("wants restrict list summary\n"));
1715
1716	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
1717	    v6sizeof(struct info_restrict));
1718
1719	/*
1720	 * The restriction lists are kept sorted in the reverse of the
1721	 * order in which they should be reported, so to preserve the output
1722	 * semantics each list is dumped in reverse; the helpers above do that.
1723	 */
1724	list_restrict4(restrictlist4, &ir);
1725	if (client_v6_capable)
1726		list_restrict6(restrictlist6, &ir);
1727	flush_pkt();
1728}
1729
1730
1731/*
1732 * do_resaddflags - add flags to a restrict entry (or create one)
1733 */
1734static void
1735do_resaddflags(
1736	sockaddr_u *srcadr,
1737	endpt *inter,
1738	struct req_pkt *inpkt
1739	)
1740{
1741	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
1742}
1743
1744
1745
1746/*
1747 * do_ressubflags - remove flags from a restrict entry
1748 */
1749static void
1750do_ressubflags(
1751	sockaddr_u *srcadr,
1752	endpt *inter,
1753	struct req_pkt *inpkt
1754	)
1755{
1756	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
1757}
1758
1759
1760/*
1761 * do_unrestrict - remove a restrict entry from the list
1762 */
1763static void
1764do_unrestrict(
1765	sockaddr_u *srcadr,
1766	endpt *inter,
1767	struct req_pkt *inpkt
1768	)
1769{
1770	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
1771}
1772
1773
1774/*
1775 * do_restrict - do the dirty stuff of dealing with restrictions
1776 */
1777static void
1778do_restrict(
1779	sockaddr_u *srcadr,
1780	endpt *inter,
1781	struct req_pkt *inpkt,
1782	restrict_op op
1783	)
1784{
1785	char *			datap;
1786	struct conf_restrict	cr;
1787	u_short			items;
1788	size_t			item_sz;
1789	sockaddr_u		matchaddr;
1790	sockaddr_u		matchmask;
1791	int			bad;
1792
1793	switch(op) {
1794	    case RESTRICT_FLAGS:
1795	    case RESTRICT_UNFLAG:
1796	    case RESTRICT_REMOVE:
1797	    case RESTRICT_REMOVEIF:
1798	    	break;
1799
1800	    default:
1801		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1802		return;
1803	}
1804
1805	/*
1806	 * Do a check of the flags to make sure that only
1807	 * the NTPPORT flag is set, if any.  If not, complain
1808	 * about it.  Note we are very picky here.
1809	 */
1810	items = INFO_NITEMS(inpkt->err_nitems);
1811	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1812	datap = inpkt->u.data;
1813	if (item_sz > sizeof(cr)) {
1814		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1815		return;
1816	}
1817
1818	bad = 0;
1819	while (items-- > 0 && !bad) {
1820		memcpy(&cr, datap, item_sz);
1821		cr.flags = ntohs(cr.flags);
1822		cr.mflags = ntohs(cr.mflags);
1823		if (~RESM_NTPONLY & cr.mflags)
1824			bad |= 1;
1825		if (~RES_ALLFLAGS & cr.flags)
1826			bad |= 2;
1827		if (INADDR_ANY != cr.mask) {
1828			if (client_v6_capable && cr.v6_flag) {
1829				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
1830					bad |= 4;
1831			} else {
1832				if (INADDR_ANY == cr.addr)
1833					bad |= 8;
1834			}
1835		}
1836		datap += item_sz;
1837	}
1838
1839	if (bad) {
1840		msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
1841		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1842		return;
1843	}
1844
1845	/*
1846	 * Looks okay, try it out.  Needs to reload data pointer and
1847	 * item counter. (Talos-CAN-0052)
1848	 */
1849	ZERO_SOCK(&matchaddr);
1850	ZERO_SOCK(&matchmask);
1851	items = INFO_NITEMS(inpkt->err_nitems);
1852	datap = inpkt->u.data;
1853
1854	while (items-- > 0) {
1855		memcpy(&cr, datap, item_sz);
1856		cr.flags = ntohs(cr.flags);
1857		cr.mflags = ntohs(cr.mflags);
1858		cr.ippeerlimit = ntohs(cr.ippeerlimit);
1859		if (client_v6_capable && cr.v6_flag) {
1860			AF(&matchaddr) = AF_INET6;
1861			AF(&matchmask) = AF_INET6;
1862			SOCK_ADDR6(&matchaddr) = cr.addr6;
1863			SOCK_ADDR6(&matchmask) = cr.mask6;
1864		} else {
1865			AF(&matchaddr) = AF_INET;
1866			AF(&matchmask) = AF_INET;
1867			NSRCADR(&matchaddr) = cr.addr;
1868			NSRCADR(&matchmask) = cr.mask;
1869		}
1870		hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
1871			      cr.ippeerlimit, cr.flags, 0);
1872		datap += item_sz;
1873	}
1874
1875	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1876}
1877
1878
1879/*
1880 * mon_getlist - return monitor data
1881 */
1882static void
1883mon_getlist(
1884	sockaddr_u *srcadr,
1885	endpt *inter,
1886	struct req_pkt *inpkt
1887	)
1888{
1889	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1890}
1891
1892
1893/*
1894 * Module entry points and the flags they correspond with
1895 */
1896struct reset_entry {
1897	int flag;		/* flag this corresponds to */
1898	void (*handler)(void);	/* routine to handle request */
1899};
1900
1901struct reset_entry reset_entries[] = {
1902	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
1903	{ RESET_FLAG_IO,	io_clr_stats },
1904	{ RESET_FLAG_SYS,	proto_clr_stats },
1905	{ RESET_FLAG_MEM,	peer_clr_stats },
1906	{ RESET_FLAG_TIMER,	timer_clr_stats },
1907	{ RESET_FLAG_AUTH,	reset_auth_stats },
1908	{ RESET_FLAG_CTL,	ctl_clr_stats },
1909	{ 0,			0 }
1910};
1911
1912/*
1913 * reset_stats - reset statistic counters here and there
1914 */
1915static void
1916reset_stats(
1917	sockaddr_u *srcadr,
1918	endpt *inter,
1919	struct req_pkt *inpkt
1920	)
1921{
1922	struct reset_flags *rflags;
1923	u_long flags;
1924	struct reset_entry *rent;
1925
1926	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1927		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1928		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1929		return;
1930	}
1931
1932	rflags = (struct reset_flags *)&inpkt->u;
1933	flags = ntohl(rflags->flags);
1934
1935	if (flags & ~RESET_ALLFLAGS) {
1936		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
1937			flags & ~RESET_ALLFLAGS);
1938		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1939		return;
1940	}
1941
1942	for (rent = reset_entries; rent->flag != 0; rent++) {
1943		if (flags & rent->flag)
1944			(*rent->handler)();
1945	}
1946	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1947}
1948
1949
1950/*
1951 * reset_peer - clear a peer's statistics
1952 */
1953static void
1954reset_peer(
1955	sockaddr_u *srcadr,
1956	endpt *inter,
1957	struct req_pkt *inpkt
1958	)
1959{
1960	u_short			items;
1961	size_t			item_sz;
1962	char *			datap;
1963	struct conf_unpeer	cp;
1964	struct peer *		p;
1965	sockaddr_u		peeraddr;
1966	int			bad;
1967
1968	/*
1969	 * We check first to see that every peer exists.  If not,
1970	 * we return an error.
1971	 */
1972
1973	items = INFO_NITEMS(inpkt->err_nitems);
1974	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1975	datap = inpkt->u.data;
1976	if (item_sz > sizeof(cp)) {
1977		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1978		return;
1979	}
1980
1981	bad = FALSE;
1982	while (items-- > 0 && !bad) {
1983		ZERO(cp);
1984		memcpy(&cp, datap, item_sz);
1985		ZERO_SOCK(&peeraddr);
1986		if (client_v6_capable && cp.v6_flag) {
1987			AF(&peeraddr) = AF_INET6;
1988			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
1989		} else {
1990			AF(&peeraddr) = AF_INET;
1991			NSRCADR(&peeraddr) = cp.peeraddr;
1992		}
1993
1994#ifdef ISC_PLATFORM_HAVESALEN
1995		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1996#endif
1997		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
1998		if (NULL == p)
1999			bad++;
2000		datap += item_sz;
2001	}
2002
2003	if (bad) {
2004		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2005		return;
2006	}
2007
2008	/*
2009	 * Now do it in earnest. Needs to reload data pointer and item
2010	 * counter. (Talos-CAN-0052)
2011	 */
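	/*
	 * Every association matching a listed address is cleared: the
	 * lookup is repeated from the previous hit until no further
	 * match is found.
	 */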
2012
2013	items = INFO_NITEMS(inpkt->err_nitems);
2014	datap = inpkt->u.data;
2015	while (items-- > 0) {
2016		ZERO(cp);
2017		memcpy(&cp, datap, item_sz);
2018		ZERO_SOCK(&peeraddr);
2019		if (client_v6_capable && cp.v6_flag) {
2020			AF(&peeraddr) = AF_INET6;
2021			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2022		} else {
2023			AF(&peeraddr) = AF_INET;
2024			NSRCADR(&peeraddr) = cp.peeraddr;
2025		}
2026		SET_PORT(&peeraddr, NTP_PORT);
2027#ifdef ISC_PLATFORM_HAVESALEN
2028		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2029#endif
2030		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0, NULL);
2031		while (p != NULL) {
2032			peer_reset(p);
2033			p = findexistingpeer(&peeraddr, NULL, p, -1, 0, NULL);
2034		}
2035		datap += item_sz;
2036	}
2037
2038	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2039}
2040
2041
2042/*
2043 * do_key_reread - reread the encryption key file
2044 */
2045static void
2046do_key_reread(
2047	sockaddr_u *srcadr,
2048	endpt *inter,
2049	struct req_pkt *inpkt
2050	)
2051{
2052	rereadkeys();
2053	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2054}
2055
2056
2057/*
2058 * trust_key - make one or more keys trusted
2059 */
2060static void
2061trust_key(
2062	sockaddr_u *srcadr,
2063	endpt *inter,
2064	struct req_pkt *inpkt
2065	)
2066{
2067	do_trustkey(srcadr, inter, inpkt, 1);
2068}
2069
2070
2071/*
2072 * untrust_key - make one or more keys untrusted
2073 */
2074static void
2075untrust_key(
2076	sockaddr_u *srcadr,
2077	endpt *inter,
2078	struct req_pkt *inpkt
2079	)
2080{
2081	do_trustkey(srcadr, inter, inpkt, 0);
2082}
2083
2084
2085/*
2086 * do_trustkey - mark keys as either trusted or untrusted
2087 */
2088static void
2089do_trustkey(
2090	sockaddr_u *srcadr,
2091	endpt *inter,
2092	struct req_pkt *inpkt,
2093	u_long trust
2094	)
2095{
2096	register uint32_t *kp;
2097	register int items;
2098
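	/* each request item is one 32-bit key id, taken straight from the payload */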
2099	items = INFO_NITEMS(inpkt->err_nitems);
2100	kp = (uint32_t *)&inpkt->u;
2101	while (items-- > 0) {
2102		authtrust(*kp, trust);
2103		kp++;
2104	}
2105
2106	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2107}
2108
2109
2110/*
2111 * get_auth_info - return some stats concerning the authentication module
2112 */
2113static void
2114get_auth_info(
2115	sockaddr_u *srcadr,
2116	endpt *inter,
2117	struct req_pkt *inpkt
2118	)
2119{
2120	register struct info_auth *ia;
2121
2122	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2123					     sizeof(struct info_auth));
2124
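	/*
	 * All counters are reported as 32-bit values in network byte
	 * order; timereset is the number of seconds since the counters
	 * were last cleared.
	 */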
2125	ia->numkeys = htonl((u_int32)authnumkeys);
2126	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2127	ia->keylookups = htonl((u_int32)authkeylookups);
2128	ia->keynotfound = htonl((u_int32)authkeynotfound);
2129	ia->encryptions = htonl((u_int32)authencryptions);
2130	ia->decryptions = htonl((u_int32)authdecryptions);
2131	ia->keyuncached = htonl((u_int32)authkeyuncached);
2132	ia->expired = htonl((u_int32)authkeyexpired);
2133	ia->timereset = htonl((u_int32)(current_time - auth_timereset));
2134
2135	(void) more_pkt();
2136	flush_pkt();
2137}
2138
2139
2140
2141/*
2142 * reset_auth_stats - reset the authentication stat counters.  Done here
2143 *		      to keep ntp-isms out of the authentication module
2144 */
2145void
2146reset_auth_stats(void)
2147{
2148	authkeylookups = 0;
2149	authkeynotfound = 0;
2150	authencryptions = 0;
2151	authdecryptions = 0;
2152	authkeyuncached = 0;
2153	auth_timereset = current_time;
2154}
2155
2156
2157/*
2158 * req_get_traps - return information about current trap holders
2159 */
2160static void
2161req_get_traps(
2162	sockaddr_u *srcadr,
2163	endpt *inter,
2164	struct req_pkt *inpkt
2165	)
2166{
2167	struct info_trap *it;
2168	struct ctl_trap *tr;
2169	size_t i;
2170
2171	if (num_ctl_traps == 0) {
2172		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2173		return;
2174	}
2175
2176	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
2177	    v6sizeof(struct info_trap));
2178
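	/*
	 * One entry is emitted per trap slot that is in use; IPv6 traps
	 * are skipped when the client cannot handle the longer
	 * v6-capable response format.
	 */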
2179	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
2180		if (tr->tr_flags & TRAP_INUSE) {
2181			if (IS_IPV4(&tr->tr_addr)) {
2182				if (tr->tr_localaddr == any_interface)
2183					it->local_address = 0;
2184				else
2185					it->local_address
2186					    = NSRCADR(&tr->tr_localaddr->sin);
2187				it->trap_address = NSRCADR(&tr->tr_addr);
2188				if (client_v6_capable)
2189					it->v6_flag = 0;
2190			} else {
2191				if (!client_v6_capable)
2192					continue;
2193				it->local_address6
2194				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
2195				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
2196				it->v6_flag = 1;
2197			}
2198			it->trap_port = NSRCPORT(&tr->tr_addr);
2199			it->sequence = htons(tr->tr_sequence);
2200			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
2201			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
2202			it->resets = htonl((u_int32)tr->tr_resets);
2203			it->flags = htonl((u_int32)tr->tr_flags);
2204			it = (struct info_trap *)more_pkt();
2205		}
2206	}
2207	flush_pkt();
2208}
2209
2210
2211/*
2212 * req_set_trap - configure a trap
2213 */
2214static void
2215req_set_trap(
2216	sockaddr_u *srcadr,
2217	endpt *inter,
2218	struct req_pkt *inpkt
2219	)
2220{
2221	do_setclr_trap(srcadr, inter, inpkt, 1);
2222}
2223
2224
2225
2226/*
2227 * req_clr_trap - unconfigure a trap
2228 */
2229static void
2230req_clr_trap(
2231	sockaddr_u *srcadr,
2232	endpt *inter,
2233	struct req_pkt *inpkt
2234	)
2235{
2236	do_setclr_trap(srcadr, inter, inpkt, 0);
2237}
2238
2239
2240
2241/*
2242 * do_setclr_trap - do the grunge work of (un)configuring a trap
2243 */
2244static void
2245do_setclr_trap(
2246	sockaddr_u *srcadr,
2247	endpt *inter,
2248	struct req_pkt *inpkt,
2249	int set
2250	)
2251{
2252	register struct conf_trap *ct;
2253	register endpt *linter;
2254	int res;
2255	sockaddr_u laddr;
2256
2257	/*
2258	 * Prepare sockaddr
2259	 */
2260	ZERO_SOCK(&laddr);
2261	AF(&laddr) = AF(srcadr);
2262	SET_PORT(&laddr, NTP_PORT);
2263
2264	/*
2265	 * Restrict ourselves to one item only; with several items there
2266	 * would be no way to report which one failed.
2267	 */
2268	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2269		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
2270		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2271		return;
2272	}
2273	ct = (struct conf_trap *)&inpkt->u;
2274
2275	/*
2276	 * Look for the local interface.  If none, use the default.
2277	 */
2278	if (ct->local_address == 0) {
2279		linter = any_interface;
2280	} else {
2281		if (IS_IPV4(&laddr))
2282			NSRCADR(&laddr) = ct->local_address;
2283		else
2284			SOCK_ADDR6(&laddr) = ct->local_address6;
2285		linter = findinterface(&laddr);
2286		if (NULL == linter) {
2287			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2288			return;
2289		}
2290	}
2291
2292	if (IS_IPV4(&laddr))
2293		NSRCADR(&laddr) = ct->trap_address;
2294	else
2295		SOCK_ADDR6(&laddr) = ct->trap_address6;
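	/* use the port given in the request, else the default trap port */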
2296	if (ct->trap_port)
2297		NSRCPORT(&laddr) = ct->trap_port;
2298	else
2299		SET_PORT(&laddr, TRAPPORT);
2300
2301	if (set) {
2302		res = ctlsettrap(&laddr, linter, 0,
2303				 INFO_VERSION(inpkt->rm_vn_mode));
2304	} else {
2305		res = ctlclrtrap(&laddr, linter, 0);
2306	}
2307
2308	if (!res) {
2309		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2310	} else {
2311		req_ack(srcadr, inter, inpkt, INFO_OKAY);
2312	}
2313	return;
2314}
2315
2316/*
2317 * Validate a request packet for a new request or control key:
2318 *  - only one item allowed
2319 *  - key must be valid (that is, known, and not in the autokey range)
2320 */
2321static void
2322set_keyid_checked(
2323	keyid_t        *into,
2324	const char     *what,
2325	sockaddr_u     *srcadr,
2326	endpt          *inter,
2327	struct req_pkt *inpkt
2328	)
2329{
2330	keyid_t *pkeyid;
2331	keyid_t  tmpkey;
2332
2333	/* restrict ourselves to one item only */
2334	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2335		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2336			what);
2337		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2338		return;
2339	}
2340
2341	/* pull the new key id from the packet */
2342	pkeyid = (keyid_t *)&inpkt->u;
2343	tmpkey = ntohl(*pkeyid);
2344
2345	/* validate the new key id, claim data error on failure */
2346	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
2347		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2348			what, (long)tmpkey);
2349		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2350		return;
2351	}
2352
2353	/* if we arrive here, the key is good -- use it */
2354	*into = tmpkey;
2355	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2356}
2357
2358/*
2359 * set_request_keyid - set the keyid used to authenticate requests
2360 */
2361static void
2362set_request_keyid(
2363	sockaddr_u *srcadr,
2364	endpt *inter,
2365	struct req_pkt *inpkt
2366	)
2367{
2368	set_keyid_checked(&info_auth_keyid, "request",
2369			  srcadr, inter, inpkt);
2370}
2371
2372
2373
2374/*
2375 * set_control_keyid - set the keyid used to authenticate control messages
2376 */
2377static void
2378set_control_keyid(
2379	sockaddr_u *srcadr,
2380	endpt *inter,
2381	struct req_pkt *inpkt
2382	)
2383{
2384	set_keyid_checked(&ctl_auth_keyid, "control",
2385			  srcadr, inter, inpkt);
2386}
2387
2388
2389
2390/*
2391 * get_ctl_stats - return some stats concerning the control message module
2392 */
2393static void
2394get_ctl_stats(
2395	sockaddr_u *srcadr,
2396	endpt *inter,
2397	struct req_pkt *inpkt
2398	)
2399{
2400	register struct info_control *ic;
2401
2402	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2403						sizeof(struct info_control));
2404
2405	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2406	ic->numctlreq = htonl((u_int32)numctlreq);
2407	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2408	ic->numctlresponses = htonl((u_int32)numctlresponses);
2409	ic->numctlfrags = htonl((u_int32)numctlfrags);
2410	ic->numctlerrors = htonl((u_int32)numctlerrors);
2411	ic->numctltooshort = htonl((u_int32)numctltooshort);
2412	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2413	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2414	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2415	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2416	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2417	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2418	ic->numctlbadop = htonl((u_int32)numctlbadop);
2419	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
2420
2421	(void) more_pkt();
2422	flush_pkt();
2423}
2424
2425
2426#ifdef KERNEL_PLL
2427/*
2428 * get_kernel_info - get kernel pll/pps information
2429 */
2430static void
2431get_kernel_info(
2432	sockaddr_u *srcadr,
2433	endpt *inter,
2434	struct req_pkt *inpkt
2435	)
2436{
2437	register struct info_kernel *ik;
2438	struct timex ntx;
2439
2440	if (!pll_control) {
2441		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2442		return;
2443	}
2444
2445	ZERO(ntx);
2446	if (ntp_adjtime(&ntx) < 0)
2447		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
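	/* on failure we still fall through and report the zero-initialized values */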
2448	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
2449	    sizeof(struct info_kernel));
2450
2451	/*
2452	 * pll variables
2453	 */
2454	ik->offset = htonl((u_int32)ntx.offset);
2455	ik->freq = htonl((u_int32)ntx.freq);
2456	ik->maxerror = htonl((u_int32)ntx.maxerror);
2457	ik->esterror = htonl((u_int32)ntx.esterror);
2458	ik->status = htons(ntx.status);
2459	ik->constant = htonl((u_int32)ntx.constant);
2460	ik->precision = htonl((u_int32)ntx.precision);
2461	ik->tolerance = htonl((u_int32)ntx.tolerance);
2462
2463	/*
2464	 * pps variables
2465	 */
2466	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
2467	ik->jitter = htonl((u_int32)ntx.jitter);
2468	ik->shift = htons(ntx.shift);
2469	ik->stabil = htonl((u_int32)ntx.stabil);
2470	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
2471	ik->calcnt = htonl((u_int32)ntx.calcnt);
2472	ik->errcnt = htonl((u_int32)ntx.errcnt);
2473	ik->stbcnt = htonl((u_int32)ntx.stbcnt);
2474
2475	(void) more_pkt();
2476	flush_pkt();
2477}
2478#endif /* KERNEL_PLL */
2479
2480
2481#ifdef REFCLOCK
2482/*
2483 * get_clock_info - get info about a clock
2484 */
2485static void
2486get_clock_info(
2487	sockaddr_u *srcadr,
2488	endpt *inter,
2489	struct req_pkt *inpkt
2490	)
2491{
2492	register struct info_clock *ic;
2493	register u_int32 *clkaddr;
2494	register int items;
2495	struct refclockstat clock_stat;
2496	sockaddr_u addr;
2497	l_fp ltmp;
2498
2499	ZERO_SOCK(&addr);
2500	AF(&addr) = AF_INET;
2501#ifdef ISC_PLATFORM_HAVESALEN
2502	addr.sa.sa_len = SOCKLEN(&addr);
2503#endif
2504	SET_PORT(&addr, NTP_PORT);
2505	items = INFO_NITEMS(inpkt->err_nitems);
2506	clkaddr = &inpkt->u.u32[0];
2507
2508	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
2509					      sizeof(struct info_clock));
2510
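	/*
	 * Each request item is a 127.127.t.u refclock pseudo-address;
	 * the whole request is rejected with "no data" as soon as one
	 * item does not name a configured clock.
	 */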
2511	while (items-- > 0 && ic) {
2512		NSRCADR(&addr) = *clkaddr++;
2513		if (!ISREFCLOCKADR(&addr) || NULL ==
2514		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2515			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2516			return;
2517		}
2518
2519		clock_stat.kv_list = (struct ctl_var *)0;
2520
2521		refclock_control(&addr, NULL, &clock_stat);
2522
2523		ic->clockadr = NSRCADR(&addr);
2524		ic->type = clock_stat.type;
2525		ic->flags = clock_stat.flags;
2526		ic->lastevent = clock_stat.lastevent;
2527		ic->currentstatus = clock_stat.currentstatus;
2528		ic->polls = htonl((u_int32)clock_stat.polls);
2529		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
2530		ic->badformat = htonl((u_int32)clock_stat.badformat);
2531		ic->baddata = htonl((u_int32)clock_stat.baddata);
2532		ic->timestarted = htonl((u_int32)clock_stat.timereset);
2533		DTOLFP(clock_stat.fudgetime1, &ltmp);
2534		HTONL_FP(&ltmp, &ic->fudgetime1);
2535		DTOLFP(clock_stat.fudgetime2, &ltmp);
2536		HTONL_FP(&ltmp, &ic->fudgetime2);
2537		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
2538		ic->fudgeval2 = htonl(clock_stat.fudgeval2);
2539
2540		free_varlist(clock_stat.kv_list);
2541
2542		ic = (struct info_clock *)more_pkt();
2543	}
2544	flush_pkt();
2545}
2546
2547
2548
2549/*
2550 * set_clock_fudge - set a clock's fudge factors
2551 */
2552static void
2553set_clock_fudge(
2554	sockaddr_u *srcadr,
2555	endpt *inter,
2556	struct req_pkt *inpkt
2557	)
2558{
2559	register struct conf_fudge *cf;
2560	register int items;
2561	struct refclockstat clock_stat;
2562	sockaddr_u addr;
2563	l_fp ltmp;
2564
2565	ZERO(addr);
2566	ZERO(clock_stat);
2567	items = INFO_NITEMS(inpkt->err_nitems);
2568	cf = (struct conf_fudge *)&inpkt->u;
2569
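	/*
	 * Each item selects one fudge variable (time1, time2, val1, val2
	 * or the flag nibble) for one clock and hands the change to the
	 * driver via refclock_control().
	 */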
2570	while (items-- > 0) {
2571		AF(&addr) = AF_INET;
2572		NSRCADR(&addr) = cf->clockadr;
2573#ifdef ISC_PLATFORM_HAVESALEN
2574		addr.sa.sa_len = SOCKLEN(&addr);
2575#endif
2576		SET_PORT(&addr, NTP_PORT);
2577		if (!ISREFCLOCKADR(&addr) || NULL ==
2578		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2579			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2580			return;
2581		}
2582
2583		switch(ntohl(cf->which)) {
2584		    case FUDGE_TIME1:
2585			NTOHL_FP(&cf->fudgetime, &ltmp);
2586			LFPTOD(&ltmp, clock_stat.fudgetime1);
2587			clock_stat.haveflags = CLK_HAVETIME1;
2588			break;
2589		    case FUDGE_TIME2:
2590			NTOHL_FP(&cf->fudgetime, &ltmp);
2591			LFPTOD(&ltmp, clock_stat.fudgetime2);
2592			clock_stat.haveflags = CLK_HAVETIME2;
2593			break;
2594		    case FUDGE_VAL1:
2595			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
2596			clock_stat.haveflags = CLK_HAVEVAL1;
2597			break;
2598		    case FUDGE_VAL2:
2599			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
2600			clock_stat.haveflags = CLK_HAVEVAL2;
2601			break;
2602		    case FUDGE_FLAGS:
2603			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
2604			clock_stat.haveflags =
2605				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
2606			break;
2607		    default:
2608			msyslog(LOG_ERR, "set_clock_fudge: unrecognized fudge selector");
2609			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2610			return;
2611		}
2612
2613		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
2614	}
2615
2616	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2617}
2618#endif
2619
2620#ifdef REFCLOCK
2621/*
2622 * get_clkbug_info - get debugging info about a clock
2623 */
2624static void
2625get_clkbug_info(
2626	sockaddr_u *srcadr,
2627	endpt *inter,
2628	struct req_pkt *inpkt
2629	)
2630{
2631	register int i;
2632	register struct info_clkbug *ic;
2633	register u_int32 *clkaddr;
2634	register int items;
2635	struct refclockbug bug;
2636	sockaddr_u addr;
2637
2638	ZERO_SOCK(&addr);
2639	AF(&addr) = AF_INET;
2640#ifdef ISC_PLATFORM_HAVESALEN
2641	addr.sa.sa_len = SOCKLEN(&addr);
2642#endif
2643	SET_PORT(&addr, NTP_PORT);
2644	items = INFO_NITEMS(inpkt->err_nitems);
2645	clkaddr = (u_int32 *)&inpkt->u;
2646
2647	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
2648					       sizeof(struct info_clkbug));
2649
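	/*
	 * The value and timestamp counts reported by the driver are
	 * clamped to NUMCBUGVALUES and NUMCBUGTIMES respectively.
	 */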
2650	while (items-- > 0 && ic) {
2651		NSRCADR(&addr) = *clkaddr++;
2652		if (!ISREFCLOCKADR(&addr) || NULL ==
2653		    findexistingpeer(&addr, NULL, NULL, -1, 0, NULL)) {
2654			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2655			return;
2656		}
2657
2658		ZERO(bug);
2659		refclock_buginfo(&addr, &bug);
2660		if (bug.nvalues == 0 && bug.ntimes == 0) {
2661			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2662			return;
2663		}
2664
2665		ic->clockadr = NSRCADR(&addr);
2666		i = bug.nvalues;
2667		if (i > NUMCBUGVALUES)
2668		    i = NUMCBUGVALUES;
2669		ic->nvalues = (u_char)i;
2670		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
2671		while (--i >= 0)
2672		    ic->values[i] = htonl(bug.values[i]);
2673
2674		i = bug.ntimes;
2675		if (i > NUMCBUGTIMES)
2676		    i = NUMCBUGTIMES;
2677		ic->ntimes = (u_char)i;
2678		ic->stimes = htonl(bug.stimes);
2679		while (--i >= 0) {
2680			HTONL_FP(&bug.times[i], &ic->times[i]);
2681		}
2682
2683		ic = (struct info_clkbug *)more_pkt();
2684	}
2685	flush_pkt();
2686}
2687#endif
2688
2689/*
2690 * fill_info_if_stats - per-interface callback that fills one record
2691 */
2692static void
2693fill_info_if_stats(void *data, interface_info_t *interface_info)
2694{
2695	struct info_if_stats **ifsp = (struct info_if_stats **)data;
2696	struct info_if_stats *ifs = *ifsp;
2697	endpt *ep = interface_info->ep;
2698
2699	if (NULL == ifs)
2700		return;
2701
2702	ZERO(*ifs);
2703
2704	if (IS_IPV6(&ep->sin)) {
2705		if (!client_v6_capable)
2706			return;
2707		ifs->v6_flag = 1;
2708		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
2709		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
2710		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
2711	} else {
2712		ifs->v6_flag = 0;
2713		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
2714		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
2715		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
2716	}
2717	ifs->v6_flag = htonl(ifs->v6_flag);
2718	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
2719	ifs->family = htons(ep->family);
2720	ifs->flags = htonl(ep->flags);
2721	ifs->last_ttl = htonl(ep->last_ttl);
2722	ifs->num_mcast = htonl(ep->num_mcast);
2723	ifs->received = htonl(ep->received);
2724	ifs->sent = htonl(ep->sent);
2725	ifs->notsent = htonl(ep->notsent);
2726	ifs->ifindex = htonl(ep->ifindex);
2727	/* scope id is no longer kept in endpt (it lives in the in6_addr); report the ifindex */
2728	ifs->scopeid = ifs->ifindex;
2729	ifs->ifnum = htonl(ep->ifnum);
2730	ifs->uptime = htonl(current_time - ep->starttime);
2731	ifs->ignore_packets = ep->ignore_packets;
2732	ifs->peercnt = htonl(ep->peercnt);
2733	ifs->action = interface_info->action;
2734
2735	*ifsp = (struct info_if_stats *)more_pkt();
2736}
2737
2738/*
2739 * get_if_stats - get interface statistics
2740 */
2741static void
2742get_if_stats(
2743	sockaddr_u *srcadr,
2744	endpt *inter,
2745	struct req_pkt *inpkt
2746	)
2747{
2748	struct info_if_stats *ifs;
2749
2750	DPRINTF(3, ("wants interface statistics\n"));
2751
2752	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2753	    v6sizeof(struct info_if_stats));
2754
2755	interface_enumerate(fill_info_if_stats, &ifs);
2756
2757	flush_pkt();
2758}
2759
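/*
 * do_if_reload - update the interface list and return the resulting
 *		  per-interface records, including the action taken
 */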
2760static void
2761do_if_reload(
2762	sockaddr_u *srcadr,
2763	endpt *inter,
2764	struct req_pkt *inpkt
2765	)
2766{
2767	struct info_if_stats *ifs;
2768
2769	DPRINTF(3, ("wants interface reload\n"));
2770
2771	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2772	    v6sizeof(struct info_if_stats));
2773
2774	interface_update(fill_info_if_stats, &ifs);
2775
2776	flush_pkt();
2777}
2778
2779