kern_tc.c revision 105354
/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: head/sys/kern/kern_tc.c 105354 2002-10-17 20:03:38Z robert $
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy",
};

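/*
 * For reference, the positional initializer above lines up with the leading
 * members of struct timecounter in <sys/timetc.h> (names taken from that
 * header), roughly:
 *
 *	tc_get_timecount = dummy_get_timecount
 *	tc_poll_pps	 = 0		no PPS polling method
 *	tc_counter_mask	 = ~0u		use all 32 counter bits
 *	tc_frequency	 = 1000000	pretend to run at 1 MHz
 *	tc_name		 = "dummy"
 */
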
struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	u_int64_t		th_scale;
	u_int	 		th_offset_count;
	struct bintime		th_offset;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};

extern struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
	{0, 0},
	{0, 0},
	1,
	&th1
};

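/*
 * The ten timehands above form a ring through th_next.  tc_windup() always
 * writes into the element following the current one: it zeroes
 * th_generation, copies and updates the payload, and only then stores a new
 * non-zero generation.  Readers therefore retry whenever they see a zero or
 * changed generation.  A sketch of the reader side (this is the pattern
 * binuptime() and friends use below):
 *
 *	do {
 *		th = timehands;
 *		gen = th->th_generation;
 *		(read whatever fields are needed from *th)
 *	} while (gen == 0 || gen != th->th_generation);
 */
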
static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

time_t time_second = 1;

static struct bintime boottimebin;
struct timeval boottime;
SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timeval, "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");

#define TC_STATS(foo) \
	static u_int foo; \
	SYSCTL_UINT(_kern_timecounter, OID_AUTO, foo, CTLFLAG_RD, &foo, 0, "");\
	struct __hack

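/*
 * For illustration, TC_STATS(nbinuptime) below expands to roughly:
 *
 *	static u_int nbinuptime;
 *	SYSCTL_UINT(_kern_timecounter, OID_AUTO, nbinuptime, CTLFLAG_RD,
 *	    &nbinuptime, 0, "");
 *
 * i.e. a per-function call counter exported read-only under
 * kern.timecounter.  The trailing "struct __hack" only exists to swallow
 * the semicolon at the invocation site.
 */
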
TC_STATS(nbinuptime);    TC_STATS(nnanouptime);    TC_STATS(nmicrouptime);
TC_STATS(nbintime);      TC_STATS(nnanotime);      TC_STATS(nmicrotime);
TC_STATS(ngetbinuptime); TC_STATS(ngetnanouptime); TC_STATS(ngetmicrouptime);
TC_STATS(ngetbintime);   TC_STATS(ngetnanotime);   TC_STATS(ngetmicrotime);

#undef TC_STATS

static void tc_windup(void);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}

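/*
 * Worked example of the masked subtraction in tc_delta() above: with a
 * 16 bit counter (tc_counter_mask == 0xffff), th_offset_count == 0xfff0 and
 * a current reading of 0x0010, (0x0010 - 0xfff0) & 0xffff == 0x0020, i.e.
 * 32 ticks have elapsed even though the hardware counter wrapped in between.
 */
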
/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

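/*
 * Naming convention for the functions below, as documented in <sys/time.h>:
 * the "up" variants return time elapsed since boot, the others add
 * boottimebin to produce wall clock time; the bin-, nano- and micro-
 * prefixes select struct bintime, struct timespec and struct timeval
 * results; and the get-prefixed forms return the value cached at the last
 * tc_windup() instead of reading the hardware, trading precision for speed.
 */
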
void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	nbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	nbintime++;
	binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	nnanotime++;
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrotime++;
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbintime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter.
 * We should really try to rank the timecounters and intelligently determine
 * if the new timecounter is better than the current one.  This is subject
 * to further study.  For now always use the new timecounter.
 */
void
tc_init(struct timecounter *tc)
{
	unsigned u;

	printf("Timecounter \"%s\"  frequency %lu Hz",
	    tc->tc_name, (u_long)tc->tc_frequency);

	u = tc->tc_frequency / tc->tc_counter_mask;
	if (u > hz) {
		printf(" -- Insufficient hz, needs at least %u\n", u);
		return;
	}
	tc->tc_next = timecounters;
	timecounters = tc;
	printf("\n");
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}

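/*
 * Illustrative registration from a hardware driver (the foo_* names are
 * hypothetical): a driver supplies a read method and a filled-in
 * struct timecounter, then hands it to tc_init(), typically from its
 * attach routine:
 *
 *	static u_int
 *	foo_get_timecount(struct timecounter *tc)
 *	{
 *		return (READ_HW_COUNTER());
 *	}
 *
 *	static struct timecounter foo_timecounter = {
 *		foo_get_timecount, 0, (1 << 24) - 1, 3579545, "foo"
 *	};
 *
 *	tc_init(&foo_timecounter);
 */
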
/* Report the frequency of the current timecounter. */
u_int32_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of GMT.  This is done by modifying our estimate of
 * when we booted.  XXX: needs further work.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;

	nanouptime(&ts2);
	boottime.tv_sec = ts->tv_sec - ts2.tv_sec;
	/* XXX boottime should probably be a timespec. */
	boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000;
	if (boottime.tv_usec < 0) {
		boottime.tv_usec += 1000000;
		boottime.tv_sec--;
	}
	timeval2bintime(&boottime, &boottimebin);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
}

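/*
 * In other words, tc_setclock() preserves the relation
 *
 *	wall clock time = boottimebin + uptime
 *
 * by moving only the boot time estimate; the monotonically increasing
 * uptime returned by the *uptime() functions is never stepped.
 */
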
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally only
	 * iterates once, but in extreme situations it might keep NTP sane
	 * if timeouts are not run for several seconds.
	 */
	for (i = th->th_offset.sec - tho->th_offset.sec; i > 0; i--)
		ntp_update_second(&th->th_adjustment, &th->th_offset.sec);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want a 64 bit binary fraction of a second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64 bit int
	 * we can only multiply by about 850 without overflowing, which
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
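	/*
	 * Worked example (illustrative numbers): for an i8254 timecounter
	 * running at 1193182 Hz with no NTP adjustment, th_scale ends up
	 * at roughly 2^64 / 1193182 ~= 1.5 * 10^13, i.e. each counter tick
	 * advances the 64 bit fraction by about 838 ns.  The fraction
	 * 2199 / 512 = 4.294921875 approximates 2^32 / 10^9 = 4.294967296
	 * from below by about 10.6 PPM, which is the systematic
	 * undercompensation mentioned above.
	 */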
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/* Update the GMT timestamps used for the get*() functions. */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	timehands = th;
}

/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");

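/*
 * From userland the current counter can be read or changed through this
 * sysctl, for example:
 *
 *	sysctl kern.timecounter.hardware
 *	sysctl kern.timecounter.hardware="i8254"
 *
 * The new name must match the tc_name of a counter previously registered
 * with tc_init(); anything else returns EINVAL.
 */
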
/*
 * RFC 2783 PPS-API implementation.
 */

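/*
 * A rough sketch of how a driver with a PPS-capable input is expected to
 * use this API (pps and sc are hypothetical driver state here): set
 * pps->ppscap and call pps_init() at attach time, forward the time-pps
 * ioctls to pps_ioctl(), and from the interrupt handler capture the
 * counter as close to the signal edge as possible:
 *
 *	pps_capture(&sc->pps);			(at the edge)
 *	...
 *	pps_event(&sc->pps, PPS_CAPTUREASSERT);	(once the edge is classified)
 */
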
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Return if nothing really happened. */
	if (*pcount == pps->capcount)
		return;

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many nanoseconds elapsed since
		 * the previous event.
		 * I have never been able to convince myself that this code
		 * is actually correct:  Using th_scale is bound to contain
		 * a phase correction component from userland, when running
		 * as FLL, so the number hardpps() gets is not meaningful IMO.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, pps->capth->th_scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	tc_windup();
}

static void
inittimecounter(void *dummy)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}

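/*
 * Example of the tc_tick calculation above: with hz = 100 this picks
 * tc_tick = 1 and reports "Timecounters tick every 10.000 msec"; with
 * hz = 1000 it also picks tc_tick = 1 (1.000 msec); only for hz above
 * 1000 does tc_windup() get skipped on some hardclock ticks.
 */
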
SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_FIRST, inittimecounter, NULL)
