kern_tc.c revision 150348
/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_tc.c 150348 2005-09-19 22:16:31Z andre $");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200
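/*
 * For instance, if timeouts stall for three seconds, tc_windup() below
 * computes i == 3 and calls ntp_update_second() three times; after a
 * boot-time step of many years, i exceeds LARGE_STEP and is clamped to 2,
 * which still catches a possibly missed leap second without looping once
 * per "missed" second.
 */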

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	u_int64_t		th_scale;
	u_int			th_offset_count;
	struct bintime		th_offset;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};

static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
	{0, 0},
	{0, 0},
	1,
	&th1
};
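/*
 * A short reading of the initializer above: th0 starts on the dummy
 * counter with th_scale set to roughly 2^64 / 1000000, i.e. the scale
 * that matches the dummy's nominal 1 MHz frequency, an initial uptime
 * offset of one second and generation 1, so the read loops below never
 * spin on the "generation == 0" case before the first tc_windup() call.
 */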

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

time_t time_second = 1;
time_t time_uptime = 1;

static struct bintime boottimebin;
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "");

#define TC_STATS(foo) \
	static u_int foo; \
	SYSCTL_UINT(_kern_timecounter, OID_AUTO, foo, CTLFLAG_RD, &foo, 0, "");\
	struct __hack

TC_STATS(nbinuptime);    TC_STATS(nnanouptime);    TC_STATS(nmicrouptime);
TC_STATS(nbintime);      TC_STATS(nnanotime);      TC_STATS(nmicrotime);
TC_STATS(ngetbinuptime); TC_STATS(ngetnanouptime); TC_STATS(ngetmicrouptime);
TC_STATS(ngetbintime);   TC_STATS(ngetnanotime);   TC_STATS(ngetmicrotime);
TC_STATS(nsetclock);

#undef TC_STATS

static void tc_windup(void);

static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	int tv[2];

	if (req->flags & SCTL_MASK32) {
		tv[0] = boottime.tv_sec;
		tv[1] = boottime.tv_usec;
		return SYSCTL_OUT(req, tv, sizeof(tv));
	} else
#endif
		return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}

/*
 * Return the difference between the timehands' counter value now and the
 * value it had when we copied it into the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}
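/*
 * The mask makes the subtraction wrap correctly on counters narrower than
 * 32 bits.  For example, with a 16-bit counter (tc_counter_mask 0xffff)
 * that read 0xfff0 at the last windup and reads 0x0010 now,
 * (0x0010 - 0xfff0) & 0xffff == 0x0020, i.e. 32 ticks elapsed.
 */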

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

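/*
 * All of them follow the same lock-free pattern: snapshot the current
 * timehands pointer and its generation, read what we need, and retry if
 * the generation was 0 (an update was in progress) or changed while we
 * were reading.  th_scale holds roughly 2^64 divided by the counter
 * frequency (with the NTP adjustment folded in, see tc_windup()), so
 * th_scale * tc_delta(th) is the time since the last windup expressed as
 * a 64-bit binary fraction of a second, ready for bintime_addx().
 */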
void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	nbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	nbintime++;
	binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	nnanotime++;
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrotime++;
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbintime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}
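/*
 * About the margin check above: u = tc_frequency / tc_counter_mask is
 * roughly how many times per second the counter wraps, and tc_init()
 * demands that this, padded by 10%, stays below hz, the hardclock rate
 * that drives tc_windup().  For a rough feel: a 16-bit counter at about
 * 1.19 MHz wraps ~18 times a second, so hz >= 20 is enough, while a
 * 32-bit counter at a few GHz wraps about once a second and is fine for
 * any plausible hz.
 */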

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	nsetclock++;
	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n",
		    (intmax_t)ts2.tv_sec, ts2.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
}
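/*
 * In other words: bt2 is the current uptime, so the new boot time becomes
 * (requested UTC - uptime).  Every later bintime()/nanotime() call returns
 * uptime + boottimebin, which now lands on the requested UTC plus whatever
 * has elapsed since the step.  The old UTC kept in bt2 (uptime + old
 * boottimebin) is only used for the optional "Time stepped" log line.
 */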

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			boottimebin.sec += bt.sec - t;
	}
	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
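	/*
	 * Spelled out: the code below adds (th_adjustment / 1024) * 2199 to
	 * a scale based on 2^63 and then doubles the result, so the
	 * adjustment is effectively multiplied by 2199/512 = 4.294921875
	 * instead of the exact 2^32/10^9 = 4.294967296; the shortfall is
	 * about 1.06e-5 of the adjustment, i.e. the ~10PPM undercompensation
	 * mentioned above.  Working at 2^63 and doubling at the end is what
	 * "sacrifices" the lowest result bit while keeping the division
	 * within a 64-bit intermediate.
	 */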
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}

/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");

/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	char buf[32], *spc;
	struct timecounter *tc;
	int error;

	spc = "";
	error = 0;
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		sprintf(buf, "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		error = SYSCTL_OUT(req, buf, strlen(buf));
		spc = " ";
	}
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "");

/*
 * RFC 2783 PPS-API implementation.
 */
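/*
 * A rough sketch of how a driver is expected to use this API (see also
 * sys/timepps.h); names like "sc" are of course the driver's own:
 *
 *	sc->pps.ppscap = PPS_CAPTUREASSERT;	(at attach time)
 *	pps_init(&sc->pps);
 *	...
 *	pps_capture(&sc->pps);			(early in the interrupt path)
 *	pps_event(&sc->pps, PPS_CAPTUREASSERT);	(once the pulse is confirmed)
 *
 * PPS_IOC_* ioctls coming from userland are simply forwarded to pps_ioctl().
 */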

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOIOCTL);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS processing until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		u_int64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (u_int64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */
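/*
 * For a feel for the numbers: a 32-bit counter running at 4 GHz wraps
 * after roughly 2^32 / 4e9 ~= 1.07 seconds, so updating once per
 * millisecond (the default below when hz >= 1000) leaves ample margin,
 * and the get*() functions then return values that are at most about one
 * millisecond stale.
 */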

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");

void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	tc_windup();
}

static void
inittimecounter(void *dummy)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
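	/*
	 * For example, hz = 100 gives tc_tick = 1 (one windup per 10 ms
	 * tick), while hz = 4000 gives tc_tick = 4, i.e. a windup roughly
	 * every millisecond.
	 */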
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL)