/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_tc.c 126600 2004-03-04 14:14:23Z phk $");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

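/*
 * Initializer order: tc_get_timecount, tc_poll_pps, tc_counter_mask,
 * tc_frequency, tc_name, tc_quality; see struct timecounter in
 * <sys/timetc.h>.
 */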
static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	u_int64_t		th_scale;
	u_int	 		th_offset_count;
	struct bintime		th_offset;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};

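/*
 * The timehands are organized as a ring of ten structures.  tc_windup()
 * prepares the next element while readers continue to use the current one,
 * and the generation field lets lock-free readers detect that the element
 * they were reading has been recycled underneath them.
 */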
extern struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
	{0, 0},
	{0, 0},
	1,
	&th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

time_t time_second = 1;
time_t time_uptime = 0;

static struct bintime boottimebin;
struct timeval boottime;
SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timeval, "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "");

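/*
 * Per-function call counters, exported read-only under kern.timecounter.
 */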
#define TC_STATS(foo) \
	static u_int foo; \
	SYSCTL_UINT(_kern_timecounter, OID_AUTO, foo, CTLFLAG_RD, &foo, 0, "");\
	struct __hack

TC_STATS(nbinuptime);    TC_STATS(nnanouptime);    TC_STATS(nmicrouptime);
TC_STATS(nbintime);      TC_STATS(nnanotime);      TC_STATS(nmicrotime);
TC_STATS(ngetbinuptime); TC_STATS(ngetnanouptime); TC_STATS(ngetmicrouptime);
TC_STATS(ngetbintime);   TC_STATS(ngetnanotime);   TC_STATS(ngetmicrotime);
TC_STATS(nsetclock);

#undef TC_STATS

static void tc_windup(void);

/*
 * Return the difference between the timehands' counter value now and the
 * value it had when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */
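/*
 * binuptime() adds th_scale * tc_delta(th) to th_offset; since th_scale is
 * roughly 2^64 / tc_frequency, the product is a 64-bit binary fraction of
 * a second.  The other functions either build on binuptime() (adding the
 * boot time for UTC) or return values cached at the last tc_windup().
 */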

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	nbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	nbintime++;
	binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	nnanotime++;
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrotime++;
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbintime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;

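	/*
	 * The hardware counter wraps every tc_counter_mask / tc_frequency
	 * seconds, so hz must be high enough that tc_windup() runs at least
	 * that often; counters that wrap faster than that are demoted below
	 * automatic selection.
	 */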
	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
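	/* Warm up the new timecounter before switching to it. */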
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	nsetclock++;
	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %jd.%09ld to %jd.%09ld\n",
		    (intmax_t)ts2.tv_sec, ts2.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
}

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			boottimebin.sec += bt.sec - t;
	}
	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
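	/*
	 * For example, a 1 MHz counter yields th_scale of roughly
	 * 2^64 / 10^6, so adding th_scale * delta to the bintime fraction
	 * advances the time by about delta microseconds.
	 */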
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}

/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");


/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	char buf[32], *spc;
	struct timecounter *tc;
	int error;

	spc = "";
	error = 0;
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		sprintf(buf, "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		error = SYSCTL_OUT(req, buf, strlen(buf));
		spc = " ";
	}
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "");

/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

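/*
 * pps_capture() is meant to be cheap enough to call from a PPS interrupt
 * handler: it only latches the current timehands and counter value.  The
 * conversion to a timestamp happens later in pps_event(), which discards
 * the capture if a tc_windup() ran in between (the generation check).
 */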
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		u_int64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (u_int64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");

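/*
 * tc_ticktock() is called from the periodic clock interrupt (hardclock())
 * and invokes tc_windup() every tc_tick ticks, often enough that the
 * hardware counter cannot wrap unnoticed between updates.
 */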
void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	tc_windup();
}

static void
inittimecounter(void *dummy)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL)
