/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t	hz_tick_interval = 1;

#if CONFIG_DTRACE
static void clock_track_calend_nowait(void);
#endif

decl_simple_lock_data(static,clock_lock)

/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;
	int64_t		adjtotal;	/* Nanosecond remaining total adjustment */
	uint64_t	adjdeadline;	/* Absolute time value for next adjustment period */
	uint32_t	adjinterval;	/* Absolute time interval of adjustment period */
	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
	uint32_t	adjactive;
	timer_call_data_t	adjcall;
} clock_calend;
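
/*
 * Illustrative sketch (not compiled) of the algorithm above: a calendar
 * read is epoch + CONV(current absolute time + offset), where
 * absolutetime_to_microtime() performs the CONV step; compare
 * clock_get_calendar_microtime() below.
 */
#if 0
	uint64_t	abstime = mach_absolute_time() + clock_calend.offset;
	uint32_t	secs, microsecs;

	absolutetime_to_microtime(abstime, &secs, &microsecs);
	secs += clock_calend.epoch;		/* TOD seconds */
#endif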

#if CONFIG_DTRACE
/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];
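
/*
 * Example of the generation protocol: the writer takes gen from, say,
 * 4 to 5 (low bit set, update in progress) and then to 6 (stable).  A
 * reader masks the low bit off its snapshot's gen; if the live gen no
 * longer matches, the snapshot straddled an update and must be retried.
 */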
#endif

/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define	calend_adjbig		(NSEC_PER_SEC)			/* use 10x skew above adjbig ns */
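
/*
 * Example: at the "standard" skew of 40 us per 10 ms period the calendar
 * slews at 0.4%, so adjusting by a full second takes about 250 seconds;
 * above calend_adjbig the 10x skew (400 us per period, 4%) covers the
 * same second in about 25 seconds.
 */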

static uint32_t		calend_set_adjustment(
						int32_t			*secs,
						int32_t			*microsecs);

static void			calend_adjust_call(void);
static uint32_t		calend_adjust(void);

static thread_call_data_t	calend_wakecall;

extern	void	IOKitResetTime(void);

static uint64_t		clock_boottime;				/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if (((rfrac) += (frac)) >= (unit)) {			\
		(rfrac) -= (unit);							\
		(rsecs) += 1;								\
	}												\
	(rsecs) += (secs);								\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if ((int32_t)((rfrac) -= (frac)) < 0) {			\
		(rfrac) += (unit);							\
		(rsecs) -= 1;								\
	}												\
	(rsecs) -= (secs);								\
MACRO_END
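
/*
 * Usage sketch (not compiled): the fraction carries into, or borrows
 * from, the seconds field.
 */
#if 0
	uint32_t	rsecs = 5, rfrac = 800000;		/* 5.8 s */

	TIME_ADD(rsecs, 1, rfrac, 700000, USEC_PER_SEC);
	/* now rsecs == 7, rfrac == 500000: 7.5 s */
#endif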

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	simple_lock_init(&clock_lock, 0);

	timer_call_setup(&clock_calend.adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();

	/*
	 * Initialize the timer callouts.
	 */
	timer_call_initialize();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	clock_calend.adjinterval = abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t			out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
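
/*
 * User-space sketch (not part of this file's build): the timebase
 * constant fetched above scales mach_absolute_time() ticks into
 * nanoseconds.
 */
#if 0
	mach_timebase_info_data_t	tb;
	uint64_t	start, elapsed_ns;

	mach_timebase_info(&tb);
	start = mach_absolute_time();
	/* ... work ... */
	elapsed_ns = (mach_absolute_time() - start) * tb.numer / tb.denom;
#endif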

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

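	/*
	 *	During a negative slew the calendar must never appear to
	 *	run backward: the reported value is held at adjstart until
	 *	this period's full adjoffset has elapsed, after which the
	 *	entire offset is backed out of the current time.
	 */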
	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = now - clock_calend.adjstart;

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = now - clock_calend.adjstart;

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;	/* microsecond resolution, scaled to ns */

	*secs += clock_calend.epoch;

	simple_unlock(&clock_lock);
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	uint32_t			*secs,
	uint32_t			*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = now - clock_calend.adjstart;

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += clock_calend.epoch;
	}

	simple_unlock(&clock_lock);
	splx(s);
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	uint32_t			secs,
	uint32_t			microsecs)
{
	uint32_t		sys, microsys;
	uint32_t		newsecs;
	spl_t			s;

	/*
	 *	Round to the nearest second for the platform clock.
	 *	(The previous comparison against 500*USEC_PER_SEC was
	 *	always true, so the value was silently truncated.)
	 */
	newsecs = (microsecs < (USEC_PER_SEC / 2))?
						secs: secs + 1;

	s = splclock();
	simple_lock(&clock_lock);

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;
	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 *	Cancel any adjustment in progress.
	 */
	clock_calend.adjdelta = clock_calend.adjtotal = 0;

	simple_unlock(&clock_lock);

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
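
/*
 * User-space sketch (not part of this file's build): settimeofday(2)
 * steps the calendar and ultimately lands in the routine above.
 */
#if 0
	struct timeval	tv = { 1000000000, 0 };	/* 2001-09-09 01:46:40 UTC */

	settimeofday(&tv, NULL);
#endif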

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	uint32_t		sys, microsys;
	uint32_t		microsecs = 0, secs = PEGetGMTTimeOfDay();
	spl_t			s;

	s = splclock();
	simple_lock(&clock_lock);

	commpage_disable_timestamp();

	if ((int32_t)secs >= (int32_t)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;
		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 *	Cancel any adjustment in progress.
		 */
		clock_calend.adjdelta = clock_calend.adjtotal = 0;
	}

	simple_unlock(&clock_lock);
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	*secs = clock_boottime;
	*nanosecs = 0;
}

/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		clock_calend.adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&clock_calend.adjcall, clock_calend.adjdeadline))
			clock_calend.adjactive++;
	}
	else
	if (timer_call_cancel(&clock_calend.adjcall))
		clock_calend.adjactive--;

	simple_unlock(&clock_lock);
	splx(s);
}
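
/*
 * User-space sketch (not part of this file's build): adjtime(2)
 * requests a gradual slew and arrives here via the BSD layer; any
 * previously outstanding adjustment is returned through the second
 * argument.
 */
#if 0
	struct timeval	delta = { 0, -2500 };	/* slew back 2.5 ms */

	adjtime(&delta, NULL);
#endif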

static uint32_t
calend_set_adjustment(
	int32_t				*secs,
	int32_t				*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	ototal = clock_calend.adjtotal;

	if (total != 0) {
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = total;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = t64;
		}
		else {
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;

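			/*
			 *	Record the slew start: readers pin the
			 *	reported time at adjstart until the period's
			 *	adjoffset has elapsed, so the calendar never
			 *	runs backward (see
			 *	clock_get_calendar_microtime()).
			 */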
			clock_calend.adjstart = now;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = t64;
		}

		clock_calend.adjtotal = total;
		clock_calend.adjdelta = delta;

		interval = clock_calend.adjinterval;
	}
	else
		clock_calend.adjdelta = clock_calend.adjtotal = 0;

	if (ototal != 0) {
		*secs = ototal / NSEC_PER_SEC;
		*microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	simple_lock(&clock_lock);

	if (--clock_calend.adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(),
																&clock_calend.adjdeadline);

			if (!timer_call_enter(&clock_calend.adjcall, clock_calend.adjdeadline))
				clock_calend.adjactive++;
		}
	}

	simple_unlock(&clock_lock);
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		/*
		 *	Slewing forward: credit this period's offset and
		 *	shrink the final delta to the remaining total.
		 */
		clock_calend.offset += clock_calend.adjoffset;

		clock_calend.adjtotal -= delta;
		if (delta > clock_calend.adjtotal) {
			clock_calend.adjdelta = delta = clock_calend.adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = t64;
		}
	}
	else
	if (delta < 0) {
		/*
		 *	Slewing backward: debit this period's offset and
		 *	begin a new interpolation window at the current time.
		 */
		clock_calend.offset -= clock_calend.adjoffset;

		clock_calend.adjtotal -= delta;
		if (delta < clock_calend.adjtotal) {
			clock_calend.adjdelta = delta = clock_calend.adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = clock_calend.adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	clock_wakeup_calendar:
 *
 *	Interface to power management, used
 *	to initiate the reset of the calendar
 *	on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
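
/*
 * User-space sketch (not part of this file's build): sleeping until an
 * absolute deadline via mach_wait_until(), which enters the trap above.
 */
#if 0
	mach_timebase_info_data_t	tb;
	uint64_t	deadline;

	mach_timebase_info(&tb);
	deadline = mach_absolute_time() +
				(100 * 1000000ULL) * tb.denom / tb.numer;	/* ~100 ms */
	mach_wait_until(deadline);
#endif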

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	/*
	 *	Spin if the wait is shorter than a handful of context
	 *	switch times, or if blocking is not possible because
	 *	preemption is disabled or interrupts are off; otherwise
	 *	block until just before the deadline.
	 */
	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}

void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		end;

	clock_interval_to_deadline(interval, scale_factor, &end);

	clock_delay_until(end);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}

void
clock_absolutetime_interval_to_deadline(
	uint64_t			abstime,
	uint64_t			*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t			interval,
	uint64_t			abstime,
	uint64_t			*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		/*
		 *	The deadline has fallen behind: resynchronize to one
		 *	interval past the supplied time rather than firing a
		 *	burst of catch-up events.
		 */
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		/*
		 *	The supplied time may itself be stale; check once
		 *	more against the current time.
		 */
		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
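
/*
 * Usage sketch (not compiled; `mycall', `interval', `deadline' and
 * `active' are illustrative names): rearming a periodic timer from its
 * expiration path, as calend_adjust_call() does above.  If handling
 * fell behind, the deadline resynchronizes forward instead of firing a
 * burst of catch-up events.
 */
#if 0
	clock_deadline_for_periodic_event(interval, mach_absolute_time(),
														&deadline);

	if (!timer_call_enter(&mycall, deadline))
		active++;
#endif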

#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	uint32_t			*secs,
	uint32_t			*nanosecs)
{
	int i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = now - stable.calend.adjstart;

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}
#endif /* CONFIG_DTRACE */
