/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 */

#include <mach/mach_types.h>

#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

uint32_t	hz_tick_interval = 1;


decl_simple_lock_data(,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)


/*
 *	Time of day (calendar) variables.
 *
 *	Algorithm:
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;
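
/*
 * Illustrative sketch (not part of this file's build): how a calendar value
 * is derived from the fields above when no adjustment is in progress.  The
 * helper name example_calendar_secs() is hypothetical.
 *
 *	static clock_sec_t
 *	example_calendar_secs(void)
 *	{
 *		uint64_t	now = mach_absolute_time();
 *		clock_sec_t	secs;
 *		clock_usec_t	microsecs;
 *
 *		now += clock_calend.offset;
 *		absolutetime_to_microtime(now, &secs, &microsecs);
 *
 *		return (secs + (clock_sec_t)clock_calend.epoch);
 *	}
 */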

#if	CONFIG_DTRACE

/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];
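
/*
 * Illustrative reader sketch (the real reader is
 * clock_get_calendar_nanotime_nowait() below): snapshot one element, clear
 * the low "update in progress" bit from the generation count, and retry on
 * the other element if the generation changed underneath us.
 *
 *	struct unlocked_clock_calend snap;
 *	int i = 0;
 *
 *	for (;;) {
 *		snap = flipflop[i];				// take snapshot
 *		(void)hw_atomic_and(&snap.gen, ~(uint32_t)1);	// strip low bit
 *		if (flipflop[i].gen == snap.gen)
 *			break;					// snapshot is stable
 *		i ^= 1;						// try the other element
 *	}
 */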

static void clock_track_calend_nowait(void);

#endif

/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define	calend_adjbig		(NSEC_PER_SEC)			/* use 10x skew above adjbig ns */
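
/*
 * For example (illustrative arithmetic): with a 10 ms adjustment period and
 * the standard 40 us skew, the calendar slews by 40 us / 10 ms = 4 ms per
 * second of real time, so a 0.5 s adjtime() correction completes in roughly
 * 0.5 s / (4 ms/s) = 125 seconds.  Corrections larger than calend_adjbig
 * (1 s) use 10x the skew and slew at roughly 40 ms per second.
 */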

static int64_t				calend_adjtotal;		/* Nanosecond remaining total adjustment */
static uint64_t				calend_adjdeadline;		/* Absolute time value for next adjustment period */
static uint32_t				calend_adjinterval;		/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t				calend_adjactive;

static uint32_t		calend_set_adjustment(
						long			*secs,
						int				*microsecs);

static void			calend_adjust_call(void);
static uint32_t		calend_adjust(void);

static thread_call_data_t	calend_wakecall;

extern	void	IOKitResetTime(void);

void _clock_delay_until_deadline(uint64_t		interval,
								 uint64_t		deadline);

static uint64_t		clock_boottime;				/* Seconds boottime epoch */

#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if (((rfrac) += (frac)) >= (unit)) {			\
		(rfrac) -= (unit);							\
		(rsecs) += 1;								\
	}												\
	(rsecs) += (secs);								\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN											\
	if ((int)((rfrac) -= (frac)) < 0) {				\
		(rfrac) += (unit);							\
		(rsecs) -= 1;								\
	}												\
	(rsecs) -= (secs);								\
MACRO_END
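
/*
 * Worked example (illustrative values): TIME_SUB(rsecs, secs, rfrac, frac,
 * USEC_PER_SEC) with rsecs = 10, rfrac = 200000, secs = 3 and frac = 700000
 * first borrows a second (rfrac becomes 500000, rsecs becomes 9), then
 * subtracts the whole seconds, leaving 6 s 500000 us.
 */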

/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();
}

/*
 *	clock_init:
 *
 *	Called on a processor each time it is started.
 */
void
clock_init(void)
{
	clock_oldinit();
}

/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}

/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args *args)
{
	mach_vm_address_t 			out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
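
/*
 * Illustrative user-space sketch (not part of this file): the constant
 * returned by this trap, obtained via mach_timebase_info(), scales
 * mach_absolute_time() units to nanoseconds.
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t	tb;
 *	uint64_t			abs, ns;
 *
 *	mach_timebase_info(&tb);
 *	abs = mach_absolute_time();
 *	ns  = abs * tb.numer / tb.denom;	// may overflow for very large abs
 */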

/*
 *	Calendar routines.
 */

/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t			*secs,
	clock_usec_t		*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
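		/*
		 * For example (illustrative values): with adjstart = 1000,
		 * adjoffset = 50 and now = 1030, the delta has not yet passed
		 * (t32 = 30 <= 50), so now is pinned back to 1000; once now
		 * reaches 1060, t32 = 60 > 50 and now is simply reduced by 50.
		 */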
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t			*secs,
	clock_nsec_t		*nanosecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);

	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)clock_calend.epoch;

	clock_unlock();
	splx(s);
}

/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t		*secs,
	clock_usec_t	*microsecs)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);
}

/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t			secs,
	clock_usec_t		microsecs)
{
	clock_sec_t			sys;
	clock_usec_t		microsys;
	clock_sec_t			newsecs;
	spl_t				s;

	/* round to the nearest second when setting the platform clock */
	newsecs = (microsecs < 500*NSEC_PER_USEC)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t			sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t 		microsys, microsecs = 0;
	spl_t				s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 *	Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}

/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t			*secs,
	clock_nsec_t		*nanosecs)
{
	spl_t	s;

	s = splclock();
	clock_lock();

	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(s);
}

/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int			*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}

static uint32_t
calend_set_adjustment(
	long			*secs,
	int				*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time. Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / NSEC_PER_SEC);
		*microsecs = (int)((ototal % NSEC_PER_SEC) / NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
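
/*
 * Worked example (illustrative values): adjtime() of +0.5 s gives
 * total = 500000000 ns, below calend_adjbig, so delta stays at the standard
 * 40000 ns skew.  Each 10 ms adjustment period then advances the calendar by
 * an extra 40 us until calend_adjtotal is consumed: roughly 12500 periods,
 * or about 125 seconds of real time.
 */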

static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}

static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
		if (delta < 0) {
			clock_calend.offset -= clock_calend.adjoffset;

			calend_adjtotal -= delta;
			if (delta < calend_adjtotal) {
				clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

				nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
				clock_calend.adjoffset = (uint32_t)t64;
			}

			if (clock_calend.adjdelta != 0)
				clock_calend.adjstart = now;
		}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}

/*
 *	clock_wakeup_calendar:
 *
 *	Interface to power management, used
 *	to initiate the reset of the calendar
 *	on wake from sleep event.
 */
void
clock_wakeup_calendar(void)
{
	thread_call_enter(&calend_wakecall);
}

/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
 *
 * Parameters:    args->deadline          Absolute time deadline to wait until
 *
 * Returns:        0                      Success
 *                !0                      Not success
 *
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t	wresult;

	wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
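
/*
 * Illustrative user-space sketch (not part of this file): mach_wait_until()
 * is the user-level wrapper for this trap and takes an absolute deadline in
 * mach_absolute_time() units.
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t	tb;
 *	uint64_t			ten_ms_abs;
 *
 *	mach_timebase_info(&tb);
 *	ten_ms_abs = 10000000ULL * tb.denom / tb.numer;	// ~10 ms in abs units
 *	mach_wait_until(mach_absolute_time() + ten_ms_abs);
 */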

void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}

/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{

	if (interval == 0)
		return;

	if (	ml_delay_should_spin(interval)	||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(interval, deadline);
	} else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);

		thread_block(THREAD_CONTINUE_NULL);
	}
}


void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}

void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}

/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t			interval,
	uint32_t			scale_factor,
	uint64_t			*result)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}
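
/*
 * Illustrative sketch (hypothetical caller): computing a deadline 100 ms in
 * the future for use with a timer or wait primitive.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	// pass deadline to timer_call_enter(), assert_wait_deadline(), etc.
 */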

void
clock_absolutetime_interval_to_deadline(
	uint64_t			abstime,
	uint64_t			*result)
{
	*result = mach_absolute_time() + abstime;
}

void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}

void
clock_deadline_for_periodic_event(
	uint64_t			interval,
	uint64_t			abstime,
	uint64_t			*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
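
/*
 * Illustrative sketch (hypothetical caller): a periodic handler keeps its
 * deadline drift-free by stepping it with clock_deadline_for_periodic_event()
 * instead of recomputing it from "now" on every expiration.
 *
 *	static uint64_t		example_deadline;	// hypothetical state
 *	static uint64_t		example_interval;	// absolute time units
 *
 *	static void
 *	example_periodic_handler(void)
 *	{
 *		clock_deadline_for_periodic_event(example_interval,
 *				mach_absolute_time(), &example_deadline);
 *		// re-arm a timer for example_deadline here
 *	}
 */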

#if	CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t			*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one; if it _was_ in progress, it will be off by two;
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}

static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */
