1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32/*
33 *	File:		i386/rtclock.c
34 *	Purpose:	Routines for handling the machine dependent
35 *			real-time clock. Historically, this clock is
36 *			generated by the Intel 8254 Programmable Interval
37 *			Timer, but local apic timers are now used for
38 *			this purpose with the master time reference being
39 *			the cpu clock counted by the timestamp MSR.
40 */
41
42#include <platforms.h>
43
44#include <mach/mach_types.h>
45
46#include <kern/cpu_data.h>
47#include <kern/cpu_number.h>
48#include <kern/clock.h>
49#include <kern/host_notify.h>
50#include <kern/macro_help.h>
51#include <kern/misc_protos.h>
52#include <kern/spl.h>
53#include <kern/assert.h>
54#include <kern/etimer.h>
55#include <mach/vm_prot.h>
56#include <vm/pmap.h>
57#include <vm/vm_kern.h>		/* for kernel_map */
58#include <architecture/i386/pio.h>
59#include <i386/machine_cpu.h>
60#include <i386/cpuid.h>
61#include <i386/cpu_threads.h>
62#include <i386/mp.h>
63#include <i386/machine_routines.h>
64#include <i386/pal_routines.h>
65#include <i386/proc_reg.h>
66#include <i386/misc_protos.h>
67#include <pexpert/pexpert.h>
68#include <machine/limits.h>
69#include <machine/commpage.h>
70#include <sys/kdebug.h>
71#include <i386/tsc.h>
72#include <i386/rtclock_protos.h>
73
74#define UI_CPUFREQ_ROUNDING_FACTOR	10000000
75
76int		rtclock_config(void);
77
78int		rtclock_init(void);
79
80uint64_t	tsc_rebase_abs_time = 0;
81
82static void	rtc_set_timescale(uint64_t cycles);
83static uint64_t	rtc_export_speed(uint64_t cycles);
84
void
rtc_timer_start(void)
{
	/*
	 * Kick the etimer layer so all pending timer deadlines are
	 * re-evaluated from scratch.
	 */
	etimer_resync_deadlines();
}
93
/*
 * Split an absolute time value (nanoseconds on x86, per
 * absolutetime_to_nanoseconds() below) into whole seconds and
 * microseconds.  Returns the sub-second remainder in NANOSECONDS
 * (finer-grained than *microsecs); clock_gettimeofday_set_commpage()
 * uses this to align its timestamp to a whole-second boundary.
 */
static inline uint32_t
_absolutetime_to_microtime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *microsecs)
{
	uint32_t remain;
#if defined(__i386__)
	/* 64-by-32 divide: edx:eax = abstime; quotient -> *secs, remainder -> remain */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	/* second divide converts the nanosecond remainder to microseconds */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
#elif defined(__x86_64__)
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	remain = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC);
	*microsecs = remain / NSEC_PER_USEC;
#else
#error Unsupported architecture
#endif
	return remain;
}
116
/*
 * Split an absolute time value (nanoseconds on x86) into whole seconds
 * and the sub-second remainder in nanoseconds.
 */
static inline void
_absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nanosecs)
{
#if defined(__i386__)
	/* 64-by-32 divide: quotient -> *secs, remainder (ns) -> *nanosecs */
	asm volatile(
			"divl %3"
			: "=a" (*secs), "=d" (*nanosecs)
			: "A" (abstime), "r" (NSEC_PER_SEC));
#elif defined(__x86_64__)
	*secs = abstime / (uint64_t)NSEC_PER_SEC;
	*nanosecs = (clock_usec_t)(abstime % (uint64_t)NSEC_PER_SEC);
#else
#error Unsupported architecture
#endif
}
132
133/*
134 * Configure the real-time clock device. Return success (1)
135 * or failure (0).
136 */
137
int
rtclock_config(void)
{
	/* No hardware configuration is required; always report success. */
	return 1;
}
144
145
146/*
 * Nanotime/mach_absolute_time
148 * -----------------------------
149 * The timestamp counter (TSC) - which counts cpu clock cycles and can be read
150 * efficiently by the kernel and in userspace - is the reference for all timing.
151 * The cpu clock rate is platform-dependent and may stop or be reset when the
152 * processor is napped/slept.  As a result, nanotime is the software abstraction
153 * used to maintain a monotonic clock, adjusted from an outside reference as needed.
154 *
155 * The kernel maintains nanotime information recording:
156 * 	- the ratio of tsc to nanoseconds
157 *	  with this ratio expressed as a 32-bit scale and shift
158 *	  (power of 2 divider);
159 *	- { tsc_base, ns_base } pair of corresponding timestamps.
160 *
161 * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage
162 * for the userspace nanotime routine to read.
163 *
164 * All of the routines which update the nanotime data are non-reentrant.  This must
165 * be guaranteed by the caller.
166 */
/*
 * Publish the nanotime tuple {tsc_base, ns_base, scale, shift} to the
 * commpage so the userspace nanotime routine can read it.
 */
static inline void
rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp)
{
	commpage_set_nanotime(rntp->tsc_base, rntp->ns_base, rntp->scale, rntp->shift);
}
172
173/*
174 * rtc_nanotime_init:
175 *
 * Initialize the nanotime info from the base time.
177 */
178static inline void
179_rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base)
180{
181	uint64_t	tsc = rdtsc64();
182
183	_pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp);
184}
185
/*
 * Re-seed the kernel's nanotime info from `base` (the new nanotime
 * value) and then push the updated tuple to the commpage.
 * Non-reentrant; callers must guarantee exclusion.
 */
static void
rtc_nanotime_init(uint64_t base)
{
	_rtc_nanotime_init(&pal_rtc_nanotime_info, base);
	rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
}
192
193/*
194 * rtc_nanotime_init_commpage:
195 *
196 * Call back from the commpage initialization to
197 * cause the commpage data to be filled in once the
198 * commpages have been created.
199 */
200void
201rtc_nanotime_init_commpage(void)
202{
203	spl_t			s = splclock();
204
205	rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
206	splx(s);
207}
208
209/*
210 * rtc_nanotime_read:
211 *
 * Returns the current nanotime value, accessible from any
213 * context.
214 */
static inline uint64_t
rtc_nanotime_read(void)
{
	/* Delegate to the PAL fast-path reader over pal_rtc_nanotime_info. */
	return	_rtc_nanotime_read(&pal_rtc_nanotime_info);
}
220
221/*
222 * rtc_clock_napped:
223 *
224 * Invoked from power management when we exit from a low C-State (>= C4)
225 * and the TSC has stopped counting.  The nanotime data is updated according
226 * to the provided value which represents the new value for nanotime.
227 */
228void
229rtc_clock_napped(uint64_t base, uint64_t tsc_base)
230{
231	pal_rtc_nanotime_t	*rntp = &pal_rtc_nanotime_info;
232	uint64_t	oldnsecs;
233	uint64_t	newnsecs;
234	uint64_t	tsc;
235
236	assert(!ml_get_interrupts_enabled());
237	tsc = rdtsc64();
238	oldnsecs = rntp->ns_base + _rtc_tsc_to_nanoseconds(tsc - rntp->tsc_base, rntp);
239	newnsecs = base + _rtc_tsc_to_nanoseconds(tsc - tsc_base, rntp);
240
241	/*
242	 * Only update the base values if time using the new base values
243	 * is later than the time using the old base values.
244	 */
245	if (oldnsecs < newnsecs) {
246	    _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp);
247	    rtc_nanotime_set_commpage(rntp);
248		trace_set_timebases(tsc_base, base);
249	}
250}
251
252/*
253 * Invoked from power management to correct the SFLM TSC entry drift problem:
 * a small delta is added to the tsc_base.  This is equivalent to nudging time
255 * backwards.  We require this to be on the order of a TSC quantum which won't
256 * cause callers of mach_absolute_time() to see time going backwards!
257 */
258void
259rtc_clock_adjust(uint64_t tsc_base_delta)
260{
261    pal_rtc_nanotime_t	*rntp = &pal_rtc_nanotime_info;
262
263    assert(!ml_get_interrupts_enabled());
264    assert(tsc_base_delta < 100ULL);	/* i.e. it's small */
265    _rtc_nanotime_adjust(tsc_base_delta, rntp);
266    rtc_nanotime_set_commpage(rntp);
267}
268
/* Frequency stepping is not supported on this platform; trap any caller. */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepping unsupported");
}
275
/* Frequency stepping is not supported on this platform; trap any caller. */
void
rtc_clock_stepped(__unused uint32_t new_frequency,
		  __unused uint32_t old_frequency)
{
	panic("rtc_clock_stepped unsupported");
}
282
283/*
284 * rtc_sleep_wakeup:
285 *
286 * Invoked from power management when we have awoken from a sleep (S3)
287 * and the TSC has been reset, or from Deep Idle (S0) sleep when the TSC
288 * has progressed.  The nanotime data is updated based on the passed-in value.
289 *
290 * The caller must guarantee non-reentrancy.
291 */
void
rtc_sleep_wakeup(
	uint64_t		base)	/* new nanotime value to resume from */
{
    	/* Set fixed configuration for lapic timers */
	rtc_timer->config();

	/*
	 * Reset nanotime.
	 * The timestamp counter will have been reset
	 * but nanotime (uptime) marches onward.
	 */
	rtc_nanotime_init(base);
}
306
307/*
308 * Initialize the real-time clock device.
309 * In addition, various variables used to support the clock are initialized.
310 */
int
rtclock_init(void)
{
	uint64_t	cycles;

	assert(!ml_get_interrupts_enabled());

	/* Master cpu performs the one-time timebase setup. */
	if (cpu_number() == master_cpu) {

		/* The TSC frequency must already be measured (see i386/tsc.c). */
		assert(tscFreq);
		rtc_set_timescale(tscFreq);

		/*
		 * Adjust and set the exported cpu speed.
		 */
		cycles = rtc_export_speed(tscFreq);

		/*
		 * Set min/max to actual.
		 * ACPI may update these later if speed-stepping is detected.
		 */
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;

		rtc_timer_init();
		clock_timebase_init();
		ml_init_lock_timeout();
		ml_init_delay_spin_threshold(10);
	}

    	/* Set fixed configuration for lapic timers */
	rtc_timer->config();
	rtc_timer_start();

	return (1);
}
347
348// utility routine
349// Code to calculate how many processor cycles are in a second...
350
351static void
352rtc_set_timescale(uint64_t cycles)
353{
354	pal_rtc_nanotime_t	*rntp = &pal_rtc_nanotime_info;
355	uint32_t    shift = 0;
356
357	/* the "scale" factor will overflow unless cycles>SLOW_TSC_THRESHOLD */
358
359	while ( cycles <= SLOW_TSC_THRESHOLD) {
360		shift++;
361		cycles <<= 1;
362	}
363
364	if ( shift != 0 )
365		printf("Slow TSC, rtc_nanotime.shift == %d\n", shift);
366
367	rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles);
368
369	rntp->shift = shift;
370
371	if (tsc_rebase_abs_time == 0)
372		tsc_rebase_abs_time = mach_absolute_time();
373
374	rtc_nanotime_init(0);
375}
376
377static uint64_t
378rtc_export_speed(uint64_t cyc_per_sec)
379{
380	uint64_t	cycles;
381
382	/* Round: */
383        cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
384			/ UI_CPUFREQ_ROUNDING_FACTOR)
385				* UI_CPUFREQ_ROUNDING_FACTOR;
386
387	/*
388	 * Set current measured speed.
389	 */
390        if (cycles >= 0x100000000ULL) {
391            gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
392        } else {
393            gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
394        }
395        gPEClockFrequencyInfo.cpu_frequency_hz = cycles;
396
397	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec);
398	return(cycles);
399}
400
401void
402clock_get_system_microtime(
403	clock_sec_t			*secs,
404	clock_usec_t		*microsecs)
405{
406	uint64_t	now = rtc_nanotime_read();
407
408	_absolutetime_to_microtime(now, secs, microsecs);
409}
410
411void
412clock_get_system_nanotime(
413	clock_sec_t			*secs,
414	clock_nsec_t		*nanosecs)
415{
416	uint64_t	now = rtc_nanotime_read();
417
418	_absolutetime_to_nanotime(now, secs, nanosecs);
419}
420
/*
 * Update the commpage gettimeofday() data from the current abstime,
 * boot epoch and calendar offset.  Also returns the split time to the
 * caller via *secs / *microsecs.
 */
void
clock_gettimeofday_set_commpage(
	uint64_t				abstime,
	uint64_t				epoch,
	uint64_t				offset,
	clock_sec_t				*secs,
	clock_usec_t			*microsecs)
{
	uint64_t	now = abstime + offset;
	uint32_t	remain;		/* sub-second remainder, in nanoseconds */

	remain = _absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)epoch;

	/*
	 * Pass the abstime corresponding to the start of the current
	 * second (abstime minus the sub-second remainder), paired with
	 * the whole-second calendar value.
	 */
	commpage_set_timestamp(abstime - remain, *secs);
}
438
439void
440clock_timebase_info(
441	mach_timebase_info_t	info)
442{
443	info->numer = info->denom =  1;
444}
445
446/*
447 * Real-time clock device interrupt.
448 */
449void
450rtclock_intr(
451	x86_saved_state_t	*tregs)
452{
453        uint64_t	rip;
454	boolean_t	user_mode = FALSE;
455
456	assert(get_preemption_level() > 0);
457	assert(!ml_get_interrupts_enabled());
458
459	if (is_saved_state64(tregs) == TRUE) {
460	        x86_saved_state64_t	*regs;
461
462		regs = saved_state64(tregs);
463
464		if (regs->isf.cs & 0x03)
465			user_mode = TRUE;
466		rip = regs->isf.rip;
467	} else {
468	        x86_saved_state32_t	*regs;
469
470		regs = saved_state32(tregs);
471
472		if (regs->cs & 0x03)
473		        user_mode = TRUE;
474		rip = regs->eip;
475	}
476
477	/* call the generic etimer */
478	etimer_intr(user_mode, rip);
479}
480
481
482/*
483 *	Request timer pop from the hardware
484 */
485
/*
 * setPop:
 *
 * Request a timer interrupt ("pop") from the hardware at absolute time
 * `time`.  Returns the delta between the deadline the timer hardware
 * actually programmed and the current time.
 */
uint64_t
setPop(
	uint64_t time)
{
	uint64_t	now;
	uint64_t	pop;

	/* 0 and EndOfAllTime are special-cases for "clear the timer" */
	if (time == 0 || time == EndOfAllTime ) {
		time = EndOfAllTime;
		now = 0;
		pop = rtc_timer->set(0, 0);
	} else {
		now = rtc_nanotime_read();	/* The time in nanoseconds */
		pop = rtc_timer->set(time, now);
	}

	/* Record requested and actual deadlines set */
	x86_lcpu()->rtcDeadline = time;
	x86_lcpu()->rtcPop	= pop;

	return pop - now;
}
509
/* Kernel mach_absolute_time(): returns current nanotime (ns on x86). */
uint64_t
mach_absolute_time(void)
{
	return rtc_nanotime_read();
}
515
void
clock_interval_to_absolutetime_interval(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	/* Widen before multiplying so the 32x32 product cannot truncate. */
	uint64_t	abs_interval = (uint64_t)interval * scale_factor;

	*result = abs_interval;
}
524
/* Public wrapper: split abstime (ns) into seconds + microseconds. */
void
absolutetime_to_microtime(
	uint64_t			abstime,
	clock_sec_t			*secs,
	clock_usec_t		*microsecs)
{
	_absolutetime_to_microtime(abstime, secs, microsecs);
}
533
/* Public wrapper: split abstime (ns) into seconds + nanoseconds. */
void
absolutetime_to_nanotime(
	uint64_t			abstime,
	clock_sec_t			*secs,
	clock_nsec_t		*nanosecs)
{
	_absolutetime_to_nanotime(abstime, secs, nanosecs);
}
542
543void
544nanotime_to_absolutetime(
545	clock_sec_t			secs,
546	clock_nsec_t		nanosecs,
547	uint64_t			*result)
548{
549	*result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs;
550}
551
void
absolutetime_to_nanoseconds(
	uint64_t		abstime,
	uint64_t		*result)
{
	/* Identity conversion: abstime is already in nanoseconds on x86. */
	*result = abstime;
}
559
void
nanoseconds_to_absolutetime(
	uint64_t		nanoseconds,
	uint64_t		*result)
{
	/* Identity conversion: absolute time units are nanoseconds on x86. */
	*result = nanoseconds;
}
567
568void
569machine_delay_until(
570        uint64_t interval,
571        uint64_t                deadline)
572{
573        (void)interval;
574        while (mach_absolute_time() < deadline) {
575                cpu_pause();
576        }
577}
578