/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/asm.h>
#include <i386/rtclock_asm.h>
#include <i386/proc_reg.h>
#include <i386/eflags.h>

#include <i386/postcode.h>
#include <i386/apic.h>
#include <i386/vmx/vmx_asm.h>
#include <assym.s>

/*
**      ml_get_timebase()
**
**      Returns TSC in RAX
**
*/
ENTRY(ml_get_timebase)

	lfence					/* serialize rdtsc against earlier instructions */
	rdtsc					/* TSC returned in %edx:%eax */
	lfence					/* ...and against later ones */
	shlq	$32,%rdx
	orq	%rdx,%rax			/* combine %edx:%eax into 64-bit %rax */

	ret

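/*
 * Rough C equivalent, for reference (a sketch using compiler
 * intrinsics; not part of the kernel build):
 *
 *	#include <stdint.h>
 *	#include <x86intrin.h>
 *
 *	uint64_t ml_get_timebase_c(void)
 *	{
 *		_mm_lfence();		// keep rdtsc from issuing early
 *		uint64_t tsc = __rdtsc();
 *		_mm_lfence();		// ...or retiring late
 *		return tsc;
 *	}
 */
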
/*
 *	Convert between various timer units
 *
 *	This code converts 64-bit time units to other units.
 *	For example, the TSC is converted to HPET units.
 *
 *	Time is a 64-bit integer that is some number of ticks.
 *	The conversion factor is a 64-bit fixed-point number composed
 *	of a 32-bit integer part and a 32-bit fraction.
 *
 *	The time ticks are multiplied by the conversion factor.  The
 *	calculation is done as a 128-bit value, but both the high and
 *	low words are dropped: the high word is overflow and the low
 *	word is the fraction part of the result.
 *
 *	We return a 64-bit value.
 *
 *	Note that we can use this function to multiply two conversion
 *	factors.  We do this in order to calculate the multiplier used
 *	to convert directly between any two units.
 *
 *	uint64_t tmrCvt(uint64_t time,		// %rdi
 *			uint64_t conversion)	// %rsi
 *
 */
ENTRY(tmrCvt)
	movq	%rdi,%rax
	mulq	%rsi				/* result is %rdx:%rax */
	shrdq	$32,%rdx,%rax			/* %rdx:%rax >>= 32 */
	ret

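/*
 * A minimal C sketch of the same computation (assumes a compiler
 * with unsigned __int128 support; not part of this file):
 *
 *	#include <stdint.h>
 *
 *	uint64_t tmrCvt_c(uint64_t time, uint64_t conversion)
 *	{
 *		// 64x64 -> 128-bit product; >> 32 drops the fraction
 *		// word, truncation to 64 bits drops the overflow word.
 *		return (uint64_t)(((unsigned __int128)time * conversion) >> 32);
 *	}
 */
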
/*
 * void _rtc_nanotime_adjust(
 *		uint64_t        tsc_base_delta,	// %rdi
 *		rtc_nanotime_t  *dst);		// %rsi
 */
ENTRY(_rtc_nanotime_adjust)
	movl	RNT_GENERATION(%rsi),%eax	/* get current generation */
	movl	$0,RNT_GENERATION(%rsi)		/* flag data as being updated */
	addq	%rdi,RNT_TSC_BASE(%rsi)

	incl	%eax				/* next generation */
	jnz	1f
	incl	%eax				/* skip 0, which is a flag */
1:	movl	%eax,RNT_GENERATION(%rsi)	/* update generation */

	ret

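/*
 * The same update protocol in C, for reference (a sketch; the field
 * names are illustrative stand-ins for the RNT_* assym offsets):
 *
 *	void _rtc_nanotime_adjust_c(uint64_t tsc_base_delta,
 *				    rtc_nanotime_t *dst)
 *	{
 *		uint32_t gen = dst->generation;
 *		dst->generation = 0;		// flag data as being updated
 *		dst->tsc_base += tsc_base_delta;
 *		if (++gen == 0)			// next generation,
 *			gen = 1;		// skipping 0, the update flag
 *		dst->generation = gen;		// publish new generation
 *	}
 */
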
/*
 * uint64_t _rtc_nanotime_read(rtc_nanotime_t *rntp);
 *
 * This is the same as the commpage nanotime routine, except that it uses the
 * kernel internal "rtc_nanotime_info" data instead of the commpage data.
 * These two copies of data are kept in sync by rtc_clock_napped().
 *
 * Warning!  There are several copies of this code in the trampolines found in
 * osfmk/x86_64/idt64.s, coming from the various TIMER macros in rtclock_asm.h.
 * They're all kept in sync by using the RTC_NANOTIME_READ() macro.
 *
 * The algorithm we use is:
 *
 *	ns = ((((rdtsc - rnt_tsc_base)<<rnt_shift)*rnt_tsc_scale) / 2**32) + rnt_ns_base;
 *
 * rnt_shift, a constant computed during initialization, is the smallest value for which:
 *
 *	(tscFreq << rnt_shift) > SLOW_TSC_THRESHOLD
 *
 * where SLOW_TSC_THRESHOLD is about 10**9.  Since most processors' tscFreqs are
 * greater than 1GHz, rnt_shift is usually 0.  rnt_tsc_scale is also a 32-bit
 * constant:
 *
 *	rnt_tsc_scale = (10**9 * 2**32) / (tscFreq << rnt_shift);
 *
 * On 64-bit processors this algorithm could be simplified by doing a 64x64-bit
 * multiply of rdtsc by tscFCvtt2n:
 *
 *	ns = (((rdtsc - rnt_tsc_base) * tscFCvtt2n) / 2**32) + rnt_ns_base;
 *
 * We don't do so in order to use the same algorithm in 32- and 64-bit mode.
 * When U32 goes away, we should reconsider.
 *
 * Since this routine is not synchronized and can be called in any context,
 * we use a generation count to guard against seeing partially updated data.
 * In addition, the _rtc_nanotime_store() routine zeroes the generation before
 * updating the data, and stores the nonzero generation only after all fields
 * have been stored.  Because IA32 guarantees that stores by one processor
 * must be seen in order by another, we can avoid using a lock.  We spin while
 * the generation is zero.
 *
 * uint64_t _rtc_nanotime_read(
 *			rtc_nanotime_t *rntp);		// %rdi
 *
 */
ENTRY(_rtc_nanotime_read)

	PAL_RTC_NANOTIME_READ_FAST()

	ret

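/*
 * Reader side of the generation protocol in C, following the
 * algorithm described above (a sketch: the field names stand in for
 * the RNT_* assym offsets, rdtsc64() for the rdtsc sequence, and real
 * code would also need volatile/compiler-barrier treatment):
 *
 *	uint64_t _rtc_nanotime_read_c(rtc_nanotime_t *rntp)
 *	{
 *		uint32_t gen;
 *		uint64_t ns;
 *
 *		do {
 *			do {
 *				gen = rntp->generation;
 *			} while (gen == 0);	// spin while update in progress
 *			ns = (uint64_t)((((unsigned __int128)
 *			    (rdtsc64() - rntp->tsc_base) << rntp->shift)
 *			    * rntp->scale) >> 32) + rntp->ns_base;
 *		} while (gen != rntp->generation);	// retry if it changed
 *		return ns;
 *	}
 */
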
/*
 * extern uint64_t _rtc_tsc_to_nanoseconds(
 *          uint64_t    value,              // %rdi
 *          pal_rtc_nanotime_t *rntp);      // %rsi
 *
 * Converts TSC units to nanoseconds, using an abbreviated form of the
 * above algorithm.  Note that while we could simply have used
 * tmrCvt(value, tscFCvtt2n) and avoided the need for this asm, doing so
 * is a bit riskier since we'd be using a different algorithm with
 * possibly different rounding.
 */

ENTRY(_rtc_tsc_to_nanoseconds)
	movq	%rdi,%rax			/* copy value (in TSC units) to convert */
	movl	RNT_SHIFT(%rsi),%ecx
	movl	RNT_SCALE(%rsi),%edx
	shlq	%cl,%rax			/* tscUnits << shift */
	mulq	%rdx				/* (tscUnits << shift) * scale */
	shrdq	$32,%rdx,%rax			/* %rdx:%rax >>= 32 */
	ret

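/*
 * Equivalent C, for reference (a sketch; a 128-bit intermediate keeps
 * the shifted value from overflowing, and the field names stand in
 * for the RNT_* assym offsets):
 *
 *	uint64_t _rtc_tsc_to_nanoseconds_c(uint64_t value,
 *					   pal_rtc_nanotime_t *rntp)
 *	{
 *		return (uint64_t)((((unsigned __int128)value << rntp->shift)
 *		    * rntp->scale) >> 32);
 *	}
 */
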
/*
 * call_continuation(
 *		continuation,			// %rdi
 *		parameter,			// %rsi
 *		wait result);			// %rdx
 *
 * Resets to the top of the thread's kernel stack, calls the
 * continuation with its parameter and wait result, and terminates
 * the thread if the continuation returns.
 */
Entry(call_continuation)
	movq	%rdi,%rcx			/* get continuation */
	movq	%rsi,%rdi			/* continuation param */
	movq	%rdx,%rsi			/* wait result */
	movq	%gs:CPU_KERNEL_STACK,%rsp	/* set the stack */
	xorq	%rbp,%rbp			/* zero frame pointer */
	call	*%rcx				/* call continuation */
	movq	%gs:CPU_ACTIVE_THREAD,%rdi
	call	EXT(thread_terminate)

Entry(x86_init_wrapper)
	xor	%rbp, %rbp			/* zero frame pointer to end backtraces */
	movq	%rsi, %rsp			/* switch to the stack (second arg) */
	callq	*%rdi				/* call the init function (first arg) */

#if CONFIG_VMX

/*
 *	__vmxon -- Enter VMX Operation
 *	int __vmxon(addr64_t v);
 */
Entry(__vmxon)
	FRAME
	push	%rdi

	mov	$(VMX_FAIL_INVALID), %ecx
	mov	$(VMX_FAIL_VALID), %edx
	mov	$(VMX_SUCCEED), %eax
	vmxon	(%rsp)
	cmovcl	%ecx, %eax	/* CF = 1, ZF = 0 */
	cmovzl	%edx, %eax	/* CF = 0, ZF = 1 */

	pop	%rdi
	EMARF
	ret

/*
 *	__vmxoff -- Leave VMX Operation
 *	int __vmxoff(void);
 */
Entry(__vmxoff)
	FRAME

	mov	$(VMX_FAIL_INVALID), %ecx
	mov	$(VMX_FAIL_VALID), %edx
	mov	$(VMX_SUCCEED), %eax
	vmxoff
	cmovcl	%ecx, %eax	/* CF = 1, ZF = 0 */
	cmovzl	%edx, %eax	/* CF = 0, ZF = 1 */

	EMARF
	ret
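
/*
 * C-side view, for reference (a sketch; the VMX_* constants are the
 * ones this file pulls in from vmx_asm.h, and vmxon_region_pa is a
 * hypothetical caller-supplied physical address):
 *
 *	extern int __vmxon(addr64_t v);
 *	extern int __vmxoff(void);
 *
 *	// Both return VMX_SUCCEED, VMX_FAIL_INVALID (CF was set), or
 *	// VMX_FAIL_VALID (ZF was set), per the VMX status conventions.
 *	if (__vmxon(vmxon_region_pa) != VMX_SUCCEED)
 *		;	// handle failure
 */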

#endif /* CONFIG_VMX */

/*
 *	mfence -- Memory Barrier
 *	Use out-of-line assembly to get
 *	standard x86-64 ABI guarantees
 *	about what the caller's codegen
 *	has in registers vs. memory
 */
Entry(do_mfence)
	mfence
	ret
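
/*
 * Caller-side sketch: because this is an out-of-line call, the ABI
 * already forces the compiler to assume memory may be read or written
 * across it, so the call doubles as a compiler barrier (the
 * declaration shown is an assumption, not from this file):
 *
 *	extern void do_mfence(void);
 *
 *	data = 1;
 *	do_mfence();	// prior stores globally visible before...
 *	flag = 1;	// ...the flag is published
 */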