/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <platforms.h>
#include <i386/asm.h>
#include <i386/asm64.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <i386/vmx/vmx_asm.h>
#include <assym.s>

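/*
 * 10-byte pseudo-descriptors for the 64-bit LGDT/LIDT below: a
 * 16-bit limit followed by a 64-bit linear base.  The leading
 * .word of padding keeps the base 8-byte aligned, and the base is
 * emitted as two .longs because the assembler cannot relocate a
 * .quad here (see the XXX comments).
 */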
	.data
	.align 3
	.globl EXT(gdtptr64)
	/* pad so the 64-bit base below is 8-byte aligned */
	.word	0
LEXT(gdtptr64)
	.word	Times(8,GDTSZ)-1
	/* XXX really want .quad here */
	.long	EXT(master_gdt)
	.long	KERNEL_UBER_BASE_HI32	/* must be in uber-space */

	.align 3
	.globl EXT(idtptr64)
	/* pad so the 64-bit base below is 8-byte aligned */
	.word	0
LEXT(idtptr64)
	.word	Times(16,IDTSZ)-1
	/* XXX really want .quad here */
	.long	EXT(master_idt64)
	.long	KERNEL_UBER_BASE_HI32	/* must be in uber-space */

	.text

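/*
 * ml_load_desc64 -- switch temporarily into 64-bit mode and load
 * the 64-bit GDT and IDT pointers, the kernel LDT selector and the
 * kernel TSS, emitting a POSTCODE after each step for bring-up
 * diagnosis.
 */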
Entry(ml_load_desc64)

	ENTER_64BIT_MODE()

	POSTCODE(ML_LOAD_DESC64_ENTRY)

	lgdt	EXT(gdtptr64)		/* load GDT */

	POSTCODE(ML_LOAD_DESC64_GDT)

	lidt	EXT(idtptr64)		/* load IDT */

	POSTCODE(ML_LOAD_DESC64_IDT)

	movw	$(KERNEL_LDT),%ax	/* get LDT segment */
	lldt	%ax			/* load LDT */

	POSTCODE(ML_LOAD_DESC64_LDT)

	movw	$(KERNEL_TSS),%ax
	ltr	%ax			/* set up KTSS */

	POSTCODE(ML_LOAD_DESC64_EXIT)

	ENTER_COMPAT_MODE()

	ret


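/*
 * ml_64bit_lldt(int32_t selector) -- reload the LDT register from
 * 64-bit mode, so the selector is resolved against the expanded
 * 16-byte long-mode LDT descriptor.
 */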
Entry(ml_64bit_lldt)
	/* (int32_t selector) */

	FRAME

	ENTER_64BIT_MODE()

	movl	B_ARG0, %eax
	lldt	%ax

	ENTER_COMPAT_MODE()

	EMARF
	ret

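/*
 * set_64bit_debug_regs(x86_debug_state64_t *ds) -- copy the saved
 * DR0-DR3 values into the hardware debug registers; the writes are
 * done from 64-bit mode so that full 64-bit addresses are loaded.
 */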
Entry(set_64bit_debug_regs)
	/* x86_debug_state64_t *ds */

	FRAME

	ENTER_64BIT_MODE()

	mov	B_ARG0, %edx
	mov	DS64_DR0(%edx), %rax
	mov	%rax, %dr0
	mov	DS64_DR1(%edx), %rax
	mov	%rax, %dr1
	mov	DS64_DR2(%edx), %rax
	mov	%rax, %dr2
	mov	DS64_DR3(%edx), %rax
	mov	%rax, %dr3

	ENTER_COMPAT_MODE()

	EMARF
	ret

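/*
 * flush_tlb64 -- flush the TLB from 64-bit mode by rewriting %cr3
 * with its current value, invalidating all non-global entries.
 */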
Entry(flush_tlb64)

	FRAME

	ENTER_64BIT_MODE()

	mov	%cr3, %rax
	mov	%rax, %cr3

	ENTER_COMPAT_MODE()

	EMARF
	ret

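/*
 * set64_cr3(uint64_t cr3) -- install a new page-table base.  The
 * 64-bit value arrives as two 32-bit stack arguments (low half in
 * B_ARG0, high half in B_ARG1) and is reassembled in %rax before
 * the write to %cr3.
 */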
Entry(set64_cr3)

	FRAME

	movl	B_ARG0, %eax
	movl	B_ARG1, %edx

	ENTER_64BIT_MODE()

	/* %rax = %edx:%eax */
	shl	$32, %rax
	shrd	$32, %rdx, %rax

	mov	%rax, %cr3

	ENTER_COMPAT_MODE()

	EMARF
	ret

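/*
 * get64_cr3 -- return the current %cr3 as a 64-bit value in the
 * %edx:%eax register pair expected by 32-bit callers.
 */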
Entry(get64_cr3)

	FRAME

	ENTER_64BIT_MODE()

	mov	%cr3, %rax
	mov	%rax, %rdx
	shr	$32, %rdx		// %edx:%eax = %cr3

	ENTER_COMPAT_MODE()

	EMARF
	ret

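/*
 * cpuid64 -- execute CPUID from 64-bit mode; the caller's register
 * arguments and results pass through the mode switches untouched.
 */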
Entry(cpuid64)
	ENTER_64BIT_MODE()
	cpuid
	ENTER_COMPAT_MODE()
	ret


/* FXSAVE and FXRSTOR operate in a mode-dependent fashion, saving and
 * restoring the full 64-bit state image (including %xmm8-%xmm15) only
 * when executed in 64-bit mode; hence these variants.
 * Must be called with interrupts disabled.
 */
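/*
 * A minimal sketch of the intended C-side use (the declarations and
 * helper names here are illustrative assumptions, not verbatim from
 * the kernel headers):
 *
 *	extern void fxsave64(struct x86_fx_save *);	// assumed prototype
 *	extern void fxrstor64(struct x86_fx_save *);	// assumed prototype
 *
 *	boolean_t intr = ml_set_interrupts_enabled(FALSE);
 *	if (thread_is_64bit(thr))
 *		fxsave64(ifps);		// full 64-bit image, %xmm0-%xmm15
 *	else
 *		fxsave(ifps);		// legacy 32-bit image
 *	(void) ml_set_interrupts_enabled(intr);
 */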

Entry(fxsave64)
	movl		S_ARG0,%eax
	ENTER_64BIT_MODE()
	fxsave		(%eax)
	ENTER_COMPAT_MODE()
	ret

Entry(fxrstor64)
	movl		S_ARG0,%eax
	ENTER_64BIT_MODE()
	/* use a 32-bit address override, as fxsave64 does: bits 63:32
	 * of %rax are not guaranteed zero across the mode switch */
	fxrstor		(%eax)
	ENTER_COMPAT_MODE()
	ret

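/*
 * xsave64o/xrstor64o -- XSAVE/XRSTOR on the buffer addressed by
 * %rcx, hand-assembled (presumably because the toolchain lacks the
 * mnemonics): .short 0xAE0F emits the opcode bytes 0F AE in
 * little-endian storage, and the ModRM byte selects /4 (xsave) or
 * /5 (xrstor) with (%rcx) as the memory operand.
 */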
Entry(xsave64o)
	ENTER_64BIT_MODE()
	.short	0xAE0F		/* xsave: opcode bytes 0F AE, /4 */
	.byte	0x21		/* ModRM 0x21: mod 0, reg /4, r/m %rcx */
	ENTER_COMPAT_MODE()
	ret

Entry(xrstor64o)
	ENTER_64BIT_MODE()
	.short	0xAE0F		/* xrstor: opcode bytes 0F AE, /5 */
	.byte	0x29		/* ModRM 0x29: mod 0, reg /5, r/m %rcx */
	ENTER_COMPAT_MODE()
	ret


#if CONFIG_VMX

/*
 *	__vmxon -- Enter VMX Operation
 *	int __vmxon(addr64_t v);
 */
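/*
 * VMX instructions report status through the flags: CF=1 means
 * VMfailInvalid, ZF=1 means VMfailValid, and both clear means
 * VMsucceed.  The cmov pair below (and in __vmxoff) folds that
 * three-way status into the return value.
 */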
Entry(__vmxon)
	FRAME

	ENTER_64BIT_MODE()
	mov	$(VMX_FAIL_INVALID), %ecx
	mov	$(VMX_FAIL_VALID), %edx
	mov	$(VMX_SUCCEED), %eax
	vmxon	8(%rbp)		/* physical addr passed on stack */
	cmovcl	%ecx, %eax	/* CF = 1: VMfailInvalid */
	cmovzl	%edx, %eax	/* ZF = 1: VMfailValid */
	ENTER_COMPAT_MODE()

	EMARF
	ret

/*
 *	__vmxoff -- Leave VMX Operation
 *	int __vmxoff(void);
 */
Entry(__vmxoff)
	FRAME

	ENTER_64BIT_MODE()
	mov	$(VMX_FAIL_INVALID), %ecx
	mov	$(VMX_FAIL_VALID), %edx
	mov	$(VMX_SUCCEED), %eax
	vmxoff
	cmovcl	%ecx, %eax	/* CF = 1: VMfailInvalid */
	cmovzl	%edx, %eax	/* ZF = 1: VMfailValid */
	ENTER_COMPAT_MODE()

	EMARF
	ret

#endif /* CONFIG_VMX */