/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <i386/asm.h>
#include <i386/asm64.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

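/*
 * Pseudo-descriptors for the 64-bit lgdt/lidt instructions:
 * a 2-byte limit followed by an 8-byte linear base address.
 * The base is assembled from two .longs (see the XXX notes
 * below); the high half places the tables in uber-space.
 */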
	.data
	.align 3
	.globl EXT(gdtptr64)
	/* pad so the base address below is properly aligned */
	.word	0
LEXT(gdtptr64)
	.word	Times(8,GDTSZ)-1
	/* XXX really want .quad here */
	.long	EXT(master_gdt)
	.long	KERNEL_UBER_BASE_HI32	/* must be in uber-space */

	.align 3
	.globl EXT(idtptr64)
	/* pad so the base address below is properly aligned */
	.word	0
LEXT(idtptr64)
	.word	Times(16,IDTSZ)-1
	/* XXX really want .quad here */
	.long	EXT(master_idt64)
	.long	KERNEL_UBER_BASE_HI32	/* must be in uber-space */

	.text

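/*
 * ml_load_desc64()
 *
 * Called from compatibility mode: switch temporarily into 64-bit mode,
 * load the GDT and IDT from their uber-space pseudo-descriptors, then
 * load the kernel LDT and TSS before dropping back to compatibility mode.
 */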
Entry(ml_load_desc64)

	ENTER_64BIT_MODE()

	POSTCODE(ML_LOAD_DESC64_ENTRY)

	lgdt	EXT(gdtptr64)		/* load GDT */

	POSTCODE(ML_LOAD_DESC64_GDT)

	lidt	EXT(idtptr64)		/* load IDT */

	POSTCODE(ML_LOAD_DESC64_IDT)

	movw	$(KERNEL_LDT),%ax	/* get LDT segment */
	lldt	%ax			/* load LDT */

	POSTCODE(ML_LOAD_DESC64_LDT)

	movw	$(KERNEL_TSS),%ax
	ltr	%ax			/* set up KTSS */

	POSTCODE(ML_LOAD_DESC64_EXIT)

	ENTER_COMPAT_MODE()

	ret


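/*
 * ml_64bit_wrmsr64()
 *
 * Perform the wrmsr while in 64-bit mode: B_ARG0 is the MSR number,
 * B_ARG1/B_ARG2 are the low/high halves of the 64-bit value.
 */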
Entry(ml_64bit_wrmsr64)
	/* (uint32_t msr, uint64_t value) */
	/* (uint32_t msr, uint32_t lo, uint32_t hi) */

	FRAME

	ENTER_64BIT_MODE()

	movl	B_ARG0, %ecx
	movl	B_ARG1, %eax
	movl	B_ARG2, %edx
	wrmsr

	ENTER_COMPAT_MODE()

	EMARF
	ret


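/*
 * ml_64bit_lldt()
 *
 * Load the LDT register with the given selector while in 64-bit mode.
 */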
Entry(ml_64bit_lldt)
	/* (int32_t selector) */

	FRAME

	ENTER_64BIT_MODE()

	movl	B_ARG0, %eax
	lldt	%ax

	ENTER_COMPAT_MODE()

	EMARF
	ret

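/*
 * set_64bit_debug_regs()
 *
 * Load DR0-DR3 with the full 64-bit values from the supplied
 * x86_debug_state64_t; the 64-bit moves to the debug registers
 * must be performed in 64-bit mode.
 */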
Entry(set_64bit_debug_regs)
	/* x86_debug_state64_t *ds */

	FRAME

	ENTER_64BIT_MODE()

	mov	B_ARG0, %edx
	mov	DS64_DR0(%edx), %rax
	mov	%rax, %dr0
	mov	DS64_DR1(%edx), %rax
	mov	%rax, %dr1
	mov	DS64_DR2(%edx), %rax
	mov	%rax, %dr2
	mov	DS64_DR3(%edx), %rax
	mov	%rax, %dr3

	ENTER_COMPAT_MODE()

	EMARF
	ret

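/*
 * flush_tlb64()
 *
 * Flush the (non-global) TLB entries by reloading %cr3 with its
 * current value, done in 64-bit mode so the full 64-bit CR3 is
 * preserved.
 */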
Entry(flush_tlb64)

	FRAME

	ENTER_64BIT_MODE()

	mov	%cr3, %rax
	mov	%rax, %cr3

	ENTER_COMPAT_MODE()

	EMARF
	ret

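/*
 * set64_cr3()
 *
 * Load %cr3 with a full 64-bit value passed as two 32-bit words:
 * B_ARG0 is the low half, B_ARG1 the high half.
 */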
Entry(set64_cr3)

	FRAME

	movl	B_ARG0, %eax
	movl	B_ARG1, %edx

	ENTER_64BIT_MODE()

	/* %rax = %edx:%eax */
	shl	$32, %rax
	shrd	$32, %rdx, %rax

	mov	%rax, %cr3

	ENTER_COMPAT_MODE()

	EMARF
	ret

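/*
 * get64_cr3()
 *
 * Return the full 64-bit %cr3 value in %edx:%eax.
 */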
Entry(get64_cr3)

	FRAME

	ENTER_64BIT_MODE()

	mov	%cr3, %rax
	mov	%rax, %rdx
	shr	$32, %rdx		/* %edx:%eax = %cr3 */

	ENTER_COMPAT_MODE()

	EMARF
	ret

/* FXSAVE and FXRSTOR operate in a mode-dependent fashion, hence these variants.
 * Must be called with interrupts disabled.
 * We clear pending x87 exceptions here; this is technically incorrect, since we
 * should propagate those to the user, but the compatibility-mode kernel is
 * currently not prepared to handle exceptions originating in 64-bit kernel mode.
 * However, it may be possible to work around this should it prove necessary.
 */

Entry(fxsave64)
	movl		S_ARG0,%eax
	ENTER_64BIT_MODE()
	fnclex
	fxsave		0(%rax)
	ENTER_COMPAT_MODE()
	ret

Entry(fxrstor64)
	movl		S_ARG0,%eax
	ENTER_64BIT_MODE()
	fnclex
	fxrstor		0(%rax)
	ENTER_COMPAT_MODE()
	ret
