1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57#include <mach_rt.h>
58#include <mach_kdp.h>
59#include <mach_assert.h>
60
61#include <sys/errno.h>
62#include <i386/asm.h>
63#include <i386/cpuid.h>
64#include <i386/eflags.h>
65#include <i386/proc_reg.h>
66#include <i386/trap.h>
67#include <assym.s>
68#include <mach/exception_types.h>
69#include <config_dtrace.h>
70
71#define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
72#include <mach/i386/syscall_sw.h>
73
74#include <i386/mp.h>
75
/*
 * Fault recovery.
 *
 * RECOVERY_SECTION switches the assembler to the section that holds the
 * recovery table entries: a dedicated __VECTORS,__recover section when
 * building Mach-O, otherwise plain .text.
 *
 * (The #else branch previously carried a duplicated, identical #define;
 * the redundant line has been removed.)
 */

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif
86
/*
 * RECOVER_TABLE_START emits the global label marking the beginning of
 * the recovery table: an array of (fault address, handler address)
 * pairs consulted by the trap code when a fault is taken in one of the
 * "careful" routines below.
 */
#define	RECOVER_TABLE_START	\
	.align 3		; \
	.globl	EXT(recover_table) ;\
LEXT(recover_table)		;\
	.text

/*
 * RECOVER(addr) records one table entry: the .quad 9f is the address of
 * the instruction that follows this macro in .text (local label 9:),
 * paired with the recovery handler 'addr'.  A fault at that instruction
 * resumes execution at 'addr' instead of panicking.
 */
#define	RECOVER(addr)		\
	.align	3;		\
	.quad	9f		;\
	.quad	addr		;\
	.text			;\
9:

/*
 * RECOVER_TABLE_END emits the global label marking the end of the table.
 */
#define	RECOVER_TABLE_END		\
	.align	3			;\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)			;\
	.text

/*
 * Allocate recovery and table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START
111
/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 *
 * Read an MSR with fault protection (rdmsr of an unimplemented MSR
 * raises #GP).  On success, stores the low/high 32-bit halves through
 * 'lo' and 'hi' and returns 0; if the rdmsr faults, returns 1.
 *
 * In:	%edi = msr, %rsi = lo pointer, %rdx = hi pointer
 * Clobbers: %ecx, %edx, %rdi, flags
 */
ENTRY(rdmsr_carefully)
	movl	%edi, %ecx		/* %ecx = MSR number for rdmsr */
	movq	%rdx, %rdi		/* save hi ptr; rdmsr writes %edx */
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr				/* may fault: covered by recovery entry */
	movl	%eax, (%rsi)		/* *lo = low 32 bits */
	movl	%edx, (%rdi)		/* *hi = high 32 bits */
	xorl	%eax, %eax		/* return 0: success */
	ret

rdmsr_fail:
	movl	$1, %eax		/* return 1: rdmsr faulted */
					/* (movl matches the sibling fail paths) */
	ret
/*
 * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
 *
 * Read an MSR with fault protection: on success, store the full 64-bit
 * value through 'val' and return 0; if the rdmsr faults, return 1.
 *
 * In:	%edi = msr, %rsi = val pointer
 */

ENTRY(rdmsr64_carefully)
	movl	%edi, %ecx		/* %ecx = MSR number for rdmsr */
	RECOVERY_SECTION
	RECOVER(rdmsr64_carefully_fail)	/* a fault here resumes at the handler */
	rdmsr
	movl	%eax, (%rsi)		/* low 32 bits of *val */
	movl	%edx, 4(%rsi)		/* high 32 bits of *val */
	xorl	%eax, %eax		/* return 0: success */
	ret
rdmsr64_carefully_fail:
	movl	$1, %eax		/* return 1: rdmsr faulted */
	ret
/*
 * int wrmsr_carefully(uint32_t msr, uint64_t val);
 * (header previously mis-named this wrmsr64_carefully)
 *
 * Write an MSR with fault protection: returns 0 on success, 1 if the
 * wrmsr faults.
 *
 * In:	%edi = msr, %rsi = 64-bit value to write
 */

ENTRY(wrmsr_carefully)
	movl	%edi, %ecx		/* %ecx = MSR number */
	movl	%esi, %eax		/* %eax = low 32 bits of value */
	shr	$32, %rsi
	movl	%esi, %edx		/* %edx = high 32 bits of value */
	RECOVERY_SECTION
	RECOVER(wrmsr_fail)
	wrmsr				/* may fault: covered by recovery entry */
	xorl	%eax, %eax		/* return 0: success */
	ret
wrmsr_fail:
	movl	$1, %eax		/* return 1: wrmsr faulted */
	ret
162
/*
 * thread_bootstrap_return: entry taken when a thread first returns to
 * user space (inferred from the name and the dtrace_thread_bootstrap
 * call — confirm against callers); falls through into
 * thread_exception_return.
 *
 * thread_exception_return: disable interrupts and return to user mode
 * via return_from_trap.
 */
.globl	EXT(thread_exception_return)
.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	call EXT(dtrace_thread_bootstrap)	/* notify DTrace of the new thread */
#endif

LEXT(thread_exception_return)
	cli				/* interrupts stay off through the return path */
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)
174
/*
 * Copyin/out from user/kernel address space.
 * rdi:	source address
 * rsi:	destination address
 * rdx:	byte count
 *
 * Copies count/8 quadwords, then the remaining count%8 bytes.  Both
 * string-move loops carry recovery entries, so a fault during either
 * lands in _bcopy_fail and returns EFAULT.  Returns 0 on success.
 * NOTE: only the low 32 bits of the count (%edx) are used.
 */
Entry(_bcopy)
// TODO not pop regs; movq; think about 32 bit or 64 bit byte count
	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$3,%ecx			/* %ecx = quadword count */
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsq				/* move longwords */

	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$7,%ecx			/* %ecx = leftover byte count (0-7) */
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsb

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

_bcopy_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	ret
206
/*
 * Fault-tolerant quadword read used by pmap.
 * rdi:	source address
 * rsi:	destination address
 *
 * Returns 1 with the quadword at (%rdi) stored through (%rsi) on
 * success; returns 0 if the read faults.  NOTE: only the load is
 * covered by a recovery entry — the store through %rsi is assumed to
 * be a valid address.
 */
Entry(pmap_safe_read)
	RECOVERY_SECTION
	RECOVER(_pmap_safe_read_fail)
	movq	(%rdi), %rcx		/* may fault: guarded load */
	mov	%rcx, (%rsi)
	mov	$1, %eax		/* return 1: success */
	ret
_pmap_safe_read_fail:
	xor	%eax, %eax		/* return 0: read faulted */
	ret
217
/*
 * 2-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 *
 * Both the load and the store carry recovery entries; a fault on
 * either side returns EFAULT via _bcopy_fail.  Returns 0 on success.
 */
Entry(_bcopy2)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	(%rdi), %cx		/* may fault: guarded 16-bit load */
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	%cx, (%rsi)		/* may fault: guarded 16-bit store */

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */
233
/*
 * 4-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 *
 * Both the load and the store carry recovery entries; a fault on
 * either side returns EFAULT via _bcopy_fail.  Returns 0 on success.
 */
Entry(_bcopy4)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movl	(%rdi), %ecx		/* may fault: guarded 32-bit load */
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movl	%ecx, (%rsi)		/* may fault: guarded 32-bit store */
					/* (explicit movl suffix for consistency) */

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */
249
/*
 * 8-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 *
 * Both the load and the store carry recovery entries; a fault on
 * either side returns EFAULT via _bcopy_fail.  Returns 0 on success.
 */
Entry(_bcopy8)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movq	(%rdi), %rcx		/* may fault: guarded 64-bit load */
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movq	%rcx, (%rsi)		/* may fault: guarded 64-bit store */
					/* (explicit movq suffix for consistency) */

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */
265
266
267
268/*
269 * Copyin string from user/kern address space.
270 * rdi:	source address
271 * rsi:	destination address
272 * rdx:	max byte count
273 * rcx:	actual byte count (OUT)
274 */
275Entry(_bcopystr)
276	pushq	%rdi
277	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */
278
279	xorl	%eax,%eax		/* set to 0 here so that high 24 bits */
280					/* are 0 for the cmpl against 0 */
2812:
282	RECOVERY_SECTION
283	RECOVER(_bcopystr_fail)		/* copy bytes... */
284	movb	(%rsi),%al
285	incq	%rsi
286	testq	%rdi,%rdi		/* if kernel address is ... */
287	jz	3f			/* not NULL */
288	movb	%al,(%rdi)		/* copy the byte */
289	incq	%rdi
2903:
291	testl	%eax,%eax		/* did we just stuff the 0-byte? */
292	jz	4f			/* yes, return 0 already in %eax */
293	decq	%rdx			/* decrement #bytes left in buffer */
294	jnz	2b			/* buffer not full, copy another byte */
295	movl	$(ENAMETOOLONG),%eax	/* buffer full, no \0: ENAMETOOLONG */
2964:
297	cmpq	$0,%rcx			/* get OUT len ptr */
298	jz	_bcopystr_ret		/* if null, just return */
299	subq	(%rsp),%rsi
300	movq	%rsi,(%rcx)		/* else set OUT arg to xfer len */
301	popq	%rdi			/* restore registers */
302_bcopystr_ret:
303	ret				/* and return */
304
305_bcopystr_fail:
306	popq	%rdi			/* restore registers */
307	movl	$(EFAULT),%eax		/* return error for failure */
308	ret
309
/*
 * Done with recovery table: emit the closing recover_table_end label
 * in the recovery section.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END
315
316