/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdp.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>
#include <config_dtrace.h>

#define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

#include <i386/mp.h>

/*
 * Fault recovery.
 */
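/*
 * How it works: each RECOVER(addr) use below records a pair of pointers
 * -- the address of the instruction that may fault (the local label 9:)
 * and the address of a recovery stub -- in a table bracketed by
 * recover_table/recover_table_end.  When a fault is taken in kernel
 * mode, the trap handler scans this table and, on a match, restarts
 * execution at the recovery stub instead of panicking.  A minimal C
 * sketch of that lookup, assuming a two-pointer entry layout and
 * illustrative names (not the actual trap-handler code):
 *
 *	struct recovery { uintptr_t fault_addr, recover_addr; };
 *	extern struct recovery recover_table[], recover_table_end[];
 *
 *	for (struct recovery *rp = recover_table;
 *	     rp < recover_table_end; rp++) {
 *		if (rp->fault_addr == fault_rip) {
 *			resume_at(rp->recover_addr);	/- hypothetical helper
 *			break;
 *		}
 *	}
 */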

#ifdef	__MACHO__
#define	RECOVERY_SECTION	.section	__VECTORS, __recover
#else
#define	RECOVERY_SECTION	.text
#endif

#define	RECOVER_TABLE_START	\
	.align 3		; \
	.globl	EXT(recover_table) ;\
LEXT(recover_table)		;\
	.text

#define	RECOVER(addr)		\
	.align	3;		\
	.quad	9f		;\
	.quad	addr		;\
	.text			;\
9:

#define	RECOVER_TABLE_END		\
	.align	3			;\
	.globl	EXT(recover_table_end)	;\
LEXT(recover_table_end)			;\
	.text
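/*
 * For reference, a RECOVER(foo_fail) use expands to roughly:
 *
 *	.section __VECTORS, __recover	# RECOVERY_SECTION (__MACHO__ case)
 *	.align	3
 *	.quad	9f			# address of the instruction below
 *	.quad	foo_fail		# where to resume if it faults
 *	.text
 * 9:
 *	<the potentially faulting instruction>
 *
 * so the table entry is emitted out of line while the 9: label tags the
 * very next instruction in the .text stream.
 */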

/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	%edi, %ecx		/* msr number */
	movq	%rdx, %rdi		/* save hi ptr; %rdx is clobbered by rdmsr */
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl	%eax, (%rsi)		/* *lo */
	movl	%edx, (%rdi)		/* *hi */
	xorl	%eax, %eax		/* success */
	ret

rdmsr_fail:
	movl	$1, %eax
	ret
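/*
 * Illustrative C-side use (variable names are assumptions):
 *
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(msr, &lo, &hi) != 0)
 *		;	/- rdmsr faulted (e.g. unimplemented MSR)
 *
 * A nonzero return means the rdmsr raised a fault that was caught via
 * the recovery table rather than panicking the kernel.
 */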
/*
 * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
 */

ENTRY(rdmsr64_carefully)
	movl	%edi, %ecx		/* msr number */
	RECOVERY_SECTION
	RECOVER(rdmsr64_carefully_fail)
	rdmsr
	movl	%eax, (%rsi)		/* low 32 bits of *val */
	movl	%edx, 4(%rsi)		/* high 32 bits of *val */
	xorl	%eax, %eax		/* success */
	ret
rdmsr64_carefully_fail:
	movl	$1, %eax
	ret
/*
 * int wrmsr_carefully(uint32_t msr, uint64_t val);
 */

ENTRY(wrmsr_carefully)
	movl	%edi, %ecx		/* msr number */
	movl	%esi, %eax		/* low 32 bits of value */
	shr	$32, %rsi
	movl	%esi, %edx		/* high 32 bits of value */
	RECOVERY_SECTION
	RECOVER(wrmsr_fail)
	wrmsr
	xorl	%eax, %eax		/* success */
	ret
wrmsr_fail:
	movl	$1, %eax
	ret
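/*
 * Illustrative C-side use (names are assumptions):
 *
 *	if (wrmsr_carefully(msr, value) != 0)
 *		;	/- wrmsr faulted; #GP absorbed via recovery table
 *
 * The 64-bit value arrives in %rsi and is split into the %edx:%eax
 * pair that the wrmsr instruction expects.
 */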

.globl	EXT(thread_exception_return)
.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	call EXT(dtrace_thread_bootstrap)
#endif
	/* fall through to thread_exception_return */

LEXT(thread_exception_return)
	cli
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)

/*
 * Copyin/out from user/kernel address space.
 * rdi:	source address
 * rsi:	destination address
 * rdx:	byte count
 */
Entry(_bcopy)
// TODO not pop regs; movq; think about 32 bit or 64 bit byte count
	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */

	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsq				/* move longwords */

	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$7,%ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsb

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

_bcopy_fail:
	movl	$(EFAULT),%eax		/* return error for failure */
	ret
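/*
 * _bcopy is the copy engine beneath the copyin()/copyout() paths: a C
 * wrapper validates the user range and then calls it with the recovery
 * table armed, so an unmapped user page surfaces as EFAULT instead of
 * a panic.  A minimal sketch of a call site (names assumed):
 *
 *	if (_bcopy(user_src, kernel_dst, nbytes) == EFAULT)
 *		return EFAULT;	/- fault absorbed, error returned
 */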

/*
 * pmap_safe_read: read one 64-bit word from (%rdi) into (%rsi),
 * absorbing any fault.  Note the inverted convention relative to the
 * _bcopy routines: returns 1 on success, 0 if the read faulted.
 */
Entry(pmap_safe_read)
	RECOVERY_SECTION
	RECOVER(_pmap_safe_read_fail)
	movq	(%rdi), %rcx		/* read the word */
	movq	%rcx, (%rsi)		/* store it through the out pointer */
	movl	$1, %eax		/* success */
	ret
_pmap_safe_read_fail:
	xorl	%eax, %eax		/* read faulted */
	ret

/*
 * 2-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 */
Entry(_bcopy2)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	(%rdi), %cx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movw	%cx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

/*
 * 4-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 */
Entry(_bcopy4)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movl	(%rdi), %ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movl	%ecx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */

/*
 * 8-byte copy used by ml_copy_phys().
 * rdi:	source address
 * rsi:	destination address
 */
Entry(_bcopy8)
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movq	(%rdi), %rcx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	movq	%rcx, (%rsi)

	xorl	%eax,%eax		/* return 0 for success */
	ret				/* and return */
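/*
 * All three fixed-width copies share the _bcopy_fail stub above.  A
 * plausible sketch of how ml_copy_phys() selects among them by size
 * (illustrative, not the verbatim C code):
 *
 *	switch (bytes) {
 *	case 2:  err = _bcopy2(src, dst); break;
 *	case 4:  err = _bcopy4(src, dst); break;
 *	case 8:  err = _bcopy8(src, dst); break;
 *	default: err = _bcopy(src, dst, bytes); break;
 *	}
 */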
/*
 * Copyin string from user/kernel address space.
 * rdi:	source address
 * rsi:	destination address
 * rdx:	max byte count
 * rcx:	actual byte count (OUT)
 */
Entry(_bcopystr)
	pushq	%rdi
	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */

	xorl	%eax,%eax		/* set to 0 here so that the high 24 bits */
					/* are 0 for the testl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(_bcopystr_fail)		/* copy bytes... */
	movb	(%rsi),%al
	incq	%rsi
	testq	%rdi,%rdi		/* if dest address is NULL... */
	jz	3f			/* ...skip the store, just count */
	movb	%al,(%rdi)		/* copy the byte */
	incq	%rdi
3:
	testl	%eax,%eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 already in %eax */
	decq	%rdx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full, copy another byte */
	movl	$(ENAMETOOLONG),%eax	/* buffer full, no \0: ENAMETOOLONG */
4:
	cmpq	$0,%rcx			/* get OUT len ptr */
	jz	_bcopystr_ret		/* if NULL, skip the store */
	subq	(%rsp),%rsi		/* bytes transferred, incl. the NUL */
	movq	%rsi,(%rcx)		/* set OUT arg to xfer len */
_bcopystr_ret:
	popq	%rdi			/* restore registers (on both paths, */
	ret				/* keeping the stack balanced) */

_bcopystr_fail:
	popq	%rdi			/* restore registers */
	movl	$(EFAULT),%eax		/* return error for failure */
	ret
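/*
 * _bcopystr is the engine beneath copyinstr()-style callers: it copies
 * at most %rdx bytes, stops after the terminating NUL, and reports the
 * bytes transferred (including the NUL) through the optional %rcx
 * pointer.  A minimal sketch of a call site (names assumed):
 *
 *	size_t done;
 *	int err = _bcopystr(user_str, kbuf, max, &done);
 *	/- 0: copied through NUL; ENAMETOOLONG: buffer filled first;
 *	/- EFAULT: source page unmapped
 */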
310
311/*
312 * Done with recovery table.
313 */
314	RECOVERY_SECTION
315	RECOVER_TABLE_END
316
317