1/*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <i386/asm.h>
29#include <i386/asm64.h>
30#include <assym.s>
31#include <mach_kdb.h>
32#include <i386/eflags.h>
33#include <i386/trap.h>
34#define _ARCH_I386_ASM_HELP_H_		/* Prevent inclusion of user header */
35#include <mach/i386/syscall_sw.h>
36#include <i386/postcode.h>
37#include <i386/proc_reg.h>
38
/*
 * Locore handlers.
 * Addresses of the low-memory 32-bit ("lo") handler entry points.  The
 * 64-bit trampolines below store one of these in the trap frame and
 * drop to compatibility mode before dispatching to it.
 */
#define	LO_ALLINTRS		EXT(lo_allintrs)	/* all device interrupts */
#define	LO_ALLTRAPS		EXT(lo_alltraps)	/* all traps/exceptions */
#define	LO_SYSCALL		EXT(lo_syscall)		/* 64-bit SYSCALL entry */
#define	LO_UNIX_SCALL		EXT(lo_unix_scall)	/* BSD syscall (int 0x80) */
#define	LO_MACH_SCALL		EXT(lo_mach_scall)	/* Mach trap (int 0x81) */
#define	LO_MDEP_SCALL		EXT(lo_mdep_scall)	/* machdep call (int 0x82) */
#define	LO_DIAG_SCALL		EXT(lo_diag_scall)	/* diagnostic call (int 0x83) */
#define	LO_DOUBLE_FAULT		EXT(lo_df64)		/* double fault */
#define	LO_MACHINE_CHECK	EXT(lo_mc64)		/* machine check */
51
/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-0 IST.
 *
 * Field layout below is the pre-fixup (fake) form, not the hardware
 * gate layout: low/high halves of the handler address, code selector,
 * pre-scaled IST index, and gate type.
 */
#define	IDT64_BASE_ENTRY(vec,seg,ist,type)		 \
	.data						;\
	.long	vec	/* handler address, low 32 bits */	;\
	.long	KERNEL_UBER_BASE_HI32	/* high 32: rebased into uber-space */	;\
	.word	seg	/* code segment selector */	;\
	.byte	ist*16	/* IST index, pre-scaled for fixup */	;\
	.byte	type	/* gate type (K_/U_INTR_GATE) */	;\
	.long	0					;\
	.text

/* Gate for an external (EXT) handler symbol, kernel 64-bit CS. */
#define	IDT64_ENTRY(vec,ist,type)			\
	IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
/* Gate for a file-local label, kernel 64-bit CS. */
#define	IDT64_ENTRY_LOCAL(vec,ist,type)			\
	IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
74
/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed (by CPU).
 * The trap number (low 32 bits) and the lo-handler address (high
 * 32 bits, stored via the movl to 4(%rsp)) share one 8-byte slot.
 */
#define	EXCEP64_ERR(n,name)				 \
	IDT64_ENTRY(name,0,K_INTR_GATE)			;\
Entry(name)						;\
	push	$(n)					;\
	movl	$(LO_ALLTRAPS), 4(%rsp)			;\
	jmp	L_enter_lohandler
85
86
/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 * Used for vectors where the CPU pushes no error code; a zero is
 * pushed to keep the frame layout uniform.
 */
#define	EXCEPTION64(n,name)				 \
	IDT64_ENTRY(name,0,K_INTR_GATE)			;\
Entry(name)						;\
	push	$0					;\
	push	$(n)					;\
	movl	$(LO_ALLTRAPS), 4(%rsp)			;\
	jmp	L_enter_lohandler
98
99
/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 * U_INTR_GATE marks the gate as reachable from user mode (e.g. INT3).
 */
#define	EXCEP64_USR(n,name)				 \
	IDT64_ENTRY(name,0,U_INTR_GATE)			;\
Entry(name)						;\
	push	$0					;\
	push	$(n)					;\
	movl	$(LO_ALLTRAPS), 4(%rsp)			;\
	jmp	L_enter_lohandler
112
113
/*
 * Special interrupt code from user.
 * Only the user-accessible gate is emitted here; the handler Entry(name)
 * is hand-written elsewhere in this file (e.g. hi64_unix_scall).
 */
#define EXCEP64_SPC_USR(n,name) 			\
	IDT64_ENTRY(name,0,U_INTR_GATE)
119
120
/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 * Handler bodies are hand-written elsewhere; only the gate is laid here.
 */
#define	EXCEP64_IST(n,name,ist) 			\
	IDT64_ENTRY(name,ist,K_INTR_GATE)
#define	EXCEP64_SPC(n,name)	 			\
	IDT64_ENTRY(name,0,K_INTR_GATE)
129
130
/*
 * Interrupt.
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 * A file-local stub label L_<vector> is generated for each vector.
 */
#define	INTERRUPT64(n)					 \
	IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE)	;\
	.align FALIGN					;\
L_ ## n:						;\
	push	$0					;\
	push	$(n)					;\
	movl	$(LO_ALLINTRS), 4(%rsp)			;\
	jmp	L_enter_lohandler
144
145
	.data
	.align 12	/* page alignment — assumes power-of-two .align semantics; confirm for this assembler */
Entry(master_idt64)		/* the 256 fake descriptors emitted by the macros below land here (.data) */
Entry(hi64_data_base)		/* base of the high (uber-space) data region */
	.text
	.code64
Entry(hi64_text_base)		/* base of the 64-bit trampoline text */
153
/*
 * Architectural exception vectors 0x00..0x1f.
 * Vector assignments follow the Intel-defined exception numbering.
 */
EXCEPTION64(0x00,t64_zero_div)		/* divide error */
EXCEP64_SPC(0x01,hi64_debug)		/* debug - special handler below */
INTERRUPT64(0x02)			/* NMI */
EXCEP64_USR(0x03,t64_int3)		/* breakpoint (INT3, user-reachable) */
EXCEP64_USR(0x04,t64_into)		/* overflow (INTO) */
EXCEP64_USR(0x05,t64_bounds)		/* BOUND range exceeded */
EXCEPTION64(0x06,t64_invop)		/* invalid opcode */
EXCEPTION64(0x07,t64_nofpu)		/* device not available (FPU) */
#if	MACH_KDB
EXCEP64_IST(0x08,db_task_dbl_fault64,1)	/* double fault - IST 1 */
#else
EXCEP64_IST(0x08,hi64_double_fault,1)	/* double fault - IST 1 */
#endif
EXCEPTION64(0x09,a64_fpu_over)		/* coprocessor segment overrun */
EXCEPTION64(0x0a,a64_inv_tss)		/* invalid TSS */
EXCEP64_SPC(0x0b,hi64_segnp)		/* segment not present */
#if	MACH_KDB
EXCEP64_IST(0x0c,db_task_stk_fault64,1)	/* stack fault - IST 1 under KDB */
#else
EXCEP64_SPC(0x0c,hi64_stack_fault)	/* stack-segment fault */
#endif
EXCEP64_SPC(0x0d,hi64_gen_prot)		/* general protection */
EXCEP64_SPC(0x0e, hi64_page_fault)	/* page fault */
EXCEPTION64(0x0f,t64_trap_0f)		/* (reserved) */
EXCEPTION64(0x10,t64_fpu_err)		/* x87 floating-point error */
EXCEPTION64(0x11,t64_trap_11)		/* alignment check */
EXCEP64_IST(0x12,mc64,1)		/* machine check - IST 1 */
EXCEPTION64(0x13,t64_sse_err)		/* SIMD floating-point exception */
EXCEPTION64(0x14,t64_trap_14)		/* (reserved) */
EXCEPTION64(0x15,t64_trap_15)
EXCEPTION64(0x16,t64_trap_16)
EXCEPTION64(0x17,t64_trap_17)
EXCEPTION64(0x18,t64_trap_18)
EXCEPTION64(0x19,t64_trap_19)
EXCEPTION64(0x1a,t64_trap_1a)
EXCEPTION64(0x1b,t64_trap_1b)
EXCEPTION64(0x1c,t64_trap_1c)
EXCEPTION64(0x1d,t64_trap_1d)
EXCEPTION64(0x1e,t64_trap_1e)
EXCEPTION64(0x1f,t64_trap_1f)
194
/*
 * Device interrupt vectors 0x20..0xff.  Exceptions: 0x7f (dtrace),
 * 0x80..0x83 (system call gates) and 0xff (preemption) below.
 */
INTERRUPT64(0x20)
INTERRUPT64(0x21)
INTERRUPT64(0x22)
INTERRUPT64(0x23)
INTERRUPT64(0x24)
INTERRUPT64(0x25)
INTERRUPT64(0x26)
INTERRUPT64(0x27)
INTERRUPT64(0x28)
INTERRUPT64(0x29)
INTERRUPT64(0x2a)
INTERRUPT64(0x2b)
INTERRUPT64(0x2c)
INTERRUPT64(0x2d)
INTERRUPT64(0x2e)
INTERRUPT64(0x2f)

INTERRUPT64(0x30)
INTERRUPT64(0x31)
INTERRUPT64(0x32)
INTERRUPT64(0x33)
INTERRUPT64(0x34)
INTERRUPT64(0x35)
INTERRUPT64(0x36)
INTERRUPT64(0x37)
INTERRUPT64(0x38)
INTERRUPT64(0x39)
INTERRUPT64(0x3a)
INTERRUPT64(0x3b)
INTERRUPT64(0x3c)
INTERRUPT64(0x3d)
INTERRUPT64(0x3e)
INTERRUPT64(0x3f)

INTERRUPT64(0x40)
INTERRUPT64(0x41)
INTERRUPT64(0x42)
INTERRUPT64(0x43)
INTERRUPT64(0x44)
INTERRUPT64(0x45)
INTERRUPT64(0x46)
INTERRUPT64(0x47)
INTERRUPT64(0x48)
INTERRUPT64(0x49)
INTERRUPT64(0x4a)
INTERRUPT64(0x4b)
INTERRUPT64(0x4c)
INTERRUPT64(0x4d)
INTERRUPT64(0x4e)
INTERRUPT64(0x4f)

INTERRUPT64(0x50)
INTERRUPT64(0x51)
INTERRUPT64(0x52)
INTERRUPT64(0x53)
INTERRUPT64(0x54)
INTERRUPT64(0x55)
INTERRUPT64(0x56)
INTERRUPT64(0x57)
INTERRUPT64(0x58)
INTERRUPT64(0x59)
INTERRUPT64(0x5a)
INTERRUPT64(0x5b)
INTERRUPT64(0x5c)
INTERRUPT64(0x5d)
INTERRUPT64(0x5e)
INTERRUPT64(0x5f)

INTERRUPT64(0x60)
INTERRUPT64(0x61)
INTERRUPT64(0x62)
INTERRUPT64(0x63)
INTERRUPT64(0x64)
INTERRUPT64(0x65)
INTERRUPT64(0x66)
INTERRUPT64(0x67)
INTERRUPT64(0x68)
INTERRUPT64(0x69)
INTERRUPT64(0x6a)
INTERRUPT64(0x6b)
INTERRUPT64(0x6c)
INTERRUPT64(0x6d)
INTERRUPT64(0x6e)
INTERRUPT64(0x6f)

INTERRUPT64(0x70)
INTERRUPT64(0x71)
INTERRUPT64(0x72)
INTERRUPT64(0x73)
INTERRUPT64(0x74)
INTERRUPT64(0x75)
INTERRUPT64(0x76)
INTERRUPT64(0x77)
INTERRUPT64(0x78)
INTERRUPT64(0x79)
INTERRUPT64(0x7a)
INTERRUPT64(0x7b)
INTERRUPT64(0x7c)
INTERRUPT64(0x7d)
INTERRUPT64(0x7e)
EXCEP64_USR(0x7f, t64_dtrace_ret)	/* dtrace return probe, user-reachable */

/*
 * System call gates (user-reachable); handler bodies are below.
 */
EXCEP64_SPC_USR(0x80,hi64_unix_scall)	/* BSD syscall */
EXCEP64_SPC_USR(0x81,hi64_mach_scall)	/* Mach trap */
EXCEP64_SPC_USR(0x82,hi64_mdep_scall)	/* machine-dependent call */
EXCEP64_SPC_USR(0x83,hi64_diag_scall)	/* diagnostics call */

INTERRUPT64(0x84)
INTERRUPT64(0x85)
INTERRUPT64(0x86)
INTERRUPT64(0x87)
INTERRUPT64(0x88)
INTERRUPT64(0x89)
INTERRUPT64(0x8a)
INTERRUPT64(0x8b)
INTERRUPT64(0x8c)
INTERRUPT64(0x8d)
INTERRUPT64(0x8e)
INTERRUPT64(0x8f)

INTERRUPT64(0x90)
INTERRUPT64(0x91)
INTERRUPT64(0x92)
INTERRUPT64(0x93)
INTERRUPT64(0x94)
INTERRUPT64(0x95)
INTERRUPT64(0x96)
INTERRUPT64(0x97)
INTERRUPT64(0x98)
INTERRUPT64(0x99)
INTERRUPT64(0x9a)
INTERRUPT64(0x9b)
INTERRUPT64(0x9c)
INTERRUPT64(0x9d)
INTERRUPT64(0x9e)
INTERRUPT64(0x9f)

INTERRUPT64(0xa0)
INTERRUPT64(0xa1)
INTERRUPT64(0xa2)
INTERRUPT64(0xa3)
INTERRUPT64(0xa4)
INTERRUPT64(0xa5)
INTERRUPT64(0xa6)
INTERRUPT64(0xa7)
INTERRUPT64(0xa8)
INTERRUPT64(0xa9)
INTERRUPT64(0xaa)
INTERRUPT64(0xab)
INTERRUPT64(0xac)
INTERRUPT64(0xad)
INTERRUPT64(0xae)
INTERRUPT64(0xaf)

INTERRUPT64(0xb0)
INTERRUPT64(0xb1)
INTERRUPT64(0xb2)
INTERRUPT64(0xb3)
INTERRUPT64(0xb4)
INTERRUPT64(0xb5)
INTERRUPT64(0xb6)
INTERRUPT64(0xb7)
INTERRUPT64(0xb8)
INTERRUPT64(0xb9)
INTERRUPT64(0xba)
INTERRUPT64(0xbb)
INTERRUPT64(0xbc)
INTERRUPT64(0xbd)
INTERRUPT64(0xbe)
INTERRUPT64(0xbf)

INTERRUPT64(0xc0)
INTERRUPT64(0xc1)
INTERRUPT64(0xc2)
INTERRUPT64(0xc3)
INTERRUPT64(0xc4)
INTERRUPT64(0xc5)
INTERRUPT64(0xc6)
INTERRUPT64(0xc7)
INTERRUPT64(0xc8)
INTERRUPT64(0xc9)
INTERRUPT64(0xca)
INTERRUPT64(0xcb)
INTERRUPT64(0xcc)
INTERRUPT64(0xcd)
INTERRUPT64(0xce)
INTERRUPT64(0xcf)

INTERRUPT64(0xd0)
INTERRUPT64(0xd1)
INTERRUPT64(0xd2)
INTERRUPT64(0xd3)
INTERRUPT64(0xd4)
INTERRUPT64(0xd5)
INTERRUPT64(0xd6)
INTERRUPT64(0xd7)
INTERRUPT64(0xd8)
INTERRUPT64(0xd9)
INTERRUPT64(0xda)
INTERRUPT64(0xdb)
INTERRUPT64(0xdc)
INTERRUPT64(0xdd)
INTERRUPT64(0xde)
INTERRUPT64(0xdf)

INTERRUPT64(0xe0)
INTERRUPT64(0xe1)
INTERRUPT64(0xe2)
INTERRUPT64(0xe3)
INTERRUPT64(0xe4)
INTERRUPT64(0xe5)
INTERRUPT64(0xe6)
INTERRUPT64(0xe7)
INTERRUPT64(0xe8)
INTERRUPT64(0xe9)
INTERRUPT64(0xea)
INTERRUPT64(0xeb)
INTERRUPT64(0xec)
INTERRUPT64(0xed)
INTERRUPT64(0xee)
INTERRUPT64(0xef)

INTERRUPT64(0xf0)
INTERRUPT64(0xf1)
INTERRUPT64(0xf2)
INTERRUPT64(0xf3)
INTERRUPT64(0xf4)
INTERRUPT64(0xf5)
INTERRUPT64(0xf6)
INTERRUPT64(0xf7)
INTERRUPT64(0xf8)
INTERRUPT64(0xf9)
INTERRUPT64(0xfa)
INTERRUPT64(0xfb)
INTERRUPT64(0xfc)
INTERRUPT64(0xfd)
INTERRUPT64(0xfe)
EXCEPTION64(0xff,t64_preempt)		/* preemption request, routed as a trap */
433
434
435        .text
436/*
437 *
438 * Trap/interrupt entry points.
439 *
440 * All traps must create the following 32-bit save area on the PCB "stack"
441 * - this is identical to the legacy mode 32-bit case:
442 *
443 *	gs
444 *	fs
445 *	es
446 *	ds
447 *	edi
448 *	esi
449 *	ebp
450 *	cr2 (defined only for page fault)
451 *	ebx
452 *	edx
453 *	ecx
454 *	eax
455 *	trap number
456 *	error code
457 *	eip
458 *	cs
459 *	eflags
460 *	user esp - if from user
461 *	user ss  - if from user
462 *
463 * Above this is the trap number and compatibility mode handler address
464 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
465 *
466 *	(trapno, trapfn)
467 *	err
468 *	rip
469 *	cs
470 *	rflags
471 *	rsp
472 *	ss
473 *
474 */
475
	.code32
/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 *
 * Sequence: restore per-thread debug registers (32- or 64-bit flavor),
 * enter long mode and uber-space, switch cr3 to the user's map if
 * needed, relocate %rsp into uber-space, then dispatch to the 32- or
 * 64-bit return path based on the saved state flavor.
 */
Entry(lo64_ret_to_user)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	movl	ACT_PCB_IDS(%ecx),%eax	/* Obtain this thread's debug state */
	cmpl	$0,%eax			/* Is there a debug register context? */
	je	2f 			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%eax), %ecx	/* If so, load the 32 bit DRs */
	movl	%ecx, %db0
	movl	DS_DR1(%eax), %ecx
	movl	%ecx, %db1
	movl	DS_DR2(%eax), %ecx
	movl	%ecx, %db2
	movl	DS_DR3(%eax), %ecx
	movl	%ecx, %db3
	movl	DS_DR7(%eax), %ecx
	movl 	%ecx, %gs:CPU_DR7	/* stash DR7; applied after cr3 switch below */
	movl	$0, %gs:CPU_DR7 + 4
	jmp 	2f
1:
	ENTER_64BIT_MODE()		/* Enter long mode */
	mov	DS64_DR0(%eax), %rcx	/* Load the full width DRs*/
	mov	%rcx, %dr0
	mov	DS64_DR1(%eax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%eax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%eax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%eax), %rcx
	mov 	%rcx, %gs:CPU_DR7	/* stash DR7; applied after cr3 switch below */
	jmp	3f			/* Enter uberspace */
2:
	ENTER_64BIT_MODE()
3:
	ENTER_UBERSPACE()

	/*
	 * Now switch %cr3, if necessary.
	 */
	swapgs				/* switch back to uber-kernel gs base */
	mov	%gs:CPU_TASK_CR3,%rcx
	mov	%rcx,%gs:CPU_ACTIVE_CR3
	mov	%cr3, %rax
	cmp	%rcx, %rax
	je	1f
	/* flag the copyio engine state as WINDOWS_CLEAN */
	mov	%gs:CPU_ACTIVE_THREAD,%eax
	movl	$(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
	mov	%rcx,%cr3               /* switch to user's address space */
1:

	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register?*/
	cmp	$0, %rax
	je	1f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
1:

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp			/* relocate into uber-space */

	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	jmp	L_32bit_return
553
/*
 * Return to interrupted kernel code.  Same relocation dance as the
 * user return path, but no debug-register or cr3 handling is needed.
 */
Entry(lo64_ret_to_kernel)
	ENTER_64BIT_MODE()
	ENTER_UBERSPACE()

	swapgs				/* switch back to uber-kernel gs base */

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp			/* relocate into uber-space */

	/* Check for return to 64-bit kernel space (EFI today) */
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	/* fall through for 32-bit return */
571
L_32bit_return:
	/*
	 * Restore registers into the machine state for iret.
	 * The 32-bit save area and the ISC32 hardware frame overlap on
	 * this stack, so saved values are copied into iret's layout.
	 */
	movl	R_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)

	/*
	 * Restore general 32-bit registers
	 */
	movl	R_EAX(%rsp), %eax
	movl	R_EBX(%rsp), %ebx
	movl	R_ECX(%rsp), %ecx
	movl	R_EDX(%rsp), %edx
	movl	R_EBP(%rsp), %ebp
	movl	R_ESI(%rsp), %esi
	movl	R_EDI(%rsp), %edi

	/*
	 * Restore segment registers. We may take an exception here but
	 * we've got enough space left in the save frame area to absorb
         * a hardware frame plus the trapfn and trapno
	 * (the ret32_set_* labels are matched by trap_check_kernel_exit).
	 */
	swapgs
EXT(ret32_set_ds):
	movw	R_DS(%rsp), %ds
EXT(ret32_set_es):
	movw	R_ES(%rsp), %es
EXT(ret32_set_fs):
	movw	R_FS(%rsp), %fs
EXT(ret32_set_gs):
	movw	R_GS(%rsp), %gs

	add	$(ISC32_OFFSET)+8+8, %rsp	/* pop compat frame +
						   trapno/trapfn and error */
        cmp	$(SYSENTER_CS),ISF64_CS-8-8(%rsp)
					/* test for fast entry/exit */
        je      L_fast_exit
EXT(ret32_iret):
        iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupts enable, sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */
	.code32
	sti				/* interrupts enabled after sysexit */
	sysexit				/* 32-bit sysexit */
	.code64
631
L_64bit_return:
	/*
	 * Set the GS Base MSR with the user's gs base.
	 * (Only performed when actually returning to user mode.)
	 */
	movl	%gs:CPU_UBER_USER_GS_BASE, %eax
	movl	%gs:CPU_UBER_USER_GS_BASE+4, %edx
	movl	$(MSR_IA32_GS_BASE), %ecx
	swapgs
	testb	$3, R64_CS(%rsp)		/* returning to user-space? */
	jz	1f
	wrmsr					/* set 64-bit base */
1:

	/*
	 * Restore general 64-bit registers
	 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp),  %r9
	mov	R64_R8(%rsp),  %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax

	add	$(ISS64_OFFSET)+8+8, %rsp	/* pop saved state frame +
						   trapno/trapfn and error */
        cmpl	$(SYSCALL_CS),ISF64_CS-8-8(%rsp)
					/* test for fast entry/exit */
        je      L_sysret
EXT(ret64_iret):
        iretq				/* return from interrupt */

L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 * 	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-16(%rsp), %rcx
	mov	ISF64_RFLAGS-16(%rsp), %r11
	mov	ISF64_RSP-16(%rsp), %rsp
	sysretq				/* return from system call */
683
/*
 * Common path to enter locore handlers.
 * Entered with a trap frame holding (trapno,trapfn) + error on top.
 * Picks the 32- or 64-bit state-save path by examining the saved CS.
 */
L_enter_lohandler:
	swapgs				/* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
	cmpl	$(USER64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* this is a 64-bit user task */
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* we're in 64-bit (EFI) code */
	jmp	L_32bit_enter
695
/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

Entry(hi64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_unix_scall_continue:			/* re-entry point used by hi64_debug restart */
	push	%rax			/* save system call number */
	push	$(UNIX_INT)
	movl	$(LO_UNIX_SCALL), 4(%rsp)	/* lo handler in high half of slot */
	jmp	L_32bit_enter_check
710
711
/* Mach trap (int 0x81) entry; see the handler comment above. */
Entry(hi64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mach_scall_continue:			/* re-entry point used by hi64_debug restart */
	push	%rax			/* save system call number */
	push	$(MACH_INT)
	movl	$(LO_MACH_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check
719
720
/* Machine-dependent call (int 0x82) entry; see the handler comment above. */
Entry(hi64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:			/* re-entry point used by hi64_debug restart */
	push	%rax			/* save system call number */
	push	$(MACHDEP_INT)
	movl	$(LO_MDEP_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check
728
729
/* Diagnostics call (int 0x83) entry; see the handler comment above. */
Entry(hi64_diag_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_diag_scall_continue:			/* re-entry point used by hi64_debug restart */
	push	%rax			/* save system call number */
	push	$(DIAG_INT)
	movl	$(LO_DIAG_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check
737
/*
 * 64-bit SYSCALL instruction entry.  SYSCALL does not switch stacks,
 * so the user stack is stashed and %rsp is pointed at the PCB's ISF,
 * then a software-constructed interrupt frame is filled in.
 * On entry (hardware): %rcx = user rip, %r11 = user rflags.
 */
Entry(hi64_syscall)
	swapgs				/* Kapow! get per-cpu data area */
L_syscall_continue:			/* re-entry point used by hi64_double_fault */
	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movl	$(0), ISF64_TRAPNO(%rsp)	/* trapno */
	movl	$(LO_SYSCALL), ISF64_TRAPFN(%rsp)
	jmp	L_64bit_enter		/* this can only be a 64-bit task */
758
759
L_32bit_enter_check:
	/*
	 * Check we're not a confused 64-bit user.
	 * 64-bit tasks must not use the 32-bit int-gate entries;
	 * reject them with an invalid-opcode trap.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	jmp	L_32bit_enter
/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *       or requiring ecx to be preserved.
 */
Entry(hi64_sysenter)
	mov	(%rsp), %rsp		/* switch from temporary stack to pcb */
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flags bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
	swapgs				/* switch to kernel gs (cpu_data) */
L_sysenter_continue:			/* re-entry point used by hi64_debug restart */
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	push	$(0)			/* trapno slot; trapfn stored below */
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)	/* re-enable IF in saved flags */
	movl	$(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
	testl	%eax, %eax		/* negative %eax => Mach trap... */
	js	L_32bit_enter_check
	movl	$(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)	/* ...else Unix syscall */
 	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
 	jne	L_64bit_entry_reject
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline--if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
L_sysenter_copy_args:
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_32bit_enter
	xor	%r9, %r9
	mov	%gs:CPU_UBER_ARG_STORE, %r8	/* per-cpu argument store */
	movl	%eax, %r9d
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %r12
	xor	%r10, %r10
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d	/* %r9d = arg dword count */
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %r9d
	movl	$0, (%r12)		/* mark store invalid while copying */
EXT(hi64_sysenter_user_arg_copy):	/* faults at this rip are recognized by the fault handlers */
0:
	movl	4(%rcx, %r10, 4), %r11d	/* skip ret-addr slot on user stack */
	movl	%r11d, (%r8, %r10, 4)
	incl	%r10d
	decl	%r9d
	jnz	0b
	movl	$1, (%r12)		/* mark store valid: copy completed */
	/* Fall through to 32-bit handler */
831
L_32bit_enter:
	/*
	 * Make space for the compatibility save area.
	 */
	sub	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	/*
	 * Save segment regs
	 */
	mov	%ds, R_DS(%rsp)
	mov	%es, R_ES(%rsp)
	mov	%fs, R_FS(%rsp)
	mov	%gs, R_GS(%rsp)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R_EAX(%rsp)
	mov	%ebx, R_EBX(%rsp)
	mov	%ecx, R_ECX(%rsp)
	mov	%edx, R_EDX(%rsp)
	mov	%ebp, R_EBP(%rsp)
	mov	%esi, R_ESI(%rsp)
	mov	%edi, R_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R_CR2(%rsp)	/* low 32 bits only - 32-bit state */

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R_EFLAGS(%rsp)
	mov	ISC32_CS(%rsp), %eax
	mov	%eax, R_CS(%rsp)
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R_SS(%rsp)
L_32bit_enter_after_fault:		/* entered directly when state was already saved */
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %edx
882
/*
 * Common point to enter lo_handler in compatibility mode:
 *	%ebx	trapno
 *	%edx	locore handler address
 */
L_enter_lohandler2:
	/*
	 * Switch address space to kernel
	 * if not shared space and not already mapped.
	 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
	 */
	mov	%cr3, %rax
	mov	%gs:CPU_TASK_CR3, %rcx
	cmp	%rax, %rcx			/* is the task's cr3 loaded? */
	jne	1f
	cmpl	$(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
	je	2f
1:
	mov	%gs:CPU_KERNEL_CR3, %rcx
	cmp	%rax, %rcx
	je	2f
	mov	%rcx, %cr3
	mov	%rcx, %gs:CPU_ACTIVE_CR3
2:
	/*
	 * Switch to compatibility mode.
	 * Then establish kernel segments.
	 */
	swapgs					/* Done with uber-kernel gs */
	ENTER_COMPAT_MODE()

	/*
	 * Now in compatibility mode and running in compatibility space
	 * prepare to enter the locore handler.
	 * 	%ebx		trapno
	 *	%edx		lo_handler pointer
	 * Note: the stack pointer (now 32-bit) is now directly addressing
	 * the kernel below 4G and therefore is automagically re-based.
	 */
	mov	$(KERNEL_DS), %eax
	mov	%eax, %ss
	mov	%eax, %ds
	mov	%eax, %es
	mov	%eax, %fs
	mov	$(CPU_DATA_GS), %eax
	mov	%eax, %gs

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Get the active thread */
	cmpl	$0, ACT_PCB_IDS(%ecx)	/* Is there a debug register state? */
	je	1f
	movl	$0, %ecx		/* If so, reset DR7 (the control) */
	movl	%ecx, %dr7
1:
	addl	$1,%gs:hwIntCnt(,%ebx,4)	// Bump the trap/intr count
	
	/* Dispatch the designated lo handler */
	jmp	*%edx
940
	.code64
L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 * Rewrite the trap frame as an invalid-opcode trap and fall
	 * into the normal 64-bit state save.
	 */
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
	movl	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	/* Fall through... */

L_64bit_enter:
	/*
	 * Here for a 64-bit user task, or special 64-bit kernel code.
	 * Make space for the save area.
	 */
	sub	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	/*
	 * Save segment regs
	 */
	mov	%fs, R64_FS(%rsp)
	mov	%gs, R64_GS(%rsp)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */

L_64bit_enter_after_fault:		/* entered directly when state was already saved */
	/*
	 * At this point we're almost ready to join the common lo-entry code.
	 */
	mov	R64_TRAPNO(%rsp), %ebx
	mov	R64_TRAPFN(%rsp), %edx

	jmp	L_enter_lohandler2
995
/*
 * Page fault.  Normally routed via the common trampoline, but a fault
 * at the sysenter user-argument copy rip (in uber-space) is recovered
 * inline: unwind to the frame saved in the fault's RSP slot and fall
 * back to the 32-bit entry path (copyin will be used instead).
 */
Entry(hi64_page_fault)
	push	$(T_PAGE_FAULT)
	movl	$(LO_ALLTRAPS), 4(%rsp)
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler
	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler
	mov	ISF64_RSP(%rsp), %rsp	/* kernel gs already active on this path */
	jmp	L_32bit_enter
1005
/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 * A debug trap landing exactly on one of the hi64_* entry points is
 * recovered by discarding the debug frame and restarting that entry.
 */
Entry(hi64_debug)
	swapgs				/* set %gs for cpu data */
	push	$0			/* error code */
	push	$(T_DEBUG)
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_enter_lohandler_continue

	/*
	 * trap came from kernel mode
	 */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
	jne	6f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mach_scall_continue	/* continue system call entry */
6:
	cmpl	$(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
	jne	5f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mdep_scall_continue	/* continue system call entry */
5:
	cmpl	$(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
	jne	4f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_unix_scall_continue	/* continue system call entry */
4:
	cmpl	$(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and copy eflags.
	 */
	add	$32,%rsp		/* remove trapno/trapfn/err/rip/cs */
	push	%rcx			/* save %rcx - user stack pointer */
	mov	32(%rsp),%rcx		/* top of intr stack -> pcb stack */
	xchg	%rcx,%rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	(%rcx)			/* saved %rcx into rsp slot */
	push	8(%rcx)			/* rflags */
	mov	(%rcx),%rcx		/* restore %rcx */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	jmp	L_sysenter_continue	/* continue sysenter entry */
1058
1059
/*
 * Double fault (arrives on IST 1; CPU has pushed an error code).
 * A double fault at the hi64_syscall entry rip is recovered by
 * switching back to the stack saved in the fault frame and
 * restarting the syscall entry sequence.
 */
Entry(hi64_double_fault)
	swapgs				/* set %gs for cpu data */
	push	$(T_DOUBLE_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)

	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_syscall)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue
1073
1074
/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->     0:	trap code (NP or GP) and trap function
 *	     8:	segment number in error (error code)
 *	    16	rip
 *	    24	cs
 *	    32	rflags
 *	    40	rsp
 *	    48	ss
 *	    56	old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
	push	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_stack_fault)
	push	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_segnp)
	push	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	movl	$(LO_ALLTRAPS), 4(%rsp)
	testb	$3,24(%rsp)		/* user-mode fault: take it normally */
	jnz	hi64_take_trap
					/* trap was from kernel mode, so */
					/* check for the kernel exit sequence */
	cmpl	$(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
	jne	hi64_take_trap		/* trap not in uber-space */

	cmpl	$(EXT(ret32_iret)), 16(%rsp)
	je	L_fault_iret32
	cmpl	$(EXT(ret32_set_ds)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_es)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_fs)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_gs)), 16(%rsp)
	je	L_32bit_fault_set_seg

	cmpl	$(EXT(ret64_iret)), 16(%rsp)
	je	L_fault_iret64

	/* Fault in the sysenter argument-copy loop: fall back to copyin */
	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	hi64_take_trap
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter
hi64_take_trap:
	jmp	L_enter_lohandler
1129
1130
/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *   0	trap number/function
 *   8	errcode
 *  16	rip
 *  24	cs
 *  32	rflags
 *  40	rsp
 *  48	ss			--> new trapno/trapfn
 *  56  (16-byte padding)	--> new errcode
 *  64	user rip
 *  72	user cs
 *  80	user rflags
 *  88	user rsp
 *  96  user ss
 */
L_fault_iret32:
	mov	%rax, 16(%rsp)		/* save rax (we don't need saved rip) */
	mov	0(%rsp), %rax		/* get trap number */
	mov	%rax, 48(%rsp)		/* put in user trap number */
	mov	8(%rsp), %rax		/* get error code */
	mov	%rax, 56(%rsp)		/* put in user errcode */
	mov	16(%rsp), %rax		/* restore rax */
	add	$48, %rsp		/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_32bit_enter

L_fault_iret64:
	mov	%rax, 16(%rsp)		/* save rax (we don't need saved rip) */
	mov	0(%rsp), %rax		/* get trap number */
	mov	%rax, 48(%rsp)		/* put in user trap number */
	mov	8(%rsp), %rax		/* get error code */
	mov	%rax, 56(%rsp)		/* put in user errcode */
	mov	16(%rsp), %rax		/* restore rax */
	add	$48, %rsp		/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_64bit_enter

/*
 * Fault restoring a segment register.  All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
	mov	0(%rsp), %rax		/* get trap number/function */
	mov	8(%rsp), %rdx		/* get error code */
	mov	40(%rsp), %rsp		/* reload stack prior to fault */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	swapgs
	jmp	L_32bit_enter_after_fault
1191
1192
/*
 * Fatal exception handlers:
 * KDB task-gate replacements and the machine-check entry.  All simply
 * tag the frame with the appropriate lo handler and join the common path.
 */
Entry(db_task_dbl_fault64)
	push	$(T_DOUBLE_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler

Entry(db_task_stk_fault64)
	push	$(T_STACK_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)	/* NOTE(review): routed to the double-fault lo handler, not a stack-fault one — confirm intentional */
	jmp	L_enter_lohandler

Entry(mc64)
	push	$(0)			/* Error */
	push	$(T_MACHINE_CHECK)
	movl	$(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler
1211