#include <mini-os/x86/os.h>
#include <mini-os/x86/limits.h>
#include <xen/features.h>

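/*
 * Legacy __xen_guest note: a NUL-terminated string of key=value
 * pairs read by the Xen domain builder.  VIRT_BASE/ELF_PADDR_OFFSET
 * place the image at address 0, and HYPERCALL_PAGE=0x2 names the
 * page of the image (offset 0x2000, reserved below) that Xen fills
 * with hypercall stubs.
 */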
.section __xen_guest
	.ascii	"GUEST_OS=rumprun"
	.ascii	",XEN_VER=xen-3.0"
	.ascii	",VIRT_BASE=0x0" /* &_text from minios_x86_64.lds */
	.ascii	",ELF_PADDR_OFFSET=0x0"
	.ascii	",HYPERCALL_PAGE=0x2"
	.ascii	",LOADER=generic"
	.byte	0
.text

#define ENTRY(name) \
        .globl _minios_entry_##name; \
        _minios_entry_##name:

.globl _start, _minios_shared_info, _minios_hypercall_page

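/*
 * Entry point.  Xen starts a 64-bit PV guest with %rsi pointing at
 * the start_info page.  Set up our stack (aligned down to a
 * __STACK_SIZE boundary), push two zero words as a fake return
 * address/frame and clear %rbp to terminate backtraces, then pass
 * start_info to the C entry point as its first argument.
 */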
_start:
        cld
        movq stack_start(%rip),%rsp
        andq $(~(__STACK_SIZE-1)), %rsp
        pushq $0
        pushq $0
        xorq %rbp,%rbp
        movq %rsi,%rdi
        call _minios_start_kernel

stack_start:
        .quad _minios_stack+(2*__STACK_SIZE)

        /* Unpleasant -- the PTE that maps this page is actually overwritten */
        /* to map the real shared-info page! :-)                             */
        .org 0x1000
_minios_shared_info:
        .org 0x2000

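/*
 * Hypercall transfer page, page 2 of the image as advertised via
 * HYPERCALL_PAGE above.  Xen fills it with one 32-byte stub per
 * hypercall, which is why HYPERVISOR_IRET below indexes it by
 * __HYPERVISOR_iret * 32.
 */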
_minios_hypercall_page:
        .org 0x3000

/* Offsets into shared_info_t. */
#define evtchn_upcall_pending		/* 0 */
#define evtchn_upcall_mask		1

NMI_MASK = 0x80000000

#define RDI 112
#define ORIG_RAX 120       /* + error_code */
#define EFLAGS 144

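/*
 * Layout of the trap frame built by error_common below, as offsets
 * from %rsp: 0 r15, 8 r14, 16 r13, 24 r12, 32 rbp, 40 rbx, 48 r11,
 * 56 r10, 64 r9, 72 r8, 80 rax, 88 rcx, 96 rdx, 104 rsi, 112 rdi
 * (RDI), 120 error code/orig_rax (ORIG_RAX), then the frame pushed
 * on entry: 128 rip, 136 cs, 144 rflags (EFLAGS), 152 rsp, 160 ss.
 *
 * RESTORE_ALL undoes the caller-saved half of that frame: it reloads
 * r11..rdi and skips the error code slot, leaving %rsp at the saved
 * %rip.  It is only used once the callee-saved registers (r15..rbx)
 * have already been popped, i.e. with %rsp at the r11 slot.
 */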
.macro RESTORE_ALL
	movq (%rsp),%r11
	movq 1*8(%rsp),%r10
	movq 2*8(%rsp),%r9
	movq 3*8(%rsp),%r8
	movq 4*8(%rsp),%rax
	movq 5*8(%rsp),%rcx
	movq 6*8(%rsp),%rdx
	movq 7*8(%rsp),%rsi
	movq 8*8(%rsp),%rdi
	addq $9*8+8,%rsp
.endm

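/*
 * Return to the interrupted context; %rsp must point at the saved
 * %rip.  Three cases:
 *   - NMI_MASK (bit 31) set in the saved rflags: return via the
 *     HYPERVISOR_iret hypercall (a jump into the hypercall page),
 *     passing \flag on the stack;
 *   - XENFEAT_supervisor_mode_kernel: we really run in ring 0, so a
 *     plain iretq suffices;
 *   - normal PV case: a 64-bit PV kernel runs in ring 3, so force
 *     RPL 3 into the saved %cs/%ss before the iretq.
 */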
.macro HYPERVISOR_IRET flag
	testl $NMI_MASK,2*8(%rsp)
	jnz   2f

	testb $1,(_minios_xen_features+XENFEAT_supervisor_mode_kernel)
	jnz   1f

	/* Direct iret to kernel space. Correct CS and SS. */
	orb   $3,1*8(%rsp)
	orb   $3,4*8(%rsp)
1:	iretq

2:	/* Slow iret via hypervisor. */
	andl  $~NMI_MASK, 16(%rsp)
	pushq $\flag
	jmp  _minios_hypercall_page + (__HYPERVISOR_iret * 32)
.endm

/*
 * Common code to all exception entry points. This expects an error
 * code/orig_rax on the stack and the exception handler in %rax.
 */
error_common:
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	movq %rsi,13*8(%rsp)
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	movq %rcx,11*8(%rsp)
	movq %rsi,10*8(%rsp)	/* store rax */
	movq %r8, 9*8(%rsp)
	movq %r9, 8*8(%rsp)
	movq %r10,7*8(%rsp)
	movq %r11,6*8(%rsp)
	movq %rbx,5*8(%rsp)
	movq %rbp,4*8(%rsp)
	movq %r12,3*8(%rsp)
	movq %r13,2*8(%rsp)
	movq %r14,1*8(%rsp)
	movq %r15,(%rsp)

error_call_handler:
	movq %rdi, RDI(%rsp)
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	# get error code
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	jmp error_exit

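/*
 * Entry stub shared by all exception vectors and the event callback.
 * On 64-bit PV, Xen pushes %rcx and %r11 above the hardware frame
 * (its syscall-style entry clobbers them), so restore and discard
 * those first.  Vectors without a hardware error code push a dummy
 * one so error_common always sees the same frame; the C handler's
 * address is handed over in %rax.
 */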
.macro errorentry sym has_error_code:req
	movq (%rsp),%rcx
	movq 8(%rsp),%r11
	addq $0x10,%rsp	/* skip rcx and r11 */
	.if !\has_error_code
	pushq $0	/* push error code/oldrax */
	.endif
	pushq %rax	/* push real oldrax to the rdi slot */
	leaq  \sym(%rip),%rax
	jmp error_common
.endm

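/*
 * Accessors for VCPU 0's vcpu_info, which lives at the start of the
 * shared info page: evtchn_upcall_pending is byte 0 (hence the empty
 * #define above) and evtchn_upcall_mask byte 1.  The PUT/fixup
 * macros are no-ops in this configuration.
 */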
#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
#define XEN_PUT_VCPU_INFO_fixup
#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)

#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
				XEN_PUT_VCPU_INFO(reg)

#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
				XEN_PUT_VCPU_INFO(reg)


ENTRY(hypervisor_callback)
        errorentry hypervisor_callback2 0

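/*
 * %rdi carries the pointer to the trap frame built by
 * error_call_handler.  Switch to the dedicated IRQ stack only on the
 * outermost callback: %gs:0 holds the nesting count (presumably
 * entered at -1, Linux-style, so incl sets ZF exactly on the first
 * level) and %gs:8 the IRQ stack pointer.  The frame pointer is kept
 * on the stack across the call and popped straight back into %rsp,
 * undoing any stack switch.
 */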
hypervisor_callback2:
        movq %rdi, %rsp
11:     movq %gs:8,%rax
        incl %gs:0
        cmovzq %rax,%rsp
        pushq %rdi
        call _minios_do_hypervisor_callback
        popq %rsp
        decl %gs:0
        jmp error_exit

restore_all_enable_events:
	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...

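/*
 * Critical region: events were just re-enabled, but the frame is
 * still live on the stack.  If an event is already pending, block
 * events again, re-save the callee-saved registers and loop back to
 * handle it; otherwise fall through to the final register restore
 * and iret.
 */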
scrit:	/**** START OF CRITICAL REGION ****/
	XEN_TEST_PENDING(%rsi)
	jnz  14f			# process more events if necessary...
	XEN_PUT_VCPU_INFO(%rsi)
	RESTORE_ALL
	HYPERVISOR_IRET 0

14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
	XEN_PUT_VCPU_INFO(%rsi)
	subq $6*8,%rsp
	movq %rbx,5*8(%rsp)
	movq %rbp,4*8(%rsp)
	movq %r12,3*8(%rsp)
	movq %r13,2*8(%rsp)
	movq %r14,1*8(%rsp)
	movq %r15,(%rsp)
	movq %rsp,%rdi			# set the argument again
	jmp  11b
ecrit:  /**** END OF CRITICAL REGION ****/

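/*
 * Decide how to return: after the shift and masks, %al holds
 * saved_rflags.IF AND the current event mask.  Non-zero means the
 * interrupted context wants interrupts but event delivery is
 * blocked, so return via restore_all_enable_events; otherwise
 * restore registers and iret directly.
 */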
retint_kernel:
retint_restore_args:
	movl EFLAGS-6*8(%rsp), %eax
	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
	XEN_GET_VCPU_INFO(%rsi)
	andb evtchn_upcall_mask(%rsi),%al
	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
	jnz restore_all_enable_events	#        != 0 => enable event delivery
	XEN_PUT_VCPU_INFO(%rsi)

	RESTORE_ALL
	HYPERVISOR_IRET 0

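/*
 * Common exit path for exception and event handlers: restore the
 * callee-saved registers, block events so the pending/mask test in
 * retint_kernel cannot race with a fresh upcall, then fall into the
 * return logic above.
 */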
error_exit:
	movq (%rsp),%r15
	movq 1*8(%rsp),%r14
	movq 2*8(%rsp),%r13
	movq 3*8(%rsp),%r12
	movq 4*8(%rsp),%rbp
	movq 5*8(%rsp),%rbx
	addq $6*8,%rsp
	XEN_BLOCK_EVENTS(%rsi)
	jmp retint_kernel

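/*
 * Xen raises the failsafe callback when, e.g., it cannot restore the
 * guest's segment registers while returning to it.  This minimal
 * handler just discards the %rcx/%r11 that Xen pushed and retries
 * the iretq.
 */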
ENTRY(failsafe_callback)
        popq  %rcx
        popq  %r11
        iretq

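/*
 * Exception entry points.  Each one funnels through errorentry into
 * its do_* C handler; the trailing 0/1 tells the macro whether the
 * vector comes with a hardware error code.
 */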
ENTRY(coprocessor_error)
        errorentry do_coprocessor_error 0

ENTRY(simd_coprocessor_error)
        errorentry do_simd_coprocessor_error 0

ENTRY(device_not_available)
        errorentry do_device_not_available 0

ENTRY(debug)
        errorentry do_debug 0

ENTRY(int3)
        errorentry do_int3 0

ENTRY(overflow)
        errorentry do_overflow 0

ENTRY(bounds)
        errorentry do_bounds 0

ENTRY(invalid_op)
        errorentry do_invalid_op 0

ENTRY(coprocessor_segment_overrun)
        errorentry do_coprocessor_segment_overrun 0

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS 1

ENTRY(segment_not_present)
        errorentry do_segment_not_present 1

/* runs on exception stack */
ENTRY(stack_segment)
        errorentry do_stack_segment 1

ENTRY(general_protection)
        errorentry do_general_protection 1

ENTRY(alignment_check)
        errorentry do_alignment_check 1

ENTRY(divide_error)
        errorentry do_divide_error 0

ENTRY(spurious_interrupt_bug)
        errorentry do_spurious_interrupt_bug 0

ENTRY(page_fault)
        errorentry do_page_fault 1