/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/intel/vmx_support.S 245678 2013-01-20 03:42:49Z neel $
 */

#include <machine/asmacros.h>

#include "vmx_assym.s"

/*
 * Disable interrupts before updating %rsp in VMX_CHECK_AST or
 * VMX_GUEST_RESTORE.
 *
 * The location that %rsp points to is a 'vmxctx' and not a real stack,
 * so we don't want an interrupt handler to trash it.
 */
#define	VMX_DISABLE_INTERRUPTS		cli

/*
 * If the thread hosting the vcpu has an AST pending then take care of it
 * by returning from vmx_setjmp() with a return value of VMX_RETURN_AST.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx' and that interrupts
 * are disabled.
 */
#define	VMX_CHECK_AST							\
	movq	PCPU(CURTHREAD),%rax;					\
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax);	\
	je	9f;							\
	movq	$VMX_RETURN_AST,%rsi;					\
	movq	%rdi,%rsp;						\
	addq	$VMXCTX_TMPSTKTOP,%rsp;					\
	callq	vmx_return;						\
9:
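
/*
 * Roughly what VMX_CHECK_AST does, as a hedged C sketch. The context
 * pointer held in %rdi is shown as 'vmxctx'; the switch onto the
 * context's temporary stack is shown as an annotation:
 *
 *	if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
 *		// %rsp <- (char *)vmxctx + VMXCTX_TMPSTKTOP (temp stack)
 *		vmx_return(vmxctx, VMX_RETURN_AST);  // unwinds to vmx_setjmp()
 *	}
 */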

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */
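
/*
 * The VMXCTX_* symbols used above are byte offsets into 'struct vmxctx',
 * generated into "vmx_assym.s" at build time. A hedged sketch of how one
 * such offset is produced, following FreeBSD's genassym pattern (the
 * field name 'guest_rdi' is an assumption here; the real definitions live
 * in the vmx headers and genassym source):
 *
 *	#include <sys/assym.h>
 *	ASSYM(VMXCTX_GUEST_RDI, offsetof(struct vmxctx, guest_rdi));
 *	// ...one ASSYM() entry per field referenced from this file
 */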

#define	VM_INSTRUCTION_ERROR(reg)					\
	jnc 	1f;							\
	movl 	$VM_FAIL_INVALID,reg;		/* CF is set */		\
	jmp 	3f;							\
1:	jnz 	2f;							\
	movl 	$VM_FAIL_VALID,reg;		/* ZF is set */		\
	jmp 	3f;							\
2:	movl 	$VM_SUCCESS,reg;					\
3:	movl	reg,VMXCTX_LAUNCH_ERROR(%rsp)
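
/*
 * Per the Intel SDM, a failed vmlaunch/vmresume reports its status in
 * RFLAGS: CF=1 means VMfailInvalid (no current VMCS) and ZF=1 means
 * VMfailValid (the VMCS "VM-instruction error" field holds the reason).
 * A hedged C sketch of how a caller might decode the value stashed above
 * (vmcs_instruction_error() is an illustrative accessor, not necessarily
 * the real one):
 *
 *	switch (vmxctx->launch_error) {
 *	case VM_FAIL_INVALID:	// CF was set: no current VMCS
 *		break;
 *	case VM_FAIL_VALID:	// ZF was set: ask the VMCS for the reason
 *		reason = vmcs_instruction_error();
 *		break;
 *	case VM_SUCCESS:	// neither flag set: should not happen here
 *		break;
 *	}
 */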

	.text
/*
 * int vmx_setjmp(ctxp)
 * %rdi = ctxp
 *
 * Return value is '0' (VMX_RETURN_DIRECT) when it returns directly from
 * here. Return value is '1' (VMX_RETURN_LONGJMP) when it returns after a
 * vm exit through vmx_longjmp.
 */
ENTRY(vmx_setjmp)
	movq	(%rsp),%rax			/* return address */
	movq    %r15,VMXCTX_HOST_R15(%rdi)
	movq    %r14,VMXCTX_HOST_R14(%rdi)
	movq    %r13,VMXCTX_HOST_R13(%rdi)
	movq    %r12,VMXCTX_HOST_R12(%rdi)
	movq    %rbp,VMXCTX_HOST_RBP(%rdi)
	movq    %rsp,VMXCTX_HOST_RSP(%rdi)
	movq    %rbx,VMXCTX_HOST_RBX(%rdi)
	movq    %rax,VMXCTX_HOST_RIP(%rdi)

	/*
	 * XXX save host debug registers
	 */
	movl	$VMX_RETURN_DIRECT,%eax
	ret
END(vmx_setjmp)
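
/*
 * A hedged sketch of how the C side is expected to drive these routines.
 * The VMX_RETURN_* codes and the functions defined in this file are real;
 * the dispatch structure below is illustrative:
 *
 *	switch (vmx_setjmp(vmxctx)) {
 *	case VMX_RETURN_DIRECT:		// 0: returned straight from here
 *		vmx_launch(vmxctx);	// enter guest; unwinds via vmx_setjmp()
 *		break;
 *	case VMX_RETURN_LONGJMP:	// 1: a vmexit landed in vmx_longjmp()
 *		// handle the exit; re-enter later with vmx_resume()
 *		break;
 *	case VMX_RETURN_AST:		// pending AST noticed before entry
 *		break;
 *	case VMX_RETURN_VMRESUME:	// 2: vmresume itself failed
 *	case VMX_RETURN_VMLAUNCH:	// 3: vmlaunch itself failed
 *		// consult vmxctx->launch_error for the reason
 *		break;
 *	}
 */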

/*
 * void vmx_return(struct vmxctx *ctxp, int retval)
 * %rdi = ctxp
 * %rsi = retval
 * Return to vmm context through vmx_setjmp() with a value of 'retval'.
 */
ENTRY(vmx_return)
	/* Restore host context. */
	movq	VMXCTX_HOST_R15(%rdi),%r15
	movq	VMXCTX_HOST_R14(%rdi),%r14
	movq	VMXCTX_HOST_R13(%rdi),%r13
	movq	VMXCTX_HOST_R12(%rdi),%r12
	movq	VMXCTX_HOST_RBP(%rdi),%rbp
	movq	VMXCTX_HOST_RSP(%rdi),%rsp
	movq	VMXCTX_HOST_RBX(%rdi),%rbx
	movq	VMXCTX_HOST_RIP(%rdi),%rax
	movq	%rax,(%rsp)			/* return address */

	/*
	 * XXX restore host debug registers
	 */
	movl	%esi,%eax
	ret
END(vmx_return)

/*
 * void vmx_longjmp(void)
 * %rsp points to the struct vmxctx
 */
ENTRY(vmx_longjmp)
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)

	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp,%rdi
	movq	$VMX_RETURN_LONGJMP,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_longjmp)
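
/*
 * Nothing calls vmx_longjmp() directly; the CPU starts here on a vmexit
 * because the VMCS host-state area points at it. A hedged sketch of that
 * setup (vmcs_write() and the exact field names are assumptions; the real
 * setup lives in the vmcs code):
 *
 *	vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_longjmp);
 *	vmcs_write(VMCS_HOST_RSP, (uint64_t)vmxctx);  // why %rsp == ctxp here
 */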

/*
 * void vmx_resume(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void', this function may return indirectly
 * through vmx_setjmp() with a return value of VMX_RETURN_VMRESUME (2).
 */
ENTRY(vmx_resume)
	VMX_DISABLE_INTERRUPTS

	VMX_CHECK_AST

	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmresume

	/*
	 * Capture the reason why vmresume failed.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMRESUME,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_resume)

/*
 * void vmx_launch(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void', this function may return indirectly
 * through vmx_setjmp() with a return value of VMX_RETURN_VMLAUNCH (3).
 */
ENTRY(vmx_launch)
	VMX_DISABLE_INTERRUPTS

	VMX_CHECK_AST

	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmlaunch

	/*
	 * Capture the reason why vmlaunch failed.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMLAUNCH,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_launch)

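/*
 * Why two entry points: the VMX architecture requires 'vmlaunch' the
 * first time a VMCS is entered after vmclear and 'vmresume' on subsequent
 * entries. A hedged sketch of a caller tracking that (the 'launched' flag
 * is illustrative):
 *
 *	if (launched)
 *		vmx_resume(vmxctx);	// VMCS already in "launched" state
 *	else
 *		vmx_launch(vmxctx);	// first entry after vmclear
 */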