/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.s"		/* generated VMXCTX_* structure offsets */

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	/*								\
	 * Disable interrupts before updating %rsp. The location that	\
	 * %rsp points to is a 'vmxctx' and not a real stack, so we	\
	 * don't want an interrupt handler to trash it.			\
	 */								\
	cli;								\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi;	/* restore %rdi last */

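/*
 * Record the outcome of a VMX instruction in 'reg', following the Intel
 * SDM convention: CF set means VMfailInvalid, ZF set means VMfailValid
 * (with an error number available in the VM-instruction error field),
 * and neither flag set means success. %rsp is expected to point at the
 * 'vmxctx' when this macro runs.
 */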
#define	VM_INSTRUCTION_ERROR(reg)					\
	jnc 	1f;							\
	movl 	$VM_FAIL_INVALID,reg;		/* CF is set */		\
	jmp 	3f;							\
1:	jnz 	2f;							\
	movl 	$VM_FAIL_VALID,reg;		/* ZF is set */		\
	jmp 	3f;							\
2:	movl 	$VM_SUCCESS,reg;					\
3:	movl	reg,VMXCTX_LAUNCH_ERROR(%rsp)

	.text
/*
 * int vmx_setjmp(ctxp)
 * %rdi = ctxp
 *
 * Returns VMX_RETURN_DIRECT (0) when it returns directly from here.
 * Returns VMX_RETURN_LONGJMP (1) when it returns after a vm exit through
 * vmx_longjmp, and VMX_RETURN_VMRESUME (2) or VMX_RETURN_VMLAUNCH (3)
 * when 'vmresume' or 'vmlaunch' fails (see vmx_resume/vmx_launch below).
 */
ENTRY(vmx_setjmp)
	movq	(%rsp),%rax			/* return address */
	movq	%r15,VMXCTX_HOST_R15(%rdi)
	movq	%r14,VMXCTX_HOST_R14(%rdi)
	movq	%r13,VMXCTX_HOST_R13(%rdi)
	movq	%r12,VMXCTX_HOST_R12(%rdi)
	movq	%rbp,VMXCTX_HOST_RBP(%rdi)
	movq	%rsp,VMXCTX_HOST_RSP(%rdi)
	movq	%rbx,VMXCTX_HOST_RBX(%rdi)
	movq	%rax,VMXCTX_HOST_RIP(%rdi)

	/*
	 * XXX save host debug registers
	 */
	movl	$VMX_RETURN_DIRECT,%eax
	ret
END(vmx_setjmp)
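
/*
 * A minimal sketch of how a caller might drive these routines, in the
 * spirit of setjmp/longjmp. The caller shown here is hypothetical and
 * only illustrates the contract; 'launched' is an assumed flag, not
 * part of this file:
 *
 *	int rc = vmx_setjmp(vmxctx);
 *	if (rc == VMX_RETURN_DIRECT) {
 *		if (launched)
 *			vmx_resume(vmxctx);	// does not return on success
 *		else
 *			vmx_launch(vmxctx);	// does not return on success
 *	}
 *	// Otherwise rc is VMX_RETURN_LONGJMP after a vm exit, or
 *	// VMX_RETURN_VMRESUME/VMX_RETURN_VMLAUNCH on a failed entry.
 */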

/*
 * void vmx_return(struct vmxctx *ctxp, int retval)
 * %rdi = ctxp
 * %rsi = retval
 * Return to vmm context through vmx_setjmp() with a value of 'retval'.
 */
ENTRY(vmx_return)
	/* Restore host context. */
	movq	VMXCTX_HOST_R15(%rdi),%r15
	movq	VMXCTX_HOST_R14(%rdi),%r14
	movq	VMXCTX_HOST_R13(%rdi),%r13
	movq	VMXCTX_HOST_R12(%rdi),%r12
	movq	VMXCTX_HOST_RBP(%rdi),%rbp
	movq	VMXCTX_HOST_RSP(%rdi),%rsp
	movq	VMXCTX_HOST_RBX(%rdi),%rbx

	/*
	 * Overwrite the return address on the restored host stack so that
	 * the 'ret' below resumes execution at vmx_setjmp's call site,
	 * making vmx_setjmp() appear to return a second time.
	 */
	movq	VMXCTX_HOST_RIP(%rdi),%rax
	movq	%rax,(%rsp)			/* return address */

	/*
	 * XXX restore host debug registers
	 */
	movl	%esi,%eax
	ret
END(vmx_return)

/*
 * void vmx_longjmp(void)
 * %rsp points to the struct vmxctx
 */
ENTRY(vmx_longjmp)
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)

	/*
	 * %cr2 cannot be stored to memory directly, so bounce it through
	 * %rdi; the guest %rdi was already saved above.
	 */
	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp,%rdi			/* 'vmxctx' argument */
	movq	$VMX_RETURN_LONGJMP,%rsi	/* 'retval' argument */

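	/*
	 * %rsp still points at the 'vmxctx', which is not a usable stack.
	 * Switch to the temporary stack embedded in the 'vmxctx' so the
	 * call below has a valid stack to push its return address onto.
	 */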
	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_longjmp)

/*
 * void vmx_resume(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void', this function may return indirectly
 * through vmx_setjmp() with a return value of VMX_RETURN_VMRESUME (2).
 */
ENTRY(vmx_resume)
	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmresume

	/*
	 * Execution only falls through to here if 'vmresume' failed;
	 * capture the reason why.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMRESUME,%rsi

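	/* Switch to the temporary stack in the 'vmxctx' (see vmx_longjmp). */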
	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_resume)

/*
 * void vmx_launch(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void', this function may return indirectly
 * through vmx_setjmp() with a return value of VMX_RETURN_VMLAUNCH (3).
 */
ENTRY(vmx_launch)
	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmlaunch

	/*
	 * Execution only falls through to here if 'vmlaunch' failed;
	 * capture the reason why.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMLAUNCH,%rsi

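	/* Switch to the temporary stack in the 'vmxctx' (see vmx_longjmp). */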
	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_launch)