--- vmx_support.S (264619)
+++ vmx_support.S (266339)
1/*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
-27 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_support.S 264619 2014-04-17 18:00:07Z jhb $
+27 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_support.S 266339 2014-05-17 19:11:08Z jhb $
28 */
29
30#include <machine/asmacros.h>
31
32#include "vmx_assym.s"
33
34#ifdef SMP
35#define LK lock ;
36#else
37#define LK
38#endif
39
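On SMP kernels the LK macro above expands to the x86 'lock' prefix, which makes the btsl/btrl instructions used further down to set and clear this CPU's bit in the guest pmap's pm_active set atomic with respect to other CPUs; on a uniprocessor kernel it expands to nothing. A minimal C-side sketch of the same operation, assuming pm_active is a cpuset_t and using the standard cpuset helpers (the function names are illustrative, not part of the vmm code):

#include <sys/param.h>
#include <sys/cpuset.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/* What "LK btsl %eax, PM_ACTIVE(%r11)" amounts to in C. */
static void
guest_pmap_activate_sketch(struct pmap *pmap, int cpuid)
{
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);	/* lock bts on SMP */
}

/* What "LK btrl %r10d, PM_ACTIVE(%r11)" amounts to in C. */
static void
guest_pmap_deactivate_sketch(struct pmap *pmap, int cpuid)
{
	CPU_CLR_ATOMIC(cpuid, &pmap->pm_active);	/* lock btr on SMP */
}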
40/*
41 * Assumes that %rdi holds a pointer to the 'vmxctx'.
42 *
43 * On "return" all registers are updated to reflect guest state. The two
44 * exceptions are %rip and %rsp. These registers are atomically switched
45 * by hardware from the guest area of the vmcs.
46 *
47 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
48 * host context in case of an error with 'vmlaunch' or 'vmresume'.
49 */
50#define VMX_GUEST_RESTORE \
51 movq %rdi,%rsp; \
52 movq VMXCTX_GUEST_CR2(%rdi),%rsi; \
53 movq %rsi,%cr2; \
54 movq VMXCTX_GUEST_RSI(%rdi),%rsi; \
55 movq VMXCTX_GUEST_RDX(%rdi),%rdx; \
56 movq VMXCTX_GUEST_RCX(%rdi),%rcx; \
57 movq VMXCTX_GUEST_R8(%rdi),%r8; \
58 movq VMXCTX_GUEST_R9(%rdi),%r9; \
59 movq VMXCTX_GUEST_RAX(%rdi),%rax; \
60 movq VMXCTX_GUEST_RBX(%rdi),%rbx; \
61 movq VMXCTX_GUEST_RBP(%rdi),%rbp; \
62 movq VMXCTX_GUEST_R10(%rdi),%r10; \
63 movq VMXCTX_GUEST_R11(%rdi),%r11; \
64 movq VMXCTX_GUEST_R12(%rdi),%r12; \
65 movq VMXCTX_GUEST_R13(%rdi),%r13; \
66 movq VMXCTX_GUEST_R14(%rdi),%r14; \
67 movq VMXCTX_GUEST_R15(%rdi),%r15; \
68 movq VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi the last */
69
70/*
71 * Save and restore the host context.
72 *
73 * Assumes that %rdi holds a pointer to the 'vmxctx'.
74 */
75#define VMX_HOST_SAVE(tmpreg) \
76 movq (%rsp), tmpreg; /* return address */ \
77 movq %r15, VMXCTX_HOST_R15(%rdi); \
78 movq %r14, VMXCTX_HOST_R14(%rdi); \
79 movq %r13, VMXCTX_HOST_R13(%rdi); \
80 movq %r12, VMXCTX_HOST_R12(%rdi); \
81 movq %rbp, VMXCTX_HOST_RBP(%rdi); \
82 movq %rsp, VMXCTX_HOST_RSP(%rdi); \
83 movq %rbx, VMXCTX_HOST_RBX(%rdi); \
84 movq tmpreg, VMXCTX_HOST_RIP(%rdi)
85
86#define VMX_HOST_RESTORE(tmpreg) \
87 movq VMXCTX_HOST_R15(%rdi), %r15; \
88 movq VMXCTX_HOST_R14(%rdi), %r14; \
89 movq VMXCTX_HOST_R13(%rdi), %r13; \
90 movq VMXCTX_HOST_R12(%rdi), %r12; \
91 movq VMXCTX_HOST_RBP(%rdi), %rbp; \
92 movq VMXCTX_HOST_RSP(%rdi), %rsp; \
93 movq VMXCTX_HOST_RBX(%rdi), %rbx; \
94 movq VMXCTX_HOST_RIP(%rdi), tmpreg; \
95 movq tmpreg, (%rsp) /* return address */
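The VMXCTX_* (and, with this change, VMX_*) symbols used throughout this file are not registers: they are byte offsets emitted into the generated "vmx_assym.s" included above, derived from the C structures in vmx.h. Roughly, the context these macros read and write looks like the sketch below; the field names are illustrative and the authoritative layout is the struct vmxctx definition in sys/amd64/vmm/intel/vmx.h.

/* Sketch only; see struct vmxctx in vmx.h for the real definition. */
struct vmxctx_sketch {
	/* Guest registers: loaded by VMX_GUEST_RESTORE, saved by vmx_exit_guest. */
	register_t	guest_rdi, guest_rsi, guest_rdx, guest_rcx;
	register_t	guest_r8,  guest_r9,  guest_rax, guest_rbx;
	register_t	guest_rbp, guest_r10, guest_r11, guest_r12;
	register_t	guest_r13, guest_r14, guest_r15, guest_cr2;

	/* Host callee-saved state: VMX_HOST_SAVE / VMX_HOST_RESTORE. */
	register_t	host_r15, host_r14, host_r13, host_r12;
	register_t	host_rbp, host_rsp, host_rbx, host_rip;

	int		inst_fail_status;	/* VMXCTX_INST_FAIL_STATUS */
	struct pmap	*pmap;			/* VMXCTX_PMAP */
};

Before this revision the EPTP and the per-CPU 'eptgen' generation array also lived in this per-vCPU context (VMXCTX_EPTP, VMXCTX_EPTGEN); the change moves them into the per-VM 'struct vmx' (VMX_EPTP, VMX_EPTGEN), which is why vmx_enter_guest() now also receives a pointer to the 'vmx' in %rsi.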
96
97/*
98 * vmx_enter_guest(struct vmxctx *vmxctx, int launched)
99 * %rdi: pointer to the 'vmxctx'
-100 * %esi: launch state of the VMCS
+100 * %rsi: pointer to the 'vmx'
+101 * %edx: launch state of the VMCS
102 * Interrupts must be disabled on entry.
103 */
104ENTRY(vmx_enter_guest)
105 /*
106 * Save host state before doing anything else.
107 */
108 VMX_HOST_SAVE(%r10)
109
110 /*
111 * Activate guest pmap on this cpu.
112 */
113 movq VMXCTX_PMAP(%rdi), %r11
114 movl PCPU(CPUID), %eax
115 LK btsl %eax, PM_ACTIVE(%r11)
116
117 /*
-117 * If 'vmxctx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
+118 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
119 * then we must invalidate all mappings associated with this EPTP.
120 */
121 movq PM_EPTGEN(%r11), %r10
-121 cmpq %r10, VMXCTX_EPTGEN(%rdi, %rax, 8)
+122 cmpq %r10, VMX_EPTGEN(%rsi, %rax, 8)
123 je guest_restore
124
-124 /* Refresh 'vmxctx->eptgen[curcpu]' */
-125 movq %r10, VMXCTX_EPTGEN(%rdi, %rax, 8)
+125 /* Refresh 'vmx->eptgen[curcpu]' */
+126 movq %r10, VMX_EPTGEN(%rsi, %rax, 8)
127
128 /* Setup the invept descriptor on the host stack */
129 mov %rsp, %r11
-129 movq VMXCTX_EPTP(%rdi), %rax
+130 movq VMX_EPTP(%rsi), %rax
131 movq %rax, -16(%r11)
132 movq $0x0, -8(%r11)
133 mov $0x1, %eax /* Single context invalidate */
134 invept -16(%r11), %rax
135 jbe invept_error /* Check invept instruction error */
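	/*
	 * The two quadwords stored above at -16(%r11) and -8(%r11) form the
	 * 128-bit invept descriptor defined in the Intel SDM: bits 63:0 hold
	 * the EPTP being invalidated and bits 127:64 are reserved and must be
	 * zero.  %eax = 1 selects a single-context invalidation of that EPTP.
	 * A failing invept reports its status the usual VMX way, by setting
	 * CF (VMfailInvalid) or ZF (VMfailValid), which is what 'jbe' tests.
	 */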
136
137guest_restore:
-137 cmpl $0, %esi
+138 cmpl $0, %edx
139 je do_launch
140
141 VMX_GUEST_RESTORE
142 vmresume
143 /*
144 * In the common case 'vmresume' returns back to the host through
145 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
146 *
147 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
148 */
149 movq %rsp, %rdi /* point %rdi back to 'vmxctx' */
150 movl $VMX_VMRESUME_ERROR, %eax
151 jmp decode_inst_error
152
153do_launch:
154 VMX_GUEST_RESTORE
155 vmlaunch
156 /*
157 * In the common case 'vmlaunch' returns back to the host through
158 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
159 *
160 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
161 */
162 movq %rsp, %rdi /* point %rdi back to 'vmxctx' */
163 movl $VMX_VMLAUNCH_ERROR, %eax
164 jmp decode_inst_error
165
166invept_error:
167 movl $VMX_INVEPT_ERROR, %eax
168 jmp decode_inst_error
169
170decode_inst_error:
171 movl $VM_FAIL_VALID, %r11d
172 jz inst_error
173 movl $VM_FAIL_INVALID, %r11d
174inst_error:
175 movl %r11d, VMXCTX_INST_FAIL_STATUS(%rdi)
176
177 /*
178 * The return value is already populated in %eax so we cannot use
179 * it as a scratch register beyond this point.
180 */
181
182 /*
183 * Deactivate guest pmap from this cpu.
184 */
185 movq VMXCTX_PMAP(%rdi), %r11
186 movl PCPU(CPUID), %r10d
187 LK btrl %r10d, PM_ACTIVE(%r11)
188
189 VMX_HOST_RESTORE(%r10)
190 ret
191END(vmx_enter_guest)
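Putting the prototype comment and the error paths together, the C-side contract of vmx_enter_guest() is: the caller passes the per-vCPU vmxctx, the per-VM vmx and the VMCS launch state, with interrupts disabled; VMX_GUEST_VMEXIT means the guest actually ran and came back through vmx_exit_guest, while the *_ERROR values mean vmlaunch, vmresume or invept itself failed, with inst_fail_status recording VM_FAIL_VALID (ZF set, an error number is available in the VMCS) or VM_FAIL_INVALID (CF set). A minimal sketch of a caller, assuming the vmm headers are in scope; the real loop is vmx_run() in vmx.c and does far more work per exit:

int	vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched);

static int
run_vcpu_sketch(struct vmx *vmx, struct vmxctx *ctx)
{
	int launched = 0, rc;

	for (;;) {
		disable_intr();			/* entry requires interrupts off */
		rc = vmx_enter_guest(ctx, vmx, launched);
		enable_intr();

		if (rc == VMX_GUEST_VMEXIT) {
			launched = 1;		/* VMCS is launched; vmresume from now on */
			/* ... read the exit reason from the VMCS and handle it ... */
			continue;
		}

		/*
		 * VMX_VMLAUNCH_ERROR, VMX_VMRESUME_ERROR or VMX_INVEPT_ERROR;
		 * ctx->inst_fail_status says whether it was VMfailValid or
		 * VMfailInvalid.
		 */
		return (rc);
	}
}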
192
193/*
194 * void vmx_exit_guest(void)
195 * %rsp points to the struct vmxctx
196 */
197ENTRY(vmx_exit_guest)
198 /*
199 * Save guest state that is not automatically saved in the vmcs.
200 */
201 movq %rdi,VMXCTX_GUEST_RDI(%rsp)
202 movq %rsi,VMXCTX_GUEST_RSI(%rsp)
203 movq %rdx,VMXCTX_GUEST_RDX(%rsp)
204 movq %rcx,VMXCTX_GUEST_RCX(%rsp)
205 movq %r8,VMXCTX_GUEST_R8(%rsp)
206 movq %r9,VMXCTX_GUEST_R9(%rsp)
207 movq %rax,VMXCTX_GUEST_RAX(%rsp)
208 movq %rbx,VMXCTX_GUEST_RBX(%rsp)
209 movq %rbp,VMXCTX_GUEST_RBP(%rsp)
210 movq %r10,VMXCTX_GUEST_R10(%rsp)
211 movq %r11,VMXCTX_GUEST_R11(%rsp)
212 movq %r12,VMXCTX_GUEST_R12(%rsp)
213 movq %r13,VMXCTX_GUEST_R13(%rsp)
214 movq %r14,VMXCTX_GUEST_R14(%rsp)
215 movq %r15,VMXCTX_GUEST_R15(%rsp)
216
217 movq %cr2,%rdi
218 movq %rdi,VMXCTX_GUEST_CR2(%rsp)
219
220 movq %rsp,%rdi
221
222 /*
223 * Deactivate guest pmap from this cpu.
224 */
225 movq VMXCTX_PMAP(%rdi), %r11
226 movl PCPU(CPUID), %r10d
227 LK btrl %r10d, PM_ACTIVE(%r11)
228
229 VMX_HOST_RESTORE(%r10)
230
231 /*
232 * This will return to the caller of 'vmx_enter_guest()' with a return
233 * value of VMX_GUEST_VMEXIT.
234 */
235 movl $VMX_GUEST_VMEXIT, %eax
236 ret
237END(vmx_exit_guest)
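Control reaches vmx_exit_guest because the VMCS host-state area is programmed so that every VM exit resumes the host at this label with %rsp already pointing at the vmxctx; that is what lets the code above spill the guest registers relative to %rsp before VMX_HOST_RESTORE switches back to the real host stack and returns VMX_GUEST_VMEXIT to the caller of vmx_enter_guest(). A sketch of the setup this implies, assuming the vmcs_write() helper and the VMCS_HOST_RIP/VMCS_HOST_RSP field encodings from vmcs.h, and with vmx->ctx[vcpuid] standing in for however the per-vCPU context is actually reached (the real writes live in vmcs.c/vmx.c):

extern void	vmx_exit_guest(void);

/* Sketch: point VM exits at vmx_exit_guest with %rsp = the vcpu's vmxctx. */
static void
vmcs_host_state_sketch(struct vmx *vmx, int vcpuid)
{
	vmcs_write(VMCS_HOST_RIP, (uint64_t)(uintptr_t)vmx_exit_guest);
	vmcs_write(VMCS_HOST_RSP, (uint64_t)(uintptr_t)&vmx->ctx[vcpuid]);
}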
238
239/*
240 * %rdi = interrupt handler entry point
241 *
242 * Calling sequence described in the "Instruction Set Reference" for the "INT"
243 * instruction in Intel SDM, Vol 2.
244 */
245ENTRY(vmx_call_isr)
246 mov %rsp, %r11 /* save %rsp */
247 and $~0xf, %rsp /* align on 16-byte boundary */
248 pushq $KERNEL_SS /* %ss */
249 pushq %r11 /* %rsp */
250 pushfq /* %rflags */
251 pushq $KERNEL_CS /* %cs */
252 cli /* disable interrupts */
253 callq *%rdi /* push %rip and call isr */
254 ret
255END(vmx_call_isr)
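vmx_call_isr() lets C code run a host interrupt handler as if the interrupt had arrived through the IDT: the pushed %ss/%rsp/%rflags/%cs plus the return address pushed by callq reproduce the stack frame a hardware interrupt gate would build, interrupts are disabled as a real gate would do, and the handler's iretq therefore unwinds cleanly back to the ret. In bhyve this is used when a VM exit has already consumed a host interrupt (acknowledge-on-exit), so the ISR still has to be dispatched by hand. A sketch of such a caller, assuming the amd64 gate-descriptor layout from <machine/segments.h>; the helper name and the IDT lookup are illustrative:

#include <sys/param.h>
#include <machine/segments.h>

void	vmx_call_isr(uintptr_t entry);

/* Illustrative: dispatch the host ISR for 'vector' through vmx_call_isr(). */
static void
call_host_isr_sketch(int vector)
{
	struct gate_descriptor *gd;
	uintptr_t entry;

	gd = &idt[vector];		/* host IDT entry for this vector */
	entry = ((uintptr_t)gd->gd_hioffset << 16) | gd->gd_looffset;
	vmx_call_isr(entry);
}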