vmx.c (276349) -> vmx.c (276403)
1/*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276349 2014-12-28 21:27:13Z neel $
26 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276403 2014-12-30 08:24:14Z neel $
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276349 2014-12-28 21:27:13Z neel $");
30__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276403 2014-12-30 08:24:14Z neel $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/smp.h>
35#include <sys/kernel.h>
36#include <sys/malloc.h>
37#include <sys/pcpu.h>
38#include <sys/proc.h>
39#include <sys/sysctl.h>
40
41#include <vm/vm.h>
42#include <vm/pmap.h>
43
44#include <machine/psl.h>
45#include <machine/cpufunc.h>
46#include <machine/md_var.h>
47#include <machine/segments.h>
48#include <machine/smp.h>
49#include <machine/specialreg.h>
50#include <machine/vmparam.h>
51
52#include <machine/vmm.h>
53#include <machine/vmm_dev.h>
54#include <machine/vmm_instruction_emul.h>
55#include "vmm_lapic.h"
56#include "vmm_host.h"
57#include "vmm_ioport.h"
58#include "vmm_ipi.h"
59#include "vmm_ktr.h"
60#include "vmm_stat.h"
61#include "vatpic.h"
62#include "vlapic.h"
63#include "vlapic_priv.h"
64
65#include "ept.h"
66#include "vmx_cpufunc.h"
67#include "vmx.h"
68#include "vmx_msr.h"
69#include "x86.h"
70#include "vmx_controls.h"
71
72#define PINBASED_CTLS_ONE_SETTING \
73 (PINBASED_EXTINT_EXITING | \
74 PINBASED_NMI_EXITING | \
75 PINBASED_VIRTUAL_NMI)
76#define PINBASED_CTLS_ZERO_SETTING 0
77
78#define PROCBASED_CTLS_WINDOW_SETTING \
79 (PROCBASED_INT_WINDOW_EXITING | \
80 PROCBASED_NMI_WINDOW_EXITING)
81
82#define PROCBASED_CTLS_ONE_SETTING \
83 (PROCBASED_SECONDARY_CONTROLS | \
84 PROCBASED_MWAIT_EXITING | \
85 PROCBASED_MONITOR_EXITING | \
86 PROCBASED_IO_EXITING | \
87 PROCBASED_MSR_BITMAPS | \
88 PROCBASED_CTLS_WINDOW_SETTING | \
89 PROCBASED_CR8_LOAD_EXITING | \
90 PROCBASED_CR8_STORE_EXITING)
91#define PROCBASED_CTLS_ZERO_SETTING \
92 (PROCBASED_CR3_LOAD_EXITING | \
93 PROCBASED_CR3_STORE_EXITING | \
94 PROCBASED_IO_BITMAPS)
95
96#define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT
97#define PROCBASED_CTLS2_ZERO_SETTING 0
98
99#define VM_EXIT_CTLS_ONE_SETTING \
100 (VM_EXIT_HOST_LMA | \
101 VM_EXIT_SAVE_EFER | \
102 VM_EXIT_LOAD_EFER | \
103 VM_EXIT_ACKNOWLEDGE_INTERRUPT | \
104 VM_EXIT_SAVE_PAT | \
105 VM_EXIT_LOAD_PAT)
106
107#define VM_EXIT_CTLS_ZERO_SETTING VM_EXIT_SAVE_DEBUG_CONTROLS
108
109#define VM_ENTRY_CTLS_ONE_SETTING (VM_ENTRY_LOAD_EFER | VM_ENTRY_LOAD_PAT)
110
111#define VM_ENTRY_CTLS_ZERO_SETTING \
112 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \
113 VM_ENTRY_INTO_SMM | \
114 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
115
116#define HANDLED 1
117#define UNHANDLED 0
118
119static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
120static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
121
122SYSCTL_DECL(_hw_vmm);
123SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
124
125int vmxon_enabled[MAXCPU];
126static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
127
128static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
129static uint32_t exit_ctls, entry_ctls;
130
131static uint64_t cr0_ones_mask, cr0_zeros_mask;
132SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
133 &cr0_ones_mask, 0, NULL);
134SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
135 &cr0_zeros_mask, 0, NULL);
136
137static uint64_t cr4_ones_mask, cr4_zeros_mask;
138SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
139 &cr4_ones_mask, 0, NULL);
140SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
141 &cr4_zeros_mask, 0, NULL);
142
143static int vmx_initialized;
144SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
145 &vmx_initialized, 0, "Intel VMX initialized");
146
147/*
148 * Optional capabilities
149 */
150static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
151
152static int cap_halt_exit;
153SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
154 "HLT triggers a VM-exit");
155
156static int cap_pause_exit;
157SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
158 0, "PAUSE triggers a VM-exit");
159
160static int cap_unrestricted_guest;
161SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
162 &cap_unrestricted_guest, 0, "Unrestricted guests");
163
164static int cap_monitor_trap;
165SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
166 &cap_monitor_trap, 0, "Monitor trap flag");
167
168static int cap_invpcid;
169SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
170 0, "Guests are allowed to use INVPCID");
171
172static int virtual_interrupt_delivery;
173SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
174 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
175
176static int posted_interrupts;
177SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
178 &posted_interrupts, 0, "APICv posted interrupt support");
179
180static int pirvec;
181SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
182 &pirvec, 0, "APICv posted interrupt vector");
183
184static struct unrhdr *vpid_unr;
185static u_int vpid_alloc_failed;
186SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
187 &vpid_alloc_failed, 0, NULL);
188
189/*
190 * Use the last page below 4GB as the APIC access address. This address is
191 * occupied by the boot firmware so it is guaranteed that it will not conflict
192 * with a page in system memory.
193 */
194#define APIC_ACCESS_ADDRESS 0xFFFFF000
195
196static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
197static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
198static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
199static void vmx_inject_pir(struct vlapic *vlapic);
200
201#ifdef KTR
202static const char *
203exit_reason_to_str(int reason)
204{
205 static char reasonbuf[32];
206
207 switch (reason) {
208 case EXIT_REASON_EXCEPTION:
209 return "exception";
210 case EXIT_REASON_EXT_INTR:
211 return "extint";
212 case EXIT_REASON_TRIPLE_FAULT:
213 return "triplefault";
214 case EXIT_REASON_INIT:
215 return "init";
216 case EXIT_REASON_SIPI:
217 return "sipi";
218 case EXIT_REASON_IO_SMI:
219 return "iosmi";
220 case EXIT_REASON_SMI:
221 return "smi";
222 case EXIT_REASON_INTR_WINDOW:
223 return "intrwindow";
224 case EXIT_REASON_NMI_WINDOW:
225 return "nmiwindow";
226 case EXIT_REASON_TASK_SWITCH:
227 return "taskswitch";
228 case EXIT_REASON_CPUID:
229 return "cpuid";
230 case EXIT_REASON_GETSEC:
231 return "getsec";
232 case EXIT_REASON_HLT:
233 return "hlt";
234 case EXIT_REASON_INVD:
235 return "invd";
236 case EXIT_REASON_INVLPG:
237 return "invlpg";
238 case EXIT_REASON_RDPMC:
239 return "rdpmc";
240 case EXIT_REASON_RDTSC:
241 return "rdtsc";
242 case EXIT_REASON_RSM:
243 return "rsm";
244 case EXIT_REASON_VMCALL:
245 return "vmcall";
246 case EXIT_REASON_VMCLEAR:
247 return "vmclear";
248 case EXIT_REASON_VMLAUNCH:
249 return "vmlaunch";
250 case EXIT_REASON_VMPTRLD:
251 return "vmptrld";
252 case EXIT_REASON_VMPTRST:
253 return "vmptrst";
254 case EXIT_REASON_VMREAD:
255 return "vmread";
256 case EXIT_REASON_VMRESUME:
257 return "vmresume";
258 case EXIT_REASON_VMWRITE:
259 return "vmwrite";
260 case EXIT_REASON_VMXOFF:
261 return "vmxoff";
262 case EXIT_REASON_VMXON:
263 return "vmxon";
264 case EXIT_REASON_CR_ACCESS:
265 return "craccess";
266 case EXIT_REASON_DR_ACCESS:
267 return "draccess";
268 case EXIT_REASON_INOUT:
269 return "inout";
270 case EXIT_REASON_RDMSR:
271 return "rdmsr";
272 case EXIT_REASON_WRMSR:
273 return "wrmsr";
274 case EXIT_REASON_INVAL_VMCS:
275 return "invalvmcs";
276 case EXIT_REASON_INVAL_MSR:
277 return "invalmsr";
278 case EXIT_REASON_MWAIT:
279 return "mwait";
280 case EXIT_REASON_MTF:
281 return "mtf";
282 case EXIT_REASON_MONITOR:
283 return "monitor";
284 case EXIT_REASON_PAUSE:
285 return "pause";
286 case EXIT_REASON_MCE:
287 return "mce";
286 case EXIT_REASON_MCE_DURING_ENTRY:
287 return "mce-during-entry";
288 case EXIT_REASON_TPR:
289 return "tpr";
290 case EXIT_REASON_APIC_ACCESS:
291 return "apic-access";
292 case EXIT_REASON_GDTR_IDTR:
293 return "gdtridtr";
294 case EXIT_REASON_LDTR_TR:
295 return "ldtrtr";
296 case EXIT_REASON_EPT_FAULT:
297 return "eptfault";
298 case EXIT_REASON_EPT_MISCONFIG:
299 return "eptmisconfig";
300 case EXIT_REASON_INVEPT:
301 return "invept";
302 case EXIT_REASON_RDTSCP:
303 return "rdtscp";
304 case EXIT_REASON_VMX_PREEMPT:
305 return "vmxpreempt";
306 case EXIT_REASON_INVVPID:
307 return "invvpid";
308 case EXIT_REASON_WBINVD:
309 return "wbinvd";
310 case EXIT_REASON_XSETBV:
311 return "xsetbv";
312 case EXIT_REASON_APIC_WRITE:
313 return "apic-write";
314 default:
315 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
316 return (reasonbuf);
317 }
318}
319#endif /* KTR */
320
321static int
322vmx_allow_x2apic_msrs(struct vmx *vmx)
323{
324 int i, error;
325
326 error = 0;
327
328 /*
329 * Allow readonly access to the following x2APIC MSRs from the guest.
330 */
331 error += guest_msr_ro(vmx, MSR_APIC_ID);
332 error += guest_msr_ro(vmx, MSR_APIC_VERSION);
333 error += guest_msr_ro(vmx, MSR_APIC_LDR);
334 error += guest_msr_ro(vmx, MSR_APIC_SVR);
335
336 for (i = 0; i < 8; i++)
337 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
338
339 for (i = 0; i < 8; i++)
340 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
341
342 for (i = 0; i < 8; i++)
343 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
344
345 error += guest_msr_ro(vmx, MSR_APIC_ESR);
346 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
347 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
348 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
349 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
350 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
351 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
352 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
353 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
354 error += guest_msr_ro(vmx, MSR_APIC_ICR);
355
356 /*
357 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
358 *
359 * These registers get special treatment described in the section
360 * "Virtualizing MSR-Based APIC Accesses".
361 */
362 error += guest_msr_rw(vmx, MSR_APIC_TPR);
363 error += guest_msr_rw(vmx, MSR_APIC_EOI);
364 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
365
366 return (error);
367}
368
369u_long
370vmx_fix_cr0(u_long cr0)
371{
372
373 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
374}
375
376u_long
377vmx_fix_cr4(u_long cr4)
378{
379
380 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
381}
382
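A quick illustration of the fixed-bit logic behind vmx_fix_cr0()/vmx_fix_cr4() above (the masks are computed from the IA32_VMX_CR0/CR4_FIXED0/FIXED1 MSRs in vmx_init() below; the values here are hypothetical but typical, not taken from this commit):

	/*
	 * A CR0 bit that is 1 in both FIXED0 and FIXED1 must be 1 while VMX
	 * is enabled; a bit that is 0 in both must be 0; bits where the two
	 * MSRs differ are flexible.  For example, with
	 *
	 *	fixed0 = 0x80000021		(CR0_PG | CR0_NE | CR0_PE)
	 *	fixed1 = 0x00000000ffffffff
	 *
	 *	cr0_ones_mask  = fixed0 &  fixed1 = 0x80000021
	 *	cr0_zeros_mask = ~fixed0 & ~fixed1 = 0xffffffff00000000
	 *
	 * so vmx_fix_cr0(CR0_PE) = (CR0_PE | cr0_ones_mask) & ~cr0_zeros_mask
	 * yields 0x80000021: PG and NE are forced on and the reserved upper
	 * bits are forced off.
	 */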
383static void
384vpid_free(int vpid)
385{
386 if (vpid < 0 || vpid > 0xffff)
387 panic("vpid_free: invalid vpid %d", vpid);
388
389 /*
390 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
391 * the unit number allocator.
392 */
393
394 if (vpid > VM_MAXCPU)
395 free_unr(vpid_unr, vpid);
396}
397
398static void
399vpid_alloc(uint16_t *vpid, int num)
400{
401 int i, x;
402
403 if (num <= 0 || num > VM_MAXCPU)
404 panic("invalid number of vpids requested: %d", num);
405
406 /*
407 * If the "enable vpid" execution control is not enabled then the
408 * VPID is required to be 0 for all vcpus.
409 */
410 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
411 for (i = 0; i < num; i++)
412 vpid[i] = 0;
413 return;
414 }
415
416 /*
417 * Allocate a unique VPID for each vcpu from the unit number allocator.
418 */
419 for (i = 0; i < num; i++) {
420 x = alloc_unr(vpid_unr);
421 if (x == -1)
422 break;
423 else
424 vpid[i] = x;
425 }
426
427 if (i < num) {
428 atomic_add_int(&vpid_alloc_failed, 1);
429
430 /*
431 * If the unit number allocator does not have enough unique
432 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
433 *
 434 * These VPIDs are not unique across VMs but this does not
435 * affect correctness because the combined mappings are also
436 * tagged with the EP4TA which is unique for each VM.
437 *
438 * It is still sub-optimal because the invvpid will invalidate
439 * combined mappings for a particular VPID across all EP4TAs.
440 */
441 while (i-- > 0)
442 vpid_free(vpid[i]);
443
444 for (i = 0; i < num; i++)
445 vpid[i] = i + 1;
446 }
447}
448
449static void
450vpid_init(void)
451{
452 /*
453 * VPID 0 is required when the "enable VPID" execution control is
454 * disabled.
455 *
456 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
457 * unit number allocator does not have sufficient unique VPIDs to
458 * satisfy the allocation.
459 *
460 * The remaining VPIDs are managed by the unit number allocator.
461 */
462 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
463}
464
465static void
466vmx_disable(void *arg __unused)
467{
468 struct invvpid_desc invvpid_desc = { 0 };
469 struct invept_desc invept_desc = { 0 };
470
471 if (vmxon_enabled[curcpu]) {
472 /*
473 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
474 *
475 * VMXON or VMXOFF are not required to invalidate any TLB
476 * caching structures. This prevents potential retention of
477 * cached information in the TLB between distinct VMX episodes.
478 */
479 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
480 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
481 vmxoff();
482 }
483 load_cr4(rcr4() & ~CR4_VMXE);
484}
485
486static int
487vmx_cleanup(void)
488{
489
490 if (pirvec != 0)
491 vmm_ipi_free(pirvec);
492
493 if (vpid_unr != NULL) {
494 delete_unrhdr(vpid_unr);
495 vpid_unr = NULL;
496 }
497
498 smp_rendezvous(NULL, vmx_disable, NULL, NULL);
499
500 return (0);
501}
502
503static void
504vmx_enable(void *arg __unused)
505{
506 int error;
507 uint64_t feature_control;
508
509 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
510 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
511 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
512 wrmsr(MSR_IA32_FEATURE_CONTROL,
513 feature_control | IA32_FEATURE_CONTROL_VMX_EN |
514 IA32_FEATURE_CONTROL_LOCK);
515 }
516
517 load_cr4(rcr4() | CR4_VMXE);
518
519 *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
520 error = vmxon(vmxon_region[curcpu]);
521 if (error == 0)
522 vmxon_enabled[curcpu] = 1;
523}
524
525static void
526vmx_restore(void)
527{
528
529 if (vmxon_enabled[curcpu])
530 vmxon(vmxon_region[curcpu]);
531}
532
533static int
534vmx_init(int ipinum)
535{
536 int error, use_tpr_shadow;
537 uint64_t basic, fixed0, fixed1, feature_control;
538 uint32_t tmp, procbased2_vid_bits;
539
540 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
541 if (!(cpu_feature2 & CPUID2_VMX)) {
542 printf("vmx_init: processor does not support VMX operation\n");
543 return (ENXIO);
544 }
545
546 /*
547 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
548 * are set (bits 0 and 2 respectively).
549 */
550 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
551 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
552 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
553 printf("vmx_init: VMX operation disabled by BIOS\n");
554 return (ENXIO);
555 }
556
557 /*
558 * Verify capabilities MSR_VMX_BASIC:
559 * - bit 54 indicates support for INS/OUTS decoding
560 */
561 basic = rdmsr(MSR_VMX_BASIC);
562 if ((basic & (1UL << 54)) == 0) {
563 printf("vmx_init: processor does not support desired basic "
564 "capabilities\n");
565 return (EINVAL);
566 }
567
568 /* Check support for primary processor-based VM-execution controls */
569 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
570 MSR_VMX_TRUE_PROCBASED_CTLS,
571 PROCBASED_CTLS_ONE_SETTING,
572 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
573 if (error) {
574 printf("vmx_init: processor does not support desired primary "
575 "processor-based controls\n");
576 return (error);
577 }
578
579 /* Clear the processor-based ctl bits that are set on demand */
580 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
581
582 /* Check support for secondary processor-based VM-execution controls */
583 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
584 MSR_VMX_PROCBASED_CTLS2,
585 PROCBASED_CTLS2_ONE_SETTING,
586 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
587 if (error) {
588 printf("vmx_init: processor does not support desired secondary "
589 "processor-based controls\n");
590 return (error);
591 }
592
593 /* Check support for VPID */
594 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
595 PROCBASED2_ENABLE_VPID, 0, &tmp);
596 if (error == 0)
597 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
598
599 /* Check support for pin-based VM-execution controls */
600 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
601 MSR_VMX_TRUE_PINBASED_CTLS,
602 PINBASED_CTLS_ONE_SETTING,
603 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
604 if (error) {
605 printf("vmx_init: processor does not support desired "
606 "pin-based controls\n");
607 return (error);
608 }
609
610 /* Check support for VM-exit controls */
611 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
612 VM_EXIT_CTLS_ONE_SETTING,
613 VM_EXIT_CTLS_ZERO_SETTING,
614 &exit_ctls);
615 if (error) {
616 printf("vmx_init: processor does not support desired "
617 "exit controls\n");
618 return (error);
619 }
620
621 /* Check support for VM-entry controls */
622 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
623 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
624 &entry_ctls);
625 if (error) {
626 printf("vmx_init: processor does not support desired "
627 "entry controls\n");
628 return (error);
629 }
630
631 /*
632 * Check support for optional features by testing them
633 * as individual bits
634 */
635 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
636 MSR_VMX_TRUE_PROCBASED_CTLS,
637 PROCBASED_HLT_EXITING, 0,
638 &tmp) == 0);
639
640 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
641 MSR_VMX_PROCBASED_CTLS,
642 PROCBASED_MTF, 0,
643 &tmp) == 0);
644
645 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
646 MSR_VMX_TRUE_PROCBASED_CTLS,
647 PROCBASED_PAUSE_EXITING, 0,
648 &tmp) == 0);
649
650 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
651 MSR_VMX_PROCBASED_CTLS2,
652 PROCBASED2_UNRESTRICTED_GUEST, 0,
653 &tmp) == 0);
654
655 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
656 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
657 &tmp) == 0);
658
659 /*
660 * Check support for virtual interrupt delivery.
661 */
662 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
663 PROCBASED2_VIRTUALIZE_X2APIC_MODE |
664 PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
665 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
666
667 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
668 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
669 &tmp) == 0);
670
671 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
672 procbased2_vid_bits, 0, &tmp);
673 if (error == 0 && use_tpr_shadow) {
674 virtual_interrupt_delivery = 1;
675 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
676 &virtual_interrupt_delivery);
677 }
678
679 if (virtual_interrupt_delivery) {
680 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
681 procbased_ctls2 |= procbased2_vid_bits;
682 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
683
684 /*
685 * No need to emulate accesses to %CR8 if virtual
686 * interrupt delivery is enabled.
687 */
688 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
689 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
690
691 /*
692 * Check for Posted Interrupts only if Virtual Interrupt
693 * Delivery is enabled.
694 */
695 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
696 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
697 &tmp);
698 if (error == 0) {
699 pirvec = vmm_ipi_alloc();
700 if (pirvec == 0) {
701 if (bootverbose) {
702 printf("vmx_init: unable to allocate "
703 "posted interrupt vector\n");
704 }
705 } else {
706 posted_interrupts = 1;
707 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
708 &posted_interrupts);
709 }
710 }
711 }
712
713 if (posted_interrupts)
714 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
715
716 /* Initialize EPT */
717 error = ept_init(ipinum);
718 if (error) {
719 printf("vmx_init: ept initialization failed (%d)\n", error);
720 return (error);
721 }
722
723 /*
724 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
725 */
726 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
727 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
728 cr0_ones_mask = fixed0 & fixed1;
729 cr0_zeros_mask = ~fixed0 & ~fixed1;
730
731 /*
732 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
733 * if unrestricted guest execution is allowed.
734 */
735 if (cap_unrestricted_guest)
736 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
737
738 /*
739 * Do not allow the guest to set CR0_NW or CR0_CD.
740 */
741 cr0_zeros_mask |= (CR0_NW | CR0_CD);
742
743 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
744 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
745 cr4_ones_mask = fixed0 & fixed1;
746 cr4_zeros_mask = ~fixed0 & ~fixed1;
747
748 vpid_init();
749
750 vmx_msr_init();
751
752 /* enable VMX operation */
753 smp_rendezvous(NULL, vmx_enable, NULL, NULL);
754
755 vmx_initialized = 1;
756
757 return (0);
758}
759
760static void
761vmx_trigger_hostintr(int vector)
762{
763 uintptr_t func;
764 struct gate_descriptor *gd;
765
766 gd = &idt[vector];
767
768 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
769 "invalid vector %d", vector));
770 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
771 vector));
772 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
773 "has invalid type %d", vector, gd->gd_type));
774 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
775 "has invalid dpl %d", vector, gd->gd_dpl));
776 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
777 "for vector %d has invalid selector %d", vector, gd->gd_selector));
778 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
779 "IST %d", vector, gd->gd_ist));
780
781 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
782 vmx_call_isr(func);
783}
784
785static int
786vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
787{
788 int error, mask_ident, shadow_ident;
789 uint64_t mask_value;
790
791 if (which != 0 && which != 4)
792 panic("vmx_setup_cr_shadow: unknown cr%d", which);
793
794 if (which == 0) {
795 mask_ident = VMCS_CR0_MASK;
796 mask_value = cr0_ones_mask | cr0_zeros_mask;
797 shadow_ident = VMCS_CR0_SHADOW;
798 } else {
799 mask_ident = VMCS_CR4_MASK;
800 mask_value = cr4_ones_mask | cr4_zeros_mask;
801 shadow_ident = VMCS_CR4_SHADOW;
802 }
803
804 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
805 if (error)
806 return (error);
807
808 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
809 if (error)
810 return (error);
811
812 return (0);
813}
814#define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
815#define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
816
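For context, the shadowing set up by vmx_setup_cr_shadow() follows the standard VT-x rules: guest reads of CR0/CR4 bits that are set in the guest/host mask return the read-shadow value, and guest writes that would change a masked bit cause a VM-exit, so every bit in cr0_ones_mask | cr0_zeros_mask remains under host control. The initial shadow value 0x60000010 programmed by vmx_vminit() below is simply the architectural reset value of CR0:

	/* 0x60000010 == CR0_CD | CR0_NW | CR0_ET (CR0 value after reset) */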
817static void *
818vmx_vminit(struct vm *vm, pmap_t pmap)
819{
820 uint16_t vpid[VM_MAXCPU];
821 int i, error;
822 struct vmx *vmx;
823 struct vmcs *vmcs;
824 uint32_t exc_bitmap;
824
825 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
826 if ((uintptr_t)vmx & PAGE_MASK) {
827 panic("malloc of struct vmx not aligned on %d byte boundary",
828 PAGE_SIZE);
829 }
830 vmx->vm = vm;
831
832 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
833
834 /*
835 * Clean up EPTP-tagged guest physical and combined mappings
836 *
837 * VMX transitions are not required to invalidate any guest physical
838 * mappings. So, it may be possible for stale guest physical mappings
839 * to be present in the processor TLBs.
840 *
841 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
842 */
843 ept_invalidate_mappings(vmx->eptp);
844
845 msr_bitmap_initialize(vmx->msr_bitmap);
846
847 /*
848 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
849 * The guest FSBASE and GSBASE are saved and restored during
850 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
851 * always restored from the vmcs host state area on vm-exit.
852 *
853 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
854 * how they are saved/restored so can be directly accessed by the
855 * guest.
856 *
857 * MSR_EFER is saved and restored in the guest VMCS area on a
858 * VM exit and entry respectively. It is also restored from the
859 * host VMCS area on a VM exit.
860 *
 861 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
862 * and entry respectively. It is also restored from the host VMCS
863 * area on a VM exit.
864 *
865 * The TSC MSR is exposed read-only. Writes are disallowed as that
866 * will impact the host TSC.
867 * XXX Writes would be implemented with a wrmsr trap, and
868 * then modifying the TSC offset in the VMCS.
869 */
870 if (guest_msr_rw(vmx, MSR_GSBASE) ||
871 guest_msr_rw(vmx, MSR_FSBASE) ||
872 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
873 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
874 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
875 guest_msr_rw(vmx, MSR_EFER) ||
876 guest_msr_rw(vmx, MSR_PAT) ||
877 guest_msr_ro(vmx, MSR_TSC))
878 panic("vmx_vminit: error setting guest msr access");
879
880 vpid_alloc(vpid, VM_MAXCPU);
881
882 if (virtual_interrupt_delivery) {
883 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
884 APIC_ACCESS_ADDRESS);
885 /* XXX this should really return an error to the caller */
886 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
887 }
888
889 for (i = 0; i < VM_MAXCPU; i++) {
890 vmcs = &vmx->vmcs[i];
891 vmcs->identifier = vmx_revision();
892 error = vmclear(vmcs);
893 if (error != 0) {
894 panic("vmx_vminit: vmclear error %d on vcpu %d\n",
895 error, i);
896 }
897
898 vmx_msr_guest_init(vmx, i);
899
900 error = vmcs_init(vmcs);
901 KASSERT(error == 0, ("vmcs_init error %d", error));
902
903 VMPTRLD(vmcs);
904 error = 0;
905 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
906 error += vmwrite(VMCS_EPTP, vmx->eptp);
907 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
908 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
909 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
910 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
911 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
912 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
913 error += vmwrite(VMCS_VPID, vpid[i]);
915
916 /* exception bitmap */
917 if (vcpu_trace_exceptions(vm, i))
918 exc_bitmap = 0xffffffff;
919 else
920 exc_bitmap = 1 << IDT_MC;
921 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
922
914 if (virtual_interrupt_delivery) {
915 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
916 error += vmwrite(VMCS_VIRTUAL_APIC,
917 vtophys(&vmx->apic_page[i]));
918 error += vmwrite(VMCS_EOI_EXIT0, 0);
919 error += vmwrite(VMCS_EOI_EXIT1, 0);
920 error += vmwrite(VMCS_EOI_EXIT2, 0);
921 error += vmwrite(VMCS_EOI_EXIT3, 0);
922 }
923 if (posted_interrupts) {
924 error += vmwrite(VMCS_PIR_VECTOR, pirvec);
925 error += vmwrite(VMCS_PIR_DESC,
926 vtophys(&vmx->pir_desc[i]));
927 }
928 VMCLEAR(vmcs);
929 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
930
931 vmx->cap[i].set = 0;
932 vmx->cap[i].proc_ctls = procbased_ctls;
933 vmx->cap[i].proc_ctls2 = procbased_ctls2;
934
935 vmx->state[i].lastcpu = NOCPU;
936 vmx->state[i].vpid = vpid[i];
937
938 /*
939 * Set up the CR0/4 shadows, and init the read shadow
940 * to the power-on register value from the Intel Sys Arch.
941 * CR0 - 0x60000010
942 * CR4 - 0
943 */
944 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
945 if (error != 0)
946 panic("vmx_setup_cr0_shadow %d", error);
947
948 error = vmx_setup_cr4_shadow(vmcs, 0);
949 if (error != 0)
950 panic("vmx_setup_cr4_shadow %d", error);
951
952 vmx->ctx[i].pmap = pmap;
953 }
954
955 return (vmx);
956}
957
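The exception bitmap initialization added in this revision (lines 916-921 above) relies on the standard VMCS encoding in which bit N of the bitmap intercepts exception vector N: the default 1 << IDT_MC traps only machine checks (#MC, vector 18), while 0xffffffff traps all 32 vectors when vcpu_trace_exceptions() is enabled. As a purely hypothetical illustration, not part of this commit, a single additional vector could be trapped the same way:

	/* illustrative only: also intercept #BP (vector 3) in addition to #MC */
	exc_bitmap = (1 << IDT_MC) | (1 << IDT_BP);
	error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);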
958static int
959vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
960{
961 int handled, func;
962
963 func = vmxctx->guest_rax;
964
965 handled = x86_emulate_cpuid(vm, vcpu,
966 (uint32_t*)(&vmxctx->guest_rax),
967 (uint32_t*)(&vmxctx->guest_rbx),
968 (uint32_t*)(&vmxctx->guest_rcx),
969 (uint32_t*)(&vmxctx->guest_rdx));
970 return (handled);
971}
972
973static __inline void
974vmx_run_trace(struct vmx *vmx, int vcpu)
975{
976#ifdef KTR
977 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
978#endif
979}
980
981static __inline void
982vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
983 int handled)
984{
985#ifdef KTR
986 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
987 handled ? "handled" : "unhandled",
988 exit_reason_to_str(exit_reason), rip);
989#endif
990}
991
992static __inline void
993vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
994{
995#ifdef KTR
996 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
997#endif
998}
999
1000static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1001static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1002
1003/*
1004 * Invalidate guest mappings identified by this vcpu's vpid from the TLB.
1005 */
1006static __inline void
1007vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1008{
1009 struct vmxstate *vmxstate;
1010 struct invvpid_desc invvpid_desc;
1011
1012 vmxstate = &vmx->state[vcpu];
1013 if (vmxstate->vpid == 0)
1014 return;
1015
1016 if (!running) {
1017 /*
1018 * Set the 'lastcpu' to an invalid host cpu.
1019 *
1020 * This will invalidate TLB entries tagged with the vcpu's
1021 * vpid the next time it runs via vmx_set_pcpu_defaults().
1022 */
1023 vmxstate->lastcpu = NOCPU;
1024 return;
1025 }
1026
1027 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1028 "critical section", __func__, vcpu));
1029
1030 /*
1031 * Invalidate all mappings tagged with 'vpid'
1032 *
1033 * We do this because this vcpu was executing on a different host
1034 * cpu when it last ran. We do not track whether it invalidated
1035 * mappings associated with its 'vpid' during that run. So we must
1036 * assume that the mappings associated with 'vpid' on 'curcpu' are
1037 * stale and invalidate them.
1038 *
1039 * Note that we incur this penalty only when the scheduler chooses to
1040 * move the thread associated with this vcpu between host cpus.
1041 *
1042 * Note also that this will invalidate mappings tagged with 'vpid'
1043 * for "all" EP4TAs.
1044 */
1045 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1046 invvpid_desc._res1 = 0;
1047 invvpid_desc._res2 = 0;
1048 invvpid_desc.vpid = vmxstate->vpid;
1049 invvpid_desc.linear_addr = 0;
1050 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1051 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1052 } else {
1053 /*
1054 * The invvpid can be skipped if an invept is going to
1055 * be performed before entering the guest. The invept
1056 * will invalidate combined mappings tagged with
1057 * 'vmx->eptp' for all vpids.
1058 */
1059 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1060 }
1061}
1062
1063static void
1064vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1065{
1066 struct vmxstate *vmxstate;
1067
1068 vmxstate = &vmx->state[vcpu];
1069 if (vmxstate->lastcpu == curcpu)
1070 return;
1071
1072 vmxstate->lastcpu = curcpu;
1073
1074 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1075
1076 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1077 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1078 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1079 vmx_invvpid(vmx, vcpu, pmap, 1);
1080}
1081
1082/*
1083 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1084 */
1085CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1086
1087static void __inline
1088vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1089{
1090
1091 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1092 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1093 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1094 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1095 }
1096}
1097
1098static void __inline
1099vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1100{
1101
1102 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1103 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1104 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1105 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1106 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1107}
1108
1109static void __inline
1110vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1111{
1112
1113 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1114 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1115 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1116 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1117 }
1118}
1119
1120static void __inline
1121vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1122{
1123
1124 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1125 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1126 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1127 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1128 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1129}
1130
1131#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
1132 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1133#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
1134 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1135
1136static void
1137vmx_inject_nmi(struct vmx *vmx, int vcpu)
1138{
1139 uint32_t gi, info;
1140
1141 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1142 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1143 "interruptibility-state %#x", gi));
1144
1145 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1146 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1147 "VM-entry interruption information %#x", info));
1148
1149 /*
1150 * Inject the virtual NMI. The vector must be the NMI IDT entry
1151 * or the VMCS entry check will fail.
1152 */
1153 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1154 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1155
1156 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1157
1158 /* Clear the request */
1159 vm_nmi_clear(vmx->vm, vcpu);
1160}
1161
1162static void
1163vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
1164{
1165 int vector, need_nmi_exiting, extint_pending;
1166 uint64_t rflags, entryinfo;
1167 uint32_t gi, info;
1168
1169 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1170 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1171 "intinfo is not valid: %#lx", __func__, entryinfo));
1172
1173 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1174 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1175 "pending exception: %#lx/%#x", __func__, entryinfo, info));
1176
1177 info = entryinfo;
1178 vector = info & 0xff;
1179 if (vector == IDT_BP || vector == IDT_OF) {
1180 /*
1181 * VT-x requires #BP and #OF to be injected as software
1182 * exceptions.
1183 */
1184 info &= ~VMCS_INTR_T_MASK;
1185 info |= VMCS_INTR_T_SWEXCEPTION;
1186 }
1187
1188 if (info & VMCS_INTR_DEL_ERRCODE)
1189 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1190
1191 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1192 }
1193
1194 if (vm_nmi_pending(vmx->vm, vcpu)) {
1195 /*
1196 * If there are no conditions blocking NMI injection then
1197 * inject it directly here otherwise enable "NMI window
1198 * exiting" to inject it as soon as we can.
1199 *
1200 * We also check for STI_BLOCKING because some implementations
1201 * don't allow NMI injection in this case. If we are running
1202 * on a processor that doesn't have this restriction it will
1203 * immediately exit and the NMI will be injected in the
1204 * "NMI window exiting" handler.
1205 */
1206 need_nmi_exiting = 1;
1207 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1208 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1209 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1210 if ((info & VMCS_INTR_VALID) == 0) {
1211 vmx_inject_nmi(vmx, vcpu);
1212 need_nmi_exiting = 0;
1213 } else {
1214 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1215 "due to VM-entry intr info %#x", info);
1216 }
1217 } else {
1218 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1219 "Guest Interruptibility-state %#x", gi);
1220 }
1221
1222 if (need_nmi_exiting)
1223 vmx_set_nmi_window_exiting(vmx, vcpu);
1224 }
1225
1226 extint_pending = vm_extint_pending(vmx->vm, vcpu);
1227
1228 if (!extint_pending && virtual_interrupt_delivery) {
1229 vmx_inject_pir(vlapic);
1230 return;
1231 }
1232
1233 /*
1234 * If interrupt-window exiting is already in effect then don't bother
1235 * checking for pending interrupts. This is just an optimization and
1236 * not needed for correctness.
1237 */
1238 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1239 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1240 "pending int_window_exiting");
1241 return;
1242 }
1243
1244 if (!extint_pending) {
1245 /* Ask the local apic for a vector to inject */
1246 if (!vlapic_pending_intr(vlapic, &vector))
1247 return;
1248
1249 /*
1250 * From the Intel SDM, Volume 3, Section "Maskable
1251 * Hardware Interrupts":
1252 * - maskable interrupt vectors [16,255] can be delivered
1253 * through the local APIC.
1254 */
1255 KASSERT(vector >= 16 && vector <= 255,
1256 ("invalid vector %d from local APIC", vector));
1257 } else {
1258 /* Ask the legacy pic for a vector to inject */
1259 vatpic_pending_intr(vmx->vm, &vector);
1260
1261 /*
1262 * From the Intel SDM, Volume 3, Section "Maskable
1263 * Hardware Interrupts":
1264 * - maskable interrupt vectors [0,255] can be delivered
1265 * through the INTR pin.
1266 */
1267 KASSERT(vector >= 0 && vector <= 255,
1268 ("invalid vector %d from INTR", vector));
1269 }
1270
1271 /* Check RFLAGS.IF and the interruptibility state of the guest */
1272 rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1273 if ((rflags & PSL_I) == 0) {
1274 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1275 "rflags %#lx", vector, rflags);
1276 goto cantinject;
1277 }
1278
1279 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1280 if (gi & HWINTR_BLOCKING) {
1281 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1282 "Guest Interruptibility-state %#x", vector, gi);
1283 goto cantinject;
1284 }
1285
1286 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1287 if (info & VMCS_INTR_VALID) {
1288 /*
1289 * This is expected and could happen for multiple reasons:
1290 * - A vectoring VM-entry was aborted due to astpending
1291 * - A VM-exit happened during event injection.
1292 * - An exception was injected above.
1293 * - An NMI was injected above or after "NMI window exiting"
1294 */
1295 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1296 "VM-entry intr info %#x", vector, info);
1297 goto cantinject;
1298 }
1299
1300 /* Inject the interrupt */
1301 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1302 info |= vector;
1303 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1304
1305 if (!extint_pending) {
1306 /* Update the Local APIC ISR */
1307 vlapic_intr_accepted(vlapic, vector);
1308 } else {
1309 vm_extint_clear(vmx->vm, vcpu);
1310 vatpic_intr_accepted(vmx->vm, vector);
1311
1312 /*
1313 * After we accepted the current ExtINT the PIC may
1314 * have posted another one. If that is the case, set
1315 * the Interrupt Window Exiting execution control so
1316 * we can inject that one too.
1317 *
1318 * Also, interrupt window exiting allows us to inject any
1319 * pending APIC vector that was preempted by the ExtINT
1320 * as soon as possible. This applies both for the software
1321 * emulated vlapic and the hardware assisted virtual APIC.
1322 */
1323 vmx_set_int_window_exiting(vmx, vcpu);
1324 }
1325
1326 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1327
1328 return;
1329
1330cantinject:
1331 /*
1332 * Set the Interrupt Window Exiting execution control so we can inject
1333 * the interrupt as soon as the blocking condition goes away.
1334 */
1335 vmx_set_int_window_exiting(vmx, vcpu);
1336}
1337
1338/*
1339 * If the Virtual NMIs execution control is '1' then the logical processor
1340 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1341 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1342 * virtual-NMI blocking.
1343 *
1344 * This unblocking occurs even if the IRET causes a fault. In this case the
1345 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1346 */
1347static void
1348vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1349{
1350 uint32_t gi;
1351
1352 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1353 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1354 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1355 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1356}
1357
1358static void
1359vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1360{
1361 uint32_t gi;
1362
1363 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1364 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1365 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1366 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1367}
1368
1369static void
1370vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1371{
1372 uint32_t gi;
1373
1374 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1375 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1376 ("NMI blocking is not in effect %#x", gi));
1377}
1378
1379static int
1380vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1381{
1382 struct vmxctx *vmxctx;
1383 uint64_t xcrval;
1384 const struct xsave_limits *limits;
1385
1386 vmxctx = &vmx->ctx[vcpu];
1387 limits = vmm_get_xsave_limits();
1388
1389 /*
1390 * Note that the processor raises a GP# fault on its own if
1391 * xsetbv is executed for CPL != 0, so we do not have to
1392 * emulate that fault here.
1393 */
1394
1395 /* Only xcr0 is supported. */
1396 if (vmxctx->guest_rcx != 0) {
1397 vm_inject_gp(vmx->vm, vcpu);
1398 return (HANDLED);
1399 }
1400
1401 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1402 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1403 vm_inject_ud(vmx->vm, vcpu);
1404 return (HANDLED);
1405 }
1406
1407 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1408 if ((xcrval & ~limits->xcr0_allowed) != 0) {
1409 vm_inject_gp(vmx->vm, vcpu);
1410 return (HANDLED);
1411 }
1412
1413 if (!(xcrval & XFEATURE_ENABLED_X87)) {
1414 vm_inject_gp(vmx->vm, vcpu);
1415 return (HANDLED);
1416 }
1417
1418 /* AVX (YMM_Hi128) requires SSE. */
1419 if (xcrval & XFEATURE_ENABLED_AVX &&
1420 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1421 vm_inject_gp(vmx->vm, vcpu);
1422 return (HANDLED);
1423 }
1424
1425 /*
1426 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1427 * ZMM_Hi256, and Hi16_ZMM.
1428 */
1429 if (xcrval & XFEATURE_AVX512 &&
1430 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1431 (XFEATURE_AVX512 | XFEATURE_AVX)) {
1432 vm_inject_gp(vmx->vm, vcpu);
1433 return (HANDLED);
1434 }
1435
1436 /*
1437 * Intel MPX requires both bound register state flags to be
1438 * set.
1439 */
1440 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1441 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1442 vm_inject_gp(vmx->vm, vcpu);
1443 return (HANDLED);
1444 }
1445
1446 /*
1447 * This runs "inside" vmrun() with the guest's FPU state, so
1448 * modifying xcr0 directly modifies the guest's xcr0, not the
1449 * host's.
1450 */
1451 load_xcr(0, xcrval);
1452 return (HANDLED);
1453}
1454
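/*
 * 'ident' uses the register numbering found in VM-exit qualifications:
 * 0 = %rax, 1 = %rcx, ... 15 = %r15.  %rsp (ident 4) is not cached in
 * the vmxctx and is accessed through the VMCS instead.
 */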
1455static uint64_t
1456vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1457{
1458 const struct vmxctx *vmxctx;
1459
1460 vmxctx = &vmx->ctx[vcpu];
1461
1462 switch (ident) {
1463 case 0:
1464 return (vmxctx->guest_rax);
1465 case 1:
1466 return (vmxctx->guest_rcx);
1467 case 2:
1468 return (vmxctx->guest_rdx);
1469 case 3:
1470 return (vmxctx->guest_rbx);
1471 case 4:
1472 return (vmcs_read(VMCS_GUEST_RSP));
1473 case 5:
1474 return (vmxctx->guest_rbp);
1475 case 6:
1476 return (vmxctx->guest_rsi);
1477 case 7:
1478 return (vmxctx->guest_rdi);
1479 case 8:
1480 return (vmxctx->guest_r8);
1481 case 9:
1482 return (vmxctx->guest_r9);
1483 case 10:
1484 return (vmxctx->guest_r10);
1485 case 11:
1486 return (vmxctx->guest_r11);
1487 case 12:
1488 return (vmxctx->guest_r12);
1489 case 13:
1490 return (vmxctx->guest_r13);
1491 case 14:
1492 return (vmxctx->guest_r14);
1493 case 15:
1494 return (vmxctx->guest_r15);
1495 default:
1496 panic("invalid vmx register %d", ident);
1497 }
1498}
1499
1500static void
1501vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1502{
1503 struct vmxctx *vmxctx;
1504
1505 vmxctx = &vmx->ctx[vcpu];
1506
1507 switch (ident) {
1508 case 0:
1509 vmxctx->guest_rax = regval;
1510 break;
1511 case 1:
1512 vmxctx->guest_rcx = regval;
1513 break;
1514 case 2:
1515 vmxctx->guest_rdx = regval;
1516 break;
1517 case 3:
1518 vmxctx->guest_rbx = regval;
1519 break;
1520 case 4:
1521 vmcs_write(VMCS_GUEST_RSP, regval);
1522 break;
1523 case 5:
1524 vmxctx->guest_rbp = regval;
1525 break;
1526 case 6:
1527 vmxctx->guest_rsi = regval;
1528 break;
1529 case 7:
1530 vmxctx->guest_rdi = regval;
1531 break;
1532 case 8:
1533 vmxctx->guest_r8 = regval;
1534 break;
1535 case 9:
1536 vmxctx->guest_r9 = regval;
1537 break;
1538 case 10:
1539 vmxctx->guest_r10 = regval;
1540 break;
1541 case 11:
1542 vmxctx->guest_r11 = regval;
1543 break;
1544 case 12:
1545 vmxctx->guest_r12 = regval;
1546 break;
1547 case 13:
1548 vmxctx->guest_r13 = regval;
1549 break;
1550 case 14:
1551 vmxctx->guest_r14 = regval;
1552 break;
1553 case 15:
1554 vmxctx->guest_r15 = regval;
1555 break;
1556 default:
1557 panic("invalid vmx register %d", ident);
1558 }
1559}
1560
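/*
 * For control-register accesses the exit qualification encodes the CR
 * number in bits 3:0, the access type in bits 5:4 (0 is 'mov to CR')
 * and the general purpose register in bits 11:8.  The handlers below
 * update the read shadow with the guest's value and apply the
 * ones/zeros masks before loading the real register.
 */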
1561static int
1562vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1563{
1564 uint64_t crval, regval;
1565
1566 /* We only handle mov to %cr0 at this time */
1567 if ((exitqual & 0xf0) != 0x00)
1568 return (UNHANDLED);
1569
1570 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1571
1572 vmcs_write(VMCS_CR0_SHADOW, regval);
1573
1574 crval = regval | cr0_ones_mask;
1575 crval &= ~cr0_zeros_mask;
1576 vmcs_write(VMCS_GUEST_CR0, crval);
1577
1578 if (regval & CR0_PG) {
1579 uint64_t efer, entry_ctls;
1580
1581 /*
1582 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1583 * the "IA-32e mode guest" bit in VM-entry control must be
1584 * equal.
1585 */
1586 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1587 if (efer & EFER_LME) {
1588 efer |= EFER_LMA;
1589 vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1590 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1591 entry_ctls |= VM_ENTRY_GUEST_LMA;
1592 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1593 }
1594 }
1595
1596 return (HANDLED);
1597}
1598
1599static int
1600vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1601{
1602 uint64_t crval, regval;
1603
1604 /* We only handle mov to %cr4 at this time */
1605 if ((exitqual & 0xf0) != 0x00)
1606 return (UNHANDLED);
1607
1608 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1609
1610 vmcs_write(VMCS_CR4_SHADOW, regval);
1611
1612 crval = regval | cr4_ones_mask;
1613 crval &= ~cr4_zeros_mask;
1614 vmcs_write(VMCS_GUEST_CR4, crval);
1615
1616 return (HANDLED);
1617}
1618
1619static int
1620vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1621{
1622 struct vlapic *vlapic;
1623 uint64_t cr8;
1624 int regnum;
1625
1626 /* We only handle mov %cr8 to/from a register at this time. */
1627 if ((exitqual & 0xe0) != 0x00) {
1628 return (UNHANDLED);
1629 }
1630
1631 vlapic = vm_lapic(vmx->vm, vcpu);
1632 regnum = (exitqual >> 8) & 0xf;
1633 if (exitqual & 0x10) {
1634 cr8 = vlapic_get_cr8(vlapic);
1635 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1636 } else {
1637 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1638 vlapic_set_cr8(vlapic, cr8);
1639 }
1640
1641 return (HANDLED);
1642}
1643
1644/*
1645 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1646 */
1647static int
1648vmx_cpl(void)
1649{
1650 uint32_t ssar;
1651
1652 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1653 return ((ssar >> 5) & 0x3);
1654}
1655
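/*
 * Derive the guest CPU mode from the VMCS: EFER.LMA set means long
 * mode, with CS.L (bit 13 of the CS access rights) selecting 64-bit
 * versus compatibility mode; otherwise CR0.PE distinguishes protected
 * mode from real mode.
 */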
1656static enum vm_cpu_mode
1657vmx_cpu_mode(void)
1658{
1659 uint32_t csar;
1660
1661 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1662 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1663 if (csar & 0x2000)
1664 return (CPU_MODE_64BIT); /* CS.L = 1 */
1665 else
1666 return (CPU_MODE_COMPATIBILITY);
1667 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1668 return (CPU_MODE_PROTECTED);
1669 } else {
1670 return (CPU_MODE_REAL);
1671 }
1672}
1673
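/*
 * Classify the guest paging mode: "flat" when CR0.PG is clear, 32-bit
 * paging when CR4.PAE is clear, and otherwise either 4-level paging
 * (EFER.LME set) or PAE paging.
 */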
1674static enum vm_paging_mode
1675vmx_paging_mode(void)
1676{
1677
1678 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1679 return (PAGING_MODE_FLAT);
1680 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1681 return (PAGING_MODE_32);
1682 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1683 return (PAGING_MODE_64);
1684 else
1685 return (PAGING_MODE_PAE);
1686}
1687
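/*
 * Helpers for decoding INS/OUTS exits: the index register is %rdi for
 * INS and %rsi for OUTS, the repeat count comes from %rcx when a REP
 * prefix is present, and the address size and segment are taken from
 * the VM-exit instruction information field (%es is implied for INS).
 */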
1688static uint64_t
1689inout_str_index(struct vmx *vmx, int vcpuid, int in)
1690{
1691 uint64_t val;
1692 int error;
1693 enum vm_reg_name reg;
1694
1695 reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1696 error = vmx_getreg(vmx, vcpuid, reg, &val);
1697 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1698 return (val);
1699}
1700
1701static uint64_t
1702inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1703{
1704 uint64_t val;
1705 int error;
1706
1707 if (rep) {
1708 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1709 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1710 } else {
1711 val = 1;
1712 }
1713 return (val);
1714}
1715
1716static int
1717inout_str_addrsize(uint32_t inst_info)
1718{
1719 uint32_t size;
1720
1721 size = (inst_info >> 7) & 0x7;
1722 switch (size) {
1723 case 0:
1724 return (2); /* 16 bit */
1725 case 1:
1726 return (4); /* 32 bit */
1727 case 2:
1728 return (8); /* 64 bit */
1729 default:
1730 panic("%s: invalid size encoding %d", __func__, size);
1731 }
1732}
1733
1734static void
1735inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1736 struct vm_inout_str *vis)
1737{
1738 int error, s;
1739
1740 if (in) {
1741 vis->seg_name = VM_REG_GUEST_ES;
1742 } else {
1743 s = (inst_info >> 15) & 0x7;
1744 vis->seg_name = vm_segment_name(s);
1745 }
1746
1747 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1748 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1749
1750 /* XXX modify svm.c to update bit 16 of seg_desc.access (unusable) */
1751}
1752
1753static void
1754vmx_paging_info(struct vm_guest_paging *paging)
1755{
1756 paging->cr3 = vmcs_guest_cr3();
1757 paging->cpl = vmx_cpl();
1758 paging->cpu_mode = vmx_cpu_mode();
1759 paging->paging_mode = vmx_paging_mode();
1760}
1761
1762static void
1763vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1764{
1765 struct vm_guest_paging *paging;
1766 uint32_t csar;
1767
1768 paging = &vmexit->u.inst_emul.paging;
1769
1770 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1771 vmexit->u.inst_emul.gpa = gpa;
1772 vmexit->u.inst_emul.gla = gla;
1773 vmx_paging_info(paging);
1774 switch (paging->cpu_mode) {
1775 case CPU_MODE_PROTECTED:
1776 case CPU_MODE_COMPATIBILITY:
1777 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1778 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1779 break;
1780 default:
1781 vmexit->u.inst_emul.cs_d = 0;
1782 break;
1783 }
1758}
1759
1760static void
1761vmx_paging_info(struct vm_guest_paging *paging)
1762{
1763 paging->cr3 = vmcs_guest_cr3();
1764 paging->cpl = vmx_cpl();
1765 paging->cpu_mode = vmx_cpu_mode();
1766 paging->paging_mode = vmx_paging_mode();
1767}
1768
1769static void
1770vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1771{
1772 struct vm_guest_paging *paging;
1773 uint32_t csar;
1774
1775 paging = &vmexit->u.inst_emul.paging;
1776
1777 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1778 vmexit->u.inst_emul.gpa = gpa;
1779 vmexit->u.inst_emul.gla = gla;
1780 vmx_paging_info(paging);
1781 switch (paging->cpu_mode) {
1782 case CPU_MODE_PROTECTED:
1783 case CPU_MODE_COMPATIBILITY:
1784 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1785 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1786 break;
1787 default:
1788 vmexit->u.inst_emul.cs_d = 0;
1789 break;
1790 }
1791 vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1784}
1785
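/*
 * Translate an EPT violation exit qualification into a fault type and
 * decide whether it should be handled by instruction emulation: only
 * read/write accesses to a guest-physical address reached through a
 * guest-linear translation qualify.
 */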
1786static int
1787ept_fault_type(uint64_t ept_qual)
1788{
1789 int fault_type;
1790
1791 if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1792 fault_type = VM_PROT_WRITE;
1793 else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1794 fault_type = VM_PROT_EXECUTE;
1795 else
1796 fault_type = VM_PROT_READ;
1797
1798 return (fault_type);
1799}
1800
1801static boolean_t
1802ept_emulation_fault(uint64_t ept_qual)
1803{
1804 int read, write;
1805
1806 /* EPT fault on an instruction fetch doesn't make sense here */
1807 if (ept_qual & EPT_VIOLATION_INST_FETCH)
1808 return (FALSE);
1809
1810 /* EPT fault must be a read fault or a write fault */
1811 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1812 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1813 if ((read | write) == 0)
1814 return (FALSE);
1815
1816 /*
1817 * The EPT violation must have been caused by accessing a
1818 * guest-physical address that is a translation of a guest-linear
1819 * address.
1820 */
1821 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1822 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1823 return (FALSE);
1824 }
1825
1826 return (TRUE);
1827}
1828
1829static __inline int
1830apic_access_virtualization(struct vmx *vmx, int vcpuid)
1831{
1832 uint32_t proc_ctls2;
1833
1834 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1835 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1836}
1837
1838static __inline int
1839x2apic_virtualization(struct vmx *vmx, int vcpuid)
1840{
1841 uint32_t proc_ctls2;
1842
1843 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1844 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1845}
1846
1847static int
1848vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1849 uint64_t qual)
1850{
1851 int error, handled, offset;
1852 uint32_t *apic_regs, vector;
1853 bool retu;
1854
1855 handled = HANDLED;
1856 offset = APIC_WRITE_OFFSET(qual);
1857
1858 if (!apic_access_virtualization(vmx, vcpuid)) {
1859 /*
1860 * In general there should not be any APIC write VM-exits
1861 * unless APIC-access virtualization is enabled.
1862 *
1863 * However self-IPI virtualization can legitimately trigger
1864 * an APIC-write VM-exit so treat it specially.
1865 */
1866 if (x2apic_virtualization(vmx, vcpuid) &&
1867 offset == APIC_OFFSET_SELF_IPI) {
1868 apic_regs = (uint32_t *)(vlapic->apic_page);
1869 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1870 vlapic_self_ipi_handler(vlapic, vector);
1871 return (HANDLED);
1872 } else
1873 return (UNHANDLED);
1874 }
1875
1876 switch (offset) {
1877 case APIC_OFFSET_ID:
1878 vlapic_id_write_handler(vlapic);
1879 break;
1880 case APIC_OFFSET_LDR:
1881 vlapic_ldr_write_handler(vlapic);
1882 break;
1883 case APIC_OFFSET_DFR:
1884 vlapic_dfr_write_handler(vlapic);
1885 break;
1886 case APIC_OFFSET_SVR:
1887 vlapic_svr_write_handler(vlapic);
1888 break;
1889 case APIC_OFFSET_ESR:
1890 vlapic_esr_write_handler(vlapic);
1891 break;
1892 case APIC_OFFSET_ICR_LOW:
1893 retu = false;
1894 error = vlapic_icrlo_write_handler(vlapic, &retu);
1895 if (error != 0 || retu)
1896 handled = UNHANDLED;
1897 break;
1898 case APIC_OFFSET_CMCI_LVT:
1899 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1900 vlapic_lvt_write_handler(vlapic, offset);
1901 break;
1902 case APIC_OFFSET_TIMER_ICR:
1903 vlapic_icrtmr_write_handler(vlapic);
1904 break;
1905 case APIC_OFFSET_TIMER_DCR:
1906 vlapic_dcr_write_handler(vlapic);
1907 break;
1908 default:
1909 handled = UNHANDLED;
1910 break;
1911 }
1912 return (handled);
1913}
1914
1915static bool
1916apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1917{
1918
1919 if (apic_access_virtualization(vmx, vcpuid) &&
1920 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1921 return (true);
1922 else
1923 return (false);
1924}
1925
1926static int
1927vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1928{
1929 uint64_t qual;
1930 int access_type, offset, allowed;
1931
1932 if (!apic_access_virtualization(vmx, vcpuid))
1933 return (UNHANDLED);
1934
1935 qual = vmexit->u.vmx.exit_qualification;
1936 access_type = APIC_ACCESS_TYPE(qual);
1937 offset = APIC_ACCESS_OFFSET(qual);
1938
1939 allowed = 0;
1940 if (access_type == 0) {
1941 /*
1942 * Read data access to the following registers is expected.
1943 */
1944 switch (offset) {
1945 case APIC_OFFSET_APR:
1946 case APIC_OFFSET_PPR:
1947 case APIC_OFFSET_RRR:
1948 case APIC_OFFSET_CMCI_LVT:
1949 case APIC_OFFSET_TIMER_CCR:
1950 allowed = 1;
1951 break;
1952 default:
1953 break;
1954 }
1955 } else if (access_type == 1) {
1956 /*
1957 * Write data access to the following registers is expected.
1958 */
1959 switch (offset) {
1960 case APIC_OFFSET_VER:
1961 case APIC_OFFSET_APR:
1962 case APIC_OFFSET_PPR:
1963 case APIC_OFFSET_RRR:
1964 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1965 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1966 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1967 case APIC_OFFSET_CMCI_LVT:
1968 case APIC_OFFSET_TIMER_CCR:
1969 allowed = 1;
1970 break;
1971 default:
1972 break;
1973 }
1974 }
1975
1976 if (allowed) {
1977 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
1978 VIE_INVALID_GLA);
1979 }
1980
1981 /*
1982 * Regardless of whether the APIC-access is allowed this handler
1983 * always returns UNHANDLED:
1984 * - if the access is allowed then it is handled by emulating the
1985 * instruction that caused the VM-exit (outside the critical section)
1986 * - if the access is not allowed then it will be converted to an
1987 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
1988 */
1989 return (UNHANDLED);
1990}
1991
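/*
 * Bits 31:30 of the task-switch exit qualification encode what caused
 * the switch: a CALL, an IRET, a JMP or a task gate in the IDT.
 */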
1992static enum task_switch_reason
1993vmx_task_switch_reason(uint64_t qual)
1994{
1995 int reason;
1996
1997 reason = (qual >> 30) & 0x3;
1998 switch (reason) {
1999 case 0:
2000 return (TSR_CALL);
2001 case 1:
2002 return (TSR_IRET);
2003 case 2:
2004 return (TSR_JMP);
2005 case 3:
2006 return (TSR_IDT_GATE);
2007 default:
2008 panic("%s: invalid reason %d", __func__, reason);
2009 }
2010}
2011
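/*
 * MSR accesses that trap are split between the local APIC emulation
 * (for APIC MSRs) and the VMX-specific MSR handlers; 'retu' is set when
 * the access must be completed in userland.
 */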
2012static int
2013emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2014{
2015 int error;
2016
2017 if (lapic_msr(num))
2018 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2019 else
2020 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2021
2022 return (error);
2023}
2024
2025static int
2026emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2027{
2028 struct vmxctx *vmxctx;
2029 uint64_t result;
2030 uint32_t eax, edx;
2031 int error;
2032
2033 if (lapic_msr(num))
2034 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2035 else
2036 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2037
2038 if (error == 0) {
2039 eax = result;
2040 vmxctx = &vmx->ctx[vcpuid];
2041 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2042 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2043
2044 edx = result >> 32;
2045 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2046 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2047 }
2048
2049 return (error);
2050}
2051
2052static int
2053vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2054{
2055 int error, handled, in;
2056 struct vmxctx *vmxctx;
2057 struct vlapic *vlapic;
2058 struct vm_inout_str *vis;
2059 struct vm_task_switch *ts;
1792}
1793
1794static int
1795ept_fault_type(uint64_t ept_qual)
1796{
1797 int fault_type;
1798
1799 if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1800 fault_type = VM_PROT_WRITE;
1801 else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1802 fault_type = VM_PROT_EXECUTE;
1803 else
1804 fault_type= VM_PROT_READ;
1805
1806 return (fault_type);
1807}
1808
1809static boolean_t
1810ept_emulation_fault(uint64_t ept_qual)
1811{
1812 int read, write;
1813
1814 /* EPT fault on an instruction fetch doesn't make sense here */
1815 if (ept_qual & EPT_VIOLATION_INST_FETCH)
1816 return (FALSE);
1817
1818 /* EPT fault must be a read fault or a write fault */
1819 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1820 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1821 if ((read | write) == 0)
1822 return (FALSE);
1823
1824 /*
1825 * The EPT violation must have been caused by accessing a
1826 * guest-physical address that is a translation of a guest-linear
1827 * address.
1828 */
1829 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1830 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1831 return (FALSE);
1832 }
1833
1834 return (TRUE);
1835}
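/*
 * Worked example for the two helpers above (values assume the usual
 * EPT-violation exit qualification layout: bit 0 read, bit 1 write,
 * bit 2 instruction fetch, bit 7 GLA valid, bit 8 translation valid).
 * A qualification of 0x181 is a data read of a guest-physical address
 * reached through a guest-linear address, so ept_fault_type() returns
 * VM_PROT_READ and ept_emulation_fault() returns TRUE; 0x2 alone is a
 * write with neither the GLA-valid nor the translation-valid bit set,
 * so instruction emulation is not attempted for it.
 */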
1836
1837static __inline int
1838apic_access_virtualization(struct vmx *vmx, int vcpuid)
1839{
1840 uint32_t proc_ctls2;
1841
1842 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1843 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1844}
1845
1846static __inline int
1847x2apic_virtualization(struct vmx *vmx, int vcpuid)
1848{
1849 uint32_t proc_ctls2;
1850
1851 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1852 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1853}
1854
1855static int
1856vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1857 uint64_t qual)
1858{
1859 int error, handled, offset;
1860 uint32_t *apic_regs, vector;
1861 bool retu;
1862
1863 handled = HANDLED;
1864 offset = APIC_WRITE_OFFSET(qual);
1865
1866 if (!apic_access_virtualization(vmx, vcpuid)) {
1867 /*
1868 * In general there should not be any APIC write VM-exits
1869 * unless APIC-access virtualization is enabled.
1870 *
1871 * However self-IPI virtualization can legitimately trigger
1872 * an APIC-write VM-exit so treat it specially.
1873 */
1874 if (x2apic_virtualization(vmx, vcpuid) &&
1875 offset == APIC_OFFSET_SELF_IPI) {
1876 apic_regs = (uint32_t *)(vlapic->apic_page);
1877 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1878 vlapic_self_ipi_handler(vlapic, vector);
1879 return (HANDLED);
1880 } else
1881 return (UNHANDLED);
1882 }
1883
1884 switch (offset) {
1885 case APIC_OFFSET_ID:
1886 vlapic_id_write_handler(vlapic);
1887 break;
1888 case APIC_OFFSET_LDR:
1889 vlapic_ldr_write_handler(vlapic);
1890 break;
1891 case APIC_OFFSET_DFR:
1892 vlapic_dfr_write_handler(vlapic);
1893 break;
1894 case APIC_OFFSET_SVR:
1895 vlapic_svr_write_handler(vlapic);
1896 break;
1897 case APIC_OFFSET_ESR:
1898 vlapic_esr_write_handler(vlapic);
1899 break;
1900 case APIC_OFFSET_ICR_LOW:
1901 retu = false;
1902 error = vlapic_icrlo_write_handler(vlapic, &retu);
1903 if (error != 0 || retu)
1904 handled = UNHANDLED;
1905 break;
1906 case APIC_OFFSET_CMCI_LVT:
1907 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1908 vlapic_lvt_write_handler(vlapic, offset);
1909 break;
1910 case APIC_OFFSET_TIMER_ICR:
1911 vlapic_icrtmr_write_handler(vlapic);
1912 break;
1913 case APIC_OFFSET_TIMER_DCR:
1914 vlapic_dcr_write_handler(vlapic);
1915 break;
1916 default:
1917 handled = UNHANDLED;
1918 break;
1919 }
1920 return (handled);
1921}
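/*
 * Note on the handler above: an APIC-write VM-exit reports only the page
 * offset of the register that was written; the written value has already
 * been stored into the virtual-APIC page by the processor, so each
 * vlapic_*_write_handler() picks the new value up from that page rather
 * than from the exit qualification.
 */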
1922
1923static bool
1924apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1925{
1926
1927 if (apic_access_virtualization(vmx, vcpuid) &&
1928 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1929 return (true);
1930 else
1931 return (false);
1932}
1933
1934static int
1935vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1936{
1937 uint64_t qual;
1938 int access_type, offset, allowed;
1939
1940 if (!apic_access_virtualization(vmx, vcpuid))
1941 return (UNHANDLED);
1942
1943 qual = vmexit->u.vmx.exit_qualification;
1944 access_type = APIC_ACCESS_TYPE(qual);
1945 offset = APIC_ACCESS_OFFSET(qual);
1946
1947 allowed = 0;
1948 if (access_type == 0) {
1949 /*
1950 * Read data access to the following registers is expected.
1951 */
1952 switch (offset) {
1953 case APIC_OFFSET_APR:
1954 case APIC_OFFSET_PPR:
1955 case APIC_OFFSET_RRR:
1956 case APIC_OFFSET_CMCI_LVT:
1957 case APIC_OFFSET_TIMER_CCR:
1958 allowed = 1;
1959 break;
1960 default:
1961 break;
1962 }
1963 } else if (access_type == 1) {
1964 /*
1965 * Write data access to the following registers is expected.
1966 */
1967 switch (offset) {
1968 case APIC_OFFSET_VER:
1969 case APIC_OFFSET_APR:
1970 case APIC_OFFSET_PPR:
1971 case APIC_OFFSET_RRR:
1972 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1973 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1974 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1975 case APIC_OFFSET_CMCI_LVT:
1976 case APIC_OFFSET_TIMER_CCR:
1977 allowed = 1;
1978 break;
1979 default:
1980 break;
1981 }
1982 }
1983
1984 if (allowed) {
1985 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
1986 VIE_INVALID_GLA);
1987 }
1988
1989 /*
1990 * Regardless of whether the APIC-access is allowed this handler
1991 * always returns UNHANDLED:
1992 * - if the access is allowed then it is handled by emulating the
1993 * instruction that caused the VM-exit (outside the critical section)
1994 * - if the access is not allowed then it will be converted to an
1995 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
1996 */
1997 return (UNHANDLED);
1998}
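/*
 * For reference, APIC_ACCESS_TYPE() and APIC_ACCESS_OFFSET() above decode
 * the APIC-access exit qualification: bits 11:0 give the page offset of
 * the register and bits 15:12 the access type, where 0 is a linear-address
 * data read and 1 a linear-address data write -- hence the two switch
 * statements keyed on access_type 0 and 1.
 */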
1999
2000static enum task_switch_reason
2001vmx_task_switch_reason(uint64_t qual)
2002{
2003 int reason;
2004
2005 reason = (qual >> 30) & 0x3;
2006 switch (reason) {
2007 case 0:
2008 return (TSR_CALL);
2009 case 1:
2010 return (TSR_IRET);
2011 case 2:
2012 return (TSR_JMP);
2013 case 3:
2014 return (TSR_IDT_GATE);
2015 default:
2016 panic("%s: invalid reason %d", __func__, reason);
2017 }
2018}
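/*
 * The task-switch exit qualification packs the source of the switch in
 * bits 31:30 (decoded above) and the selector of the new TSS in bits 15:0,
 * which vmx_exit_process() extracts as 'ts->tsssel = qual & 0xffff'.
 */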
2019
2020static int
2021emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2022{
2023 int error;
2024
2025 if (lapic_msr(num))
2026 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2027 else
2028 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2029
2030 return (error);
2031}
2032
2033static int
2034emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2035{
2036 struct vmxctx *vmxctx;
2037 uint64_t result;
2038 uint32_t eax, edx;
2039 int error;
2040
2041 if (lapic_msr(num))
2042 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2043 else
2044 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2045
2046 if (error == 0) {
2047 eax = result;
2048 vmxctx = &vmx->ctx[vcpuid];
2049 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2050 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2051
2052 edx = result >> 32;
2053 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2054 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2055 }
2056
2057 return (error);
2058}
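/*
 * Example of the split above: a 64-bit MSR value of 0x1122334455667788 is
 * returned to the guest as %rax = 0x55667788 and %rdx = 0x11223344,
 * matching the EDX:EAX convention of the RDMSR instruction.
 */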
2059
2060static int
2061vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2062{
2063 int error, handled, in;
2064 struct vmxctx *vmxctx;
2065 struct vlapic *vlapic;
2066 struct vm_inout_str *vis;
2067 struct vm_task_switch *ts;
2068 struct vm_exception vmexc;
2069 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2061 uint32_t intr_type, reason;
2070 uint32_t intr_type, intr_vec, reason;
2071 uint64_t exitintinfo, qual, gpa;
2072 bool retu;
2073
2074 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2075 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2076
2077 handled = UNHANDLED;
2078 vmxctx = &vmx->ctx[vcpu];
2079
2080 qual = vmexit->u.vmx.exit_qualification;
2081 reason = vmexit->u.vmx.exit_reason;
2082 vmexit->exitcode = VM_EXITCODE_BOGUS;
2083
2084 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2085
2086 /*
2087 * VM-entry failures during or after loading guest state.
2088 *
2089 * These VM-exits are uncommon but must be handled specially
2090 * as most VM-exit fields are not populated as usual.
2091 */
2092 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2093 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2094 __asm __volatile("int $18");
2095 return (1);
2096 }
2097
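	/*
	 * Note on the "int $18" above: vector 18 (IDT_MC) is the host's
	 * machine-check handler, so a machine check that aborted VM-entry
	 * is handed to the host instead of being reflected into the guest.
	 */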
2098 /*
2099 * VM exits that can be triggered during event delivery need to
2100 * be handled specially by re-injecting the event if the IDT
2101 * vectoring information field's valid bit is set.
2102 *
2103 * See "Information for VM Exits During Event Delivery" in Intel SDM
2104 * for details.
2105 */
2106 idtvec_info = vmcs_idt_vectoring_info();
2107 if (idtvec_info & VMCS_IDT_VEC_VALID) {
2108 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2109 exitintinfo = idtvec_info;
2110 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2111 idtvec_err = vmcs_idt_vectoring_err();
2112 exitintinfo |= (uint64_t)idtvec_err << 32;
2113 }
2114 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2115 KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2116 __func__, error));
2117
2118 /*
2119 * If 'virtual NMIs' are being used and the VM-exit
2120 * happened while injecting an NMI during the previous
2121 * VM-entry, then clear "blocking by NMI" in the
2122 * Guest Interruptibility-State so the NMI can be
2123 * reinjected on the subsequent VM-entry.
2124 *
2125 * However, if the NMI was being delivered through a task
2126 * gate, then the new task must start execution with NMIs
2127 * blocked so don't clear NMI blocking in this case.
2128 */
2129 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2130 if (intr_type == VMCS_INTR_T_NMI) {
2131 if (reason != EXIT_REASON_TASK_SWITCH)
2132 vmx_clear_nmi_blocking(vmx, vcpu);
2133 else
2134 vmx_assert_nmi_blocking(vmx, vcpu);
2135 }
2136
2137 /*
2138 * Update VM-entry instruction length if the event being
2139 * delivered was a software interrupt or software exception.
2140 */
2141 if (intr_type == VMCS_INTR_T_SWINTR ||
2142 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2143 intr_type == VMCS_INTR_T_SWEXCEPTION) {
2144 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2145 }
2146 }
2147
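	/*
	 * Shape of 'exitintinfo' built above, for illustration: the low
	 * 32 bits hold the IDT-vectoring information (vector, type and
	 * valid bits) and the upper 32 bits hold the error code when one
	 * was being delivered.  For example, a page fault (vector 14)
	 * interrupted while pushing error code 0x2 is saved as
	 * 0x0000000280000b0e and re-injected on the next VM-entry.
	 */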
2148 switch (reason) {
2149 case EXIT_REASON_TASK_SWITCH:
2150 ts = &vmexit->u.task_switch;
2151 ts->tsssel = qual & 0xffff;
2152 ts->reason = vmx_task_switch_reason(qual);
2153 ts->ext = 0;
2154 ts->errcode_valid = 0;
2155 vmx_paging_info(&ts->paging);
2156 /*
2157 * If the task switch was due to a CALL, JMP, IRET, software
2158 * interrupt (INT n) or software exception (INT3, INTO),
2159 * then the saved %rip references the instruction that caused
2160 * the task switch. The instruction length field in the VMCS
2161 * is valid in this case.
2162 *
2163 * In all other cases (e.g., NMI, hardware exception) the
2164 * saved %rip is one that would have been saved in the old TSS
2165 * had the task switch completed normally so the instruction
2166 * length field is not needed in this case and is explicitly
2167 * set to 0.
2168 */
2169 if (ts->reason == TSR_IDT_GATE) {
2170 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2171 ("invalid idtvec_info %#x for IDT task switch",
2172 idtvec_info));
2173 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2174 if (intr_type != VMCS_INTR_T_SWINTR &&
2175 intr_type != VMCS_INTR_T_SWEXCEPTION &&
2176 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2177 /* Task switch triggered by external event */
2178 ts->ext = 1;
2179 vmexit->inst_length = 0;
2180 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2181 ts->errcode_valid = 1;
2182 ts->errcode = vmcs_idt_vectoring_err();
2183 }
2184 }
2185 }
2186 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2187 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2188 "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2189 ts->ext ? "external" : "internal",
2190 ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2191 break;
2192 case EXIT_REASON_CR_ACCESS:
2193 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2194 switch (qual & 0xf) {
2195 case 0:
2196 handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2197 break;
2198 case 4:
2199 handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2200 break;
2201 case 8:
2202 handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2203 break;
2204 }
2205 break;
2206 case EXIT_REASON_RDMSR:
2207 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2208 retu = false;
2209 ecx = vmxctx->guest_rcx;
2210 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2211 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2212 if (error) {
2213 vmexit->exitcode = VM_EXITCODE_RDMSR;
2214 vmexit->u.msr.code = ecx;
2215 } else if (!retu) {
2216 handled = HANDLED;
2217 } else {
2218 /* Return to userspace with a valid exitcode */
2219 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2220 ("emulate_rdmsr retu with bogus exitcode"));
2221 }
2222 break;
2223 case EXIT_REASON_WRMSR:
2224 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2225 retu = false;
2226 eax = vmxctx->guest_rax;
2227 ecx = vmxctx->guest_rcx;
2228 edx = vmxctx->guest_rdx;
2229 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2230 ecx, (uint64_t)edx << 32 | eax);
2231 error = emulate_wrmsr(vmx, vcpu, ecx,
2232 (uint64_t)edx << 32 | eax, &retu);
2233 if (error) {
2234 vmexit->exitcode = VM_EXITCODE_WRMSR;
2235 vmexit->u.msr.code = ecx;
2236 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2237 } else if (!retu) {
2238 handled = HANDLED;
2239 } else {
2240 /* Return to userspace with a valid exitcode */
2241 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2242 ("emulate_wrmsr retu with bogus exitcode"));
2243 }
2244 break;
2245 case EXIT_REASON_HLT:
2246 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2247 vmexit->exitcode = VM_EXITCODE_HLT;
2248 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2249 break;
2250 case EXIT_REASON_MTF:
2251 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2252 vmexit->exitcode = VM_EXITCODE_MTRAP;
2253 break;
2254 case EXIT_REASON_PAUSE:
2255 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2256 vmexit->exitcode = VM_EXITCODE_PAUSE;
2257 break;
2258 case EXIT_REASON_INTR_WINDOW:
2259 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2260 vmx_clear_int_window_exiting(vmx, vcpu);
2261 return (1);
2262 case EXIT_REASON_EXT_INTR:
2263 /*
2264 * External interrupts serve only to cause VM exits and allow
2265 * the host interrupt handler to run.
2266 *
2267 * If this external interrupt triggers a virtual interrupt
2268 * to a VM, then that state will be recorded by the
2269 * host interrupt handler in the VM's softc. We will inject
2270 * this virtual interrupt during the subsequent VM enter.
2271 */
2272 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2273
2274 /*
2275 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2276 * This appears to be a bug in VMware Fusion?
2277 */
2278 if (!(intr_info & VMCS_INTR_VALID))
2279 return (1);
2280 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2281 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2282 ("VM exit interruption info invalid: %#x", intr_info));
2283 vmx_trigger_hostintr(intr_info & 0xff);
2284
2285 /*
2286 * This is special. We want to treat this as a 'handled'
2287 * VM-exit but not increment the instruction pointer.
2288 */
2289 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2290 return (1);
2291 case EXIT_REASON_NMI_WINDOW:
2292 /* Exit to allow the pending virtual NMI to be injected */
2293 if (vm_nmi_pending(vmx->vm, vcpu))
2294 vmx_inject_nmi(vmx, vcpu);
2295 vmx_clear_nmi_window_exiting(vmx, vcpu);
2296 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2297 return (1);
2298 case EXIT_REASON_INOUT:
2299 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2300 vmexit->exitcode = VM_EXITCODE_INOUT;
2301 vmexit->u.inout.bytes = (qual & 0x7) + 1;
2302 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2303 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2304 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2305 vmexit->u.inout.port = (uint16_t)(qual >> 16);
2306 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2307 if (vmexit->u.inout.string) {
2308 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2309 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2310 vis = &vmexit->u.inout_str;
2311 vmx_paging_info(&vis->paging);
2312 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2313 vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2314 vis->index = inout_str_index(vmx, vcpu, in);
2315 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2316 vis->addrsize = inout_str_addrsize(inst_info);
2317 inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2318 }
2319 break;
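	/*
	 * Decoding sketch for the I/O exit qualification used above:
	 * bits 2:0 are the access size minus one, bit 3 the direction
	 * (1 = in), bit 4 the string flag, bit 5 the REP prefix and bits
	 * 31:16 the port.  An "outsb" to port 0x3f8, for instance, yields
	 * qual = 0x03f80010: one byte, out, string, port 0x3f8.
	 */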
2320 case EXIT_REASON_CPUID:
2321 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2322 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2323 break;
2324 case EXIT_REASON_EXCEPTION:
2325 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2326 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2327 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2328 ("VM exit interruption info invalid: %#x", intr_info));
2329
2330 intr_vec = intr_info & 0xff;
2331 intr_type = intr_info & VMCS_INTR_T_MASK;
2332
2333 /*
2334 * If Virtual NMIs control is 1 and the VM-exit is due to a
2335 * fault encountered during the execution of IRET then we must
2336 * restore the state of "virtual-NMI blocking" before resuming
2337 * the guest.
2338 *
2339 * See "Resuming Guest Software after Handling an Exception".
2340 * See "Information for VM Exits Due to Vectored Events".
2341 */
2342 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2319 (intr_info & 0xff) != IDT_DF &&
2343 (intr_vec != IDT_DF) &&
2344 (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2345 vmx_restore_nmi_blocking(vmx, vcpu);
2346
2347 /*
2348 * The NMI has already been handled in vmx_exit_handle_nmi().
2349 */
2326 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI)
2350 if (intr_type == VMCS_INTR_T_NMI)
2351 return (1);
2328 break;
2352
2353 /*
2354 * Call the machine check handler by hand. Also don't reflect
2355 * the machine check back into the guest.
2356 */
2357 if (intr_vec == IDT_MC) {
2358 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2359 __asm __volatile("int $18");
2360 return (1);
2361 }
2362
2363 if (intr_vec == IDT_PF) {
2364 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2365 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2366 __func__, error));
2367 }
2368
2369 /*
2370 * Software exceptions exhibit trap-like behavior. This in
2371 * turn requires populating the VM-entry instruction length
2372 * so that the %rip in the trap frame is past the INT3/INTO
2373 * instruction.
2374 */
2375 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2376 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2377
2378 /* Reflect all other exceptions back into the guest */
2379 bzero(&vmexc, sizeof(struct vm_exception));
2380 vmexc.vector = intr_vec;
2381 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2382 vmexc.error_code_valid = 1;
2383 vmexc.error_code = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2384 }
2385 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2386 "the guest", vmexc.vector, vmexc.error_code);
2387 error = vm_inject_exception(vmx->vm, vcpu, &vmexc);
2388 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2389 __func__, error));
2390 return (1);
2391
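	/*
	 * Note on the trap-like handling above: writing the VM-entry
	 * instruction length makes the %rip pushed in the guest's
	 * exception frame point past the INT3/INTO, e.g. a one-byte INT3
	 * at 0x1000 reports a saved %rip of 0x1001, just as on bare metal.
	 */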
2329 case EXIT_REASON_EPT_FAULT:
2330 /*
2331 * If 'gpa' lies within the address space allocated to
2332 * memory then this must be a nested page fault otherwise
2333 * this must be an instruction that accesses MMIO space.
2334 */
2335 gpa = vmcs_gpa();
2336 if (vm_mem_allocated(vmx->vm, gpa) ||
2337 apic_access_fault(vmx, vcpu, gpa)) {
2338 vmexit->exitcode = VM_EXITCODE_PAGING;
2339 vmexit->u.paging.gpa = gpa;
2340 vmexit->u.paging.fault_type = ept_fault_type(qual);
2341 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2342 } else if (ept_emulation_fault(qual)) {
2343 vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2344 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2345 }
2346 /*
2347 * If Virtual NMIs control is 1 and the VM-exit is due to an
2348 * EPT fault during the execution of IRET then we must restore
2349 * the state of "virtual-NMI blocking" before resuming.
2350 *
2351 * See description of "NMI unblocking due to IRET" in
2352 * "Exit Qualification for EPT Violations".
2353 */
2354 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2355 (qual & EXIT_QUAL_NMIUDTI) != 0)
2356 vmx_restore_nmi_blocking(vmx, vcpu);
2357 break;
2358 case EXIT_REASON_VIRTUALIZED_EOI:
2359 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2360 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2361 vmexit->inst_length = 0; /* trap-like */
2362 break;
2363 case EXIT_REASON_APIC_ACCESS:
2364 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2365 break;
2366 case EXIT_REASON_APIC_WRITE:
2367 /*
2368 * APIC-write VM exit is trap-like so the %rip is already
2369 * pointing to the next instruction.
2370 */
2371 vmexit->inst_length = 0;
2372 vlapic = vm_lapic(vmx->vm, vcpu);
2373 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2374 break;
2375 case EXIT_REASON_XSETBV:
2376 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2377 break;
2378 case EXIT_REASON_MONITOR:
2379 vmexit->exitcode = VM_EXITCODE_MONITOR;
2380 break;
2381 case EXIT_REASON_MWAIT:
2382 vmexit->exitcode = VM_EXITCODE_MWAIT;
2383 break;
2384 default:
2385 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2386 break;
2387 }
2388
2389 if (handled) {
2390 /*
2391 * It is possible that control is returned to userland
2392 * even though we were able to handle the VM exit in the
2393 * kernel.
2394 *
2395 * In such a case we want to make sure that the userland
2396 * restarts guest execution at the instruction *after*
2397 * the one we just processed. Therefore we update the
2398 * guest rip in the VMCS and in 'vmexit'.
2399 */
2400 vmexit->rip += vmexit->inst_length;
2401 vmexit->inst_length = 0;
2402 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2403 } else {
2404 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2405 /*
2406 * If this VM exit was not claimed by anybody then
2407 * treat it as a generic VMX exit.
2408 */
2409 vmexit->exitcode = VM_EXITCODE_VMX;
2410 vmexit->u.vmx.status = VM_SUCCESS;
2411 vmexit->u.vmx.inst_type = 0;
2412 vmexit->u.vmx.inst_error = 0;
2413 } else {
2414 /*
2415 * The exitcode and collateral have been populated.
2416 * The VM exit will be processed further in userland.
2417 */
2418 }
2419 }
2420 return (handled);
2421}
2422
2423static __inline void
2424vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2425{
2426
2427 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2428 ("vmx_exit_inst_error: invalid inst_fail_status %d",
2429 vmxctx->inst_fail_status));
2430
2431 vmexit->inst_length = 0;
2432 vmexit->exitcode = VM_EXITCODE_VMX;
2433 vmexit->u.vmx.status = vmxctx->inst_fail_status;
2434 vmexit->u.vmx.inst_error = vmcs_instruction_error();
2435 vmexit->u.vmx.exit_reason = ~0;
2436 vmexit->u.vmx.exit_qualification = ~0;
2437
2438 switch (rc) {
2439 case VMX_VMRESUME_ERROR:
2440 case VMX_VMLAUNCH_ERROR:
2441 case VMX_INVEPT_ERROR:
2442 vmexit->u.vmx.inst_type = rc;
2443 break;
2444 default:
2445 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2446 }
2447}
2448
2449/*
2450 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2451 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2452 * sufficient to simply vector to the NMI handler via a software interrupt.
2453 * However, this must be done before maskable interrupts are enabled
2454 * otherwise the "iret" issued by an interrupt handler will incorrectly
2455 * clear NMI blocking.
2456 */
2457static __inline void
2458vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2459{
2460 uint32_t intr_info;
2461
2462 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2463
2464 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2465 return;
2466
2467 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2468 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2469 ("VM exit interruption info invalid: %#x", intr_info));
2470
2471 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2472 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2473 "to NMI has invalid vector: %#x", intr_info));
2474 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2475 __asm __volatile("int $2");
2476 }
2477}
2478
2479static int
2480vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
2481 void *rendezvous_cookie, void *suspend_cookie)
2482{
2483 int rc, handled, launched;
2484 struct vmx *vmx;
2485 struct vm *vm;
2486 struct vmxctx *vmxctx;
2487 struct vmcs *vmcs;
2488 struct vm_exit *vmexit;
2489 struct vlapic *vlapic;
2490 uint64_t rip;
2491 uint32_t exit_reason;
2492
2493 vmx = arg;
2494 vm = vmx->vm;
2495 vmcs = &vmx->vmcs[vcpu];
2496 vmxctx = &vmx->ctx[vcpu];
2497 vlapic = vm_lapic(vm, vcpu);
2498 vmexit = vm_exitinfo(vm, vcpu);
2499 launched = 0;
2500
2501 KASSERT(vmxctx->pmap == pmap,
2502 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2503
2504 vmx_msr_guest_enter(vmx, vcpu);
2505
2506 VMPTRLD(vmcs);
2507
2508 /*
2509 * XXX
2510 * We do this every time because we may set up the virtual machine
2511 * from a different process than the one that actually runs it.
2512 *
2513 * If the life of a virtual machine was spent entirely in the context
2514 * of a single process we could do this once in vmx_vminit().
2515 */
2516 vmcs_write(VMCS_HOST_CR3, rcr3());
2517
2518 vmcs_write(VMCS_GUEST_RIP, startrip);
2519 vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2520 do {
2521 handled = UNHANDLED;
2522
2523 /*
2524 * Interrupts are disabled from this point on until the
2525 * guest starts executing. This is done for the following
2526 * reasons:
2527 *
2528 * If an AST is asserted on this thread after the check below,
2529 * then the IPI_AST notification will not be lost, because it
2530 * will cause a VM exit due to external interrupt as soon as
2531 * the guest state is loaded.
2532 *
2533 * A posted interrupt after 'vmx_inject_interrupts()' will
2534 * not be "lost" because it will be held pending in the host
2535 * APIC because interrupts are disabled. The pending interrupt
2536 * will be recognized as soon as the guest state is loaded.
2537 *
2538 * The same reasoning applies to the IPI generated by
2539 * pmap_invalidate_ept().
2540 */
2541 disable_intr();
2542 vmx_inject_interrupts(vmx, vcpu, vlapic);
2543
2544 /*
2545 * Check for vcpu suspension after injecting events because
2546 * vmx_inject_interrupts() can suspend the vcpu due to a
2547 * triple fault.
2548 */
2549 if (vcpu_suspended(suspend_cookie)) {
2550 enable_intr();
2551 vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
2552 break;
2553 }
2554
2555 if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2556 enable_intr();
2557 vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
2558 break;
2559 }
2560
2561 if (vcpu_should_yield(vm, vcpu)) {
2562 enable_intr();
2563 vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
2564 vmx_astpending_trace(vmx, vcpu, vmexit->rip);
2565 handled = HANDLED;
2566 break;
2567 }
2568
2569 vmx_run_trace(vmx, vcpu);
2570 rc = vmx_enter_guest(vmxctx, vmx, launched);
2571
2572 /* Collect some information for VM exit processing */
2573 vmexit->rip = rip = vmcs_guest_rip();
2574 vmexit->inst_length = vmexit_instruction_length();
2575 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2576 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2577
2578 if (rc == VMX_GUEST_VMEXIT) {
2579 vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2580 enable_intr();
2581 handled = vmx_exit_process(vmx, vcpu, vmexit);
2582 } else {
2583 enable_intr();
2584 vmx_exit_inst_error(vmxctx, rc, vmexit);
2585 }
2586 launched = 1;
2587 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2588 } while (handled);
2589
2590 /*
2591 * If a VM exit has been handled then the exitcode must be BOGUS
2592 * If a VM exit is not handled then the exitcode must not be BOGUS
2593 */
2594 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2595 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2596 panic("Mismatch between handled (%d) and exitcode (%d)",
2597 handled, vmexit->exitcode);
2598 }
2599
2600 if (!handled)
2601 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2602
2603 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2604 vmexit->exitcode);
2605
2606 VMCLEAR(vmcs);
2607 vmx_msr_guest_exit(vmx, vcpu);
2608
2609 return (0);
2610}
2611
2612static void
2613vmx_vmcleanup(void *arg)
2614{
2615 int i;
2616 struct vmx *vmx = arg;
2617
2618 if (apic_access_virtualization(vmx, 0))
2619 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2620
2621 for (i = 0; i < VM_MAXCPU; i++)
2622 vpid_free(vmx->state[i].vpid);
2623
2624 free(vmx, M_VMX);
2625
2626 return;
2627}
2628
2629static register_t *
2630vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2631{
2632
2633 switch (reg) {
2634 case VM_REG_GUEST_RAX:
2635 return (&vmxctx->guest_rax);
2636 case VM_REG_GUEST_RBX:
2637 return (&vmxctx->guest_rbx);
2638 case VM_REG_GUEST_RCX:
2639 return (&vmxctx->guest_rcx);
2640 case VM_REG_GUEST_RDX:
2641 return (&vmxctx->guest_rdx);
2642 case VM_REG_GUEST_RSI:
2643 return (&vmxctx->guest_rsi);
2644 case VM_REG_GUEST_RDI:
2645 return (&vmxctx->guest_rdi);
2646 case VM_REG_GUEST_RBP:
2647 return (&vmxctx->guest_rbp);
2648 case VM_REG_GUEST_R8:
2649 return (&vmxctx->guest_r8);
2650 case VM_REG_GUEST_R9:
2651 return (&vmxctx->guest_r9);
2652 case VM_REG_GUEST_R10:
2653 return (&vmxctx->guest_r10);
2654 case VM_REG_GUEST_R11:
2655 return (&vmxctx->guest_r11);
2656 case VM_REG_GUEST_R12:
2657 return (&vmxctx->guest_r12);
2658 case VM_REG_GUEST_R13:
2659 return (&vmxctx->guest_r13);
2660 case VM_REG_GUEST_R14:
2661 return (&vmxctx->guest_r14);
2662 case VM_REG_GUEST_R15:
2663 return (&vmxctx->guest_r15);
2664 case VM_REG_GUEST_CR2:
2665 return (&vmxctx->guest_cr2);
2666 default:
2667 break;
2668 }
2669 return (NULL);
2670}
2671
2672static int
2673vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2674{
2675 register_t *regp;
2676
2677 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2678 *retval = *regp;
2679 return (0);
2680 } else
2681 return (EINVAL);
2682}
2683
2684static int
2685vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2686{
2687 register_t *regp;
2688
2689 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2690 *regp = val;
2691 return (0);
2692 } else
2693 return (EINVAL);
2694}
2695
2696static int
2697vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2698{
2699 uint64_t gi;
2700 int error;
2701
2702 error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2703 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2704 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2705 return (error);
2706}
2707
2708static int
2709vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2710{
2711 struct vmcs *vmcs;
2712 uint64_t gi;
2713 int error, ident;
2714
2715 /*
2716 * Forcing the vcpu into an interrupt shadow is not supported.
2717 */
2718 if (val) {
2719 error = EINVAL;
2720 goto done;
2721 }
2722
2723 vmcs = &vmx->vmcs[vcpu];
2724 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2725 error = vmcs_getreg(vmcs, running, ident, &gi);
2726 if (error == 0) {
2727 gi &= ~HWINTR_BLOCKING;
2728 error = vmcs_setreg(vmcs, running, ident, gi);
2729 }
2730done:
2731 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2732 error ? "failed" : "succeeded");
2733 return (error);
2734}
2735
2736static int
2737vmx_shadow_reg(int reg)
2738{
2739 int shreg;
2740
2741 shreg = -1;
2742
2743 switch (reg) {
2744 case VM_REG_GUEST_CR0:
2745 shreg = VMCS_CR0_SHADOW;
2746 break;
2747 case VM_REG_GUEST_CR4:
2748 shreg = VMCS_CR4_SHADOW;
2749 break;
2750 default:
2751 break;
2752 }
2753
2754 return (shreg);
2755}
2756
2757static int
2758vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2759{
2760 int running, hostcpu;
2761 struct vmx *vmx = arg;
2762
2763 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2764 if (running && hostcpu != curcpu)
2765 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2766
2767 if (reg == VM_REG_GUEST_INTR_SHADOW)
2768 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2769
2770 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2771 return (0);
2772
2773 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2774}
2775
2776static int
2777vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2778{
2779 int error, hostcpu, running, shadow;
2780 uint64_t ctls;
2781 pmap_t pmap;
2782 struct vmx *vmx = arg;
2783
2784 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2785 if (running && hostcpu != curcpu)
2786 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2787
2788 if (reg == VM_REG_GUEST_INTR_SHADOW)
2789 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2790
2791 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2792 return (0);
2793
2794 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2795
2796 if (error == 0) {
2797 /*
2798 * If the "load EFER" VM-entry control is 1 then the
2799 * value of EFER.LMA must be identical to "IA-32e mode guest"
2800 * bit in the VM-entry control.
2801 */
2802 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2803 (reg == VM_REG_GUEST_EFER)) {
2804 vmcs_getreg(&vmx->vmcs[vcpu], running,
2805 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2806 if (val & EFER_LMA)
2807 ctls |= VM_ENTRY_GUEST_LMA;
2808 else
2809 ctls &= ~VM_ENTRY_GUEST_LMA;
2810 vmcs_setreg(&vmx->vmcs[vcpu], running,
2811 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2812 }
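		/*
		 * Example of the fixup above: a guest write of
		 * EFER = 0xd01 (SCE | LME | LMA | NXE) turns on
		 * VM_ENTRY_GUEST_LMA so that the "IA-32e mode guest"
		 * entry control agrees with EFER.LMA, which the VM-entry
		 * checks require when "load EFER" is enabled.
		 */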
2813
2814 shadow = vmx_shadow_reg(reg);
2815 if (shadow > 0) {
2816 /*
2817 * Store the unmodified value in the shadow
2818 */
2819 error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2820 VMCS_IDENT(shadow), val);
2821 }
2822
2823 if (reg == VM_REG_GUEST_CR3) {
2824 /*
2825 * Invalidate the guest vcpu's TLB mappings to emulate
2826 * the behavior of updating %cr3.
2827 *
2828 * XXX the processor retains global mappings when %cr3
2829 * is updated but vmx_invvpid() does not.
2830 */
2831 pmap = vmx->ctx[vcpu].pmap;
2832 vmx_invvpid(vmx, vcpu, pmap, running);
2833 }
2834 }
2835
2836 return (error);
2837}
2838
2839static int
2840vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2841{
2842 int hostcpu, running;
2843 struct vmx *vmx = arg;
2844
2845 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2846 if (running && hostcpu != curcpu)
2847 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2848
2849 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2850}
2851
2852static int
2853vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2854{
2855 int hostcpu, running;
2856 struct vmx *vmx = arg;
2857
2858 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2859 if (running && hostcpu != curcpu)
2860 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2861
2862 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2863}
2864
2865static int
2866vmx_getcap(void *arg, int vcpu, int type, int *retval)
2867{
2868 struct vmx *vmx = arg;
2869 int vcap;
2870 int ret;
2871
2872 ret = ENOENT;
2873
2874 vcap = vmx->cap[vcpu].set;
2875
2876 switch (type) {
2877 case VM_CAP_HALT_EXIT:
2878 if (cap_halt_exit)
2879 ret = 0;
2880 break;
2881 case VM_CAP_PAUSE_EXIT:
2882 if (cap_pause_exit)
2883 ret = 0;
2884 break;
2885 case VM_CAP_MTRAP_EXIT:
2886 if (cap_monitor_trap)
2887 ret = 0;
2888 break;
2889 case VM_CAP_UNRESTRICTED_GUEST:
2890 if (cap_unrestricted_guest)
2891 ret = 0;
2892 break;
2893 case VM_CAP_ENABLE_INVPCID:
2894 if (cap_invpcid)
2895 ret = 0;
2896 break;
2897 default:
2898 break;
2899 }
2900
2901 if (ret == 0)
2902 *retval = (vcap & (1 << type)) ? 1 : 0;
2903
2904 return (ret);
2905}
2906
2907static int
2908vmx_setcap(void *arg, int vcpu, int type, int val)
2909{
2910 struct vmx *vmx = arg;
2911 struct vmcs *vmcs = &vmx->vmcs[vcpu];
2912 uint32_t baseval;
2913 uint32_t *pptr;
2914 int error;
2915 int flag;
2916 int reg;
2917 int retval;
2918
2919 retval = ENOENT;
2920 pptr = NULL;
2921
2922 switch (type) {
2923 case VM_CAP_HALT_EXIT:
2924 if (cap_halt_exit) {
2925 retval = 0;
2926 pptr = &vmx->cap[vcpu].proc_ctls;
2927 baseval = *pptr;
2928 flag = PROCBASED_HLT_EXITING;
2929 reg = VMCS_PRI_PROC_BASED_CTLS;
2930 }
2931 break;
2932 case VM_CAP_MTRAP_EXIT:
2933 if (cap_monitor_trap) {
2934 retval = 0;
2935 pptr = &vmx->cap[vcpu].proc_ctls;
2936 baseval = *pptr;
2937 flag = PROCBASED_MTF;
2938 reg = VMCS_PRI_PROC_BASED_CTLS;
2939 }
2940 break;
2941 case VM_CAP_PAUSE_EXIT:
2942 if (cap_pause_exit) {
2943 retval = 0;
2944 pptr = &vmx->cap[vcpu].proc_ctls;
2945 baseval = *pptr;
2946 flag = PROCBASED_PAUSE_EXITING;
2947 reg = VMCS_PRI_PROC_BASED_CTLS;
2948 }
2949 break;
2950 case VM_CAP_UNRESTRICTED_GUEST:
2951 if (cap_unrestricted_guest) {
2952 retval = 0;
2953 pptr = &vmx->cap[vcpu].proc_ctls2;
2954 baseval = *pptr;
2955 flag = PROCBASED2_UNRESTRICTED_GUEST;
2956 reg = VMCS_SEC_PROC_BASED_CTLS;
2957 }
2958 break;
2959 case VM_CAP_ENABLE_INVPCID:
2960 if (cap_invpcid) {
2961 retval = 0;
2962 pptr = &vmx->cap[vcpu].proc_ctls2;
2963 baseval = *pptr;
2964 flag = PROCBASED2_ENABLE_INVPCID;
2965 reg = VMCS_SEC_PROC_BASED_CTLS;
2966 }
2967 break;
2968 default:
2969 break;
2970 }
2971
2972 if (retval == 0) {
2973 if (val) {
2974 baseval |= flag;
2975 } else {
2976 baseval &= ~flag;
2977 }
2978 VMPTRLD(vmcs);
2979 error = vmwrite(reg, baseval);
2980 VMCLEAR(vmcs);
2981
2982 if (error) {
2983 retval = error;
2984 } else {
2985 /*
2986 * Update optional stored flags, and record
2987 * setting
2988 */
2989 if (pptr != NULL) {
2990 *pptr = baseval;
2991 }
2992
2993 if (val) {
2994 vmx->cap[vcpu].set |= (1 << type);
2995 } else {
2996 vmx->cap[vcpu].set &= ~(1 << type);
2997 }
2998 }
2999 }
3000
3001 return (retval);
3002}
3003
3004struct vlapic_vtx {
3005 struct vlapic vlapic;
3006 struct pir_desc *pir_desc;
3007 struct vmx *vmx;
3008};
3009
3010#define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \
3011do { \
3012 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \
3013 level ? "level" : "edge", vector); \
3014 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \
3015 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \
3016 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \
3017 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \
3018 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3019} while (0)
3020
3021/*
3022 * vlapic->ops handlers that utilize the APICv hardware assist described in
3023 * Chapter 29 of the Intel SDM.
3024 */
3025static int
3026vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3027{
3028 struct vlapic_vtx *vlapic_vtx;
3029 struct pir_desc *pir_desc;
3030 uint64_t mask;
3031 int idx, notify;
3032
3033 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3034 pir_desc = vlapic_vtx->pir_desc;
3035
3036 /*
3037 * Keep track of interrupt requests in the PIR descriptor. This is
3038 * because the virtual APIC page pointed to by the VMCS cannot be
3039 * modified if the vcpu is running.
3040 */
3041 idx = vector / 64;
3042 mask = 1UL << (vector % 64);
3043 atomic_set_long(&pir_desc->pir[idx], mask);
3044 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3045
3046 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3047 level, "vmx_set_intr_ready");
3048 return (notify);
3049}
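/*
 * Example for the PIR indexing above: vector 0x41 (65) lands in
 * pir_desc->pir[1] with mask 1UL << 1, i.e. bit 65 of the 256-bit
 * posted-interrupt request bitmap, and 'notify' is non-zero only for the
 * interrupt that flips 'pending' from 0 to 1.
 */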
3050
3051static int
3052vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3053{
3054 struct vlapic_vtx *vlapic_vtx;
3055 struct pir_desc *pir_desc;
3056 struct LAPIC *lapic;
3057 uint64_t pending, pirval;
3058 uint32_t ppr, vpr;
3059 int i;
3060
3061 /*
3062 * This function is only expected to be called from the 'HLT' exit
3063 * handler which does not care about the vector that is pending.
3064 */
3065 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3066
3067 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3068 pir_desc = vlapic_vtx->pir_desc;
3069
3070 pending = atomic_load_acq_long(&pir_desc->pending);
3071 if (!pending)
3072 return (0); /* common case */
3073
3074 /*
3075 * If there is an interrupt pending then it will be recognized only
3076 * if its priority is greater than the processor priority.
3077 *
3078 * Special case: if the processor priority is zero then any pending
3079 * interrupt will be recognized.
3080 */
3081 lapic = vlapic->apic_page;
3082 ppr = lapic->ppr & 0xf0;
3083 if (ppr == 0)
3084 return (1);
3085
3086 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3087 lapic->ppr);
3088
3089 for (i = 3; i >= 0; i--) {
3090 pirval = pir_desc->pir[i];
3091 if (pirval != 0) {
3092 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3093 return (vpr > ppr);
3094 }
3095 }
3096 return (0);
3097}
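/*
 * Worked example for the priority check above: with the guest's PPR at
 * 0x30 and vector 0x45 set in pir[1], flsl() locates bit 6, so
 * vpr = (64 + 6 - 1) & 0xf0 = 0x40, which is greater than 0x30 and the
 * pending interrupt is reported; a vector in the 0x30-0x3f priority class
 * would be held off until PPR drops.
 */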
3098
3099static void
3100vmx_intr_accepted(struct vlapic *vlapic, int vector)
3101{
3102
3103 panic("vmx_intr_accepted: not expected to be called");
3104}
3105
3106static void
3107vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3108{
3109 struct vlapic_vtx *vlapic_vtx;
3110 struct vmx *vmx;
3111 struct vmcs *vmcs;
3112 uint64_t mask, val;
3113
3114 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3115 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3116 ("vmx_set_tmr: vcpu cannot be running"));
3117
3118 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3119 vmx = vlapic_vtx->vmx;
3120 vmcs = &vmx->vmcs[vlapic->vcpuid];
3121 mask = 1UL << (vector % 64);
3122
3123 VMPTRLD(vmcs);
3124 val = vmcs_read(VMCS_EOI_EXIT(vector));
3125 if (level)
3126 val |= mask;
3127 else
3128 val &= ~mask;
3129 vmcs_write(VMCS_EOI_EXIT(vector), val);
3130 VMCLEAR(vmcs);
3131}
3132
3133static void
3134vmx_enable_x2apic_mode(struct vlapic *vlapic)
3135{
3136 struct vmx *vmx;
3137 struct vmcs *vmcs;
3138 uint32_t proc_ctls2;
3139 int vcpuid, error;
3140
3141 vcpuid = vlapic->vcpuid;
3142 vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3143 vmcs = &vmx->vmcs[vcpuid];
3144
3145 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3146 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3147 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3148
3149 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3150 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3151 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3152
3153 VMPTRLD(vmcs);
3154 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3155 VMCLEAR(vmcs);
3156
3157 if (vlapic->vcpuid == 0) {
3158 /*
3159 * The nested page table mappings are shared by all vcpus
3160 * so unmap the APIC access page just once.
3161 */
3162 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3163 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3164 __func__, error));
3165
3166 /*
3167 * The MSR bitmap is shared by all vcpus so modify it only
3168 * once in the context of vcpu 0.
3169 */
3170 error = vmx_allow_x2apic_msrs(vmx);
3171 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3172 __func__, error));
3173 }
3174}
3175
3176static void
3177vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3178{
3179
3180 ipi_cpu(hostcpu, pirvec);
3181}
3182
3183/*
3184 * Transfer the pending interrupts in the PIR descriptor to the IRR
3185 * in the virtual APIC page.
3186 */
3187static void
3188vmx_inject_pir(struct vlapic *vlapic)
3189{
3190 struct vlapic_vtx *vlapic_vtx;
3191 struct pir_desc *pir_desc;
3192 struct LAPIC *lapic;
3193 uint64_t val, pirval;
3194 int rvi, pirbase = -1;
3195 uint16_t intr_status_old, intr_status_new;
3196
3197 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3198 pir_desc = vlapic_vtx->pir_desc;
3199 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3200 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3201 "no posted interrupt pending");
3202 return;
3203 }
3204
3205 pirval = 0;
3206 pirbase = -1;
3207 lapic = vlapic->apic_page;
3208
3209 val = atomic_readandclear_long(&pir_desc->pir[0]);
3210 if (val != 0) {
3211 lapic->irr0 |= val;
3212 lapic->irr1 |= val >> 32;
3213 pirbase = 0;
3214 pirval = val;
3215 }
3216
3217 val = atomic_readandclear_long(&pir_desc->pir[1]);
3218 if (val != 0) {
3219 lapic->irr2 |= val;
3220 lapic->irr3 |= val >> 32;
3221 pirbase = 64;
3222 pirval = val;
3223 }
3224
3225 val = atomic_readandclear_long(&pir_desc->pir[2]);
3226 if (val != 0) {
3227 lapic->irr4 |= val;
3228 lapic->irr5 |= val >> 32;
3229 pirbase = 128;
3230 pirval = val;
3231 }
3232
3233 val = atomic_readandclear_long(&pir_desc->pir[3]);
3234 if (val != 0) {
3235 lapic->irr6 |= val;
3236 lapic->irr7 |= val >> 32;
3237 pirbase = 192;
3238 pirval = val;
3239 }
3240
3241 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3242
3243 /*
3244 * Update RVI so the processor can evaluate pending virtual
3245 * interrupts on VM-entry.
3246 *
3247 * It is possible for pirval to be 0 here, even though the
3248 * pending bit has been set. The scenario is:
3249 * CPU-Y is sending a posted interrupt to CPU-X, which
3250 * is running a guest and processing posted interrupts in h/w.
3251 * CPU-X will eventually exit and the state seen in s/w is
3252 * the pending bit set, but no PIR bits set.
3253 *
3254 * CPU-X CPU-Y
3255 * (vm running) (host running)
3256 * rx posted interrupt
3257 * CLEAR pending bit
3258 * SET PIR bit
3259 * READ/CLEAR PIR bits
3260 * SET pending bit
3261 * (vm exit)
3262 * pending bit set, PIR 0
3263 */
3264 if (pirval != 0) {
3265 rvi = pirbase + flsl(pirval) - 1;
3266 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3267 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3268 if (intr_status_new > intr_status_old) {
3269 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3270 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3271 "guest_intr_status changed from 0x%04x to 0x%04x",
3272 intr_status_old, intr_status_new);
3273 }
3274 }
3275}
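/*
 * Illustration of the RVI update above: if the highest vector moved from
 * the PIR into the IRR was 0x91, then rvi = 128 + flsl(pirval) - 1 = 0x91,
 * and the low byte of the guest interrupt status is raised to 0x91 only
 * if that exceeds the RVI already recorded there.
 */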
3276
3277static struct vlapic *
3278vmx_vlapic_init(void *arg, int vcpuid)
3279{
3280 struct vmx *vmx;
3281 struct vlapic *vlapic;
3282 struct vlapic_vtx *vlapic_vtx;
3283
3284 vmx = arg;
3285
3286 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3287 vlapic->vm = vmx->vm;
3288 vlapic->vcpuid = vcpuid;
3289 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3290
3291 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3292 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3293 vlapic_vtx->vmx = vmx;
3294
3295 if (virtual_interrupt_delivery) {
3296 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3297 vlapic->ops.pending_intr = vmx_pending_intr;
3298 vlapic->ops.intr_accepted = vmx_intr_accepted;
3299 vlapic->ops.set_tmr = vmx_set_tmr;
3300 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3301 }
3302
3303 if (posted_interrupts)
3304 vlapic->ops.post_intr = vmx_post_intr;
3305
3306 vlapic_init(vlapic);
3307
3308 return (vlapic);
3309}
3310
3311static void
3312vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3313{
3314
3315 vlapic_cleanup(vlapic);
3316 free(vlapic, M_VLAPIC);
3317}
3318
3319struct vmm_ops vmm_ops_intel = {
3320 vmx_init,
3321 vmx_cleanup,
3322 vmx_restore,
3323 vmx_vminit,
3324 vmx_run,
3325 vmx_vmcleanup,
3326 vmx_getreg,
3327 vmx_setreg,
3328 vmx_getdesc,
3329 vmx_setdesc,
3330 vmx_getcap,
3331 vmx_setcap,
3332 ept_vmspace_alloc,
3333 ept_vmspace_free,
3334 vmx_vlapic_init,
3335 vmx_vlapic_cleanup,
3336};
2392 case EXIT_REASON_EPT_FAULT:
2393 /*
2394 * If 'gpa' lies within the address space allocated to
2395 * memory then this must be a nested page fault otherwise
2396 * this must be an instruction that accesses MMIO space.
2397 */
2398 gpa = vmcs_gpa();
2399 if (vm_mem_allocated(vmx->vm, gpa) ||
2400 apic_access_fault(vmx, vcpu, gpa)) {
2401 vmexit->exitcode = VM_EXITCODE_PAGING;
2402 vmexit->u.paging.gpa = gpa;
2403 vmexit->u.paging.fault_type = ept_fault_type(qual);
2404 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2405 } else if (ept_emulation_fault(qual)) {
2406 vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2407 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2408 }
2409 /*
2410	 * If the "Virtual NMIs" control is 1 and the VM-exit is due to an
2411 * EPT fault during the execution of IRET then we must restore
2412 * the state of "virtual-NMI blocking" before resuming.
2413 *
2414 * See description of "NMI unblocking due to IRET" in
2415 * "Exit Qualification for EPT Violations".
2416 */
2417 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2418 (qual & EXIT_QUAL_NMIUDTI) != 0)
2419 vmx_restore_nmi_blocking(vmx, vcpu);
2420 break;
2421 case EXIT_REASON_VIRTUALIZED_EOI:
2422 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2423 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2424 vmexit->inst_length = 0; /* trap-like */
2425 break;
2426 case EXIT_REASON_APIC_ACCESS:
2427 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2428 break;
2429 case EXIT_REASON_APIC_WRITE:
2430 /*
2431 * APIC-write VM exit is trap-like so the %rip is already
2432 * pointing to the next instruction.
2433 */
2434 vmexit->inst_length = 0;
2435 vlapic = vm_lapic(vmx->vm, vcpu);
2436 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2437 break;
2438 case EXIT_REASON_XSETBV:
2439 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2440 break;
2441 case EXIT_REASON_MONITOR:
2442 vmexit->exitcode = VM_EXITCODE_MONITOR;
2443 break;
2444 case EXIT_REASON_MWAIT:
2445 vmexit->exitcode = VM_EXITCODE_MWAIT;
2446 break;
2447 default:
2448 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2449 break;
2450 }
2451
2452 if (handled) {
2453 /*
2454 * It is possible that control is returned to userland
2455 * even though we were able to handle the VM exit in the
2456 * kernel.
2457 *
2458 * In such a case we want to make sure that the userland
2459 * restarts guest execution at the instruction *after*
2460 * the one we just processed. Therefore we update the
2461 * guest rip in the VMCS and in 'vmexit'.
2462 */
2463 vmexit->rip += vmexit->inst_length;
2464 vmexit->inst_length = 0;
2465 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2466 } else {
2467 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2468 /*
2469 * If this VM exit was not claimed by anybody then
2470 * treat it as a generic VMX exit.
2471 */
2472 vmexit->exitcode = VM_EXITCODE_VMX;
2473 vmexit->u.vmx.status = VM_SUCCESS;
2474 vmexit->u.vmx.inst_type = 0;
2475 vmexit->u.vmx.inst_error = 0;
2476 } else {
2477 /*
2478 * The exitcode and collateral have been populated.
2479 * The VM exit will be processed further in userland.
2480 */
2481 }
2482 }
2483 return (handled);
2484}
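The EPT-fault classification above turns on bits in the VM-exit qualification. Below is a minimal standalone sketch (not part of vmx.c) of decoding the bits used here, assuming the layout described in the Intel SDM's "Exit Qualification for EPT Violations" (bit 0 data read, bit 1 data write, bit 2 instruction fetch, bit 12 NMI unblocking due to IRET); the EPTQ_* names and the sample value are hypothetical, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Assumed bit layout from the Intel SDM; not the kernel's macros. */
#define	EPTQ_READ	(1UL << 0)	/* data read */
#define	EPTQ_WRITE	(1UL << 1)	/* data write */
#define	EPTQ_EXEC	(1UL << 2)	/* instruction fetch */
#define	EPTQ_NMIUDTI	(1UL << 12)	/* NMI unblocking due to IRET */

int
main(void)
{
	uint64_t qual = EPTQ_WRITE | EPTQ_NMIUDTI;	/* hypothetical value */

	printf("read %d write %d ifetch %d nmi-unblocking-due-to-iret %d\n",
	    (qual & EPTQ_READ) != 0, (qual & EPTQ_WRITE) != 0,
	    (qual & EPTQ_EXEC) != 0, (qual & EPTQ_NMIUDTI) != 0);
	return (0);
}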
2485
2486static __inline void
2487vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2488{
2489
2490 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2491 ("vmx_exit_inst_error: invalid inst_fail_status %d",
2492 vmxctx->inst_fail_status));
2493
2494 vmexit->inst_length = 0;
2495 vmexit->exitcode = VM_EXITCODE_VMX;
2496 vmexit->u.vmx.status = vmxctx->inst_fail_status;
2497 vmexit->u.vmx.inst_error = vmcs_instruction_error();
2498 vmexit->u.vmx.exit_reason = ~0;
2499 vmexit->u.vmx.exit_qualification = ~0;
2500
2501 switch (rc) {
2502 case VMX_VMRESUME_ERROR:
2503 case VMX_VMLAUNCH_ERROR:
2504 case VMX_INVEPT_ERROR:
2505 vmexit->u.vmx.inst_type = rc;
2506 break;
2507 default:
2508	panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
2509 }
2510}
2511
2512/*
2513 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2514 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2515 * sufficient to simply vector to the NMI handler via a software interrupt.
2516 * However, this must be done before maskable interrupts are enabled;
2517 * otherwise the "iret" issued by an interrupt handler will incorrectly
2518 * clear NMI blocking.
2519 */
2520static __inline void
2521vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2522{
2523 uint32_t intr_info;
2524
2525 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2526
2527 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2528 return;
2529
2530 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2531 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2532 ("VM exit interruption info invalid: %#x", intr_info));
2533
2534 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2535 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2536 "to NMI has invalid vector: %#x", intr_info));
2537 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2538 __asm __volatile("int $2");
2539 }
2540}
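vmx_exit_handle_nmi() keys off the VM-exit interruption-information field. A small self-contained sketch of how such a field decodes, assuming the SDM layout (vector in bits 7:0, type in bits 10:8 with 2 meaning NMI, valid bit in bit 31); the macro names and the sample value are illustrative, not taken from vmcs.h.

#include <stdio.h>
#include <stdint.h>

/* Assumed field layout from the Intel SDM; not the kernel's macros. */
#define	INTR_VECTOR(x)	((x) & 0xff)		/* bits 7:0 */
#define	INTR_TYPE(x)	(((x) >> 8) & 0x7)	/* bits 10:8, 2 == NMI */
#define	INTR_VALID(x)	(((x) >> 31) & 0x1)	/* bit 31 */

int
main(void)
{
	uint32_t intr_info = 0x80000202;	/* hypothetical: valid, NMI, vector 2 */

	printf("valid %u type %u vector %u\n", INTR_VALID(intr_info),
	    INTR_TYPE(intr_info), INTR_VECTOR(intr_info));
	return (0);
}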
2541
2542static int
2543vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
2544 void *rendezvous_cookie, void *suspend_cookie)
2545{
2546 int rc, handled, launched;
2547 struct vmx *vmx;
2548 struct vm *vm;
2549 struct vmxctx *vmxctx;
2550 struct vmcs *vmcs;
2551 struct vm_exit *vmexit;
2552 struct vlapic *vlapic;
2553 uint64_t rip;
2554 uint32_t exit_reason;
2555
2556 vmx = arg;
2557 vm = vmx->vm;
2558 vmcs = &vmx->vmcs[vcpu];
2559 vmxctx = &vmx->ctx[vcpu];
2560 vlapic = vm_lapic(vm, vcpu);
2561 vmexit = vm_exitinfo(vm, vcpu);
2562 launched = 0;
2563
2564 KASSERT(vmxctx->pmap == pmap,
2565 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2566
2567 vmx_msr_guest_enter(vmx, vcpu);
2568
2569 VMPTRLD(vmcs);
2570
2571 /*
2572 * XXX
2573	 * We do this every time because we may set up the virtual machine
2574 * from a different process than the one that actually runs it.
2575 *
2576 * If the life of a virtual machine was spent entirely in the context
2577 * of a single process we could do this once in vmx_vminit().
2578 */
2579 vmcs_write(VMCS_HOST_CR3, rcr3());
2580
2581 vmcs_write(VMCS_GUEST_RIP, startrip);
2582 vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2583 do {
2584 handled = UNHANDLED;
2585
2586 /*
2587 * Interrupts are disabled from this point on until the
2588 * guest starts executing. This is done for the following
2589 * reasons:
2590 *
2591 * If an AST is asserted on this thread after the check below,
2592 * then the IPI_AST notification will not be lost, because it
2593 * will cause a VM exit due to external interrupt as soon as
2594 * the guest state is loaded.
2595 *
2596 * A posted interrupt after 'vmx_inject_interrupts()' will
2597 * not be "lost" because it will be held pending in the host
2598	 * APIC while interrupts are disabled. The pending interrupt
2599 * will be recognized as soon as the guest state is loaded.
2600 *
2601 * The same reasoning applies to the IPI generated by
2602 * pmap_invalidate_ept().
2603 */
2604 disable_intr();
2605 vmx_inject_interrupts(vmx, vcpu, vlapic);
2606
2607 /*
2608 * Check for vcpu suspension after injecting events because
2609 * vmx_inject_interrupts() can suspend the vcpu due to a
2610 * triple fault.
2611 */
2612 if (vcpu_suspended(suspend_cookie)) {
2613 enable_intr();
2614 vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
2615 break;
2616 }
2617
2618 if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2619 enable_intr();
2620 vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
2621 break;
2622 }
2623
2624 if (vcpu_should_yield(vm, vcpu)) {
2625 enable_intr();
2626 vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
2627 vmx_astpending_trace(vmx, vcpu, vmexit->rip);
2628 handled = HANDLED;
2629 break;
2630 }
2631
2632 vmx_run_trace(vmx, vcpu);
2633 rc = vmx_enter_guest(vmxctx, vmx, launched);
2634
2635 /* Collect some information for VM exit processing */
2636 vmexit->rip = rip = vmcs_guest_rip();
2637 vmexit->inst_length = vmexit_instruction_length();
2638 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2639 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2640
2641 if (rc == VMX_GUEST_VMEXIT) {
2642 vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2643 enable_intr();
2644 handled = vmx_exit_process(vmx, vcpu, vmexit);
2645 } else {
2646 enable_intr();
2647 vmx_exit_inst_error(vmxctx, rc, vmexit);
2648 }
2649 launched = 1;
2650 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2651 } while (handled);
2652
2653 /*
2654	 * If a VM exit has been handled then the exitcode must be BOGUS;
2655	 * if a VM exit is not handled then the exitcode must not be BOGUS.
2656 */
2657 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2658 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2659 panic("Mismatch between handled (%d) and exitcode (%d)",
2660 handled, vmexit->exitcode);
2661 }
2662
2663 if (!handled)
2664 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2665
2666 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2667 vmexit->exitcode);
2668
2669 VMCLEAR(vmcs);
2670 vmx_msr_guest_exit(vmx, vcpu);
2671
2672 return (0);
2673}
2674
2675static void
2676vmx_vmcleanup(void *arg)
2677{
2678 int i;
2679 struct vmx *vmx = arg;
2680
2681 if (apic_access_virtualization(vmx, 0))
2682 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2683
2684 for (i = 0; i < VM_MAXCPU; i++)
2685 vpid_free(vmx->state[i].vpid);
2686
2687 free(vmx, M_VMX);
2688
2689 return;
2690}
2691
2692static register_t *
2693vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2694{
2695
2696 switch (reg) {
2697 case VM_REG_GUEST_RAX:
2698 return (&vmxctx->guest_rax);
2699 case VM_REG_GUEST_RBX:
2700 return (&vmxctx->guest_rbx);
2701 case VM_REG_GUEST_RCX:
2702 return (&vmxctx->guest_rcx);
2703 case VM_REG_GUEST_RDX:
2704 return (&vmxctx->guest_rdx);
2705 case VM_REG_GUEST_RSI:
2706 return (&vmxctx->guest_rsi);
2707 case VM_REG_GUEST_RDI:
2708 return (&vmxctx->guest_rdi);
2709 case VM_REG_GUEST_RBP:
2710 return (&vmxctx->guest_rbp);
2711 case VM_REG_GUEST_R8:
2712 return (&vmxctx->guest_r8);
2713 case VM_REG_GUEST_R9:
2714 return (&vmxctx->guest_r9);
2715 case VM_REG_GUEST_R10:
2716 return (&vmxctx->guest_r10);
2717 case VM_REG_GUEST_R11:
2718 return (&vmxctx->guest_r11);
2719 case VM_REG_GUEST_R12:
2720 return (&vmxctx->guest_r12);
2721 case VM_REG_GUEST_R13:
2722 return (&vmxctx->guest_r13);
2723 case VM_REG_GUEST_R14:
2724 return (&vmxctx->guest_r14);
2725 case VM_REG_GUEST_R15:
2726 return (&vmxctx->guest_r15);
2727 case VM_REG_GUEST_CR2:
2728 return (&vmxctx->guest_cr2);
2729 default:
2730 break;
2731 }
2732 return (NULL);
2733}
2734
2735static int
2736vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2737{
2738 register_t *regp;
2739
2740 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2741 *retval = *regp;
2742 return (0);
2743 } else
2744 return (EINVAL);
2745}
2746
2747static int
2748vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2749{
2750 register_t *regp;
2751
2752 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2753 *regp = val;
2754 return (0);
2755 } else
2756 return (EINVAL);
2757}
2758
2759static int
2760vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2761{
2762 uint64_t gi;
2763 int error;
2764
2765 error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2766 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2767 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2768 return (error);
2769}
2770
2771static int
2772vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2773{
2774 struct vmcs *vmcs;
2775 uint64_t gi;
2776 int error, ident;
2777
2778 /*
2779 * Forcing the vcpu into an interrupt shadow is not supported.
2780 */
2781 if (val) {
2782 error = EINVAL;
2783 goto done;
2784 }
2785
2786 vmcs = &vmx->vmcs[vcpu];
2787 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2788 error = vmcs_getreg(vmcs, running, ident, &gi);
2789 if (error == 0) {
2790 gi &= ~HWINTR_BLOCKING;
2791 error = vmcs_setreg(vmcs, running, ident, gi);
2792 }
2793done:
2794 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2795 error ? "failed" : "succeeded");
2796 return (error);
2797}
2798
2799static int
2800vmx_shadow_reg(int reg)
2801{
2802 int shreg;
2803
2804 shreg = -1;
2805
2806 switch (reg) {
2807 case VM_REG_GUEST_CR0:
2808 shreg = VMCS_CR0_SHADOW;
2809 break;
2810 case VM_REG_GUEST_CR4:
2811 shreg = VMCS_CR4_SHADOW;
2812 break;
2813 default:
2814 break;
2815 }
2816
2817 return (shreg);
2818}
2819
2820static int
2821vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2822{
2823 int running, hostcpu;
2824 struct vmx *vmx = arg;
2825
2826 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2827 if (running && hostcpu != curcpu)
2828 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2829
2830 if (reg == VM_REG_GUEST_INTR_SHADOW)
2831 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2832
2833 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2834 return (0);
2835
2836 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2837}
2838
2839static int
2840vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2841{
2842 int error, hostcpu, running, shadow;
2843 uint64_t ctls;
2844 pmap_t pmap;
2845 struct vmx *vmx = arg;
2846
2847 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2848 if (running && hostcpu != curcpu)
2849 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2850
2851 if (reg == VM_REG_GUEST_INTR_SHADOW)
2852 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2853
2854 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2855 return (0);
2856
2857 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2858
2859 if (error == 0) {
2860 /*
2861		 * If the "load EFER" VM-entry control is 1 then the
2862		 * value of EFER.LMA must be identical to the "IA-32e mode guest"
2863		 * bit in the VM-entry controls.
2864 */
2865 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2866 (reg == VM_REG_GUEST_EFER)) {
2867 vmcs_getreg(&vmx->vmcs[vcpu], running,
2868 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2869 if (val & EFER_LMA)
2870 ctls |= VM_ENTRY_GUEST_LMA;
2871 else
2872 ctls &= ~VM_ENTRY_GUEST_LMA;
2873 vmcs_setreg(&vmx->vmcs[vcpu], running,
2874 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2875 }
2876
2877 shadow = vmx_shadow_reg(reg);
2878 if (shadow > 0) {
2879 /*
2880 * Store the unmodified value in the shadow
2881 */
2882 error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2883 VMCS_IDENT(shadow), val);
2884 }
2885
2886 if (reg == VM_REG_GUEST_CR3) {
2887 /*
2888 * Invalidate the guest vcpu's TLB mappings to emulate
2889 * the behavior of updating %cr3.
2890 *
2891 * XXX the processor retains global mappings when %cr3
2892 * is updated but vmx_invvpid() does not.
2893 */
2894 pmap = vmx->ctx[vcpu].pmap;
2895 vmx_invvpid(vmx, vcpu, pmap, running);
2896 }
2897 }
2898
2899 return (error);
2900}
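The EFER handling in vmx_setreg() keeps EFER.LMA and the "IA-32e mode guest" VM-entry control in lockstep. A standalone sketch of that rule, assuming the SDM bit positions (EFER.LMA is bit 10; "IA-32e mode guest" is bit 9 of the VM-entry controls); the SAMPLE_* names and values are assumptions for illustration, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Assumed bit positions from the Intel SDM; not the kernel's definitions. */
#define	SAMPLE_EFER_LMA		(1UL << 10)	/* EFER.LMA */
#define	SAMPLE_ENTRY_IA32E	(1U << 9)	/* "IA-32e mode guest" entry control */

static uint32_t
sync_entry_ctls(uint32_t ctls, uint64_t efer)
{
	if (efer & SAMPLE_EFER_LMA)
		ctls |= SAMPLE_ENTRY_IA32E;
	else
		ctls &= ~SAMPLE_ENTRY_IA32E;
	return (ctls);
}

int
main(void)
{
	uint32_t ctls = 0;

	ctls = sync_entry_ctls(ctls, SAMPLE_EFER_LMA);	/* guest sets EFER.LMA */
	printf("entry ctls with LMA=1: %#x\n", ctls);
	ctls = sync_entry_ctls(ctls, 0);		/* guest clears EFER.LMA */
	printf("entry ctls with LMA=0: %#x\n", ctls);
	return (0);
}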
2901
2902static int
2903vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2904{
2905 int hostcpu, running;
2906 struct vmx *vmx = arg;
2907
2908 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2909 if (running && hostcpu != curcpu)
2910 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2911
2912 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2913}
2914
2915static int
2916vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2917{
2918 int hostcpu, running;
2919 struct vmx *vmx = arg;
2920
2921 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2922 if (running && hostcpu != curcpu)
2923 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2924
2925 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2926}
2927
2928static int
2929vmx_getcap(void *arg, int vcpu, int type, int *retval)
2930{
2931 struct vmx *vmx = arg;
2932 int vcap;
2933 int ret;
2934
2935 ret = ENOENT;
2936
2937 vcap = vmx->cap[vcpu].set;
2938
2939 switch (type) {
2940 case VM_CAP_HALT_EXIT:
2941 if (cap_halt_exit)
2942 ret = 0;
2943 break;
2944 case VM_CAP_PAUSE_EXIT:
2945 if (cap_pause_exit)
2946 ret = 0;
2947 break;
2948 case VM_CAP_MTRAP_EXIT:
2949 if (cap_monitor_trap)
2950 ret = 0;
2951 break;
2952 case VM_CAP_UNRESTRICTED_GUEST:
2953 if (cap_unrestricted_guest)
2954 ret = 0;
2955 break;
2956 case VM_CAP_ENABLE_INVPCID:
2957 if (cap_invpcid)
2958 ret = 0;
2959 break;
2960 default:
2961 break;
2962 }
2963
2964 if (ret == 0)
2965 *retval = (vcap & (1 << type)) ? 1 : 0;
2966
2967 return (ret);
2968}
2969
2970static int
2971vmx_setcap(void *arg, int vcpu, int type, int val)
2972{
2973 struct vmx *vmx = arg;
2974 struct vmcs *vmcs = &vmx->vmcs[vcpu];
2975 uint32_t baseval;
2976 uint32_t *pptr;
2977 int error;
2978 int flag;
2979 int reg;
2980 int retval;
2981
2982 retval = ENOENT;
2983 pptr = NULL;
2984
2985 switch (type) {
2986 case VM_CAP_HALT_EXIT:
2987 if (cap_halt_exit) {
2988 retval = 0;
2989 pptr = &vmx->cap[vcpu].proc_ctls;
2990 baseval = *pptr;
2991 flag = PROCBASED_HLT_EXITING;
2992 reg = VMCS_PRI_PROC_BASED_CTLS;
2993 }
2994 break;
2995 case VM_CAP_MTRAP_EXIT:
2996 if (cap_monitor_trap) {
2997 retval = 0;
2998 pptr = &vmx->cap[vcpu].proc_ctls;
2999 baseval = *pptr;
3000 flag = PROCBASED_MTF;
3001 reg = VMCS_PRI_PROC_BASED_CTLS;
3002 }
3003 break;
3004 case VM_CAP_PAUSE_EXIT:
3005 if (cap_pause_exit) {
3006 retval = 0;
3007 pptr = &vmx->cap[vcpu].proc_ctls;
3008 baseval = *pptr;
3009 flag = PROCBASED_PAUSE_EXITING;
3010 reg = VMCS_PRI_PROC_BASED_CTLS;
3011 }
3012 break;
3013 case VM_CAP_UNRESTRICTED_GUEST:
3014 if (cap_unrestricted_guest) {
3015 retval = 0;
3016 pptr = &vmx->cap[vcpu].proc_ctls2;
3017 baseval = *pptr;
3018 flag = PROCBASED2_UNRESTRICTED_GUEST;
3019 reg = VMCS_SEC_PROC_BASED_CTLS;
3020 }
3021 break;
3022 case VM_CAP_ENABLE_INVPCID:
3023 if (cap_invpcid) {
3024 retval = 0;
3025 pptr = &vmx->cap[vcpu].proc_ctls2;
3026 baseval = *pptr;
3027 flag = PROCBASED2_ENABLE_INVPCID;
3028 reg = VMCS_SEC_PROC_BASED_CTLS;
3029 }
3030 break;
3031 default:
3032 break;
3033 }
3034
3035 if (retval == 0) {
3036 if (val) {
3037 baseval |= flag;
3038 } else {
3039 baseval &= ~flag;
3040 }
3041 VMPTRLD(vmcs);
3042 error = vmwrite(reg, baseval);
3043 VMCLEAR(vmcs);
3044
3045 if (error) {
3046 retval = error;
3047 } else {
3048 /*
3049			 * Update the optionally stored flags and record
3050			 * the new setting.
3051 */
3052 if (pptr != NULL) {
3053 *pptr = baseval;
3054 }
3055
3056 if (val) {
3057 vmx->cap[vcpu].set |= (1 << type);
3058 } else {
3059 vmx->cap[vcpu].set &= ~(1 << type);
3060 }
3061 }
3062 }
3063
3064 return (retval);
3065}
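vmx_setcap() follows a cache-then-write pattern: flip the flag in the cached control word, push that word to the VMCS, and record the capability in a per-vcpu 'set' bitmask so vmx_getcap() can answer later. A minimal userland sketch of the same bookkeeping; the struct, names, and the assumption that HLT exiting is bit 7 are hypothetical, and the VMCS write itself is elided.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical per-vcpu capability state; bit 7 stands in for HLT exiting. */
#define	SAMPLE_FLAG_HLT_EXITING	(1U << 7)

struct sample_cap {
	uint32_t proc_ctls;	/* cached copy of a VMCS control field */
	uint32_t set;		/* bitmask of capabilities turned on */
};

static void
sample_setcap(struct sample_cap *cap, int type, uint32_t flag, int val)
{
	if (val)
		cap->proc_ctls |= flag;
	else
		cap->proc_ctls &= ~flag;
	/* the kernel writes cap->proc_ctls to the VMCS at this point */
	if (val)
		cap->set |= (1 << type);
	else
		cap->set &= ~(1 << type);
}

int
main(void)
{
	struct sample_cap cap = { 0, 0 };

	sample_setcap(&cap, 0, SAMPLE_FLAG_HLT_EXITING, 1);
	printf("ctls %#x set %#x\n", cap.proc_ctls, cap.set);
	sample_setcap(&cap, 0, SAMPLE_FLAG_HLT_EXITING, 0);
	printf("ctls %#x set %#x\n", cap.proc_ctls, cap.set);
	return (0);
}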
3066
3067struct vlapic_vtx {
3068 struct vlapic vlapic;
3069 struct pir_desc *pir_desc;
3070 struct vmx *vmx;
3071};
3072
3073#define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \
3074do { \
3075 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \
3076 level ? "level" : "edge", vector); \
3077 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \
3078 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \
3079 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \
3080 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \
3081 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3082} while (0)
3083
3084/*
3085 * vlapic->ops handlers that utilize the APICv hardware assist described in
3086 * Chapter 29 of the Intel SDM.
3087 */
3088static int
3089vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3090{
3091 struct vlapic_vtx *vlapic_vtx;
3092 struct pir_desc *pir_desc;
3093 uint64_t mask;
3094 int idx, notify;
3095
3096 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3097 pir_desc = vlapic_vtx->pir_desc;
3098
3099 /*
3100 * Keep track of interrupt requests in the PIR descriptor. This is
3101 * because the virtual APIC page pointed to by the VMCS cannot be
3102 * modified if the vcpu is running.
3103 */
3104 idx = vector / 64;
3105 mask = 1UL << (vector % 64);
3106 atomic_set_long(&pir_desc->pir[idx], mask);
3107 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3108
3109 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3110 level, "vmx_set_intr_ready");
3111 return (notify);
3112}
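vmx_set_intr_ready() records a vector in the 256-bit PIR as a (64-bit word, bit) pair. A small standalone sketch of that mapping with hypothetical vectors; the real code uses atomic_set_long() and atomic_cmpset_long() rather than the plain stores shown here.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t pir[4] = { 0, 0, 0, 0 };	/* 256-bit PIR, as in struct pir_desc */
	int vectors[] = { 32, 65, 255 };	/* hypothetical vectors */
	unsigned i;

	for (i = 0; i < sizeof(vectors) / sizeof(vectors[0]); i++) {
		int vector = vectors[i];
		int idx = vector / 64;				/* which 64-bit word */
		uint64_t mask = (uint64_t)1 << (vector % 64);	/* bit in that word */

		pir[idx] |= mask;	/* the kernel uses atomic_set_long() */
		printf("vector %3d -> pir[%d] bit %2d\n", vector, idx, vector % 64);
	}
	return (0);
}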
3113
3114static int
3115vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3116{
3117 struct vlapic_vtx *vlapic_vtx;
3118 struct pir_desc *pir_desc;
3119 struct LAPIC *lapic;
3120 uint64_t pending, pirval;
3121 uint32_t ppr, vpr;
3122 int i;
3123
3124 /*
3125 * This function is only expected to be called from the 'HLT' exit
3126 * handler which does not care about the vector that is pending.
3127 */
3128 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3129
3130 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3131 pir_desc = vlapic_vtx->pir_desc;
3132
3133 pending = atomic_load_acq_long(&pir_desc->pending);
3134 if (!pending)
3135 return (0); /* common case */
3136
3137 /*
3138 * If there is an interrupt pending then it will be recognized only
3139 * if its priority is greater than the processor priority.
3140 *
3141 * Special case: if the processor priority is zero then any pending
3142 * interrupt will be recognized.
3143 */
3144 lapic = vlapic->apic_page;
3145 ppr = lapic->ppr & 0xf0;
3146 if (ppr == 0)
3147 return (1);
3148
3149 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3150 lapic->ppr);
3151
3152 for (i = 3; i >= 0; i--) {
3153 pirval = pir_desc->pir[i];
3154 if (pirval != 0) {
3155 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3156 return (vpr > ppr);
3157 }
3158 }
3159 return (0);
3160}
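vmx_pending_intr() converts the highest set PIR bit back into a vector number (i * 64 + flsl(pirval) - 1) and compares its priority class with the PPR. A self-contained sketch of that computation; msb64() is a local stand-in for the kernel's flsl(), and the pending vector and PPR values are hypothetical.

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's flsl(): 1-based index of the MSB, 0 if none. */
static int
msb64(uint64_t v)
{
	int bit = 0;

	while (v != 0) {
		bit++;
		v >>= 1;
	}
	return (bit);
}

int
main(void)
{
	uint64_t pir[4] = { 0, 0, (uint64_t)1 << 33, 0 };	/* vector 161 pending */
	uint32_t ppr = 0x90;					/* priority class 9 */
	int i;

	for (i = 3; i >= 0; i--) {
		if (pir[i] != 0) {
			uint32_t vpr = (i * 64 + msb64(pir[i]) - 1) & 0xf0;

			printf("vpr %#x ppr %#x -> %s\n", vpr, ppr,
			    vpr > ppr ? "recognized" : "masked by ppr");
			break;
		}
	}
	return (0);
}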
3161
3162static void
3163vmx_intr_accepted(struct vlapic *vlapic, int vector)
3164{
3165
3166 panic("vmx_intr_accepted: not expected to be called");
3167}
3168
3169static void
3170vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3171{
3172 struct vlapic_vtx *vlapic_vtx;
3173 struct vmx *vmx;
3174 struct vmcs *vmcs;
3175 uint64_t mask, val;
3176
3177 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3178 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3179 ("vmx_set_tmr: vcpu cannot be running"));
3180
3181 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3182 vmx = vlapic_vtx->vmx;
3183 vmcs = &vmx->vmcs[vlapic->vcpuid];
3184 mask = 1UL << (vector % 64);
3185
3186 VMPTRLD(vmcs);
3187 val = vmcs_read(VMCS_EOI_EXIT(vector));
3188 if (level)
3189 val |= mask;
3190 else
3191 val &= ~mask;
3192 vmcs_write(VMCS_EOI_EXIT(vector), val);
3193 VMCLEAR(vmcs);
3194}
3195
3196static void
3197vmx_enable_x2apic_mode(struct vlapic *vlapic)
3198{
3199 struct vmx *vmx;
3200 struct vmcs *vmcs;
3201 uint32_t proc_ctls2;
3202 int vcpuid, error;
3203
3204 vcpuid = vlapic->vcpuid;
3205 vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3206 vmcs = &vmx->vmcs[vcpuid];
3207
3208 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3209 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3210 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3211
3212 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3213 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3214 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3215
3216 VMPTRLD(vmcs);
3217 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3218 VMCLEAR(vmcs);
3219
3220 if (vlapic->vcpuid == 0) {
3221 /*
3222 * The nested page table mappings are shared by all vcpus
3223 * so unmap the APIC access page just once.
3224 */
3225 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3226 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3227 __func__, error));
3228
3229 /*
3230 * The MSR bitmap is shared by all vcpus so modify it only
3231 * once in the context of vcpu 0.
3232 */
3233 error = vmx_allow_x2apic_msrs(vmx);
3234 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3235 __func__, error));
3236 }
3237}
3238
3239static void
3240vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3241{
3242
3243 ipi_cpu(hostcpu, pirvec);
3244}
3245
3246/*
3247 * Transfer the pending interrupts in the PIR descriptor to the IRR
3248 * in the virtual APIC page.
3249 */
3250static void
3251vmx_inject_pir(struct vlapic *vlapic)
3252{
3253 struct vlapic_vtx *vlapic_vtx;
3254 struct pir_desc *pir_desc;
3255 struct LAPIC *lapic;
3256 uint64_t val, pirval;
3257 int rvi, pirbase = -1;
3258 uint16_t intr_status_old, intr_status_new;
3259
3260 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3261 pir_desc = vlapic_vtx->pir_desc;
3262 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3263 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3264 "no posted interrupt pending");
3265 return;
3266 }
3267
3268 pirval = 0;
3269 pirbase = -1;
3270 lapic = vlapic->apic_page;
3271
3272 val = atomic_readandclear_long(&pir_desc->pir[0]);
3273 if (val != 0) {
3274 lapic->irr0 |= val;
3275 lapic->irr1 |= val >> 32;
3276 pirbase = 0;
3277 pirval = val;
3278 }
3279
3280 val = atomic_readandclear_long(&pir_desc->pir[1]);
3281 if (val != 0) {
3282 lapic->irr2 |= val;
3283 lapic->irr3 |= val >> 32;
3284 pirbase = 64;
3285 pirval = val;
3286 }
3287
3288 val = atomic_readandclear_long(&pir_desc->pir[2]);
3289 if (val != 0) {
3290 lapic->irr4 |= val;
3291 lapic->irr5 |= val >> 32;
3292 pirbase = 128;
3293 pirval = val;
3294 }
3295
3296 val = atomic_readandclear_long(&pir_desc->pir[3]);
3297 if (val != 0) {
3298 lapic->irr6 |= val;
3299 lapic->irr7 |= val >> 32;
3300 pirbase = 192;
3301 pirval = val;
3302 }
3303
3304 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3305
3306 /*
3307 * Update RVI so the processor can evaluate pending virtual
3308 * interrupts on VM-entry.
3309 *
3310 * It is possible for pirval to be 0 here, even though the
3311 * pending bit has been set. The scenario is:
3312 * CPU-Y is sending a posted interrupt to CPU-X, which
3313 * is running a guest and processing posted interrupts in h/w.
3314 * CPU-X will eventually exit and the state seen in s/w is
3315 * the pending bit set, but no PIR bits set.
3316 *
3317 * CPU-X CPU-Y
3318 * (vm running) (host running)
3319 * rx posted interrupt
3320 * CLEAR pending bit
3321 * SET PIR bit
3322 * READ/CLEAR PIR bits
3323 * SET pending bit
3324 * (vm exit)
3325 * pending bit set, PIR 0
3326 */
3327 if (pirval != 0) {
3328 rvi = pirbase + flsl(pirval) - 1;
3329 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3330 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3331 if (intr_status_new > intr_status_old) {
3332 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3333 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3334 "guest_intr_status changed from 0x%04x to 0x%04x",
3335 intr_status_old, intr_status_new);
3336 }
3337 }
3338}
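vmx_inject_pir() folds the highest pending vector into the low byte (RVI) of the guest interrupt-status field, leaves the high byte (SVI) untouched, and only ever raises RVI. A standalone sketch of that update, assuming the SDM layout of the field (RVI in bits 7:0, SVI in bits 15:8); all of the values used here are hypothetical.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int pirbase = 64;	/* hypothetical: the pending bit lives in pir[1] */
	int msb = 46;		/* flsl() of that word: bit 45 set, 1-based index 46 */
	uint16_t intr_status_old = 0x2f30;	/* hypothetical: SVI 0x2f, RVI 0x30 */
	uint16_t intr_status_new;
	int rvi;

	rvi = pirbase + msb - 1;		/* highest pending vector, 109 */
	intr_status_new = (intr_status_old & 0xFF00) | rvi;
	if (intr_status_new > intr_status_old)
		printf("RVI raised from 0x%02x to 0x%02x, SVI preserved (0x%02x)\n",
		    (unsigned)(intr_status_old & 0xff), (unsigned)rvi,
		    (unsigned)(intr_status_new >> 8));
	return (0);
}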
3339
3340static struct vlapic *
3341vmx_vlapic_init(void *arg, int vcpuid)
3342{
3343 struct vmx *vmx;
3344 struct vlapic *vlapic;
3345 struct vlapic_vtx *vlapic_vtx;
3346
3347 vmx = arg;
3348
3349 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3350 vlapic->vm = vmx->vm;
3351 vlapic->vcpuid = vcpuid;
3352 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3353
3354 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3355 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3356 vlapic_vtx->vmx = vmx;
3357
3358 if (virtual_interrupt_delivery) {
3359 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3360 vlapic->ops.pending_intr = vmx_pending_intr;
3361 vlapic->ops.intr_accepted = vmx_intr_accepted;
3362 vlapic->ops.set_tmr = vmx_set_tmr;
3363 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3364 }
3365
3366 if (posted_interrupts)
3367 vlapic->ops.post_intr = vmx_post_intr;
3368
3369 vlapic_init(vlapic);
3370
3371 return (vlapic);
3372}
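vmx_vlapic_init() allocates a struct vlapic_vtx but hands back a pointer to its embedded struct vlapic; because the vlapic is the first member, the ops handlers above can cast that pointer back to the container. A minimal sketch of the embedding idiom with hypothetical types standing in for struct vlapic and struct vlapic_vtx.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct vlapic / struct vlapic_vtx. */
struct base_lapic {
	int vcpuid;
};

struct vtx_lapic {
	struct base_lapic base;		/* must be the first member */
	int vtx_private;
};

int
main(void)
{
	struct vtx_lapic *vtx;
	struct base_lapic *generic;

	vtx = calloc(1, sizeof(*vtx));
	if (vtx == NULL)
		return (1);
	vtx->base.vcpuid = 3;
	vtx->vtx_private = 42;

	generic = &vtx->base;			/* what the callers hold */
	vtx = (struct vtx_lapic *)generic;	/* what the ops handlers recover */
	printf("vcpuid %d private %d\n", vtx->base.vcpuid, vtx->vtx_private);
	free(vtx);
	return (0);
}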
3373
3374static void
3375vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3376{
3377
3378 vlapic_cleanup(vlapic);
3379 free(vlapic, M_VLAPIC);
3380}
3381
3382struct vmm_ops vmm_ops_intel = {
3383 vmx_init,
3384 vmx_cleanup,
3385 vmx_restore,
3386 vmx_vminit,
3387 vmx_run,
3388 vmx_vmcleanup,
3389 vmx_getreg,
3390 vmx_setreg,
3391 vmx_getdesc,
3392 vmx_setdesc,
3393 vmx_getcap,
3394 vmx_setcap,
3395 ept_vmspace_alloc,
3396 ept_vmspace_free,
3397 vmx_vlapic_init,
3398 vmx_vlapic_cleanup,
3399};
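vmm_ops_intel is a positionally initialized ops table that the machine-independent vmm code dispatches through. A small sketch of the same pattern written with C99 designated initializers, which bind each handler to its slot by name rather than by position; the struct and field names below are illustrative, not the actual struct vmm_ops layout.

#include <stdio.h>

/* Illustrative ops table; the field names are not the real struct vmm_ops. */
struct sample_ops {
	int	(*init)(void);
	void	(*cleanup)(void);
};

static int
sample_init(void)
{
	printf("init\n");
	return (0);
}

static void
sample_cleanup(void)
{
	printf("cleanup\n");
}

/* Designated initializers bind each handler by name, not by position. */
static const struct sample_ops sample_ops_intel = {
	.init = sample_init,
	.cleanup = sample_cleanup,
};

int
main(void)
{
	sample_ops_intel.init();	/* callers dispatch through the table */
	sample_ops_intel.cleanup();
	return (0);
}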