vmx.c revision 284174
1/*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 284174 2015-06-09 00:14:47Z tychon $
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 284174 2015-06-09 00:14:47Z tychon $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/smp.h>
35#include <sys/kernel.h>
36#include <sys/malloc.h>
37#include <sys/pcpu.h>
38#include <sys/proc.h>
39#include <sys/sysctl.h>
40
41#include <vm/vm.h>
42#include <vm/pmap.h>
43
44#include <machine/psl.h>
45#include <machine/cpufunc.h>
46#include <machine/md_var.h>
47#include <machine/segments.h>
48#include <machine/smp.h>
49#include <machine/specialreg.h>
50#include <machine/vmparam.h>
51
52#include <machine/vmm.h>
53#include <machine/vmm_dev.h>
54#include <machine/vmm_instruction_emul.h>
55#include "vmm_lapic.h"
56#include "vmm_host.h"
57#include "vmm_ioport.h"
58#include "vmm_ktr.h"
59#include "vmm_stat.h"
60#include "vatpic.h"
61#include "vlapic.h"
62#include "vlapic_priv.h"
63
64#include "ept.h"
65#include "vmx_cpufunc.h"
66#include "vmx.h"
67#include "vmx_msr.h"
68#include "x86.h"
69#include "vmx_controls.h"
70
71#define	PINBASED_CTLS_ONE_SETTING					\
72	(PINBASED_EXTINT_EXITING	|				\
73	 PINBASED_NMI_EXITING		|				\
74	 PINBASED_VIRTUAL_NMI)
75#define	PINBASED_CTLS_ZERO_SETTING	0
76
77#define PROCBASED_CTLS_WINDOW_SETTING					\
78	(PROCBASED_INT_WINDOW_EXITING	|				\
79	 PROCBASED_NMI_WINDOW_EXITING)
80
81#define	PROCBASED_CTLS_ONE_SETTING 					\
82	(PROCBASED_SECONDARY_CONTROLS	|				\
83	 PROCBASED_MWAIT_EXITING	|				\
84	 PROCBASED_MONITOR_EXITING	|				\
85	 PROCBASED_IO_EXITING		|				\
86	 PROCBASED_MSR_BITMAPS		|				\
87	 PROCBASED_CTLS_WINDOW_SETTING	|				\
88	 PROCBASED_CR8_LOAD_EXITING	|				\
89	 PROCBASED_CR8_STORE_EXITING)
90#define	PROCBASED_CTLS_ZERO_SETTING	\
91	(PROCBASED_CR3_LOAD_EXITING |	\
92	PROCBASED_CR3_STORE_EXITING |	\
93	PROCBASED_IO_BITMAPS)
94
95#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
96#define	PROCBASED_CTLS2_ZERO_SETTING	0
97
98#define	VM_EXIT_CTLS_ONE_SETTING					\
99	(VM_EXIT_HOST_LMA			|			\
100	VM_EXIT_SAVE_EFER			|			\
101	VM_EXIT_LOAD_EFER			|			\
102	VM_EXIT_ACKNOWLEDGE_INTERRUPT)
103
104#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS
105
106#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER)
107
108#define	VM_ENTRY_CTLS_ZERO_SETTING					\
109	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
110	VM_ENTRY_INTO_SMM			|			\
111	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
112
113#define	HANDLED		1
114#define	UNHANDLED	0
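
/*
 * Illustrative sketch (not part of the original file): the *_ONE_SETTING
 * and *_ZERO_SETTING requests above are validated against the VMX
 * capability MSRs, whose low 32 bits report the allowed 0-settings (a 1
 * there means the control must be 1) and whose high 32 bits report the
 * allowed 1-settings (a 0 there means the control must be 0).  The real
 * work is done by vmx_set_ctlreg(); this helper only models the check.
 */
static __inline int
vmx_ctls_allowed(uint64_t cap_msr, uint32_t ones, uint32_t zeros)
{
	uint32_t allowed0, allowed1;

	allowed0 = (uint32_t)cap_msr;		/* bits that must be 1 */
	allowed1 = (uint32_t)(cap_msr >> 32);	/* bits that may be 1 */

	if ((ones & allowed1) != ones)
		return (0);	/* a requested one-setting is not supported */
	if ((zeros & allowed0) != 0)
		return (0);	/* a requested zero-setting must be 1 */
	return (1);
}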
115
116static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
117static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
118
119SYSCTL_DECL(_hw_vmm);
120SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
121
122int vmxon_enabled[MAXCPU];
123static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
124
125static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
126static uint32_t exit_ctls, entry_ctls;
127
128static uint64_t cr0_ones_mask, cr0_zeros_mask;
129SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
130	     &cr0_ones_mask, 0, NULL);
131SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
132	     &cr0_zeros_mask, 0, NULL);
133
134static uint64_t cr4_ones_mask, cr4_zeros_mask;
135SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
136	     &cr4_ones_mask, 0, NULL);
137SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
138	     &cr4_zeros_mask, 0, NULL);
139
140static int vmx_initialized;
141SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
142	   &vmx_initialized, 0, "Intel VMX initialized");
143
144/*
145 * Optional capabilities
146 */
147static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
148
149static int cap_halt_exit;
150SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
151    "HLT triggers a VM-exit");
152
153static int cap_pause_exit;
154SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
155    0, "PAUSE triggers a VM-exit");
156
157static int cap_unrestricted_guest;
158SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
159    &cap_unrestricted_guest, 0, "Unrestricted guests");
160
161static int cap_monitor_trap;
162SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
163    &cap_monitor_trap, 0, "Monitor trap flag");
164
165static int cap_invpcid;
166SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
167    0, "Guests are allowed to use INVPCID");
168
169static int virtual_interrupt_delivery;
170SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
171    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
172
173static int posted_interrupts;
174SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
175    &posted_interrupts, 0, "APICv posted interrupt support");
176
177static int pirvec = -1;
178SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
179    &pirvec, 0, "APICv posted interrupt vector");
180
181static struct unrhdr *vpid_unr;
182static u_int vpid_alloc_failed;
183SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
184	    &vpid_alloc_failed, 0, NULL);
185
186/*
187 * Use the last page below 4GB as the APIC access address. This address is
188 * occupied by the boot firmware so it is guaranteed that it will not conflict
189 * with a page in system memory.
190 */
191#define	APIC_ACCESS_ADDRESS	0xFFFFF000
192
193static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
194static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
195static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
196static void vmx_inject_pir(struct vlapic *vlapic);
197
198#ifdef KTR
199static const char *
200exit_reason_to_str(int reason)
201{
202	static char reasonbuf[32];
203
204	switch (reason) {
205	case EXIT_REASON_EXCEPTION:
206		return "exception";
207	case EXIT_REASON_EXT_INTR:
208		return "extint";
209	case EXIT_REASON_TRIPLE_FAULT:
210		return "triplefault";
211	case EXIT_REASON_INIT:
212		return "init";
213	case EXIT_REASON_SIPI:
214		return "sipi";
215	case EXIT_REASON_IO_SMI:
216		return "iosmi";
217	case EXIT_REASON_SMI:
218		return "smi";
219	case EXIT_REASON_INTR_WINDOW:
220		return "intrwindow";
221	case EXIT_REASON_NMI_WINDOW:
222		return "nmiwindow";
223	case EXIT_REASON_TASK_SWITCH:
224		return "taskswitch";
225	case EXIT_REASON_CPUID:
226		return "cpuid";
227	case EXIT_REASON_GETSEC:
228		return "getsec";
229	case EXIT_REASON_HLT:
230		return "hlt";
231	case EXIT_REASON_INVD:
232		return "invd";
233	case EXIT_REASON_INVLPG:
234		return "invlpg";
235	case EXIT_REASON_RDPMC:
236		return "rdpmc";
237	case EXIT_REASON_RDTSC:
238		return "rdtsc";
239	case EXIT_REASON_RSM:
240		return "rsm";
241	case EXIT_REASON_VMCALL:
242		return "vmcall";
243	case EXIT_REASON_VMCLEAR:
244		return "vmclear";
245	case EXIT_REASON_VMLAUNCH:
246		return "vmlaunch";
247	case EXIT_REASON_VMPTRLD:
248		return "vmptrld";
249	case EXIT_REASON_VMPTRST:
250		return "vmptrst";
251	case EXIT_REASON_VMREAD:
252		return "vmread";
253	case EXIT_REASON_VMRESUME:
254		return "vmresume";
255	case EXIT_REASON_VMWRITE:
256		return "vmwrite";
257	case EXIT_REASON_VMXOFF:
258		return "vmxoff";
259	case EXIT_REASON_VMXON:
260		return "vmxon";
261	case EXIT_REASON_CR_ACCESS:
262		return "craccess";
263	case EXIT_REASON_DR_ACCESS:
264		return "draccess";
265	case EXIT_REASON_INOUT:
266		return "inout";
267	case EXIT_REASON_RDMSR:
268		return "rdmsr";
269	case EXIT_REASON_WRMSR:
270		return "wrmsr";
271	case EXIT_REASON_INVAL_VMCS:
272		return "invalvmcs";
273	case EXIT_REASON_INVAL_MSR:
274		return "invalmsr";
275	case EXIT_REASON_MWAIT:
276		return "mwait";
277	case EXIT_REASON_MTF:
278		return "mtf";
279	case EXIT_REASON_MONITOR:
280		return "monitor";
281	case EXIT_REASON_PAUSE:
282		return "pause";
283	case EXIT_REASON_MCE_DURING_ENTRY:
284		return "mce-during-entry";
285	case EXIT_REASON_TPR:
286		return "tpr";
287	case EXIT_REASON_APIC_ACCESS:
288		return "apic-access";
289	case EXIT_REASON_GDTR_IDTR:
290		return "gdtridtr";
291	case EXIT_REASON_LDTR_TR:
292		return "ldtrtr";
293	case EXIT_REASON_EPT_FAULT:
294		return "eptfault";
295	case EXIT_REASON_EPT_MISCONFIG:
296		return "eptmisconfig";
297	case EXIT_REASON_INVEPT:
298		return "invept";
299	case EXIT_REASON_RDTSCP:
300		return "rdtscp";
301	case EXIT_REASON_VMX_PREEMPT:
302		return "vmxpreempt";
303	case EXIT_REASON_INVVPID:
304		return "invvpid";
305	case EXIT_REASON_WBINVD:
306		return "wbinvd";
307	case EXIT_REASON_XSETBV:
308		return "xsetbv";
309	case EXIT_REASON_APIC_WRITE:
310		return "apic-write";
311	default:
312		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
313		return (reasonbuf);
314	}
315}
316#endif	/* KTR */
317
318static int
319vmx_allow_x2apic_msrs(struct vmx *vmx)
320{
321	int i, error;
322
323	error = 0;
324
325	/*
326	 * Allow read-only access to the following x2APIC MSRs from the guest.
327	 */
328	error += guest_msr_ro(vmx, MSR_APIC_ID);
329	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
330	error += guest_msr_ro(vmx, MSR_APIC_LDR);
331	error += guest_msr_ro(vmx, MSR_APIC_SVR);
332
333	for (i = 0; i < 8; i++)
334		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
335
336	for (i = 0; i < 8; i++)
337		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
338
339	for (i = 0; i < 8; i++)
340		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
341
342	error += guest_msr_ro(vmx, MSR_APIC_ESR);
343	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
344	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
345	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
346	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
347	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
348	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
349	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
350	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
351	error += guest_msr_ro(vmx, MSR_APIC_ICR);
352
353	/*
354	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
355	 *
356	 * These registers get special treatment described in the section
357	 * "Virtualizing MSR-Based APIC Accesses".
358	 */
359	error += guest_msr_rw(vmx, MSR_APIC_TPR);
360	error += guest_msr_rw(vmx, MSR_APIC_EOI);
361	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
362
363	return (error);
364}
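
/*
 * Illustrative sketch (helper not present in the original file): each
 * x2APIC MSR given to the guest above corresponds to a xAPIC register at
 * MMIO offset (msr - 0x800) * 16, e.g. MSR_APIC_TPR (0x808) maps to the
 * TPR register at APIC offset 0x80.  This only models the SDM's
 * MSR-based APIC access mapping; it is not code used by the driver.
 */
static __inline uint32_t
x2apic_msr_to_apic_offset(uint32_t msr)
{

	return ((msr - 0x800) << 4);
}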
365
366u_long
367vmx_fix_cr0(u_long cr0)
368{
369
370	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
371}
372
373u_long
374vmx_fix_cr4(u_long cr4)
375{
376
377	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
378}
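
/*
 * Worked example (hypothetical MSR values, for illustration only): if
 * MSR_VMX_CR0_FIXED0 reported 0x80000021 and MSR_VMX_CR0_FIXED1 reported
 * 0xffffffff, then vmx_init() below would compute
 * cr0_ones_mask = 0x80000021 (PE, NE and PG forced on) and
 * cr0_zeros_mask = 0, so vmx_fix_cr0(cr0) == (cr0 | 0x80000021).  These
 * masks constrain any CR0/CR4 value used while VMX operation is enabled.
 */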
379
380static void
381vpid_free(int vpid)
382{
383	if (vpid < 0 || vpid > 0xffff)
384		panic("vpid_free: invalid vpid %d", vpid);
385
386	/*
387	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
388	 * the unit number allocator.
389	 */
390
391	if (vpid > VM_MAXCPU)
392		free_unr(vpid_unr, vpid);
393}
394
395static void
396vpid_alloc(uint16_t *vpid, int num)
397{
398	int i, x;
399
400	if (num <= 0 || num > VM_MAXCPU)
401		panic("invalid number of vpids requested: %d", num);
402
403	/*
404	 * If the "enable vpid" execution control is not enabled then the
405	 * VPID is required to be 0 for all vcpus.
406	 */
407	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
408		for (i = 0; i < num; i++)
409			vpid[i] = 0;
410		return;
411	}
412
413	/*
414	 * Allocate a unique VPID for each vcpu from the unit number allocator.
415	 */
416	for (i = 0; i < num; i++) {
417		x = alloc_unr(vpid_unr);
418		if (x == -1)
419			break;
420		else
421			vpid[i] = x;
422	}
423
424	if (i < num) {
425		atomic_add_int(&vpid_alloc_failed, 1);
426
427		/*
428		 * If the unit number allocator does not have enough unique
429		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
430		 *
431		 * These VPIDs are not guaranteed to be unique across VMs but this
432		 * does not affect correctness because the combined mappings are
433		 * also tagged with the EP4TA which is unique for each VM.
434		 *
435		 * It is still sub-optimal because the invvpid will invalidate
436		 * combined mappings for a particular VPID across all EP4TAs.
437		 */
438		while (i-- > 0)
439			vpid_free(vpid[i]);
440
441		for (i = 0; i < num; i++)
442			vpid[i] = i + 1;
443	}
444}
445
446static void
447vpid_init(void)
448{
449	/*
450	 * VPID 0 is required when the "enable VPID" execution control is
451	 * disabled.
452	 *
453	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
454	 * unit number allocator does not have sufficient unique VPIDs to
455	 * satisfy the allocation.
456	 *
457	 * The remaining VPIDs are managed by the unit number allocator.
458	 */
459	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
460}
461
462static void
463vmx_disable(void *arg __unused)
464{
465	struct invvpid_desc invvpid_desc = { 0 };
466	struct invept_desc invept_desc = { 0 };
467
468	if (vmxon_enabled[curcpu]) {
469		/*
470		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
471		 *
472		 * VMXON or VMXOFF are not required to invalidate any TLB
473		 * caching structures, so invalidate them explicitly here to avoid
474		 * retaining stale TLB entries between distinct VMX episodes.
475		 */
476		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
477		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
478		vmxoff();
479	}
480	load_cr4(rcr4() & ~CR4_VMXE);
481}
482
483static int
484vmx_cleanup(void)
485{
486
487	if (pirvec >= 0)
488		lapic_ipi_free(pirvec);
489
490	if (vpid_unr != NULL) {
491		delete_unrhdr(vpid_unr);
492		vpid_unr = NULL;
493	}
494
495	smp_rendezvous(NULL, vmx_disable, NULL, NULL);
496
497	return (0);
498}
499
500static void
501vmx_enable(void *arg __unused)
502{
503	int error;
504	uint64_t feature_control;
505
506	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
507	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
508	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
509		wrmsr(MSR_IA32_FEATURE_CONTROL,
510		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
511		    IA32_FEATURE_CONTROL_LOCK);
512	}
513
514	load_cr4(rcr4() | CR4_VMXE);
515
516	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
517	error = vmxon(vmxon_region[curcpu]);
518	if (error == 0)
519		vmxon_enabled[curcpu] = 1;
520}
521
522static void
523vmx_restore(void)
524{
525
526	if (vmxon_enabled[curcpu])
527		vmxon(vmxon_region[curcpu]);
528}
529
530static int
531vmx_init(int ipinum)
532{
533	int error, use_tpr_shadow;
534	uint64_t basic, fixed0, fixed1, feature_control;
535	uint32_t tmp, procbased2_vid_bits;
536
537	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
538	if (!(cpu_feature2 & CPUID2_VMX)) {
539		printf("vmx_init: processor does not support VMX operation\n");
540		return (ENXIO);
541	}
542
543	/*
544	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
545	 * are set (bits 0 and 2 respectively).
546	 */
547	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
548	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
549	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
550		printf("vmx_init: VMX operation disabled by BIOS\n");
551		return (ENXIO);
552	}
553
554	/*
555	 * Verify capabilities MSR_VMX_BASIC:
556	 * - bit 54 indicates support for INS/OUTS decoding
557	 */
558	basic = rdmsr(MSR_VMX_BASIC);
559	if ((basic & (1UL << 54)) == 0) {
560		printf("vmx_init: processor does not support desired basic "
561		    "capabilities\n");
562		return (EINVAL);
563	}
564
565	/* Check support for primary processor-based VM-execution controls */
566	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
567			       MSR_VMX_TRUE_PROCBASED_CTLS,
568			       PROCBASED_CTLS_ONE_SETTING,
569			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
570	if (error) {
571		printf("vmx_init: processor does not support desired primary "
572		       "processor-based controls\n");
573		return (error);
574	}
575
576	/* Clear the processor-based ctl bits that are set on demand */
577	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
578
579	/* Check support for secondary processor-based VM-execution controls */
580	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
581			       MSR_VMX_PROCBASED_CTLS2,
582			       PROCBASED_CTLS2_ONE_SETTING,
583			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
584	if (error) {
585		printf("vmx_init: processor does not support desired secondary "
586		       "processor-based controls\n");
587		return (error);
588	}
589
590	/* Check support for VPID */
591	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
592			       PROCBASED2_ENABLE_VPID, 0, &tmp);
593	if (error == 0)
594		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
595
596	/* Check support for pin-based VM-execution controls */
597	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
598			       MSR_VMX_TRUE_PINBASED_CTLS,
599			       PINBASED_CTLS_ONE_SETTING,
600			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
601	if (error) {
602		printf("vmx_init: processor does not support desired "
603		       "pin-based controls\n");
604		return (error);
605	}
606
607	/* Check support for VM-exit controls */
608	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
609			       VM_EXIT_CTLS_ONE_SETTING,
610			       VM_EXIT_CTLS_ZERO_SETTING,
611			       &exit_ctls);
612	if (error) {
613		printf("vmx_init: processor does not support desired "
614		    "exit controls\n");
615		return (error);
616	}
617
618	/* Check support for VM-entry controls */
619	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
620	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
621	    &entry_ctls);
622	if (error) {
623		printf("vmx_init: processor does not support desired "
624		    "entry controls\n");
625		return (error);
626	}
627
628	/*
629	 * Check support for optional features by testing them
630	 * as individual bits
631	 */
632	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
633					MSR_VMX_TRUE_PROCBASED_CTLS,
634					PROCBASED_HLT_EXITING, 0,
635					&tmp) == 0);
636
637	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
638					MSR_VMX_PROCBASED_CTLS,
639					PROCBASED_MTF, 0,
640					&tmp) == 0);
641
642	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
643					 MSR_VMX_TRUE_PROCBASED_CTLS,
644					 PROCBASED_PAUSE_EXITING, 0,
645					 &tmp) == 0);
646
647	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
648					MSR_VMX_PROCBASED_CTLS2,
649					PROCBASED2_UNRESTRICTED_GUEST, 0,
650				        &tmp) == 0);
651
652	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
653	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
654	    &tmp) == 0);
655
656	/*
657	 * Check support for virtual interrupt delivery.
658	 */
659	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
660	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
661	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
662	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
663
664	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
665	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
666	    &tmp) == 0);
667
668	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
669	    procbased2_vid_bits, 0, &tmp);
670	if (error == 0 && use_tpr_shadow) {
671		virtual_interrupt_delivery = 1;
672		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
673		    &virtual_interrupt_delivery);
674	}
675
676	if (virtual_interrupt_delivery) {
677		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
678		procbased_ctls2 |= procbased2_vid_bits;
679		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
680
681		/*
682		 * No need to emulate accesses to %CR8 if virtual
683		 * interrupt delivery is enabled.
684		 */
685		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
686		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
687
688		/*
689		 * Check for Posted Interrupts only if Virtual Interrupt
690		 * Delivery is enabled.
691		 */
692		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
693		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
694		    &tmp);
695		if (error == 0) {
696			pirvec = lapic_ipi_alloc(&IDTVEC(justreturn));
697			if (pirvec < 0) {
698				if (bootverbose) {
699					printf("vmx_init: unable to allocate "
700					    "posted interrupt vector\n");
701				}
702			} else {
703				posted_interrupts = 1;
704				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
705				    &posted_interrupts);
706			}
707		}
708	}
709
710	if (posted_interrupts)
711		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
712
713	/* Initialize EPT */
714	error = ept_init(ipinum);
715	if (error) {
716		printf("vmx_init: ept initialization failed (%d)\n", error);
717		return (error);
718	}
719
720	/*
721	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
722	 */
723	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
724	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
725	cr0_ones_mask = fixed0 & fixed1;
726	cr0_zeros_mask = ~fixed0 & ~fixed1;
727
728	/*
729	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
730	 * if unrestricted guest execution is allowed.
731	 */
732	if (cap_unrestricted_guest)
733		cr0_ones_mask &= ~(CR0_PG | CR0_PE);
734
735	/*
736	 * Do not allow the guest to set CR0_NW or CR0_CD.
737	 */
738	cr0_zeros_mask |= (CR0_NW | CR0_CD);
739
740	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
741	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
742	cr4_ones_mask = fixed0 & fixed1;
743	cr4_zeros_mask = ~fixed0 & ~fixed1;
744
745	vpid_init();
746
747	vmx_msr_init();
748
749	/* enable VMX operation */
750	smp_rendezvous(NULL, vmx_enable, NULL, NULL);
751
752	vmx_initialized = 1;
753
754	return (0);
755}
756
757static void
758vmx_trigger_hostintr(int vector)
759{
760	uintptr_t func;
761	struct gate_descriptor *gd;
762
763	gd = &idt[vector];
764
765	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
766	    "invalid vector %d", vector));
767	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
768	    vector));
769	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
770	    "has invalid type %d", vector, gd->gd_type));
771	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
772	    "has invalid dpl %d", vector, gd->gd_dpl));
773	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
774	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
775	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
776	    "IST %d", vector, gd->gd_ist));
777
778	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
779	vmx_call_isr(func);
780}
781
782static int
783vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
784{
785	int error, mask_ident, shadow_ident;
786	uint64_t mask_value;
787
788	if (which != 0 && which != 4)
789		panic("vmx_setup_cr_shadow: unknown cr%d", which);
790
791	if (which == 0) {
792		mask_ident = VMCS_CR0_MASK;
793		mask_value = cr0_ones_mask | cr0_zeros_mask;
794		shadow_ident = VMCS_CR0_SHADOW;
795	} else {
796		mask_ident = VMCS_CR4_MASK;
797		mask_value = cr4_ones_mask | cr4_zeros_mask;
798		shadow_ident = VMCS_CR4_SHADOW;
799	}
800
801	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
802	if (error)
803		return (error);
804
805	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
806	if (error)
807		return (error);
808
809	return (0);
810}
811#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
812#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
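
/*
 * Illustrative sketch (not part of the original file): with the CR0/CR4
 * guest/host masks programmed by vmx_setup_cr_shadow(), a guest read of
 * the register observes the read-shadow value for every bit set in the
 * mask and the real guest value for every bit that is clear, roughly as
 * modeled below.  Guest writes that try to change a masked bit away from
 * its shadow value cause a control-register-access VM-exit instead of
 * taking effect directly.
 */
static __inline uint64_t
cr_shadow_guest_view(uint64_t guest_cr, uint64_t shadow, uint64_t mask)
{

	return ((shadow & mask) | (guest_cr & ~mask));
}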
813
814static void *
815vmx_vminit(struct vm *vm, pmap_t pmap)
816{
817	uint16_t vpid[VM_MAXCPU];
818	int i, error;
819	struct vmx *vmx;
820	struct vmcs *vmcs;
821	uint32_t exc_bitmap;
822
823	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
824	if ((uintptr_t)vmx & PAGE_MASK) {
825		panic("malloc of struct vmx not aligned on %d byte boundary",
826		      PAGE_SIZE);
827	}
828	vmx->vm = vm;
829
830	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
831
832	/*
833	 * Clean up EPTP-tagged guest physical and combined mappings
834	 *
835	 * VMX transitions are not required to invalidate any guest physical
836	 * mappings. So, it may be possible for stale guest physical mappings
837	 * to be present in the processor TLBs.
838	 *
839	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
840	 */
841	ept_invalidate_mappings(vmx->eptp);
842
843	msr_bitmap_initialize(vmx->msr_bitmap);
844
845	/*
846	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
847	 * The guest FSBASE and GSBASE are saved and restored during
848	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
849	 * always restored from the vmcs host state area on vm-exit.
850	 *
851	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
852	 * how they are saved/restored, so they can be directly accessed
853	 * by the guest.
854	 *
855	 * MSR_EFER is saved and restored in the guest VMCS area on a
856	 * VM exit and entry respectively. It is also restored from the
857	 * host VMCS area on a VM exit.
858	 *
859	 * The TSC MSR is exposed read-only. Writes are disallowed as they
860	 * would impact the host TSC.  If the guest does a write, the
861	 * "use TSC offsetting" execution control is enabled and the
862	 * difference between the host TSC and the guest TSC is written
863	 * into the TSC offset in the VMCS.
864	 */
865	if (guest_msr_rw(vmx, MSR_GSBASE) ||
866	    guest_msr_rw(vmx, MSR_FSBASE) ||
867	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
868	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
869	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
870	    guest_msr_rw(vmx, MSR_EFER) ||
871	    guest_msr_ro(vmx, MSR_TSC))
872		panic("vmx_vminit: error setting guest msr access");
873
874	vpid_alloc(vpid, VM_MAXCPU);
875
876	if (virtual_interrupt_delivery) {
877		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
878		    APIC_ACCESS_ADDRESS);
879		/* XXX this should really return an error to the caller */
880		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
881	}
882
883	for (i = 0; i < VM_MAXCPU; i++) {
884		vmcs = &vmx->vmcs[i];
885		vmcs->identifier = vmx_revision();
886		error = vmclear(vmcs);
887		if (error != 0) {
888			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
889			      error, i);
890		}
891
892		vmx_msr_guest_init(vmx, i);
893
894		error = vmcs_init(vmcs);
895		KASSERT(error == 0, ("vmcs_init error %d", error));
896
897		VMPTRLD(vmcs);
898		error = 0;
899		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
900		error += vmwrite(VMCS_EPTP, vmx->eptp);
901		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
902		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
903		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
904		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
905		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
906		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
907		error += vmwrite(VMCS_VPID, vpid[i]);
908
909		/* exception bitmap */
910		if (vcpu_trace_exceptions(vm, i))
911			exc_bitmap = 0xffffffff;
912		else
913			exc_bitmap = 1 << IDT_MC;
914		error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
915
916		if (virtual_interrupt_delivery) {
917			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
918			error += vmwrite(VMCS_VIRTUAL_APIC,
919			    vtophys(&vmx->apic_page[i]));
920			error += vmwrite(VMCS_EOI_EXIT0, 0);
921			error += vmwrite(VMCS_EOI_EXIT1, 0);
922			error += vmwrite(VMCS_EOI_EXIT2, 0);
923			error += vmwrite(VMCS_EOI_EXIT3, 0);
924		}
925		if (posted_interrupts) {
926			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
927			error += vmwrite(VMCS_PIR_DESC,
928			    vtophys(&vmx->pir_desc[i]));
929		}
930		VMCLEAR(vmcs);
931		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
932
933		vmx->cap[i].set = 0;
934		vmx->cap[i].proc_ctls = procbased_ctls;
935		vmx->cap[i].proc_ctls2 = procbased_ctls2;
936
937		vmx->state[i].nextrip = ~0;
938		vmx->state[i].lastcpu = NOCPU;
939		vmx->state[i].vpid = vpid[i];
940
941		/*
942		 * Set up the CR0/4 shadows, and init the read shadow
943		 * to the power-on register value from the Intel Sys Arch.
944		 *  CR0 - 0x60000010
945		 *  CR4 - 0
946		 */
947		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
948		if (error != 0)
949			panic("vmx_setup_cr0_shadow %d", error);
950
951		error = vmx_setup_cr4_shadow(vmcs, 0);
952		if (error != 0)
953			panic("vmx_setup_cr4_shadow %d", error);
954
955		vmx->ctx[i].pmap = pmap;
956	}
957
958	return (vmx);
959}
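
/*
 * Illustrative sketch (assumption: this helper is not in the original
 * file): the 4KB MSR bitmap installed in the VMCS above is laid out per
 * the SDM as four 1KB regions -- reads of MSRs 0x0-0x1fff, reads of
 * 0xc0000000-0xc0001fff, then the corresponding write bitmaps at offsets
 * 2048 and 3072.  A clear bit means the access does not cause a VM-exit,
 * which is how guest_msr_rw()/guest_msr_ro() grant direct access.  This
 * only models the read-bitmap index calculation.
 */
static __inline int
msr_bitmap_read_index(uint32_t msr, uint32_t *byteoff, uint32_t *bitpos)
{
	uint32_t base;

	if (msr <= 0x1fff)
		base = 0;			/* low MSR read bitmap */
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		base = 1024;			/* high MSR read bitmap */
	else
		return (-1);			/* not covered by the bitmap */

	*byteoff = base + (msr & 0x1fff) / 8;
	*bitpos = (msr & 0x1fff) % 8;
	return (0);
}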
960
961static int
962vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
963{
964	int handled, func;
965
966	func = vmxctx->guest_rax;
967
968	handled = x86_emulate_cpuid(vm, vcpu,
969				    (uint32_t*)(&vmxctx->guest_rax),
970				    (uint32_t*)(&vmxctx->guest_rbx),
971				    (uint32_t*)(&vmxctx->guest_rcx),
972				    (uint32_t*)(&vmxctx->guest_rdx));
973	return (handled);
974}
975
976static __inline void
977vmx_run_trace(struct vmx *vmx, int vcpu)
978{
979#ifdef KTR
980	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
981#endif
982}
983
984static __inline void
985vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
986	       int handled)
987{
988#ifdef KTR
989	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
990		 handled ? "handled" : "unhandled",
991		 exit_reason_to_str(exit_reason), rip);
992#endif
993}
994
995static __inline void
996vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
997{
998#ifdef KTR
999	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1000#endif
1001}
1002
1003static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1004static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1005
1006/*
1007 * Invalidate guest mappings identified by its vpid from the TLB.
1008 */
1009static __inline void
1010vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1011{
1012	struct vmxstate *vmxstate;
1013	struct invvpid_desc invvpid_desc;
1014
1015	vmxstate = &vmx->state[vcpu];
1016	if (vmxstate->vpid == 0)
1017		return;
1018
1019	if (!running) {
1020		/*
1021		 * Set the 'lastcpu' to an invalid host cpu.
1022		 *
1023		 * This will invalidate TLB entries tagged with the vcpu's
1024		 * vpid the next time it runs via vmx_set_pcpu_defaults().
1025		 */
1026		vmxstate->lastcpu = NOCPU;
1027		return;
1028	}
1029
1030	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1031	    "critical section", __func__, vcpu));
1032
1033	/*
1034	 * Invalidate all mappings tagged with 'vpid'
1035	 *
1036	 * We do this because this vcpu was executing on a different host
1037	 * cpu when it last ran. We do not track whether it invalidated
1038	 * mappings associated with its 'vpid' during that run. So we must
1039	 * assume that the mappings associated with 'vpid' on 'curcpu' are
1040	 * stale and invalidate them.
1041	 *
1042	 * Note that we incur this penalty only when the scheduler chooses to
1043	 * move the thread associated with this vcpu between host cpus.
1044	 *
1045	 * Note also that this will invalidate mappings tagged with 'vpid'
1046	 * for "all" EP4TAs.
1047	 */
1048	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1049		invvpid_desc._res1 = 0;
1050		invvpid_desc._res2 = 0;
1051		invvpid_desc.vpid = vmxstate->vpid;
1052		invvpid_desc.linear_addr = 0;
1053		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1054		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1055	} else {
1056		/*
1057		 * The invvpid can be skipped if an invept is going to
1058		 * be performed before entering the guest. The invept
1059		 * will invalidate combined mappings tagged with
1060		 * 'vmx->eptp' for all vpids.
1061		 */
1062		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1063	}
1064}
1065
1066static void
1067vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1068{
1069	struct vmxstate *vmxstate;
1070
1071	vmxstate = &vmx->state[vcpu];
1072	if (vmxstate->lastcpu == curcpu)
1073		return;
1074
1075	vmxstate->lastcpu = curcpu;
1076
1077	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1078
1079	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1080	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1081	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1082	vmx_invvpid(vmx, vcpu, pmap, 1);
1083}
1084
1085/*
1086 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1087 */
1088CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1089
1090static void __inline
1091vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1092{
1093
1094	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1095		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1096		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1097		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1098	}
1099}
1100
1101static void __inline
1102vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1103{
1104
1105	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1106	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1107	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1108	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1109	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1110}
1111
1112static void __inline
1113vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1114{
1115
1116	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1117		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1118		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1119		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1120	}
1121}
1122
1123static void __inline
1124vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1125{
1126
1127	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1128	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1129	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1130	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1131	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1132}
1133
1134int
1135vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
1136{
1137	int error;
1138
1139	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
1140		vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
1141		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1142		VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
1143	}
1144
1145	error = vmwrite(VMCS_TSC_OFFSET, offset);
1146
1147	return (error);
1148}
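
/*
 * Illustrative note (a sketch, not original code): with the "use TSC
 * offsetting" control enabled above, a guest RDTSC observes
 * host_tsc + VMCS_TSC_OFFSET, so a guest write of 'wval' to the TSC is
 * typically satisfied by programming an offset of wval - rdtsc(), which
 * is the calculation this helper models.
 */
static __inline uint64_t
vmx_tsc_offset_for(uint64_t desired_guest_tsc, uint64_t host_tsc)
{

	return (desired_guest_tsc - host_tsc);
}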
1149
1150#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
1151			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1152#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
1153			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
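
/*
 * Descriptive note (added comment, not in the original file): these
 * interruptibility-state bits come straight from the VMCS.  STI and
 * MOV-SS blocking suppress maskable interrupt injection for one
 * instruction, MOV-SS blocking also defers NMI injection (hence it
 * appears in both masks above), and NMI blocking stays set from NMI
 * delivery until the guest executes IRET.
 */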
1154
1155static void
1156vmx_inject_nmi(struct vmx *vmx, int vcpu)
1157{
1158	uint32_t gi, info;
1159
1160	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1161	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1162	    "interruptibility-state %#x", gi));
1163
1164	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1165	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1166	    "VM-entry interruption information %#x", info));
1167
1168	/*
1169	 * Inject the virtual NMI. The vector must be the NMI IDT entry
1170	 * or the VMCS entry check will fail.
1171	 */
1172	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1173	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1174
1175	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1176
1177	/* Clear the request */
1178	vm_nmi_clear(vmx->vm, vcpu);
1179}
1180
1181static void
1182vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
1183    uint64_t guestrip)
1184{
1185	int vector, need_nmi_exiting, extint_pending;
1186	uint64_t rflags, entryinfo;
1187	uint32_t gi, info;
1188
1189	if (vmx->state[vcpu].nextrip != guestrip) {
1190		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1191		if (gi & HWINTR_BLOCKING) {
1192			VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
1193			    "cleared due to rip change: %#lx/%#lx",
1194			    vmx->state[vcpu].nextrip, guestrip);
1195			gi &= ~HWINTR_BLOCKING;
1196			vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1197		}
1198	}
1199
1200	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1201		KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1202		    "intinfo is not valid: %#lx", __func__, entryinfo));
1203
1204		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1205		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1206		     "pending exception: %#lx/%#x", __func__, entryinfo, info));
1207
1208		info = entryinfo;
1209		vector = info & 0xff;
1210		if (vector == IDT_BP || vector == IDT_OF) {
1211			/*
1212			 * VT-x requires #BP and #OF to be injected as software
1213			 * exceptions.
1214			 */
1215			info &= ~VMCS_INTR_T_MASK;
1216			info |= VMCS_INTR_T_SWEXCEPTION;
1217		}
1218
1219		if (info & VMCS_INTR_DEL_ERRCODE)
1220			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1221
1222		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1223	}
1224
1225	if (vm_nmi_pending(vmx->vm, vcpu)) {
1226		/*
1227		 * If there are no conditions blocking NMI injection then
1228		 * inject it directly here; otherwise enable "NMI window
1229		 * exiting" to inject it as soon as we can.
1230		 *
1231		 * We also check for STI_BLOCKING because some implementations
1232		 * don't allow NMI injection in this case. If we are running
1233		 * on a processor that doesn't have this restriction it will
1234		 * immediately exit and the NMI will be injected in the
1235		 * "NMI window exiting" handler.
1236		 */
1237		need_nmi_exiting = 1;
1238		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1239		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1240			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1241			if ((info & VMCS_INTR_VALID) == 0) {
1242				vmx_inject_nmi(vmx, vcpu);
1243				need_nmi_exiting = 0;
1244			} else {
1245				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1246				    "due to VM-entry intr info %#x", info);
1247			}
1248		} else {
1249			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1250			    "Guest Interruptibility-state %#x", gi);
1251		}
1252
1253		if (need_nmi_exiting)
1254			vmx_set_nmi_window_exiting(vmx, vcpu);
1255	}
1256
1257	extint_pending = vm_extint_pending(vmx->vm, vcpu);
1258
1259	if (!extint_pending && virtual_interrupt_delivery) {
1260		vmx_inject_pir(vlapic);
1261		return;
1262	}
1263
1264	/*
1265	 * If interrupt-window exiting is already in effect then don't bother
1266	 * checking for pending interrupts. This is just an optimization and
1267	 * not needed for correctness.
1268	 */
1269	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1270		VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1271		    "pending int_window_exiting");
1272		return;
1273	}
1274
1275	if (!extint_pending) {
1276		/* Ask the local apic for a vector to inject */
1277		if (!vlapic_pending_intr(vlapic, &vector))
1278			return;
1279
1280		/*
1281		 * From the Intel SDM, Volume 3, Section "Maskable
1282		 * Hardware Interrupts":
1283		 * - maskable interrupt vectors [16,255] can be delivered
1284		 *   through the local APIC.
1285		 */
1286		KASSERT(vector >= 16 && vector <= 255,
1287		    ("invalid vector %d from local APIC", vector));
1288	} else {
1289		/* Ask the legacy pic for a vector to inject */
1290		vatpic_pending_intr(vmx->vm, &vector);
1291
1292		/*
1293		 * From the Intel SDM, Volume 3, Section "Maskable
1294		 * Hardware Interrupts":
1295		 * - maskable interrupt vectors [0,255] can be delivered
1296		 *   through the INTR pin.
1297		 */
1298		KASSERT(vector >= 0 && vector <= 255,
1299		    ("invalid vector %d from INTR", vector));
1300	}
1301
1302	/* Check RFLAGS.IF and the interruptibility state of the guest */
1303	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1304	if ((rflags & PSL_I) == 0) {
1305		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1306		    "rflags %#lx", vector, rflags);
1307		goto cantinject;
1308	}
1309
1310	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1311	if (gi & HWINTR_BLOCKING) {
1312		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1313		    "Guest Interruptibility-state %#x", vector, gi);
1314		goto cantinject;
1315	}
1316
1317	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1318	if (info & VMCS_INTR_VALID) {
1319		/*
1320		 * This is expected and could happen for multiple reasons:
1321		 * - A vectoring VM-entry was aborted due to astpending.
1322		 * - A VM-exit happened during event injection.
1323		 * - An exception was injected above.
1324		 * - An NMI was injected above or after "NMI window exiting".
1325		 */
1326		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1327		    "VM-entry intr info %#x", vector, info);
1328		goto cantinject;
1329	}
1330
1331	/* Inject the interrupt */
1332	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1333	info |= vector;
1334	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1335
1336	if (!extint_pending) {
1337		/* Update the Local APIC ISR */
1338		vlapic_intr_accepted(vlapic, vector);
1339	} else {
1340		vm_extint_clear(vmx->vm, vcpu);
1341		vatpic_intr_accepted(vmx->vm, vector);
1342
1343		/*
1344		 * After we accepted the current ExtINT the PIC may
1345		 * have posted another one.  If that is the case, set
1346		 * the Interrupt Window Exiting execution control so
1347		 * we can inject that one too.
1348		 *
1349		 * Also, interrupt window exiting allows us to inject any
1350		 * pending APIC vector that was preempted by the ExtINT
1351		 * as soon as possible. This applies both for the software
1352		 * emulated vlapic and the hardware assisted virtual APIC.
1353		 */
1354		vmx_set_int_window_exiting(vmx, vcpu);
1355	}
1356
1357	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1358
1359	return;
1360
1361cantinject:
1362	/*
1363	 * Set the Interrupt Window Exiting execution control so we can inject
1364	 * the interrupt as soon as the blocking condition goes away.
1365	 */
1366	vmx_set_int_window_exiting(vmx, vcpu);
1367}
1368
1369/*
1370 * If the Virtual NMIs execution control is '1' then the logical processor
1371 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1372 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1373 * virtual-NMI blocking.
1374 *
1375 * This unblocking occurs even if the IRET causes a fault. In this case the
1376 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1377 */
1378static void
1379vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1380{
1381	uint32_t gi;
1382
1383	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1384	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1385	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1386	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1387}
1388
1389static void
1390vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1391{
1392	uint32_t gi;
1393
1394	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1395	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1396	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1397	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1398}
1399
1400static void
1401vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1402{
1403	uint32_t gi;
1404
1405	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1406	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1407	    ("NMI blocking is not in effect %#x", gi));
1408}
1409
1410static int
1411vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1412{
1413	struct vmxctx *vmxctx;
1414	uint64_t xcrval;
1415	const struct xsave_limits *limits;
1416
1417	vmxctx = &vmx->ctx[vcpu];
1418	limits = vmm_get_xsave_limits();
1419
1420	/*
1421	 * Note that the processor raises a GP# fault on its own if
1422	 * xsetbv is executed for CPL != 0, so we do not have to
1423	 * emulate that fault here.
1424	 */
1425
1426	/* Only xcr0 is supported. */
1427	if (vmxctx->guest_rcx != 0) {
1428		vm_inject_gp(vmx->vm, vcpu);
1429		return (HANDLED);
1430	}
1431
1432	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1433	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1434		vm_inject_ud(vmx->vm, vcpu);
1435		return (HANDLED);
1436	}
1437
1438	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1439	if ((xcrval & ~limits->xcr0_allowed) != 0) {
1440		vm_inject_gp(vmx->vm, vcpu);
1441		return (HANDLED);
1442	}
1443
1444	if (!(xcrval & XFEATURE_ENABLED_X87)) {
1445		vm_inject_gp(vmx->vm, vcpu);
1446		return (HANDLED);
1447	}
1448
1449	/* AVX (YMM_Hi128) requires SSE. */
1450	if (xcrval & XFEATURE_ENABLED_AVX &&
1451	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1452		vm_inject_gp(vmx->vm, vcpu);
1453		return (HANDLED);
1454	}
1455
1456	/*
1457	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1458	 * ZMM_Hi256, and Hi16_ZMM.
1459	 */
1460	if (xcrval & XFEATURE_AVX512 &&
1461	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1462	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
1463		vm_inject_gp(vmx->vm, vcpu);
1464		return (HANDLED);
1465	}
1466
1467	/*
1468	 * Intel MPX requires both bound register state flags to be
1469	 * set.
1470	 */
1471	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1472	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1473		vm_inject_gp(vmx->vm, vcpu);
1474		return (HANDLED);
1475	}
1476
1477	/*
1478	 * This runs "inside" vmrun() with the guest's FPU state, so
1479	 * modifying xcr0 directly modifies the guest's xcr0, not the
1480	 * host's.
1481	 */
1482	load_xcr(0, xcrval);
1483	return (HANDLED);
1484}
1485
1486static uint64_t
1487vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1488{
1489	const struct vmxctx *vmxctx;
1490
1491	vmxctx = &vmx->ctx[vcpu];
1492
1493	switch (ident) {
1494	case 0:
1495		return (vmxctx->guest_rax);
1496	case 1:
1497		return (vmxctx->guest_rcx);
1498	case 2:
1499		return (vmxctx->guest_rdx);
1500	case 3:
1501		return (vmxctx->guest_rbx);
1502	case 4:
1503		return (vmcs_read(VMCS_GUEST_RSP));
1504	case 5:
1505		return (vmxctx->guest_rbp);
1506	case 6:
1507		return (vmxctx->guest_rsi);
1508	case 7:
1509		return (vmxctx->guest_rdi);
1510	case 8:
1511		return (vmxctx->guest_r8);
1512	case 9:
1513		return (vmxctx->guest_r9);
1514	case 10:
1515		return (vmxctx->guest_r10);
1516	case 11:
1517		return (vmxctx->guest_r11);
1518	case 12:
1519		return (vmxctx->guest_r12);
1520	case 13:
1521		return (vmxctx->guest_r13);
1522	case 14:
1523		return (vmxctx->guest_r14);
1524	case 15:
1525		return (vmxctx->guest_r15);
1526	default:
1527		panic("invalid vmx register %d", ident);
1528	}
1529}
1530
1531static void
1532vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1533{
1534	struct vmxctx *vmxctx;
1535
1536	vmxctx = &vmx->ctx[vcpu];
1537
1538	switch (ident) {
1539	case 0:
1540		vmxctx->guest_rax = regval;
1541		break;
1542	case 1:
1543		vmxctx->guest_rcx = regval;
1544		break;
1545	case 2:
1546		vmxctx->guest_rdx = regval;
1547		break;
1548	case 3:
1549		vmxctx->guest_rbx = regval;
1550		break;
1551	case 4:
1552		vmcs_write(VMCS_GUEST_RSP, regval);
1553		break;
1554	case 5:
1555		vmxctx->guest_rbp = regval;
1556		break;
1557	case 6:
1558		vmxctx->guest_rsi = regval;
1559		break;
1560	case 7:
1561		vmxctx->guest_rdi = regval;
1562		break;
1563	case 8:
1564		vmxctx->guest_r8 = regval;
1565		break;
1566	case 9:
1567		vmxctx->guest_r9 = regval;
1568		break;
1569	case 10:
1570		vmxctx->guest_r10 = regval;
1571		break;
1572	case 11:
1573		vmxctx->guest_r11 = regval;
1574		break;
1575	case 12:
1576		vmxctx->guest_r12 = regval;
1577		break;
1578	case 13:
1579		vmxctx->guest_r13 = regval;
1580		break;
1581	case 14:
1582		vmxctx->guest_r14 = regval;
1583		break;
1584	case 15:
1585		vmxctx->guest_r15 = regval;
1586		break;
1587	default:
1588		panic("invalid vmx register %d", ident);
1589	}
1590}
1591
1592static int
1593vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1594{
1595	uint64_t crval, regval;
1596
1597	/* We only handle mov to %cr0 at this time */
1598	if ((exitqual & 0xf0) != 0x00)
1599		return (UNHANDLED);
1600
1601	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1602
1603	vmcs_write(VMCS_CR0_SHADOW, regval);
1604
1605	crval = regval | cr0_ones_mask;
1606	crval &= ~cr0_zeros_mask;
1607	vmcs_write(VMCS_GUEST_CR0, crval);
1608
1609	if (regval & CR0_PG) {
1610		uint64_t efer, entry_ctls;
1611
1612		/*
1613		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1614		 * the "IA-32e mode guest" bit in VM-entry control must be
1615		 * equal.
1616		 */
1617		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1618		if (efer & EFER_LME) {
1619			efer |= EFER_LMA;
1620			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1621			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1622			entry_ctls |= VM_ENTRY_GUEST_LMA;
1623			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1624		}
1625	}
1626
1627	return (HANDLED);
1628}
1629
1630static int
1631vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1632{
1633	uint64_t crval, regval;
1634
1635	/* We only handle mov to %cr4 at this time */
1636	if ((exitqual & 0xf0) != 0x00)
1637		return (UNHANDLED);
1638
1639	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1640
1641	vmcs_write(VMCS_CR4_SHADOW, regval);
1642
1643	crval = regval | cr4_ones_mask;
1644	crval &= ~cr4_zeros_mask;
1645	vmcs_write(VMCS_GUEST_CR4, crval);
1646
1647	return (HANDLED);
1648}
1649
1650static int
1651vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1652{
1653	struct vlapic *vlapic;
1654	uint64_t cr8;
1655	int regnum;
1656
1657	/* We only handle mov %cr8 to/from a register at this time. */
1658	if ((exitqual & 0xe0) != 0x00) {
1659		return (UNHANDLED);
1660	}
1661
1662	vlapic = vm_lapic(vmx->vm, vcpu);
1663	regnum = (exitqual >> 8) & 0xf;
1664	if (exitqual & 0x10) {
1665		cr8 = vlapic_get_cr8(vlapic);
1666		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1667	} else {
1668		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1669		vlapic_set_cr8(vlapic, cr8);
1670	}
1671
1672	return (HANDLED);
1673}
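
/*
 * Illustrative decode (a sketch based on the SDM's exit-qualification
 * layout; this helper is not part of the original file): bits 3:0 of the
 * qualification give the control register number, bits 5:4 the access
 * type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW) and bits
 * 11:8 the general-purpose register, which is why the handlers above
 * test (exitqual & 0xf0) and pass (exitqual >> 8) & 0xf to
 * vmx_get_guest_reg()/vmx_set_guest_reg().
 */
static __inline void
cr_access_decode(uint64_t exitqual, int *crnum, int *acctype, int *regnum)
{

	*crnum = exitqual & 0xf;
	*acctype = (exitqual >> 4) & 0x3;
	*regnum = (exitqual >> 8) & 0xf;
}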
1674
1675/*
1676 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1677 */
1678static int
1679vmx_cpl(void)
1680{
1681	uint32_t ssar;
1682
1683	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1684	return ((ssar >> 5) & 0x3);
1685}
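
/*
 * The VMCS access-rights field read by vmx_cpl() mirrors the segment
 * descriptor attribute byte, with the DPL in bits 6:5, hence the
 * (ssar >> 5) & 0x3 above.
 */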
1686
1687static enum vm_cpu_mode
1688vmx_cpu_mode(void)
1689{
1690	uint32_t csar;
1691
1692	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1693		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1694		if (csar & 0x2000)
1695			return (CPU_MODE_64BIT);	/* CS.L = 1 */
1696		else
1697			return (CPU_MODE_COMPATIBILITY);
1698	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1699		return (CPU_MODE_PROTECTED);
1700	} else {
1701		return (CPU_MODE_REAL);
1702	}
1703}
1704
1705static enum vm_paging_mode
1706vmx_paging_mode(void)
1707{
1708
1709	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1710		return (PAGING_MODE_FLAT);
1711	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1712		return (PAGING_MODE_32);
1713	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1714		return (PAGING_MODE_64);
1715	else
1716		return (PAGING_MODE_PAE);
1717}
1718
1719static uint64_t
1720inout_str_index(struct vmx *vmx, int vcpuid, int in)
1721{
1722	uint64_t val;
1723	int error;
1724	enum vm_reg_name reg;
1725
1726	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1727	error = vmx_getreg(vmx, vcpuid, reg, &val);
1728	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1729	return (val);
1730}
1731
1732static uint64_t
1733inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1734{
1735	uint64_t val;
1736	int error;
1737
1738	if (rep) {
1739		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1740		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1741	} else {
1742		val = 1;
1743	}
1744	return (val);
1745}
1746
1747static int
1748inout_str_addrsize(uint32_t inst_info)
1749{
1750	uint32_t size;
1751
1752	size = (inst_info >> 7) & 0x7;
1753	switch (size) {
1754	case 0:
1755		return (2);	/* 16 bit */
1756	case 1:
1757		return (4);	/* 32 bit */
1758	case 2:
1759		return (8);	/* 64 bit */
1760	default:
1761		panic("%s: invalid size encoding %d", __func__, size);
1762	}
1763}
1764
1765static void
1766inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1767    struct vm_inout_str *vis)
1768{
1769	int error, s;
1770
1771	if (in) {
1772		vis->seg_name = VM_REG_GUEST_ES;
1773	} else {
1774		s = (inst_info >> 15) & 0x7;
1775		vis->seg_name = vm_segment_name(s);
1776	}
1777
1778	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1779	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1780}
1781
1782static void
1783vmx_paging_info(struct vm_guest_paging *paging)
1784{
1785	paging->cr3 = vmcs_guest_cr3();
1786	paging->cpl = vmx_cpl();
1787	paging->cpu_mode = vmx_cpu_mode();
1788	paging->paging_mode = vmx_paging_mode();
1789}
1790
1791static void
1792vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1793{
1794	struct vm_guest_paging *paging;
1795	uint32_t csar;
1796
1797	paging = &vmexit->u.inst_emul.paging;
1798
1799	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1800	vmexit->inst_length = 0;
1801	vmexit->u.inst_emul.gpa = gpa;
1802	vmexit->u.inst_emul.gla = gla;
1803	vmx_paging_info(paging);
1804	switch (paging->cpu_mode) {
1805	case CPU_MODE_REAL:
1806		vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1807		vmexit->u.inst_emul.cs_d = 0;
1808		break;
1809	case CPU_MODE_PROTECTED:
1810	case CPU_MODE_COMPATIBILITY:
1811		vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1812		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1813		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1814		break;
1815	default:
1816		vmexit->u.inst_emul.cs_base = 0;
1817		vmexit->u.inst_emul.cs_d = 0;
1818		break;
1819	}
1820	vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1821}
1822
1823static int
1824ept_fault_type(uint64_t ept_qual)
1825{
1826	int fault_type;
1827
1828	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1829		fault_type = VM_PROT_WRITE;
1830	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1831		fault_type = VM_PROT_EXECUTE;
1832	else
1833		fault_type = VM_PROT_READ;
1834
1835	return (fault_type);
1836}
1837
1838static boolean_t
1839ept_emulation_fault(uint64_t ept_qual)
1840{
1841	int read, write;
1842
1843	/* EPT fault on an instruction fetch doesn't make sense here */
1844	if (ept_qual & EPT_VIOLATION_INST_FETCH)
1845		return (FALSE);
1846
1847	/* EPT fault must be a read fault or a write fault */
1848	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1849	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1850	if ((read | write) == 0)
1851		return (FALSE);
1852
1853	/*
1854	 * The EPT violation must have been caused by accessing a
1855	 * guest-physical address that is a translation of a guest-linear
1856	 * address.
1857	 */
1858	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1859	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1860		return (FALSE);
1861	}
1862
1863	return (TRUE);
1864}
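
/*
 * Descriptive summary of the qualification bits tested above (the
 * authoritative definitions are the EPT_VIOLATION_* macros used here):
 * the low bits record whether the access was a data read, data write or
 * instruction fetch, and two further bits report whether a guest-linear
 * address was involved and whether the fault occurred while translating
 * it -- the combination ept_emulation_fault() requires before the access
 * is treated as emulatable MMIO.
 */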
1865
1866static __inline int
1867apic_access_virtualization(struct vmx *vmx, int vcpuid)
1868{
1869	uint32_t proc_ctls2;
1870
1871	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1872	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1873}
1874
1875static __inline int
1876x2apic_virtualization(struct vmx *vmx, int vcpuid)
1877{
1878	uint32_t proc_ctls2;
1879
1880	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1881	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1882}
1883
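/*
 * Handle a trap-like APIC-write VM-exit by dispatching to the appropriate
 * vlapic register write handler.
 */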
1884static int
1885vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1886    uint64_t qual)
1887{
1888	int error, handled, offset;
1889	uint32_t *apic_regs, vector;
1890	bool retu;
1891
1892	handled = HANDLED;
1893	offset = APIC_WRITE_OFFSET(qual);
1894
1895	if (!apic_access_virtualization(vmx, vcpuid)) {
1896		/*
1897		 * In general there should not be any APIC write VM-exits
1898		 * unless APIC-access virtualization is enabled.
1899		 *
1900		 * However self-IPI virtualization can legitimately trigger
1901		 * an APIC-write VM-exit so treat it specially.
1902		 */
1903		if (x2apic_virtualization(vmx, vcpuid) &&
1904		    offset == APIC_OFFSET_SELF_IPI) {
1905			apic_regs = (uint32_t *)(vlapic->apic_page);
1906			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1907			vlapic_self_ipi_handler(vlapic, vector);
1908			return (HANDLED);
1909		} else
1910			return (UNHANDLED);
1911	}
1912
1913	switch (offset) {
1914	case APIC_OFFSET_ID:
1915		vlapic_id_write_handler(vlapic);
1916		break;
1917	case APIC_OFFSET_LDR:
1918		vlapic_ldr_write_handler(vlapic);
1919		break;
1920	case APIC_OFFSET_DFR:
1921		vlapic_dfr_write_handler(vlapic);
1922		break;
1923	case APIC_OFFSET_SVR:
1924		vlapic_svr_write_handler(vlapic);
1925		break;
1926	case APIC_OFFSET_ESR:
1927		vlapic_esr_write_handler(vlapic);
1928		break;
1929	case APIC_OFFSET_ICR_LOW:
1930		retu = false;
1931		error = vlapic_icrlo_write_handler(vlapic, &retu);
1932		if (error != 0 || retu)
1933			handled = UNHANDLED;
1934		break;
1935	case APIC_OFFSET_CMCI_LVT:
1936	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1937		vlapic_lvt_write_handler(vlapic, offset);
1938		break;
1939	case APIC_OFFSET_TIMER_ICR:
1940		vlapic_icrtmr_write_handler(vlapic);
1941		break;
1942	case APIC_OFFSET_TIMER_DCR:
1943		vlapic_dcr_write_handler(vlapic);
1944		break;
1945	default:
1946		handled = UNHANDLED;
1947		break;
1948	}
1949	return (handled);
1950}
1951
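/*
 * An EPT fault on the page at DEFAULT_APIC_BASE is treated as an APIC-access
 * fault when APIC-access virtualization is enabled.
 */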
1952static bool
1953apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1954{
1955
1956	if (apic_access_virtualization(vmx, vcpuid) &&
1957	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1958		return (true);
1959	else
1960		return (false);
1961}
1962
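/*
 * Handle an APIC-access VM-exit. If the access targets a register whose
 * emulation is expected then the exit is set up for instruction emulation;
 * either way the exit itself is reported as UNHANDLED (see the comment at
 * the end of the function).
 */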
1963static int
1964vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1965{
1966	uint64_t qual;
1967	int access_type, offset, allowed;
1968
1969	if (!apic_access_virtualization(vmx, vcpuid))
1970		return (UNHANDLED);
1971
1972	qual = vmexit->u.vmx.exit_qualification;
1973	access_type = APIC_ACCESS_TYPE(qual);
1974	offset = APIC_ACCESS_OFFSET(qual);
1975
1976	allowed = 0;
1977	if (access_type == 0) {
1978		/*
1979		 * Read data access to the following registers is expected.
1980		 */
1981		switch (offset) {
1982		case APIC_OFFSET_APR:
1983		case APIC_OFFSET_PPR:
1984		case APIC_OFFSET_RRR:
1985		case APIC_OFFSET_CMCI_LVT:
1986		case APIC_OFFSET_TIMER_CCR:
1987			allowed = 1;
1988			break;
1989		default:
1990			break;
1991		}
1992	} else if (access_type == 1) {
1993		/*
1994		 * Write data access to the following registers is expected.
1995		 */
1996		switch (offset) {
1997		case APIC_OFFSET_VER:
1998		case APIC_OFFSET_APR:
1999		case APIC_OFFSET_PPR:
2000		case APIC_OFFSET_RRR:
2001		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2002		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2003		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2004		case APIC_OFFSET_CMCI_LVT:
2005		case APIC_OFFSET_TIMER_CCR:
2006			allowed = 1;
2007			break;
2008		default:
2009			break;
2010		}
2011	}
2012
2013	if (allowed) {
2014		vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
2015		    VIE_INVALID_GLA);
2016	}
2017
2018	/*
2019	 * Regardless of whether the APIC-access is allowed this handler
2020	 * always returns UNHANDLED:
2021	 * - if the access is allowed then it is handled by emulating the
2022	 *   instruction that caused the VM-exit (outside the critical section)
2023	 * - if the access is not allowed then it will be converted to an
2024	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2025	 */
2026	return (UNHANDLED);
2027}
2028
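/*
 * Decode the task switch source from bits 31:30 of the exit qualification.
 */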
2029static enum task_switch_reason
2030vmx_task_switch_reason(uint64_t qual)
2031{
2032	int reason;
2033
2034	reason = (qual >> 30) & 0x3;
2035	switch (reason) {
2036	case 0:
2037		return (TSR_CALL);
2038	case 1:
2039		return (TSR_IRET);
2040	case 2:
2041		return (TSR_JMP);
2042	case 3:
2043		return (TSR_IDT_GATE);
2044	default:
2045		panic("%s: invalid reason %d", __func__, reason);
2046	}
2047}
2048
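/*
 * Emulate WRMSR by dispatching to the local APIC or the VMX MSR handlers.
 */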
2049static int
2050emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2051{
2052	int error;
2053
2054	if (lapic_msr(num))
2055		error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2056	else
2057		error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2058
2059	return (error);
2060}
2061
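/*
 * Emulate RDMSR by dispatching to the local APIC or the VMX MSR handlers
 * and, on success, returning the 64-bit result to the guest in %edx:%eax.
 */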
2062static int
2063emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2064{
2065	struct vmxctx *vmxctx;
2066	uint64_t result;
2067	uint32_t eax, edx;
2068	int error;
2069
2070	if (lapic_msr(num))
2071		error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2072	else
2073		error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2074
2075	if (error == 0) {
2076		eax = result;
2077		vmxctx = &vmx->ctx[vcpuid];
2078		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2079		KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2080
2081		edx = result >> 32;
2082		error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2083		KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2084	}
2085
2086	return (error);
2087}
2088
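/*
 * Process a VM-exit. Returns HANDLED if the exit was dealt with entirely in
 * the kernel and the guest can be resumed, otherwise UNHANDLED with 'vmexit'
 * filled in so that the exit can be processed further in userland.
 */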
2089static int
2090vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2091{
2092	int error, errcode, errcode_valid, handled, in;
2093	struct vmxctx *vmxctx;
2094	struct vlapic *vlapic;
2095	struct vm_inout_str *vis;
2096	struct vm_task_switch *ts;
2097	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2098	uint32_t intr_type, intr_vec, reason;
2099	uint64_t exitintinfo, qual, gpa;
2100	bool retu;
2101
2102	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2103	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2104
2105	handled = UNHANDLED;
2106	vmxctx = &vmx->ctx[vcpu];
2107
2108	qual = vmexit->u.vmx.exit_qualification;
2109	reason = vmexit->u.vmx.exit_reason;
2110	vmexit->exitcode = VM_EXITCODE_BOGUS;
2111
2112	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2113
2114	/*
2115	 * VM-entry failures during or after loading guest state.
2116	 *
2117	 * These VM-exits are uncommon but must be handled specially
2118	 * as most VM-exit fields are not populated as usual.
2119	 */
2120	if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2121		VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2122		__asm __volatile("int $18");
2123		return (1);
2124	}
2125
2126	/*
2127	 * VM exits that can be triggered during event delivery need to
2128	 * be handled specially by re-injecting the event if the IDT
2129	 * vectoring information field's valid bit is set.
2130	 *
2131	 * See "Information for VM Exits During Event Delivery" in Intel SDM
2132	 * for details.
2133	 */
2134	idtvec_info = vmcs_idt_vectoring_info();
2135	if (idtvec_info & VMCS_IDT_VEC_VALID) {
2136		idtvec_info &= ~(1 << 12); /* clear undefined bit */
2137		exitintinfo = idtvec_info;
2138		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2139			idtvec_err = vmcs_idt_vectoring_err();
2140			exitintinfo |= (uint64_t)idtvec_err << 32;
2141		}
2142		error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2143		KASSERT(error == 0, ("%s: vm_exit_intinfo error %d",
2144		    __func__, error));
2145
2146		/*
2147		 * If 'virtual NMIs' are being used and the VM-exit
2148		 * happened while injecting an NMI during the previous
2149		 * VM-entry, then clear "blocking by NMI" in the
2150		 * Guest Interruptibility-State so the NMI can be
2151		 * reinjected on the subsequent VM-entry.
2152		 *
2153		 * However, if the NMI was being delivered through a task
2154		 * gate, then the new task must start execution with NMIs
2155		 * blocked so don't clear NMI blocking in this case.
2156		 */
2157		intr_type = idtvec_info & VMCS_INTR_T_MASK;
2158		if (intr_type == VMCS_INTR_T_NMI) {
2159			if (reason != EXIT_REASON_TASK_SWITCH)
2160				vmx_clear_nmi_blocking(vmx, vcpu);
2161			else
2162				vmx_assert_nmi_blocking(vmx, vcpu);
2163		}
2164
2165		/*
2166		 * Update VM-entry instruction length if the event being
2167		 * delivered was a software interrupt or software exception.
2168		 */
2169		if (intr_type == VMCS_INTR_T_SWINTR ||
2170		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2171		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
2172			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2173		}
2174	}
2175
2176	switch (reason) {
2177	case EXIT_REASON_TASK_SWITCH:
2178		ts = &vmexit->u.task_switch;
2179		ts->tsssel = qual & 0xffff;
2180		ts->reason = vmx_task_switch_reason(qual);
2181		ts->ext = 0;
2182		ts->errcode_valid = 0;
2183		vmx_paging_info(&ts->paging);
2184		/*
2185		 * If the task switch was due to a CALL, JMP, IRET, software
2186		 * interrupt (INT n) or software exception (INT3, INTO),
2187		 * then the saved %rip references the instruction that caused
2188		 * the task switch. The instruction length field in the VMCS
2189		 * is valid in this case.
2190		 *
2191		 * In all other cases (e.g., NMI, hardware exception) the
2192		 * saved %rip is one that would have been saved in the old TSS
2193		 * had the task switch completed normally so the instruction
2194		 * length field is not needed in this case and is explicitly
2195		 * set to 0.
2196		 */
2197		if (ts->reason == TSR_IDT_GATE) {
2198			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2199			    ("invalid idtvec_info %#x for IDT task switch",
2200			    idtvec_info));
2201			intr_type = idtvec_info & VMCS_INTR_T_MASK;
2202			if (intr_type != VMCS_INTR_T_SWINTR &&
2203			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
2204			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2205				/* Task switch triggered by external event */
2206				ts->ext = 1;
2207				vmexit->inst_length = 0;
2208				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2209					ts->errcode_valid = 1;
2210					ts->errcode = vmcs_idt_vectoring_err();
2211				}
2212			}
2213		}
2214		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2215		VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2216		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2217		    ts->ext ? "external" : "internal",
2218		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2219		break;
2220	case EXIT_REASON_CR_ACCESS:
2221		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2222		switch (qual & 0xf) {
2223		case 0:
2224			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2225			break;
2226		case 4:
2227			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2228			break;
2229		case 8:
2230			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2231			break;
2232		}
2233		break;
2234	case EXIT_REASON_RDMSR:
2235		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2236		retu = false;
2237		ecx = vmxctx->guest_rcx;
2238		VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2239		error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2240		if (error) {
2241			vmexit->exitcode = VM_EXITCODE_RDMSR;
2242			vmexit->u.msr.code = ecx;
2243		} else if (!retu) {
2244			handled = HANDLED;
2245		} else {
2246			/* Return to userspace with a valid exitcode */
2247			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2248			    ("emulate_rdmsr retu with bogus exitcode"));
2249		}
2250		break;
2251	case EXIT_REASON_WRMSR:
2252		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2253		retu = false;
2254		eax = vmxctx->guest_rax;
2255		ecx = vmxctx->guest_rcx;
2256		edx = vmxctx->guest_rdx;
2257		VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2258		    ecx, (uint64_t)edx << 32 | eax);
2259		error = emulate_wrmsr(vmx, vcpu, ecx,
2260		    (uint64_t)edx << 32 | eax, &retu);
2261		if (error) {
2262			vmexit->exitcode = VM_EXITCODE_WRMSR;
2263			vmexit->u.msr.code = ecx;
2264			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2265		} else if (!retu) {
2266			handled = HANDLED;
2267		} else {
2268			/* Return to userspace with a valid exitcode */
2269			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2270			    ("emulate_wrmsr retu with bogus exitcode"));
2271		}
2272		break;
2273	case EXIT_REASON_HLT:
2274		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2275		vmexit->exitcode = VM_EXITCODE_HLT;
2276		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2277		break;
2278	case EXIT_REASON_MTF:
2279		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2280		vmexit->exitcode = VM_EXITCODE_MTRAP;
2281		vmexit->inst_length = 0;
2282		break;
2283	case EXIT_REASON_PAUSE:
2284		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2285		vmexit->exitcode = VM_EXITCODE_PAUSE;
2286		break;
2287	case EXIT_REASON_INTR_WINDOW:
2288		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2289		vmx_clear_int_window_exiting(vmx, vcpu);
2290		return (1);
2291	case EXIT_REASON_EXT_INTR:
2292		/*
2293		 * External interrupts serve only to cause VM exits and allow
2294		 * the host interrupt handler to run.
2295		 *
2296		 * If this external interrupt triggers a virtual interrupt
2297		 * to a VM, then that state will be recorded by the
2298		 * host interrupt handler in the VM's softc. We will inject
2299		 * this virtual interrupt during the subsequent VM enter.
2300		 */
2301		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2302
2303		/*
2304		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2305		 * This appears to be a bug in VMware Fusion?
2306		 */
2307		if (!(intr_info & VMCS_INTR_VALID))
2308			return (1);
2309		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2310		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2311		    ("VM exit interruption info invalid: %#x", intr_info));
2312		vmx_trigger_hostintr(intr_info & 0xff);
2313
2314		/*
2315		 * This is special. We want to treat this as a 'handled'
2316		 * VM-exit but not increment the instruction pointer.
2317		 */
2318		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2319		return (1);
2320	case EXIT_REASON_NMI_WINDOW:
2321		/* Exit to allow the pending virtual NMI to be injected */
2322		if (vm_nmi_pending(vmx->vm, vcpu))
2323			vmx_inject_nmi(vmx, vcpu);
2324		vmx_clear_nmi_window_exiting(vmx, vcpu);
2325		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2326		return (1);
2327	case EXIT_REASON_INOUT:
2328		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2329		vmexit->exitcode = VM_EXITCODE_INOUT;
2330		vmexit->u.inout.bytes = (qual & 0x7) + 1;
2331		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2332		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2333		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2334		vmexit->u.inout.port = (uint16_t)(qual >> 16);
2335		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2336		if (vmexit->u.inout.string) {
2337			inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2338			vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2339			vis = &vmexit->u.inout_str;
2340			vmx_paging_info(&vis->paging);
2341			vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2342			vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2343			vis->index = inout_str_index(vmx, vcpu, in);
2344			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2345			vis->addrsize = inout_str_addrsize(inst_info);
2346			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2347		}
2348		break;
2349	case EXIT_REASON_CPUID:
2350		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2351		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2352		break;
2353	case EXIT_REASON_EXCEPTION:
2354		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2355		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2356		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2357		    ("VM exit interruption info invalid: %#x", intr_info));
2358
2359		intr_vec = intr_info & 0xff;
2360		intr_type = intr_info & VMCS_INTR_T_MASK;
2361
2362		/*
2363		 * If Virtual NMIs control is 1 and the VM-exit is due to a
2364		 * fault encountered during the execution of IRET then we must
2365		 * restore the state of "virtual-NMI blocking" before resuming
2366		 * the guest.
2367		 *
2368		 * See "Resuming Guest Software after Handling an Exception".
2369		 * See "Information for VM Exits Due to Vectored Events".
2370		 */
2371		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2372		    (intr_vec != IDT_DF) &&
2373		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2374			vmx_restore_nmi_blocking(vmx, vcpu);
2375
2376		/*
2377		 * The NMI has already been handled in vmx_exit_handle_nmi().
2378		 */
2379		if (intr_type == VMCS_INTR_T_NMI)
2380			return (1);
2381
2382		/*
2383		 * Call the machine check handler by hand. Also don't reflect
2384		 * the machine check back into the guest.
2385		 */
2386		if (intr_vec == IDT_MC) {
2387			VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2388			__asm __volatile("int $18");
2389			return (1);
2390		}
2391
2392		if (intr_vec == IDT_PF) {
2393			error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2394			KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2395			    __func__, error));
2396		}
2397
2398		/*
2399		 * Software exceptions exhibit trap-like behavior. This in
2400		 * turn requires populating the VM-entry instruction length
2401		 * so that the %rip in the trap frame is past the INT3/INTO
2402		 * instruction.
2403		 */
2404		if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2405			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2406
2407		/* Reflect all other exceptions back into the guest */
2408		errcode_valid = errcode = 0;
2409		if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2410			errcode_valid = 1;
2411			errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2412		}
2413		VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2414		    "the guest", intr_vec, errcode);
2415		error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2416		    errcode_valid, errcode, 0);
2417		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2418		    __func__, error));
2419		return (1);
2420
2421	case EXIT_REASON_EPT_FAULT:
2422		/*
2423	 * If 'gpa' lies within the address space allocated to
2424	 * memory then this must be a nested page fault; otherwise
2425	 * it must be an instruction that accesses MMIO space.
2426		 */
2427		gpa = vmcs_gpa();
2428		if (vm_mem_allocated(vmx->vm, gpa) ||
2429		    apic_access_fault(vmx, vcpu, gpa)) {
2430			vmexit->exitcode = VM_EXITCODE_PAGING;
2431			vmexit->inst_length = 0;
2432			vmexit->u.paging.gpa = gpa;
2433			vmexit->u.paging.fault_type = ept_fault_type(qual);
2434			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2435		} else if (ept_emulation_fault(qual)) {
2436			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2437			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2438		}
2439		/*
2440		 * If Virtual NMIs control is 1 and the VM-exit is due to an
2441		 * EPT fault during the execution of IRET then we must restore
2442		 * the state of "virtual-NMI blocking" before resuming.
2443		 *
2444		 * See description of "NMI unblocking due to IRET" in
2445		 * "Exit Qualification for EPT Violations".
2446		 */
2447		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2448		    (qual & EXIT_QUAL_NMIUDTI) != 0)
2449			vmx_restore_nmi_blocking(vmx, vcpu);
2450		break;
2451	case EXIT_REASON_VIRTUALIZED_EOI:
2452		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2453		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2454		vmexit->inst_length = 0;	/* trap-like */
2455		break;
2456	case EXIT_REASON_APIC_ACCESS:
2457		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2458		break;
2459	case EXIT_REASON_APIC_WRITE:
2460		/*
2461		 * APIC-write VM exit is trap-like so the %rip is already
2462		 * pointing to the next instruction.
2463		 */
2464		vmexit->inst_length = 0;
2465		vlapic = vm_lapic(vmx->vm, vcpu);
2466		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2467		break;
2468	case EXIT_REASON_XSETBV:
2469		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2470		break;
2471	case EXIT_REASON_MONITOR:
2472		vmexit->exitcode = VM_EXITCODE_MONITOR;
2473		break;
2474	case EXIT_REASON_MWAIT:
2475		vmexit->exitcode = VM_EXITCODE_MWAIT;
2476		break;
2477	default:
2478		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2479		break;
2480	}
2481
2482	if (handled) {
2483		/*
2484		 * It is possible that control is returned to userland
2485		 * even though we were able to handle the VM exit in the
2486		 * kernel.
2487		 *
2488		 * In such a case we want to make sure that the userland
2489		 * restarts guest execution at the instruction *after*
2490		 * the one we just processed. Therefore we update the
2491		 * guest rip in the VMCS and in 'vmexit'.
2492		 */
2493		vmexit->rip += vmexit->inst_length;
2494		vmexit->inst_length = 0;
2495		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2496	} else {
2497		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2498			/*
2499			 * If this VM exit was not claimed by anybody then
2500			 * treat it as a generic VMX exit.
2501			 */
2502			vmexit->exitcode = VM_EXITCODE_VMX;
2503			vmexit->u.vmx.status = VM_SUCCESS;
2504			vmexit->u.vmx.inst_type = 0;
2505			vmexit->u.vmx.inst_error = 0;
2506		} else {
2507			/*
2508			 * The exitcode and collateral have been populated.
2509			 * The VM exit will be processed further in userland.
2510			 */
2511		}
2512	}
2513	return (handled);
2514}
2515
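/*
 * A VMLAUNCH, VMRESUME or INVEPT instruction failed on the way into the
 * guest; report the VM-instruction error to userland as a generic VMX exit.
 */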
2516static __inline void
2517vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2518{
2519
2520	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2521	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
2522	    vmxctx->inst_fail_status));
2523
2524	vmexit->inst_length = 0;
2525	vmexit->exitcode = VM_EXITCODE_VMX;
2526	vmexit->u.vmx.status = vmxctx->inst_fail_status;
2527	vmexit->u.vmx.inst_error = vmcs_instruction_error();
2528	vmexit->u.vmx.exit_reason = ~0;
2529	vmexit->u.vmx.exit_qualification = ~0;
2530
2531	switch (rc) {
2532	case VMX_VMRESUME_ERROR:
2533	case VMX_VMLAUNCH_ERROR:
2534	case VMX_INVEPT_ERROR:
2535		vmexit->u.vmx.inst_type = rc;
2536		break;
2537	default:
2538		panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
2539	}
2540}
2541
2542/*
2543 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2544 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2545 * sufficient to simply vector to the NMI handler via a software interrupt.
2546 * However, this must be done before maskable interrupts are enabled;
2547 * otherwise the "iret" issued by an interrupt handler will incorrectly
2548 * clear NMI blocking.
2549 */
2550static __inline void
2551vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2552{
2553	uint32_t intr_info;
2554
2555	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2556
2557	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2558		return;
2559
2560	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2561	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2562	    ("VM exit interruption info invalid: %#x", intr_info));
2563
2564	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2565		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2566		    "to NMI has invalid vector: %#x", intr_info));
2567		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2568		__asm __volatile("int $2");
2569	}
2570}
2571
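/*
 * Enter the guest and keep running it until a VM-exit or pending event
 * requires a return to userland.
 */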
2572static int
2573vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
2574    struct vm_eventinfo *evinfo)
2575{
2576	int rc, handled, launched;
2577	struct vmx *vmx;
2578	struct vm *vm;
2579	struct vmxctx *vmxctx;
2580	struct vmcs *vmcs;
2581	struct vm_exit *vmexit;
2582	struct vlapic *vlapic;
2583	uint32_t exit_reason;
2584
2585	vmx = arg;
2586	vm = vmx->vm;
2587	vmcs = &vmx->vmcs[vcpu];
2588	vmxctx = &vmx->ctx[vcpu];
2589	vlapic = vm_lapic(vm, vcpu);
2590	vmexit = vm_exitinfo(vm, vcpu);
2591	launched = 0;
2592
2593	KASSERT(vmxctx->pmap == pmap,
2594	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2595
2596	vmx_msr_guest_enter(vmx, vcpu);
2597
2598	VMPTRLD(vmcs);
2599
2600	/*
2601	 * XXX
2602	 * We do this every time because we may setup the virtual machine
2603	 * from a different process than the one that actually runs it.
2604	 *
2605	 * If the life of a virtual machine was spent entirely in the context
2606	 * of a single process we could do this once in vmx_vminit().
2607	 */
2608	vmcs_write(VMCS_HOST_CR3, rcr3());
2609
2610	vmcs_write(VMCS_GUEST_RIP, rip);
2611	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2612	do {
2613		KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2614		    "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
2615
2616		handled = UNHANDLED;
2617		/*
2618		 * Interrupts are disabled from this point on until the
2619		 * guest starts executing. This is done for the following
2620		 * reasons:
2621		 *
2622		 * If an AST is asserted on this thread after the check below,
2623		 * then the IPI_AST notification will not be lost, because it
2624		 * will cause a VM exit due to external interrupt as soon as
2625		 * the guest state is loaded.
2626		 *
2627		 * A posted interrupt after 'vmx_inject_interrupts()' will
2628		 * not be "lost" because it will be held pending in the host
2629		 * APIC because interrupts are disabled. The pending interrupt
2630		 * will be recognized as soon as the guest state is loaded.
2631		 *
2632		 * The same reasoning applies to the IPI generated by
2633		 * pmap_invalidate_ept().
2634		 */
2635		disable_intr();
2636		vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
2637
2638		/*
2639		 * Check for vcpu suspension after injecting events because
2640		 * vmx_inject_interrupts() can suspend the vcpu due to a
2641		 * triple fault.
2642		 */
2643		if (vcpu_suspended(evinfo)) {
2644			enable_intr();
2645			vm_exit_suspended(vmx->vm, vcpu, rip);
2646			break;
2647		}
2648
2649		if (vcpu_rendezvous_pending(evinfo)) {
2650			enable_intr();
2651			vm_exit_rendezvous(vmx->vm, vcpu, rip);
2652			break;
2653		}
2654
2655		if (vcpu_reqidle(evinfo)) {
2656			enable_intr();
2657			vm_exit_reqidle(vmx->vm, vcpu, rip);
2658			break;
2659		}
2660
2661		if (vcpu_should_yield(vm, vcpu)) {
2662			enable_intr();
2663			vm_exit_astpending(vmx->vm, vcpu, rip);
2664			vmx_astpending_trace(vmx, vcpu, rip);
2665			handled = HANDLED;
2666			break;
2667		}
2668
2669		vmx_run_trace(vmx, vcpu);
2670		rc = vmx_enter_guest(vmxctx, vmx, launched);
2671
2672		/* Collect some information for VM exit processing */
2673		vmexit->rip = rip = vmcs_guest_rip();
2674		vmexit->inst_length = vmexit_instruction_length();
2675		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2676		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2677
2678		/* Update 'nextrip' */
2679		vmx->state[vcpu].nextrip = rip;
2680
2681		if (rc == VMX_GUEST_VMEXIT) {
2682			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2683			enable_intr();
2684			handled = vmx_exit_process(vmx, vcpu, vmexit);
2685		} else {
2686			enable_intr();
2687			vmx_exit_inst_error(vmxctx, rc, vmexit);
2688		}
2689		launched = 1;
2690		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2691		rip = vmexit->rip;
2692	} while (handled);
2693
2694	/*
2695	 * If a VM exit has been handled then the exitcode must be BOGUS;
2696	 * if a VM exit is not handled then the exitcode must not be BOGUS.
2697	 */
2698	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2699	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2700		panic("Mismatch between handled (%d) and exitcode (%d)",
2701		      handled, vmexit->exitcode);
2702	}
2703
2704	if (!handled)
2705		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2706
2707	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2708	    vmexit->exitcode);
2709
2710	VMCLEAR(vmcs);
2711	vmx_msr_guest_exit(vmx, vcpu);
2712
2713	return (0);
2714}
2715
2716static void
2717vmx_vmcleanup(void *arg)
2718{
2719	int i;
2720	struct vmx *vmx = arg;
2721
2722	if (apic_access_virtualization(vmx, 0))
2723		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2724
2725	for (i = 0; i < VM_MAXCPU; i++)
2726		vpid_free(vmx->state[i].vpid);
2727
2728	free(vmx, M_VMX);
2729
2730	return;
2731}
2732
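/*
 * Return a pointer to the software copy of the guest register maintained in
 * the vmxctx, or NULL if the register is not kept there (i.e. it lives in
 * the VMCS).
 */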
2733static register_t *
2734vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2735{
2736
2737	switch (reg) {
2738	case VM_REG_GUEST_RAX:
2739		return (&vmxctx->guest_rax);
2740	case VM_REG_GUEST_RBX:
2741		return (&vmxctx->guest_rbx);
2742	case VM_REG_GUEST_RCX:
2743		return (&vmxctx->guest_rcx);
2744	case VM_REG_GUEST_RDX:
2745		return (&vmxctx->guest_rdx);
2746	case VM_REG_GUEST_RSI:
2747		return (&vmxctx->guest_rsi);
2748	case VM_REG_GUEST_RDI:
2749		return (&vmxctx->guest_rdi);
2750	case VM_REG_GUEST_RBP:
2751		return (&vmxctx->guest_rbp);
2752	case VM_REG_GUEST_R8:
2753		return (&vmxctx->guest_r8);
2754	case VM_REG_GUEST_R9:
2755		return (&vmxctx->guest_r9);
2756	case VM_REG_GUEST_R10:
2757		return (&vmxctx->guest_r10);
2758	case VM_REG_GUEST_R11:
2759		return (&vmxctx->guest_r11);
2760	case VM_REG_GUEST_R12:
2761		return (&vmxctx->guest_r12);
2762	case VM_REG_GUEST_R13:
2763		return (&vmxctx->guest_r13);
2764	case VM_REG_GUEST_R14:
2765		return (&vmxctx->guest_r14);
2766	case VM_REG_GUEST_R15:
2767		return (&vmxctx->guest_r15);
2768	case VM_REG_GUEST_CR2:
2769		return (&vmxctx->guest_cr2);
2770	default:
2771		break;
2772	}
2773	return (NULL);
2774}
2775
2776static int
2777vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2778{
2779	register_t *regp;
2780
2781	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2782		*retval = *regp;
2783		return (0);
2784	} else
2785		return (EINVAL);
2786}
2787
2788static int
2789vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2790{
2791	register_t *regp;
2792
2793	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2794		*regp = val;
2795		return (0);
2796	} else
2797		return (EINVAL);
2798}
2799
2800static int
2801vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2802{
2803	uint64_t gi;
2804	int error;
2805
2806	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2807	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2808	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2809	return (error);
2810}
2811
2812static int
2813vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2814{
2815	struct vmcs *vmcs;
2816	uint64_t gi;
2817	int error, ident;
2818
2819	/*
2820	 * Forcing the vcpu into an interrupt shadow is not supported.
2821	 */
2822	if (val) {
2823		error = EINVAL;
2824		goto done;
2825	}
2826
2827	vmcs = &vmx->vmcs[vcpu];
2828	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2829	error = vmcs_getreg(vmcs, running, ident, &gi);
2830	if (error == 0) {
2831		gi &= ~HWINTR_BLOCKING;
2832		error = vmcs_setreg(vmcs, running, ident, gi);
2833	}
2834done:
2835	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2836	    error ? "failed" : "succeeded");
2837	return (error);
2838}
2839
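/*
 * Return the VMCS field that shadows %cr0 or %cr4, or -1 if the register
 * has no shadow.
 */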
2840static int
2841vmx_shadow_reg(int reg)
2842{
2843	int shreg;
2844
2845	shreg = -1;
2846
2847	switch (reg) {
2848	case VM_REG_GUEST_CR0:
2849		shreg = VMCS_CR0_SHADOW;
2850		break;
2851	case VM_REG_GUEST_CR4:
2852		shreg = VMCS_CR4_SHADOW;
2853		break;
2854	default:
2855		break;
2856	}
2857
2858	return (shreg);
2859}
2860
2861static int
2862vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2863{
2864	int running, hostcpu;
2865	struct vmx *vmx = arg;
2866
2867	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2868	if (running && hostcpu != curcpu)
2869		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2870
2871	if (reg == VM_REG_GUEST_INTR_SHADOW)
2872		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2873
2874	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2875		return (0);
2876
2877	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2878}
2879
2880static int
2881vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2882{
2883	int error, hostcpu, running, shadow;
2884	uint64_t ctls;
2885	pmap_t pmap;
2886	struct vmx *vmx = arg;
2887
2888	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2889	if (running && hostcpu != curcpu)
2890		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2891
2892	if (reg == VM_REG_GUEST_INTR_SHADOW)
2893		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2894
2895	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2896		return (0);
2897
2898	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2899
2900	if (error == 0) {
2901		/*
2902		 * If the "load EFER" VM-entry control is 1 then the
2903		 * value of EFER.LMA must be identical to the "IA-32e mode guest"
2904		 * bit in the VM-entry control.
2905		 */
2906		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2907		    (reg == VM_REG_GUEST_EFER)) {
2908			vmcs_getreg(&vmx->vmcs[vcpu], running,
2909				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2910			if (val & EFER_LMA)
2911				ctls |= VM_ENTRY_GUEST_LMA;
2912			else
2913				ctls &= ~VM_ENTRY_GUEST_LMA;
2914			vmcs_setreg(&vmx->vmcs[vcpu], running,
2915				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2916		}
2917
2918		shadow = vmx_shadow_reg(reg);
2919		if (shadow > 0) {
2920			/*
2921			 * Store the unmodified value in the shadow
2922			 */
2923			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2924				    VMCS_IDENT(shadow), val);
2925		}
2926
2927		if (reg == VM_REG_GUEST_CR3) {
2928			/*
2929			 * Invalidate the guest vcpu's TLB mappings to emulate
2930			 * the behavior of updating %cr3.
2931			 *
2932			 * XXX the processor retains global mappings when %cr3
2933			 * is updated but vmx_invvpid() does not.
2934			 */
2935			pmap = vmx->ctx[vcpu].pmap;
2936			vmx_invvpid(vmx, vcpu, pmap, running);
2937		}
2938	}
2939
2940	return (error);
2941}
2942
2943static int
2944vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2945{
2946	int hostcpu, running;
2947	struct vmx *vmx = arg;
2948
2949	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2950	if (running && hostcpu != curcpu)
2951		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2952
2953	return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2954}
2955
2956static int
2957vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2958{
2959	int hostcpu, running;
2960	struct vmx *vmx = arg;
2961
2962	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2963	if (running && hostcpu != curcpu)
2964		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2965
2966	return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2967}
2968
2969static int
2970vmx_getcap(void *arg, int vcpu, int type, int *retval)
2971{
2972	struct vmx *vmx = arg;
2973	int vcap;
2974	int ret;
2975
2976	ret = ENOENT;
2977
2978	vcap = vmx->cap[vcpu].set;
2979
2980	switch (type) {
2981	case VM_CAP_HALT_EXIT:
2982		if (cap_halt_exit)
2983			ret = 0;
2984		break;
2985	case VM_CAP_PAUSE_EXIT:
2986		if (cap_pause_exit)
2987			ret = 0;
2988		break;
2989	case VM_CAP_MTRAP_EXIT:
2990		if (cap_monitor_trap)
2991			ret = 0;
2992		break;
2993	case VM_CAP_UNRESTRICTED_GUEST:
2994		if (cap_unrestricted_guest)
2995			ret = 0;
2996		break;
2997	case VM_CAP_ENABLE_INVPCID:
2998		if (cap_invpcid)
2999			ret = 0;
3000		break;
3001	default:
3002		break;
3003	}
3004
3005	if (ret == 0)
3006		*retval = (vcap & (1 << type)) ? 1 : 0;
3007
3008	return (ret);
3009}
3010
3011static int
3012vmx_setcap(void *arg, int vcpu, int type, int val)
3013{
3014	struct vmx *vmx = arg;
3015	struct vmcs *vmcs = &vmx->vmcs[vcpu];
3016	uint32_t baseval;
3017	uint32_t *pptr;
3018	int error;
3019	int flag;
3020	int reg;
3021	int retval;
3022
3023	retval = ENOENT;
3024	pptr = NULL;
3025
3026	switch (type) {
3027	case VM_CAP_HALT_EXIT:
3028		if (cap_halt_exit) {
3029			retval = 0;
3030			pptr = &vmx->cap[vcpu].proc_ctls;
3031			baseval = *pptr;
3032			flag = PROCBASED_HLT_EXITING;
3033			reg = VMCS_PRI_PROC_BASED_CTLS;
3034		}
3035		break;
3036	case VM_CAP_MTRAP_EXIT:
3037		if (cap_monitor_trap) {
3038			retval = 0;
3039			pptr = &vmx->cap[vcpu].proc_ctls;
3040			baseval = *pptr;
3041			flag = PROCBASED_MTF;
3042			reg = VMCS_PRI_PROC_BASED_CTLS;
3043		}
3044		break;
3045	case VM_CAP_PAUSE_EXIT:
3046		if (cap_pause_exit) {
3047			retval = 0;
3048			pptr = &vmx->cap[vcpu].proc_ctls;
3049			baseval = *pptr;
3050			flag = PROCBASED_PAUSE_EXITING;
3051			reg = VMCS_PRI_PROC_BASED_CTLS;
3052		}
3053		break;
3054	case VM_CAP_UNRESTRICTED_GUEST:
3055		if (cap_unrestricted_guest) {
3056			retval = 0;
3057			pptr = &vmx->cap[vcpu].proc_ctls2;
3058			baseval = *pptr;
3059			flag = PROCBASED2_UNRESTRICTED_GUEST;
3060			reg = VMCS_SEC_PROC_BASED_CTLS;
3061		}
3062		break;
3063	case VM_CAP_ENABLE_INVPCID:
3064		if (cap_invpcid) {
3065			retval = 0;
3066			pptr = &vmx->cap[vcpu].proc_ctls2;
3067			baseval = *pptr;
3068			flag = PROCBASED2_ENABLE_INVPCID;
3069			reg = VMCS_SEC_PROC_BASED_CTLS;
3070		}
3071		break;
3072	default:
3073		break;
3074	}
3075
3076	if (retval == 0) {
3077		if (val) {
3078			baseval |= flag;
3079		} else {
3080			baseval &= ~flag;
3081		}
3082		VMPTRLD(vmcs);
3083		error = vmwrite(reg, baseval);
3084		VMCLEAR(vmcs);
3085
3086		if (error) {
3087			retval = error;
3088		} else {
3089			/*
3090			 * Update optional stored flags, and record
3091			 * the setting.
3092			 */
3093			if (pptr != NULL) {
3094				*pptr = baseval;
3095			}
3096
3097			if (val) {
3098				vmx->cap[vcpu].set |= (1 << type);
3099			} else {
3100				vmx->cap[vcpu].set &= ~(1 << type);
3101			}
3102		}
3103	}
3104
3105	return (retval);
3106}
3107
3108struct vlapic_vtx {
3109	struct vlapic	vlapic;
3110	struct pir_desc	*pir_desc;
3111	struct vmx	*vmx;
3112};
3113
3114#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
3115do {									\
3116	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
3117	    level ? "level" : "edge", vector);				\
3118	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
3119	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
3120	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
3121	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
3122	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3123} while (0)
3124
3125/*
3126 * vlapic->ops handlers that utilize the APICv hardware assist described in
3127 * Chapter 29 of the Intel SDM.
3128 */
3129static int
3130vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3131{
3132	struct vlapic_vtx *vlapic_vtx;
3133	struct pir_desc *pir_desc;
3134	uint64_t mask;
3135	int idx, notify;
3136
3137	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3138	pir_desc = vlapic_vtx->pir_desc;
3139
3140	/*
3141	 * Keep track of interrupt requests in the PIR descriptor. This is
3142	 * because the virtual APIC page pointed to by the VMCS cannot be
3143	 * modified if the vcpu is running.
3144	 */
3145	idx = vector / 64;
3146	mask = 1UL << (vector % 64);
3147	atomic_set_long(&pir_desc->pir[idx], mask);
3148	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3149
3150	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3151	    level, "vmx_set_intr_ready");
3152	return (notify);
3153}
3154
3155static int
3156vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3157{
3158	struct vlapic_vtx *vlapic_vtx;
3159	struct pir_desc *pir_desc;
3160	struct LAPIC *lapic;
3161	uint64_t pending, pirval;
3162	uint32_t ppr, vpr;
3163	int i;
3164
3165	/*
3166	 * This function is only expected to be called from the 'HLT' exit
3167	 * handler which does not care about the vector that is pending.
3168	 */
3169	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3170
3171	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3172	pir_desc = vlapic_vtx->pir_desc;
3173
3174	pending = atomic_load_acq_long(&pir_desc->pending);
3175	if (!pending)
3176		return (0);	/* common case */
3177
3178	/*
3179	 * If there is an interrupt pending then it will be recognized only
3180	 * if its priority is greater than the processor priority.
3181	 *
3182	 * Special case: if the processor priority is zero then any pending
3183	 * interrupt will be recognized.
3184	 */
3185	lapic = vlapic->apic_page;
3186	ppr = lapic->ppr & 0xf0;
3187	if (ppr == 0)
3188		return (1);
3189
3190	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3191	    lapic->ppr);
3192
3193	for (i = 3; i >= 0; i--) {
3194		pirval = pir_desc->pir[i];
3195		if (pirval != 0) {
3196			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3197			return (vpr > ppr);
3198		}
3199	}
3200	return (0);
3201}
3202
3203static void
3204vmx_intr_accepted(struct vlapic *vlapic, int vector)
3205{
3206
3207	panic("vmx_intr_accepted: not expected to be called");
3208}
3209
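/*
 * Set or clear the bit for 'vector' in the EOI-exit bitmap to match the
 * trigger mode of the interrupt.
 */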
3210static void
3211vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3212{
3213	struct vlapic_vtx *vlapic_vtx;
3214	struct vmx *vmx;
3215	struct vmcs *vmcs;
3216	uint64_t mask, val;
3217
3218	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3219	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3220	    ("vmx_set_tmr: vcpu cannot be running"));
3221
3222	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3223	vmx = vlapic_vtx->vmx;
3224	vmcs = &vmx->vmcs[vlapic->vcpuid];
3225	mask = 1UL << (vector % 64);
3226
3227	VMPTRLD(vmcs);
3228	val = vmcs_read(VMCS_EOI_EXIT(vector));
3229	if (level)
3230		val |= mask;
3231	else
3232		val &= ~mask;
3233	vmcs_write(VMCS_EOI_EXIT(vector), val);
3234	VMCLEAR(vmcs);
3235}
3236
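/*
 * Switch a vcpu from APIC-access virtualization to x2APIC virtualization.
 * The APIC-access page mapping and the MSR bitmap are shared by all vcpus
 * so they are updated only once, in the context of vcpu 0.
 */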
3237static void
3238vmx_enable_x2apic_mode(struct vlapic *vlapic)
3239{
3240	struct vmx *vmx;
3241	struct vmcs *vmcs;
3242	uint32_t proc_ctls2;
3243	int vcpuid, error;
3244
3245	vcpuid = vlapic->vcpuid;
3246	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3247	vmcs = &vmx->vmcs[vcpuid];
3248
3249	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3250	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3251	    ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3252
3253	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3254	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3255	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3256
3257	VMPTRLD(vmcs);
3258	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3259	VMCLEAR(vmcs);
3260
3261	if (vlapic->vcpuid == 0) {
3262		/*
3263		 * The nested page table mappings are shared by all vcpus
3264		 * so unmap the APIC access page just once.
3265		 */
3266		error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3267		KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3268		    __func__, error));
3269
3270		/*
3271		 * The MSR bitmap is shared by all vcpus so modify it only
3272		 * once in the context of vcpu 0.
3273		 */
3274		error = vmx_allow_x2apic_msrs(vmx);
3275		KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3276		    __func__, error));
3277	}
3278}
3279
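/*
 * Send the posted-interrupt notification vector to the host cpu that the
 * vcpu is currently running on.
 */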
3280static void
3281vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3282{
3283
3284	ipi_cpu(hostcpu, pirvec);
3285}
3286
3287/*
3288 * Transfer the pending interrupts in the PIR descriptor to the IRR
3289 * in the virtual APIC page.
3290 */
3291static void
3292vmx_inject_pir(struct vlapic *vlapic)
3293{
3294	struct vlapic_vtx *vlapic_vtx;
3295	struct pir_desc *pir_desc;
3296	struct LAPIC *lapic;
3297	uint64_t val, pirval;
3298	int rvi, pirbase = -1;
3299	uint16_t intr_status_old, intr_status_new;
3300
3301	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3302	pir_desc = vlapic_vtx->pir_desc;
3303	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3304		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3305		    "no posted interrupt pending");
3306		return;
3307	}
3308
3309	pirval = 0;
3310	pirbase = -1;
3311	lapic = vlapic->apic_page;
3312
3313	val = atomic_readandclear_long(&pir_desc->pir[0]);
3314	if (val != 0) {
3315		lapic->irr0 |= val;
3316		lapic->irr1 |= val >> 32;
3317		pirbase = 0;
3318		pirval = val;
3319	}
3320
3321	val = atomic_readandclear_long(&pir_desc->pir[1]);
3322	if (val != 0) {
3323		lapic->irr2 |= val;
3324		lapic->irr3 |= val >> 32;
3325		pirbase = 64;
3326		pirval = val;
3327	}
3328
3329	val = atomic_readandclear_long(&pir_desc->pir[2]);
3330	if (val != 0) {
3331		lapic->irr4 |= val;
3332		lapic->irr5 |= val >> 32;
3333		pirbase = 128;
3334		pirval = val;
3335	}
3336
3337	val = atomic_readandclear_long(&pir_desc->pir[3]);
3338	if (val != 0) {
3339		lapic->irr6 |= val;
3340		lapic->irr7 |= val >> 32;
3341		pirbase = 192;
3342		pirval = val;
3343	}
3344
3345	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3346
3347	/*
3348	 * Update RVI so the processor can evaluate pending virtual
3349	 * interrupts on VM-entry.
3350	 *
3351	 * It is possible for pirval to be 0 here, even though the
3352	 * pending bit has been set. The scenario is:
3353	 * CPU-Y is sending a posted interrupt to CPU-X, which
3354	 * is running a guest and processing posted interrupts in h/w.
3355	 * CPU-X will eventually exit and the state seen in s/w is
3356	 * the pending bit set, but no PIR bits set.
3357	 *
3358	 *      CPU-X                      CPU-Y
3359	 *   (vm running)                (host running)
3360	 *   rx posted interrupt
3361	 *   CLEAR pending bit
3362	 *				 SET PIR bit
3363	 *   READ/CLEAR PIR bits
3364	 *				 SET pending bit
3365	 *   (vm exit)
3366	 *   pending bit set, PIR 0
3367	 */
3368	if (pirval != 0) {
3369		rvi = pirbase + flsl(pirval) - 1;
3370		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3371		intr_status_new = (intr_status_old & 0xFF00) | rvi;
3372		if (intr_status_new > intr_status_old) {
3373			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3374			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3375			    "guest_intr_status changed from 0x%04x to 0x%04x",
3376			    intr_status_old, intr_status_new);
3377		}
3378	}
3379}
3380
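/*
 * Allocate and initialize the virtual local APIC for a vcpu, wiring up the
 * APICv and posted-interrupt handlers when those hardware assists are in use.
 */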
3381static struct vlapic *
3382vmx_vlapic_init(void *arg, int vcpuid)
3383{
3384	struct vmx *vmx;
3385	struct vlapic *vlapic;
3386	struct vlapic_vtx *vlapic_vtx;
3387
3388	vmx = arg;
3389
3390	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3391	vlapic->vm = vmx->vm;
3392	vlapic->vcpuid = vcpuid;
3393	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3394
3395	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3396	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3397	vlapic_vtx->vmx = vmx;
3398
3399	if (virtual_interrupt_delivery) {
3400		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3401		vlapic->ops.pending_intr = vmx_pending_intr;
3402		vlapic->ops.intr_accepted = vmx_intr_accepted;
3403		vlapic->ops.set_tmr = vmx_set_tmr;
3404		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3405	}
3406
3407	if (posted_interrupts)
3408		vlapic->ops.post_intr = vmx_post_intr;
3409
3410	vlapic_init(vlapic);
3411
3412	return (vlapic);
3413}
3414
3415static void
3416vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3417{
3418
3419	vlapic_cleanup(vlapic);
3420	free(vlapic, M_VLAPIC);
3421}
3422
3423struct vmm_ops vmm_ops_intel = {
3424	vmx_init,
3425	vmx_cleanup,
3426	vmx_restore,
3427	vmx_vminit,
3428	vmx_run,
3429	vmx_vmcleanup,
3430	vmx_getreg,
3431	vmx_setreg,
3432	vmx_getdesc,
3433	vmx_setdesc,
3434	vmx_getcap,
3435	vmx_setcap,
3436	ept_vmspace_alloc,
3437	ept_vmspace_free,
3438	vmx_vlapic_init,
3439	vmx_vlapic_cleanup,
3440};
3441