vmx.c revision 268777
1/*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 268777 2014-07-16 21:26:26Z neel $
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 268777 2014-07-16 21:26:26Z neel $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/smp.h>
35#include <sys/kernel.h>
36#include <sys/malloc.h>
37#include <sys/pcpu.h>
38#include <sys/proc.h>
39#include <sys/sysctl.h>
40
41#include <vm/vm.h>
42#include <vm/pmap.h>
43
44#include <machine/psl.h>
45#include <machine/cpufunc.h>
46#include <machine/md_var.h>
47#include <machine/segments.h>
48#include <machine/smp.h>
49#include <machine/specialreg.h>
50#include <machine/vmparam.h>
51
52#include <machine/vmm.h>
53#include <machine/vmm_dev.h>
54#include <machine/vmm_instruction_emul.h>
55#include "vmm_host.h"
56#include "vmm_ioport.h"
57#include "vmm_ipi.h"
58#include "vmm_msr.h"
59#include "vmm_ktr.h"
60#include "vmm_stat.h"
61#include "vatpic.h"
62#include "vlapic.h"
63#include "vlapic_priv.h"
64
65#include "vmx_msr.h"
66#include "ept.h"
67#include "vmx_cpufunc.h"
68#include "vmx.h"
69#include "x86.h"
70#include "vmx_controls.h"
71
72#define	PINBASED_CTLS_ONE_SETTING					\
73	(PINBASED_EXTINT_EXITING	|				\
74	 PINBASED_NMI_EXITING		|				\
75	 PINBASED_VIRTUAL_NMI)
76#define	PINBASED_CTLS_ZERO_SETTING	0
77
78#define PROCBASED_CTLS_WINDOW_SETTING					\
79	(PROCBASED_INT_WINDOW_EXITING	|				\
80	 PROCBASED_NMI_WINDOW_EXITING)
81
82#define	PROCBASED_CTLS_ONE_SETTING 					\
83	(PROCBASED_SECONDARY_CONTROLS	|				\
84	 PROCBASED_IO_EXITING		|				\
85	 PROCBASED_MSR_BITMAPS		|				\
86	 PROCBASED_CTLS_WINDOW_SETTING	|				\
87	 PROCBASED_CR8_LOAD_EXITING	|				\
88	 PROCBASED_CR8_STORE_EXITING)
89#define	PROCBASED_CTLS_ZERO_SETTING	\
90	(PROCBASED_CR3_LOAD_EXITING |	\
91	PROCBASED_CR3_STORE_EXITING |	\
92	PROCBASED_IO_BITMAPS)
93
94#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
95#define	PROCBASED_CTLS2_ZERO_SETTING	0
96
97#define VM_EXIT_CTLS_ONE_SETTING_NO_PAT					\
98	(VM_EXIT_HOST_LMA			|			\
99	VM_EXIT_SAVE_EFER			|			\
100	VM_EXIT_LOAD_EFER)
101
102#define	VM_EXIT_CTLS_ONE_SETTING					\
103	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT       	|			\
104	VM_EXIT_ACKNOWLEDGE_INTERRUPT		|			\
105	VM_EXIT_SAVE_PAT			|			\
106	VM_EXIT_LOAD_PAT)
107#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS
108
109#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER
110
111#define	VM_ENTRY_CTLS_ONE_SETTING					\
112	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT     	|			\
113	VM_ENTRY_LOAD_PAT)
114#define	VM_ENTRY_CTLS_ZERO_SETTING					\
115	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
116	VM_ENTRY_INTO_SMM			|			\
117	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
118
119#define	guest_msr_rw(vmx, msr) \
120	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
121
122#define	guest_msr_ro(vmx, msr) \
123    msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ)
124
125#define	HANDLED		1
126#define	UNHANDLED	0
127
128static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
129static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
130
131SYSCTL_DECL(_hw_vmm);
132SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
133
134int vmxon_enabled[MAXCPU];
135static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
136
137static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
138static uint32_t exit_ctls, entry_ctls;
139
140static uint64_t cr0_ones_mask, cr0_zeros_mask;
141SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
142	     &cr0_ones_mask, 0, NULL);
143SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
144	     &cr0_zeros_mask, 0, NULL);
145
146static uint64_t cr4_ones_mask, cr4_zeros_mask;
147SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
148	     &cr4_ones_mask, 0, NULL);
149SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
150	     &cr4_zeros_mask, 0, NULL);
151
152static int vmx_no_patmsr;
153
154static int vmx_initialized;
155SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
156	   &vmx_initialized, 0, "Intel VMX initialized");
157
158/*
159 * Optional capabilities
160 */
161static int cap_halt_exit;
162static int cap_pause_exit;
163static int cap_unrestricted_guest;
164static int cap_monitor_trap;
165static int cap_invpcid;
166
167static int virtual_interrupt_delivery;
168SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
169    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
170
171static int posted_interrupts;
172SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD,
173    &posted_interrupts, 0, "APICv posted interrupt support");
174
175static int pirvec;
176SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
177    &pirvec, 0, "APICv posted interrupt vector");
178
179static struct unrhdr *vpid_unr;
180static u_int vpid_alloc_failed;
181SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
182	    &vpid_alloc_failed, 0, NULL);
183
184/*
185 * Use the last page below 4GB as the APIC access address. This address is
186 * occupied by the boot firmware so it is guaranteed that it will not conflict
187 * with a page in system memory.
188 */
189#define	APIC_ACCESS_ADDRESS	0xFFFFF000
190
191static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
192static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
193static void vmx_inject_pir(struct vlapic *vlapic);
194
195#ifdef KTR
196static const char *
197exit_reason_to_str(int reason)
198{
199	static char reasonbuf[32];
200
201	switch (reason) {
202	case EXIT_REASON_EXCEPTION:
203		return "exception";
204	case EXIT_REASON_EXT_INTR:
205		return "extint";
206	case EXIT_REASON_TRIPLE_FAULT:
207		return "triplefault";
208	case EXIT_REASON_INIT:
209		return "init";
210	case EXIT_REASON_SIPI:
211		return "sipi";
212	case EXIT_REASON_IO_SMI:
213		return "iosmi";
214	case EXIT_REASON_SMI:
215		return "smi";
216	case EXIT_REASON_INTR_WINDOW:
217		return "intrwindow";
218	case EXIT_REASON_NMI_WINDOW:
219		return "nmiwindow";
220	case EXIT_REASON_TASK_SWITCH:
221		return "taskswitch";
222	case EXIT_REASON_CPUID:
223		return "cpuid";
224	case EXIT_REASON_GETSEC:
225		return "getsec";
226	case EXIT_REASON_HLT:
227		return "hlt";
228	case EXIT_REASON_INVD:
229		return "invd";
230	case EXIT_REASON_INVLPG:
231		return "invlpg";
232	case EXIT_REASON_RDPMC:
233		return "rdpmc";
234	case EXIT_REASON_RDTSC:
235		return "rdtsc";
236	case EXIT_REASON_RSM:
237		return "rsm";
238	case EXIT_REASON_VMCALL:
239		return "vmcall";
240	case EXIT_REASON_VMCLEAR:
241		return "vmclear";
242	case EXIT_REASON_VMLAUNCH:
243		return "vmlaunch";
244	case EXIT_REASON_VMPTRLD:
245		return "vmptrld";
246	case EXIT_REASON_VMPTRST:
247		return "vmptrst";
248	case EXIT_REASON_VMREAD:
249		return "vmread";
250	case EXIT_REASON_VMRESUME:
251		return "vmresume";
252	case EXIT_REASON_VMWRITE:
253		return "vmwrite";
254	case EXIT_REASON_VMXOFF:
255		return "vmxoff";
256	case EXIT_REASON_VMXON:
257		return "vmxon";
258	case EXIT_REASON_CR_ACCESS:
259		return "craccess";
260	case EXIT_REASON_DR_ACCESS:
261		return "draccess";
262	case EXIT_REASON_INOUT:
263		return "inout";
264	case EXIT_REASON_RDMSR:
265		return "rdmsr";
266	case EXIT_REASON_WRMSR:
267		return "wrmsr";
268	case EXIT_REASON_INVAL_VMCS:
269		return "invalvmcs";
270	case EXIT_REASON_INVAL_MSR:
271		return "invalmsr";
272	case EXIT_REASON_MWAIT:
273		return "mwait";
274	case EXIT_REASON_MTF:
275		return "mtf";
276	case EXIT_REASON_MONITOR:
277		return "monitor";
278	case EXIT_REASON_PAUSE:
279		return "pause";
280	case EXIT_REASON_MCE:
281		return "mce";
282	case EXIT_REASON_TPR:
283		return "tpr";
284	case EXIT_REASON_APIC_ACCESS:
285		return "apic-access";
286	case EXIT_REASON_GDTR_IDTR:
287		return "gdtridtr";
288	case EXIT_REASON_LDTR_TR:
289		return "ldtrtr";
290	case EXIT_REASON_EPT_FAULT:
291		return "eptfault";
292	case EXIT_REASON_EPT_MISCONFIG:
293		return "eptmisconfig";
294	case EXIT_REASON_INVEPT:
295		return "invept";
296	case EXIT_REASON_RDTSCP:
297		return "rdtscp";
298	case EXIT_REASON_VMX_PREEMPT:
299		return "vmxpreempt";
300	case EXIT_REASON_INVVPID:
301		return "invvpid";
302	case EXIT_REASON_WBINVD:
303		return "wbinvd";
304	case EXIT_REASON_XSETBV:
305		return "xsetbv";
306	case EXIT_REASON_APIC_WRITE:
307		return "apic-write";
308	default:
309		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
310		return (reasonbuf);
311	}
312}
313#endif	/* KTR */
314
315static int
316vmx_allow_x2apic_msrs(struct vmx *vmx)
317{
318	int i, error;
319
320	error = 0;
321
322	/*
323	 * Allow readonly access to the following x2APIC MSRs from the guest.
324	 */
325	error += guest_msr_ro(vmx, MSR_APIC_ID);
326	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
327	error += guest_msr_ro(vmx, MSR_APIC_LDR);
328	error += guest_msr_ro(vmx, MSR_APIC_SVR);
329
330	for (i = 0; i < 8; i++)
331		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
332
333	for (i = 0; i < 8; i++)
334		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
335
336	for (i = 0; i < 8; i++)
337		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
338
339	error += guest_msr_ro(vmx, MSR_APIC_ESR);
340	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
341	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
342	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
343	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
344	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
345	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
346	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
347	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
348	error += guest_msr_ro(vmx, MSR_APIC_ICR);
349
350	/*
351	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
352	 *
353	 * These registers get special treatment described in the section
354	 * "Virtualizing MSR-Based APIC Accesses".
355	 */
356	error += guest_msr_rw(vmx, MSR_APIC_TPR);
357	error += guest_msr_rw(vmx, MSR_APIC_EOI);
358	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
359
360	return (error);
361}
362
363u_long
364vmx_fix_cr0(u_long cr0)
365{
366
367	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
368}
369
370u_long
371vmx_fix_cr4(u_long cr4)
372{
373
374	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
375}
376
377static void
378vpid_free(int vpid)
379{
380	if (vpid < 0 || vpid > 0xffff)
381		panic("vpid_free: invalid vpid %d", vpid);
382
383	/*
384	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
385	 * the unit number allocator.
386	 */
387
388	if (vpid > VM_MAXCPU)
389		free_unr(vpid_unr, vpid);
390}
391
392static void
393vpid_alloc(uint16_t *vpid, int num)
394{
395	int i, x;
396
397	if (num <= 0 || num > VM_MAXCPU)
398		panic("invalid number of vpids requested: %d", num);
399
400	/*
401	 * If the "enable vpid" execution control is not enabled then the
402	 * VPID is required to be 0 for all vcpus.
403	 */
404	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
405		for (i = 0; i < num; i++)
406			vpid[i] = 0;
407		return;
408	}
409
410	/*
411	 * Allocate a unique VPID for each vcpu from the unit number allocator.
412	 */
413	for (i = 0; i < num; i++) {
414		x = alloc_unr(vpid_unr);
415		if (x == -1)
416			break;
417		else
418			vpid[i] = x;
419	}
420
421	if (i < num) {
422		atomic_add_int(&vpid_alloc_failed, 1);
423
424		/*
425		 * If the unit number allocator does not have enough unique
426		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
427		 *
428		 * These VPIDs are not be unique across VMs but this does not
429		 * affect correctness because the combined mappings are also
430		 * tagged with the EP4TA which is unique for each VM.
431		 *
432		 * It is still sub-optimal because the invvpid will invalidate
433		 * combined mappings for a particular VPID across all EP4TAs.
434		 */
435		while (i-- > 0)
436			vpid_free(vpid[i]);
437
438		for (i = 0; i < num; i++)
439			vpid[i] = i + 1;
440	}
441}
442
443static void
444vpid_init(void)
445{
446	/*
447	 * VPID 0 is required when the "enable VPID" execution control is
448	 * disabled.
449	 *
450	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
451	 * unit number allocator does not have sufficient unique VPIDs to
452	 * satisfy the allocation.
453	 *
454	 * The remaining VPIDs are managed by the unit number allocator.
455	 */
456	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
457}
458
459static void
460msr_save_area_init(struct msr_entry *g_area, int *g_count)
461{
462	int cnt;
463
464	static struct msr_entry guest_msrs[] = {
465		{ MSR_KGSBASE, 0, 0 },
466	};
467
468	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
469	if (cnt > GUEST_MSR_MAX_ENTRIES)
470		panic("guest msr save area overrun");
471	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
472	*g_count = cnt;
473}
474
475static void
476vmx_disable(void *arg __unused)
477{
478	struct invvpid_desc invvpid_desc = { 0 };
479	struct invept_desc invept_desc = { 0 };
480
481	if (vmxon_enabled[curcpu]) {
482		/*
483		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
484		 *
485		 * VMXON or VMXOFF are not required to invalidate any TLB
486		 * caching structures. This prevents potential retention of
487		 * cached information in the TLB between distinct VMX episodes.
488		 */
489		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
490		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
491		vmxoff();
492	}
493	load_cr4(rcr4() & ~CR4_VMXE);
494}
495
496static int
497vmx_cleanup(void)
498{
499
500	if (pirvec != 0)
501		vmm_ipi_free(pirvec);
502
503	if (vpid_unr != NULL) {
504		delete_unrhdr(vpid_unr);
505		vpid_unr = NULL;
506	}
507
508	smp_rendezvous(NULL, vmx_disable, NULL, NULL);
509
510	return (0);
511}
512
513static void
514vmx_enable(void *arg __unused)
515{
516	int error;
517	uint64_t feature_control;
518
519	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
520	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
521	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
522		wrmsr(MSR_IA32_FEATURE_CONTROL,
523		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
524		    IA32_FEATURE_CONTROL_LOCK);
525	}
526
527	load_cr4(rcr4() | CR4_VMXE);
528
529	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
530	error = vmxon(vmxon_region[curcpu]);
531	if (error == 0)
532		vmxon_enabled[curcpu] = 1;
533}
534
535static void
536vmx_restore(void)
537{
538
539	if (vmxon_enabled[curcpu])
540		vmxon(vmxon_region[curcpu]);
541}
542
543static int
544vmx_init(int ipinum)
545{
546	int error, use_tpr_shadow;
547	uint64_t basic, fixed0, fixed1, feature_control;
548	uint32_t tmp, procbased2_vid_bits;
549
550	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
551	if (!(cpu_feature2 & CPUID2_VMX)) {
552		printf("vmx_init: processor does not support VMX operation\n");
553		return (ENXIO);
554	}
555
556	/*
557	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
558	 * are set (bits 0 and 2 respectively).
559	 */
560	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
561	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
562	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
563		printf("vmx_init: VMX operation disabled by BIOS\n");
564		return (ENXIO);
565	}
566
567	/*
568	 * Verify capabilities MSR_VMX_BASIC:
569	 * - bit 54 indicates support for INS/OUTS decoding
570	 */
571	basic = rdmsr(MSR_VMX_BASIC);
572	if ((basic & (1UL << 54)) == 0) {
573		printf("vmx_init: processor does not support desired basic "
574		    "capabilities\n");
575		return (EINVAL);
576	}
577
578	/* Check support for primary processor-based VM-execution controls */
579	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
580			       MSR_VMX_TRUE_PROCBASED_CTLS,
581			       PROCBASED_CTLS_ONE_SETTING,
582			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
583	if (error) {
584		printf("vmx_init: processor does not support desired primary "
585		       "processor-based controls\n");
586		return (error);
587	}
588
589	/* Clear the processor-based ctl bits that are set on demand */
590	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
591
592	/* Check support for secondary processor-based VM-execution controls */
593	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
594			       MSR_VMX_PROCBASED_CTLS2,
595			       PROCBASED_CTLS2_ONE_SETTING,
596			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
597	if (error) {
598		printf("vmx_init: processor does not support desired secondary "
599		       "processor-based controls\n");
600		return (error);
601	}
602
603	/* Check support for VPID */
604	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
605			       PROCBASED2_ENABLE_VPID, 0, &tmp);
606	if (error == 0)
607		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
608
609	/* Check support for pin-based VM-execution controls */
610	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
611			       MSR_VMX_TRUE_PINBASED_CTLS,
612			       PINBASED_CTLS_ONE_SETTING,
613			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
614	if (error) {
615		printf("vmx_init: processor does not support desired "
616		       "pin-based controls\n");
617		return (error);
618	}
619
620	/* Check support for VM-exit controls */
621	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
622			       VM_EXIT_CTLS_ONE_SETTING,
623			       VM_EXIT_CTLS_ZERO_SETTING,
624			       &exit_ctls);
625	if (error) {
626		/* Try again without the PAT MSR bits */
627		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
628				       MSR_VMX_TRUE_EXIT_CTLS,
629				       VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
630				       VM_EXIT_CTLS_ZERO_SETTING,
631				       &exit_ctls);
632		if (error) {
633			printf("vmx_init: processor does not support desired "
634			       "exit controls\n");
635			return (error);
636		} else {
637			if (bootverbose)
638				printf("vmm: PAT MSR access not supported\n");
639			guest_msr_valid(MSR_PAT);
640			vmx_no_patmsr = 1;
641		}
642	}
643
644	/* Check support for VM-entry controls */
645	if (!vmx_no_patmsr) {
646		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
647				       MSR_VMX_TRUE_ENTRY_CTLS,
648				       VM_ENTRY_CTLS_ONE_SETTING,
649				       VM_ENTRY_CTLS_ZERO_SETTING,
650				       &entry_ctls);
651	} else {
652		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
653				       MSR_VMX_TRUE_ENTRY_CTLS,
654				       VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
655				       VM_ENTRY_CTLS_ZERO_SETTING,
656				       &entry_ctls);
657	}
658
659	if (error) {
660		printf("vmx_init: processor does not support desired "
661		       "entry controls\n");
662		       return (error);
663	}
664
665	/*
666	 * Check support for optional features by testing them
667	 * as individual bits
668	 */
669	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
670					MSR_VMX_TRUE_PROCBASED_CTLS,
671					PROCBASED_HLT_EXITING, 0,
672					&tmp) == 0);
673
674	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
675					MSR_VMX_PROCBASED_CTLS,
676					PROCBASED_MTF, 0,
677					&tmp) == 0);
678
679	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
680					 MSR_VMX_TRUE_PROCBASED_CTLS,
681					 PROCBASED_PAUSE_EXITING, 0,
682					 &tmp) == 0);
683
684	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
685					MSR_VMX_PROCBASED_CTLS2,
686					PROCBASED2_UNRESTRICTED_GUEST, 0,
687				        &tmp) == 0);
688
689	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
690	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
691	    &tmp) == 0);
692
693	/*
694	 * Check support for virtual interrupt delivery.
695	 */
696	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
697	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
698	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
699	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
700
701	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
702	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
703	    &tmp) == 0);
704
705	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
706	    procbased2_vid_bits, 0, &tmp);
707	if (error == 0 && use_tpr_shadow) {
708		virtual_interrupt_delivery = 1;
709		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
710		    &virtual_interrupt_delivery);
711	}
712
713	if (virtual_interrupt_delivery) {
714		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
715		procbased_ctls2 |= procbased2_vid_bits;
716		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
717
718		/*
719		 * No need to emulate accesses to %CR8 if virtual
720		 * interrupt delivery is enabled.
721		 */
722		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
723		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
724
725		/*
726		 * Check for Posted Interrupts only if Virtual Interrupt
727		 * Delivery is enabled.
728		 */
729		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
730		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
731		    &tmp);
732		if (error == 0) {
733			pirvec = vmm_ipi_alloc();
734			if (pirvec == 0) {
735				if (bootverbose) {
736					printf("vmx_init: unable to allocate "
737					    "posted interrupt vector\n");
738				}
739			} else {
740				posted_interrupts = 1;
741				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
742				    &posted_interrupts);
743			}
744		}
745	}
746
747	if (posted_interrupts)
748		    pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
749
750	/* Initialize EPT */
751	error = ept_init(ipinum);
752	if (error) {
753		printf("vmx_init: ept initialization failed (%d)\n", error);
754		return (error);
755	}
756
757	/*
758	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
759	 */
760	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
761	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
762	cr0_ones_mask = fixed0 & fixed1;
763	cr0_zeros_mask = ~fixed0 & ~fixed1;
764
765	/*
766	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
767	 * if unrestricted guest execution is allowed.
768	 */
769	if (cap_unrestricted_guest)
770		cr0_ones_mask &= ~(CR0_PG | CR0_PE);
771
772	/*
773	 * Do not allow the guest to set CR0_NW or CR0_CD.
774	 */
775	cr0_zeros_mask |= (CR0_NW | CR0_CD);
776
777	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
778	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
779	cr4_ones_mask = fixed0 & fixed1;
780	cr4_zeros_mask = ~fixed0 & ~fixed1;
781
782	vpid_init();
783
784	/* enable VMX operation */
785	smp_rendezvous(NULL, vmx_enable, NULL, NULL);
786
787	vmx_initialized = 1;
788
789	return (0);
790}
791
792static void
793vmx_trigger_hostintr(int vector)
794{
795	uintptr_t func;
796	struct gate_descriptor *gd;
797
798	gd = &idt[vector];
799
800	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
801	    "invalid vector %d", vector));
802	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
803	    vector));
804	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
805	    "has invalid type %d", vector, gd->gd_type));
806	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
807	    "has invalid dpl %d", vector, gd->gd_dpl));
808	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
809	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
810	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
811	    "IST %d", vector, gd->gd_ist));
812
813	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
814	vmx_call_isr(func);
815}
816
817static int
818vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
819{
820	int error, mask_ident, shadow_ident;
821	uint64_t mask_value;
822
823	if (which != 0 && which != 4)
824		panic("vmx_setup_cr_shadow: unknown cr%d", which);
825
826	if (which == 0) {
827		mask_ident = VMCS_CR0_MASK;
828		mask_value = cr0_ones_mask | cr0_zeros_mask;
829		shadow_ident = VMCS_CR0_SHADOW;
830	} else {
831		mask_ident = VMCS_CR4_MASK;
832		mask_value = cr4_ones_mask | cr4_zeros_mask;
833		shadow_ident = VMCS_CR4_SHADOW;
834	}
835
836	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
837	if (error)
838		return (error);
839
840	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
841	if (error)
842		return (error);
843
844	return (0);
845}
846#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
847#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
848
849static void *
850vmx_vminit(struct vm *vm, pmap_t pmap)
851{
852	uint16_t vpid[VM_MAXCPU];
853	int i, error, guest_msr_count;
854	struct vmx *vmx;
855	struct vmcs *vmcs;
856
857	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
858	if ((uintptr_t)vmx & PAGE_MASK) {
859		panic("malloc of struct vmx not aligned on %d byte boundary",
860		      PAGE_SIZE);
861	}
862	vmx->vm = vm;
863
864	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
865
866	/*
867	 * Clean up EPTP-tagged guest physical and combined mappings
868	 *
869	 * VMX transitions are not required to invalidate any guest physical
870	 * mappings. So, it may be possible for stale guest physical mappings
871	 * to be present in the processor TLBs.
872	 *
873	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
874	 */
875	ept_invalidate_mappings(vmx->eptp);
876
877	msr_bitmap_initialize(vmx->msr_bitmap);
878
879	/*
880	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
881	 * The guest FSBASE and GSBASE are saved and restored during
882	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
883	 * always restored from the vmcs host state area on vm-exit.
884	 *
885	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
886	 * how they are saved/restored so can be directly accessed by the
887	 * guest.
888	 *
889	 * Guest KGSBASE is saved and restored in the guest MSR save area.
890	 * Host KGSBASE is restored before returning to userland from the pcb.
891	 * There will be a window of time when we are executing in the host
892	 * kernel context with a value of KGSBASE from the guest. This is ok
893	 * because the value of KGSBASE is inconsequential in kernel context.
894	 *
895	 * MSR_EFER is saved and restored in the guest VMCS area on a
896	 * VM exit and entry respectively. It is also restored from the
897	 * host VMCS area on a VM exit.
898	 *
899	 * The TSC MSR is exposed read-only. Writes are disallowed as that
900	 * will impact the host TSC.
901	 * XXX Writes would be implemented with a wrmsr trap, and
902	 * then modifying the TSC offset in the VMCS.
903	 */
904	if (guest_msr_rw(vmx, MSR_GSBASE) ||
905	    guest_msr_rw(vmx, MSR_FSBASE) ||
906	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
907	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
908	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
909	    guest_msr_rw(vmx, MSR_KGSBASE) ||
910	    guest_msr_rw(vmx, MSR_EFER) ||
911	    guest_msr_ro(vmx, MSR_TSC))
912		panic("vmx_vminit: error setting guest msr access");
913
914	/*
915	 * MSR_PAT is saved and restored in the guest VMCS are on a VM exit
916	 * and entry respectively. It is also restored from the host VMCS
917	 * area on a VM exit. However, if running on a system with no
918	 * MSR_PAT save/restore support, leave access disabled so accesses
919	 * will be trapped.
920	 */
921	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
922		panic("vmx_vminit: error setting guest pat msr access");
923
924	vpid_alloc(vpid, VM_MAXCPU);
925
926	if (virtual_interrupt_delivery) {
927		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
928		    APIC_ACCESS_ADDRESS);
929		/* XXX this should really return an error to the caller */
930		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
931	}
932
933	for (i = 0; i < VM_MAXCPU; i++) {
934		vmcs = &vmx->vmcs[i];
935		vmcs->identifier = vmx_revision();
936		error = vmclear(vmcs);
937		if (error != 0) {
938			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
939			      error, i);
940		}
941
942		error = vmcs_init(vmcs);
943		KASSERT(error == 0, ("vmcs_init error %d", error));
944
945		VMPTRLD(vmcs);
946		error = 0;
947		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
948		error += vmwrite(VMCS_EPTP, vmx->eptp);
949		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
950		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
951		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
952		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
953		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
954		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
955		error += vmwrite(VMCS_VPID, vpid[i]);
956		if (virtual_interrupt_delivery) {
957			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
958			error += vmwrite(VMCS_VIRTUAL_APIC,
959			    vtophys(&vmx->apic_page[i]));
960			error += vmwrite(VMCS_EOI_EXIT0, 0);
961			error += vmwrite(VMCS_EOI_EXIT1, 0);
962			error += vmwrite(VMCS_EOI_EXIT2, 0);
963			error += vmwrite(VMCS_EOI_EXIT3, 0);
964		}
965		if (posted_interrupts) {
966			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
967			error += vmwrite(VMCS_PIR_DESC,
968			    vtophys(&vmx->pir_desc[i]));
969		}
970		VMCLEAR(vmcs);
971		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
972
973		vmx->cap[i].set = 0;
974		vmx->cap[i].proc_ctls = procbased_ctls;
975		vmx->cap[i].proc_ctls2 = procbased_ctls2;
976
977		vmx->state[i].lastcpu = NOCPU;
978		vmx->state[i].vpid = vpid[i];
979
980		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
981
982		error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]),
983		    guest_msr_count);
984		if (error != 0)
985			panic("vmcs_set_msr_save error %d", error);
986
987		/*
988		 * Set up the CR0/4 shadows, and init the read shadow
989		 * to the power-on register value from the Intel Sys Arch.
990		 *  CR0 - 0x60000010
991		 *  CR4 - 0
992		 */
993		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
994		if (error != 0)
995			panic("vmx_setup_cr0_shadow %d", error);
996
997		error = vmx_setup_cr4_shadow(vmcs, 0);
998		if (error != 0)
999			panic("vmx_setup_cr4_shadow %d", error);
1000
1001		vmx->ctx[i].pmap = pmap;
1002	}
1003
1004	return (vmx);
1005}
1006
1007static int
1008vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
1009{
1010	int handled, func;
1011
1012	func = vmxctx->guest_rax;
1013
1014	handled = x86_emulate_cpuid(vm, vcpu,
1015				    (uint32_t*)(&vmxctx->guest_rax),
1016				    (uint32_t*)(&vmxctx->guest_rbx),
1017				    (uint32_t*)(&vmxctx->guest_rcx),
1018				    (uint32_t*)(&vmxctx->guest_rdx));
1019	return (handled);
1020}
1021
1022static __inline void
1023vmx_run_trace(struct vmx *vmx, int vcpu)
1024{
1025#ifdef KTR
1026	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
1027#endif
1028}
1029
1030static __inline void
1031vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
1032	       int handled)
1033{
1034#ifdef KTR
1035	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
1036		 handled ? "handled" : "unhandled",
1037		 exit_reason_to_str(exit_reason), rip);
1038#endif
1039}
1040
1041static __inline void
1042vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1043{
1044#ifdef KTR
1045	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1046#endif
1047}
1048
1049static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1050static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1051
1052/*
1053 * Invalidate guest mappings identified by its vpid from the TLB.
1054 */
1055static __inline void
1056vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1057{
1058	struct vmxstate *vmxstate;
1059	struct invvpid_desc invvpid_desc;
1060
1061	vmxstate = &vmx->state[vcpu];
1062	if (vmxstate->vpid == 0)
1063		return;
1064
1065	if (!running) {
1066		/*
1067		 * Set the 'lastcpu' to an invalid host cpu.
1068		 *
1069		 * This will invalidate TLB entries tagged with the vcpu's
1070		 * vpid the next time it runs via vmx_set_pcpu_defaults().
1071		 */
1072		vmxstate->lastcpu = NOCPU;
1073		return;
1074	}
1075
1076	KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1077	    "critical section", __func__, vcpu));
1078
1079	/*
1080	 * Invalidate all mappings tagged with 'vpid'
1081	 *
1082	 * We do this because this vcpu was executing on a different host
1083	 * cpu when it last ran. We do not track whether it invalidated
1084	 * mappings associated with its 'vpid' during that run. So we must
1085	 * assume that the mappings associated with 'vpid' on 'curcpu' are
1086	 * stale and invalidate them.
1087	 *
1088	 * Note that we incur this penalty only when the scheduler chooses to
1089	 * move the thread associated with this vcpu between host cpus.
1090	 *
1091	 * Note also that this will invalidate mappings tagged with 'vpid'
1092	 * for "all" EP4TAs.
1093	 */
1094	if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1095		invvpid_desc._res1 = 0;
1096		invvpid_desc._res2 = 0;
1097		invvpid_desc.vpid = vmxstate->vpid;
1098		invvpid_desc.linear_addr = 0;
1099		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1100		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1101	} else {
1102		/*
1103		 * The invvpid can be skipped if an invept is going to
1104		 * be performed before entering the guest. The invept
1105		 * will invalidate combined mappings tagged with
1106		 * 'vmx->eptp' for all vpids.
1107		 */
1108		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1109	}
1110}
1111
1112static void
1113vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1114{
1115	struct vmxstate *vmxstate;
1116
1117	vmxstate = &vmx->state[vcpu];
1118	if (vmxstate->lastcpu == curcpu)
1119		return;
1120
1121	vmxstate->lastcpu = curcpu;
1122
1123	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1124
1125	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1126	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1127	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1128	vmx_invvpid(vmx, vcpu, pmap, 1);
1129}
1130
1131/*
1132 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1133 */
1134CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1135
1136static void __inline
1137vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1138{
1139
1140	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1141		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1142		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1143		VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1144	}
1145}
1146
1147static void __inline
1148vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1149{
1150
1151	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1152	    ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1153	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1154	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1155	VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1156}
1157
1158static void __inline
1159vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1160{
1161
1162	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1163		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1164		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1165		VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1166	}
1167}
1168
1169static void __inline
1170vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1171{
1172
1173	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1174	    ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1175	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1176	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1177	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1178}
1179
1180#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
1181			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1182#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
1183			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1184
1185static void
1186vmx_inject_nmi(struct vmx *vmx, int vcpu)
1187{
1188	uint32_t gi, info;
1189
1190	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1191	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1192	    "interruptibility-state %#x", gi));
1193
1194	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1195	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1196	    "VM-entry interruption information %#x", info));
1197
1198	/*
1199	 * Inject the virtual NMI. The vector must be the NMI IDT entry
1200	 * or the VMCS entry check will fail.
1201	 */
1202	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1203	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1204
1205	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1206
1207	/* Clear the request */
1208	vm_nmi_clear(vmx->vm, vcpu);
1209}
1210
1211static void
1212vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
1213{
1214	struct vm_exception exc;
1215	int vector, need_nmi_exiting, extint_pending;
1216	uint64_t rflags;
1217	uint32_t gi, info;
1218
1219	if (vm_exception_pending(vmx->vm, vcpu, &exc)) {
1220		KASSERT(exc.vector >= 0 && exc.vector < 32,
1221		    ("%s: invalid exception vector %d", __func__, exc.vector));
1222
1223		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1224		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1225		     "pending exception %d: %#x", __func__, exc.vector, info));
1226
1227		info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID;
1228		if (exc.error_code_valid) {
1229			info |= VMCS_INTR_DEL_ERRCODE;
1230			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code);
1231		}
1232		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1233	}
1234
1235	if (vm_nmi_pending(vmx->vm, vcpu)) {
1236		/*
1237		 * If there are no conditions blocking NMI injection then
1238		 * inject it directly here otherwise enable "NMI window
1239		 * exiting" to inject it as soon as we can.
1240		 *
1241		 * We also check for STI_BLOCKING because some implementations
1242		 * don't allow NMI injection in this case. If we are running
1243		 * on a processor that doesn't have this restriction it will
1244		 * immediately exit and the NMI will be injected in the
1245		 * "NMI window exiting" handler.
1246		 */
1247		need_nmi_exiting = 1;
1248		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1249		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1250			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1251			if ((info & VMCS_INTR_VALID) == 0) {
1252				vmx_inject_nmi(vmx, vcpu);
1253				need_nmi_exiting = 0;
1254			} else {
1255				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1256				    "due to VM-entry intr info %#x", info);
1257			}
1258		} else {
1259			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1260			    "Guest Interruptibility-state %#x", gi);
1261		}
1262
1263		if (need_nmi_exiting)
1264			vmx_set_nmi_window_exiting(vmx, vcpu);
1265	}
1266
1267	extint_pending = vm_extint_pending(vmx->vm, vcpu);
1268
1269	if (!extint_pending && virtual_interrupt_delivery) {
1270		vmx_inject_pir(vlapic);
1271		return;
1272	}
1273
1274	/*
1275	 * If interrupt-window exiting is already in effect then don't bother
1276	 * checking for pending interrupts. This is just an optimization and
1277	 * not needed for correctness.
1278	 */
1279	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1280		VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1281		    "pending int_window_exiting");
1282		return;
1283	}
1284
1285	if (!extint_pending) {
1286		/* Ask the local apic for a vector to inject */
1287		if (!vlapic_pending_intr(vlapic, &vector))
1288			return;
1289
1290		/*
1291		 * From the Intel SDM, Volume 3, Section "Maskable
1292		 * Hardware Interrupts":
1293		 * - maskable interrupt vectors [16,255] can be delivered
1294		 *   through the local APIC.
1295		*/
1296		KASSERT(vector >= 16 && vector <= 255,
1297		    ("invalid vector %d from local APIC", vector));
1298	} else {
1299		/* Ask the legacy pic for a vector to inject */
1300		vatpic_pending_intr(vmx->vm, &vector);
1301
1302		/*
1303		 * From the Intel SDM, Volume 3, Section "Maskable
1304		 * Hardware Interrupts":
1305		 * - maskable interrupt vectors [0,255] can be delivered
1306		 *   through the INTR pin.
1307		 */
1308		KASSERT(vector >= 0 && vector <= 255,
1309		    ("invalid vector %d from INTR", vector));
1310	}
1311
1312	/* Check RFLAGS.IF and the interruptibility state of the guest */
1313	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1314	if ((rflags & PSL_I) == 0) {
1315		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1316		    "rflags %#lx", vector, rflags);
1317		goto cantinject;
1318	}
1319
1320	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1321	if (gi & HWINTR_BLOCKING) {
1322		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1323		    "Guest Interruptibility-state %#x", vector, gi);
1324		goto cantinject;
1325	}
1326
1327	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1328	if (info & VMCS_INTR_VALID) {
1329		/*
1330		 * This is expected and could happen for multiple reasons:
1331		 * - A vectoring VM-entry was aborted due to astpending
1332		 * - A VM-exit happened during event injection.
1333		 * - An exception was injected above.
1334		 * - An NMI was injected above or after "NMI window exiting"
1335		 */
1336		VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1337		    "VM-entry intr info %#x", vector, info);
1338		goto cantinject;
1339	}
1340
1341	/* Inject the interrupt */
1342	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1343	info |= vector;
1344	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1345
1346	if (!extint_pending) {
1347		/* Update the Local APIC ISR */
1348		vlapic_intr_accepted(vlapic, vector);
1349	} else {
1350		vm_extint_clear(vmx->vm, vcpu);
1351		vatpic_intr_accepted(vmx->vm, vector);
1352
1353		/*
1354		 * After we accepted the current ExtINT the PIC may
1355		 * have posted another one.  If that is the case, set
1356		 * the Interrupt Window Exiting execution control so
1357		 * we can inject that one too.
1358		 *
1359		 * Also, interrupt window exiting allows us to inject any
1360		 * pending APIC vector that was preempted by the ExtINT
1361		 * as soon as possible. This applies both for the software
1362		 * emulated vlapic and the hardware assisted virtual APIC.
1363		 */
1364		vmx_set_int_window_exiting(vmx, vcpu);
1365	}
1366
1367	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1368
1369	return;
1370
1371cantinject:
1372	/*
1373	 * Set the Interrupt Window Exiting execution control so we can inject
1374	 * the interrupt as soon as blocking condition goes away.
1375	 */
1376	vmx_set_int_window_exiting(vmx, vcpu);
1377}
1378
1379/*
1380 * If the Virtual NMIs execution control is '1' then the logical processor
1381 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1382 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1383 * virtual-NMI blocking.
1384 *
1385 * This unblocking occurs even if the IRET causes a fault. In this case the
1386 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1387 */
1388static void
1389vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1390{
1391	uint32_t gi;
1392
1393	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1394	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1395	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1396	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1397}
1398
1399static void
1400vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1401{
1402	uint32_t gi;
1403
1404	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1405	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1406	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1407	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1408}
1409
1410static int
1411vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1412{
1413	struct vmxctx *vmxctx;
1414	uint64_t xcrval;
1415	const struct xsave_limits *limits;
1416
1417	vmxctx = &vmx->ctx[vcpu];
1418	limits = vmm_get_xsave_limits();
1419
1420	/*
1421	 * Note that the processor raises a GP# fault on its own if
1422	 * xsetbv is executed for CPL != 0, so we do not have to
1423	 * emulate that fault here.
1424	 */
1425
1426	/* Only xcr0 is supported. */
1427	if (vmxctx->guest_rcx != 0) {
1428		vm_inject_gp(vmx->vm, vcpu);
1429		return (HANDLED);
1430	}
1431
1432	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1433	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1434		vm_inject_ud(vmx->vm, vcpu);
1435		return (HANDLED);
1436	}
1437
1438	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1439	if ((xcrval & ~limits->xcr0_allowed) != 0) {
1440		vm_inject_gp(vmx->vm, vcpu);
1441		return (HANDLED);
1442	}
1443
1444	if (!(xcrval & XFEATURE_ENABLED_X87)) {
1445		vm_inject_gp(vmx->vm, vcpu);
1446		return (HANDLED);
1447	}
1448
1449	/* AVX (YMM_Hi128) requires SSE. */
1450	if (xcrval & XFEATURE_ENABLED_AVX &&
1451	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1452		vm_inject_gp(vmx->vm, vcpu);
1453		return (HANDLED);
1454	}
1455
1456	/*
1457	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1458	 * ZMM_Hi256, and Hi16_ZMM.
1459	 */
1460	if (xcrval & XFEATURE_AVX512 &&
1461	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1462	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
1463		vm_inject_gp(vmx->vm, vcpu);
1464		return (HANDLED);
1465	}
1466
1467	/*
1468	 * Intel MPX requires both bound register state flags to be
1469	 * set.
1470	 */
1471	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1472	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1473		vm_inject_gp(vmx->vm, vcpu);
1474		return (HANDLED);
1475	}
1476
1477	/*
1478	 * This runs "inside" vmrun() with the guest's FPU state, so
1479	 * modifying xcr0 directly modifies the guest's xcr0, not the
1480	 * host's.
1481	 */
1482	load_xcr(0, xcrval);
1483	return (HANDLED);
1484}
1485
1486static uint64_t
1487vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1488{
1489	const struct vmxctx *vmxctx;
1490
1491	vmxctx = &vmx->ctx[vcpu];
1492
1493	switch (ident) {
1494	case 0:
1495		return (vmxctx->guest_rax);
1496	case 1:
1497		return (vmxctx->guest_rcx);
1498	case 2:
1499		return (vmxctx->guest_rdx);
1500	case 3:
1501		return (vmxctx->guest_rbx);
1502	case 4:
1503		return (vmcs_read(VMCS_GUEST_RSP));
1504	case 5:
1505		return (vmxctx->guest_rbp);
1506	case 6:
1507		return (vmxctx->guest_rsi);
1508	case 7:
1509		return (vmxctx->guest_rdi);
1510	case 8:
1511		return (vmxctx->guest_r8);
1512	case 9:
1513		return (vmxctx->guest_r9);
1514	case 10:
1515		return (vmxctx->guest_r10);
1516	case 11:
1517		return (vmxctx->guest_r11);
1518	case 12:
1519		return (vmxctx->guest_r12);
1520	case 13:
1521		return (vmxctx->guest_r13);
1522	case 14:
1523		return (vmxctx->guest_r14);
1524	case 15:
1525		return (vmxctx->guest_r15);
1526	default:
1527		panic("invalid vmx register %d", ident);
1528	}
1529}
1530
1531static void
1532vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1533{
1534	struct vmxctx *vmxctx;
1535
1536	vmxctx = &vmx->ctx[vcpu];
1537
1538	switch (ident) {
1539	case 0:
1540		vmxctx->guest_rax = regval;
1541		break;
1542	case 1:
1543		vmxctx->guest_rcx = regval;
1544		break;
1545	case 2:
1546		vmxctx->guest_rdx = regval;
1547		break;
1548	case 3:
1549		vmxctx->guest_rbx = regval;
1550		break;
1551	case 4:
1552		vmcs_write(VMCS_GUEST_RSP, regval);
1553		break;
1554	case 5:
1555		vmxctx->guest_rbp = regval;
1556		break;
1557	case 6:
1558		vmxctx->guest_rsi = regval;
1559		break;
1560	case 7:
1561		vmxctx->guest_rdi = regval;
1562		break;
1563	case 8:
1564		vmxctx->guest_r8 = regval;
1565		break;
1566	case 9:
1567		vmxctx->guest_r9 = regval;
1568		break;
1569	case 10:
1570		vmxctx->guest_r10 = regval;
1571		break;
1572	case 11:
1573		vmxctx->guest_r11 = regval;
1574		break;
1575	case 12:
1576		vmxctx->guest_r12 = regval;
1577		break;
1578	case 13:
1579		vmxctx->guest_r13 = regval;
1580		break;
1581	case 14:
1582		vmxctx->guest_r14 = regval;
1583		break;
1584	case 15:
1585		vmxctx->guest_r15 = regval;
1586		break;
1587	default:
1588		panic("invalid vmx register %d", ident);
1589	}
1590}
1591
1592static int
1593vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1594{
1595	uint64_t crval, regval;
1596
1597	/* We only handle mov to %cr0 at this time */
1598	if ((exitqual & 0xf0) != 0x00)
1599		return (UNHANDLED);
1600
1601	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1602
1603	vmcs_write(VMCS_CR0_SHADOW, regval);
1604
1605	crval = regval | cr0_ones_mask;
1606	crval &= ~cr0_zeros_mask;
1607	vmcs_write(VMCS_GUEST_CR0, crval);
1608
1609	if (regval & CR0_PG) {
1610		uint64_t efer, entry_ctls;
1611
1612		/*
1613		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1614		 * the "IA-32e mode guest" bit in VM-entry control must be
1615		 * equal.
1616		 */
1617		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1618		if (efer & EFER_LME) {
1619			efer |= EFER_LMA;
1620			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1621			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1622			entry_ctls |= VM_ENTRY_GUEST_LMA;
1623			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1624		}
1625	}
1626
1627	return (HANDLED);
1628}
1629
1630static int
1631vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1632{
1633	uint64_t crval, regval;
1634
1635	/* We only handle mov to %cr4 at this time */
1636	if ((exitqual & 0xf0) != 0x00)
1637		return (UNHANDLED);
1638
1639	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1640
1641	vmcs_write(VMCS_CR4_SHADOW, regval);
1642
1643	crval = regval | cr4_ones_mask;
1644	crval &= ~cr4_zeros_mask;
1645	vmcs_write(VMCS_GUEST_CR4, crval);
1646
1647	return (HANDLED);
1648}
1649
1650static int
1651vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1652{
1653	struct vlapic *vlapic;
1654	uint64_t cr8;
1655	int regnum;
1656
1657	/* We only handle mov %cr8 to/from a register at this time. */
1658	if ((exitqual & 0xe0) != 0x00) {
1659		return (UNHANDLED);
1660	}
1661
1662	vlapic = vm_lapic(vmx->vm, vcpu);
1663	regnum = (exitqual >> 8) & 0xf;
1664	if (exitqual & 0x10) {
1665		cr8 = vlapic_get_cr8(vlapic);
1666		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1667	} else {
1668		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1669		vlapic_set_cr8(vlapic, cr8);
1670	}
1671
1672	return (HANDLED);
1673}
1674
1675/*
1676 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1677 */
1678static int
1679vmx_cpl(void)
1680{
1681	uint32_t ssar;
1682
1683	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1684	return ((ssar >> 5) & 0x3);
1685}
1686
1687static enum vm_cpu_mode
1688vmx_cpu_mode(void)
1689{
1690	uint32_t csar;
1691
1692	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1693		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1694		if (csar & 0x2000)
1695			return (CPU_MODE_64BIT);	/* CS.L = 1 */
1696		else
1697			return (CPU_MODE_COMPATIBILITY);
1698	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1699		return (CPU_MODE_PROTECTED);
1700	} else {
1701		return (CPU_MODE_REAL);
1702	}
1703}
1704
1705static enum vm_paging_mode
1706vmx_paging_mode(void)
1707{
1708
1709	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1710		return (PAGING_MODE_FLAT);
1711	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1712		return (PAGING_MODE_32);
1713	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1714		return (PAGING_MODE_64);
1715	else
1716		return (PAGING_MODE_PAE);
1717}
1718
1719static uint64_t
1720inout_str_index(struct vmx *vmx, int vcpuid, int in)
1721{
1722	uint64_t val;
1723	int error;
1724	enum vm_reg_name reg;
1725
1726	reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1727	error = vmx_getreg(vmx, vcpuid, reg, &val);
1728	KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1729	return (val);
1730}
1731
1732static uint64_t
1733inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1734{
1735	uint64_t val;
1736	int error;
1737
1738	if (rep) {
1739		error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1740		KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1741	} else {
1742		val = 1;
1743	}
1744	return (val);
1745}
1746
1747static int
1748inout_str_addrsize(uint32_t inst_info)
1749{
1750	uint32_t size;
1751
1752	size = (inst_info >> 7) & 0x7;
1753	switch (size) {
1754	case 0:
1755		return (2);	/* 16 bit */
1756	case 1:
1757		return (4);	/* 32 bit */
1758	case 2:
1759		return (8);	/* 64 bit */
1760	default:
1761		panic("%s: invalid size encoding %d", __func__, size);
1762	}
1763}
1764
1765static void
1766inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1767    struct vm_inout_str *vis)
1768{
1769	int error, s;
1770
1771	if (in) {
1772		vis->seg_name = VM_REG_GUEST_ES;
1773	} else {
1774		s = (inst_info >> 15) & 0x7;
1775		vis->seg_name = vm_segment_name(s);
1776	}
1777
1778	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1779	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1780
1781	/* XXX modify svm.c to update bit 16 of seg_desc.access (unusable) */
1782}
1783
1784static void
1785vmx_paging_info(struct vm_guest_paging *paging)
1786{
1787	paging->cr3 = vmcs_guest_cr3();
1788	paging->cpl = vmx_cpl();
1789	paging->cpu_mode = vmx_cpu_mode();
1790	paging->paging_mode = vmx_paging_mode();
1791}
1792
1793static void
1794vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1795{
1796	struct vm_guest_paging *paging;
1797	uint32_t csar;
1798
1799	paging = &vmexit->u.inst_emul.paging;
1800
1801	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1802	vmexit->u.inst_emul.gpa = gpa;
1803	vmexit->u.inst_emul.gla = gla;
1804	vmx_paging_info(paging);
1805	switch (paging->cpu_mode) {
1806	case CPU_MODE_PROTECTED:
1807	case CPU_MODE_COMPATIBILITY:
1808		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1809		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1810		break;
1811	default:
1812		vmexit->u.inst_emul.cs_d = 0;
1813		break;
1814	}
1815}
1816
1817static int
1818ept_fault_type(uint64_t ept_qual)
1819{
1820	int fault_type;
1821
1822	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1823		fault_type = VM_PROT_WRITE;
1824	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1825		fault_type = VM_PROT_EXECUTE;
1826	else
1827		fault_type= VM_PROT_READ;
1828
1829	return (fault_type);
1830}
1831
1832static boolean_t
1833ept_emulation_fault(uint64_t ept_qual)
1834{
1835	int read, write;
1836
1837	/* EPT fault on an instruction fetch doesn't make sense here */
1838	if (ept_qual & EPT_VIOLATION_INST_FETCH)
1839		return (FALSE);
1840
1841	/* EPT fault must be a read fault or a write fault */
1842	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1843	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1844	if ((read | write) == 0)
1845		return (FALSE);
1846
1847	/*
1848	 * The EPT violation must have been caused by accessing a
1849	 * guest-physical address that is a translation of a guest-linear
1850	 * address.
1851	 */
1852	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1853	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1854		return (FALSE);
1855	}
1856
1857	return (TRUE);
1858}
1859
1860static __inline int
1861apic_access_virtualization(struct vmx *vmx, int vcpuid)
1862{
1863	uint32_t proc_ctls2;
1864
1865	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1866	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1867}
1868
1869static __inline int
1870x2apic_virtualization(struct vmx *vmx, int vcpuid)
1871{
1872	uint32_t proc_ctls2;
1873
1874	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1875	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1876}
1877
1878static int
1879vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1880    uint64_t qual)
1881{
1882	int error, handled, offset;
1883	uint32_t *apic_regs, vector;
1884	bool retu;
1885
1886	handled = HANDLED;
1887	offset = APIC_WRITE_OFFSET(qual);
1888
1889	if (!apic_access_virtualization(vmx, vcpuid)) {
1890		/*
1891		 * In general there should not be any APIC write VM-exits
1892		 * unless APIC-access virtualization is enabled.
1893		 *
1894		 * However self-IPI virtualization can legitimately trigger
1895		 * an APIC-write VM-exit so treat it specially.
1896		 */
1897		if (x2apic_virtualization(vmx, vcpuid) &&
1898		    offset == APIC_OFFSET_SELF_IPI) {
1899			apic_regs = (uint32_t *)(vlapic->apic_page);
1900			vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1901			vlapic_self_ipi_handler(vlapic, vector);
1902			return (HANDLED);
1903		} else
1904			return (UNHANDLED);
1905	}
1906
1907	switch (offset) {
1908	case APIC_OFFSET_ID:
1909		vlapic_id_write_handler(vlapic);
1910		break;
1911	case APIC_OFFSET_LDR:
1912		vlapic_ldr_write_handler(vlapic);
1913		break;
1914	case APIC_OFFSET_DFR:
1915		vlapic_dfr_write_handler(vlapic);
1916		break;
1917	case APIC_OFFSET_SVR:
1918		vlapic_svr_write_handler(vlapic);
1919		break;
1920	case APIC_OFFSET_ESR:
1921		vlapic_esr_write_handler(vlapic);
1922		break;
1923	case APIC_OFFSET_ICR_LOW:
1924		retu = false;
1925		error = vlapic_icrlo_write_handler(vlapic, &retu);
1926		if (error != 0 || retu)
1927			handled = UNHANDLED;
1928		break;
1929	case APIC_OFFSET_CMCI_LVT:
1930	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1931		vlapic_lvt_write_handler(vlapic, offset);
1932		break;
1933	case APIC_OFFSET_TIMER_ICR:
1934		vlapic_icrtmr_write_handler(vlapic);
1935		break;
1936	case APIC_OFFSET_TIMER_DCR:
1937		vlapic_dcr_write_handler(vlapic);
1938		break;
1939	default:
1940		handled = UNHANDLED;
1941		break;
1942	}
1943	return (handled);
1944}
1945
1946static bool
1947apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1948{
1949
1950	if (apic_access_virtualization(vmx, vcpuid) &&
1951	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1952		return (true);
1953	else
1954		return (false);
1955}
1956
1957static int
1958vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1959{
1960	uint64_t qual;
1961	int access_type, offset, allowed;
1962
1963	if (!apic_access_virtualization(vmx, vcpuid))
1964		return (UNHANDLED);
1965
1966	qual = vmexit->u.vmx.exit_qualification;
1967	access_type = APIC_ACCESS_TYPE(qual);
1968	offset = APIC_ACCESS_OFFSET(qual);
1969
1970	allowed = 0;
1971	if (access_type == 0) {
1972		/*
1973		 * Read data access to the following registers is expected.
1974		 */
1975		switch (offset) {
1976		case APIC_OFFSET_APR:
1977		case APIC_OFFSET_PPR:
1978		case APIC_OFFSET_RRR:
1979		case APIC_OFFSET_CMCI_LVT:
1980		case APIC_OFFSET_TIMER_CCR:
1981			allowed = 1;
1982			break;
1983		default:
1984			break;
1985		}
1986	} else if (access_type == 1) {
1987		/*
1988		 * Write data access to the following registers is expected.
1989		 */
1990		switch (offset) {
1991		case APIC_OFFSET_VER:
1992		case APIC_OFFSET_APR:
1993		case APIC_OFFSET_PPR:
1994		case APIC_OFFSET_RRR:
1995		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1996		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1997		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1998		case APIC_OFFSET_CMCI_LVT:
1999		case APIC_OFFSET_TIMER_CCR:
2000			allowed = 1;
2001			break;
2002		default:
2003			break;
2004		}
2005	}
2006
2007	if (allowed) {
2008		vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
2009		    VIE_INVALID_GLA);
2010	}
2011
2012	/*
2013	 * Regardless of whether the APIC-access is allowed this handler
2014	 * always returns UNHANDLED:
2015	 * - if the access is allowed then it is handled by emulating the
2016	 *   instruction that caused the VM-exit (outside the critical section)
2017	 * - if the access is not allowed then it will be converted to an
2018	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2019	 */
2020	return (UNHANDLED);
2021}
2022
2023static enum task_switch_reason
2024vmx_task_switch_reason(uint64_t qual)
2025{
2026	int reason;
2027
2028	reason = (qual >> 30) & 0x3;
2029	switch (reason) {
2030	case 0:
2031		return (TSR_CALL);
2032	case 1:
2033		return (TSR_IRET);
2034	case 2:
2035		return (TSR_JMP);
2036	case 3:
2037		return (TSR_IDT_GATE);
2038	default:
2039		panic("%s: invalid reason %d", __func__, reason);
2040	}
2041}
2042
2043static int
2044vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2045{
2046	int error, handled, in;
2047	struct vmxctx *vmxctx;
2048	struct vlapic *vlapic;
2049	struct vm_inout_str *vis;
2050	struct vm_task_switch *ts;
2051	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2052	uint32_t intr_type, reason;
2053	uint64_t qual, gpa;
2054	bool retu;
2055
2056	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2057	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2058
2059	handled = UNHANDLED;
2060	vmxctx = &vmx->ctx[vcpu];
2061
2062	qual = vmexit->u.vmx.exit_qualification;
2063	reason = vmexit->u.vmx.exit_reason;
2064	vmexit->exitcode = VM_EXITCODE_BOGUS;
2065
2066	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2067
2068	/*
2069	 * VM exits that can be triggered during event delivery need to
2070	 * be handled specially by re-injecting the event if the IDT
2071	 * vectoring information field's valid bit is set.
2072	 *
2073	 * If the VM-exit is due to a task gate in the IDT then we don't
2074	 * reinject the event because emulating the task switch also
2075	 * completes the event delivery.
2076	 *
2077	 * See "Information for VM Exits During Event Delivery" in Intel SDM
2078	 * for details.
2079	 */
2080	switch (reason) {
2081	case EXIT_REASON_EPT_FAULT:
2082	case EXIT_REASON_EPT_MISCONFIG:
2083	case EXIT_REASON_APIC_ACCESS:
2084	case EXIT_REASON_TASK_SWITCH:
2085	case EXIT_REASON_EXCEPTION:
2086		idtvec_info = vmcs_idt_vectoring_info();
2087		VCPU_CTR2(vmx->vm, vcpu, "vm exit %s: idtvec_info 0x%08x",
2088		    exit_reason_to_str(reason), idtvec_info);
2089		if ((idtvec_info & VMCS_IDT_VEC_VALID) &&
2090		    (reason != EXIT_REASON_TASK_SWITCH)) {
2091			idtvec_info &= ~(1 << 12); /* clear undefined bit */
2092			vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
2093			if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2094				idtvec_err = vmcs_idt_vectoring_err();
2095				vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
2096				    idtvec_err);
2097			}
2098			/*
2099			 * If 'virtual NMIs' are being used and the VM-exit
2100			 * happened while injecting an NMI during the previous
2101			 * VM-entry, then clear "blocking by NMI" in the Guest
2102			 * Interruptibility-state.
2103			 */
2104			if ((idtvec_info & VMCS_INTR_T_MASK) ==
2105			    VMCS_INTR_T_NMI) {
2106				 vmx_clear_nmi_blocking(vmx, vcpu);
2107			}
2108			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2109		}
2110		break;
2111	default:
2112		idtvec_info = 0;
2113		break;
2114	}
2115
2116	switch (reason) {
2117	case EXIT_REASON_TASK_SWITCH:
2118		ts = &vmexit->u.task_switch;
2119		ts->tsssel = qual & 0xffff;
2120		ts->reason = vmx_task_switch_reason(qual);
2121		ts->ext = 0;
2122		ts->errcode_valid = 0;
2123		vmx_paging_info(&ts->paging);
2124		/*
2125		 * If the task switch was due to a CALL, JMP, IRET, software
2126		 * interrupt (INT n) or software exception (INT3, INTO),
2127		 * then the saved %rip references the instruction that caused
2128		 * the task switch. The instruction length field in the VMCS
2129		 * is valid in this case.
2130		 *
2131		 * In all other cases (e.g., NMI, hardware exception) the
2132		 * saved %rip is one that would have been saved in the old TSS
2133		 * had the task switch completed normally so the instruction
2134		 * length field is not needed in this case and is explicitly
2135		 * set to 0.
2136		 */
2137		if (ts->reason == TSR_IDT_GATE) {
2138			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2139			    ("invalid idtvec_info %x for IDT task switch",
2140			    idtvec_info));
2141			intr_type = idtvec_info & VMCS_INTR_T_MASK;
2142			if (intr_type != VMCS_INTR_T_SWINTR &&
2143			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
2144			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2145				/* Task switch triggered by external event */
2146				ts->ext = 1;
2147				vmexit->inst_length = 0;
2148				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2149					ts->errcode_valid = 1;
2150					ts->errcode = vmcs_idt_vectoring_err();
2151				}
2152			}
2153		}
2154		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2155		VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2156		    "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2157		    ts->ext ? "external" : "internal",
2158		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2159		break;
2160	case EXIT_REASON_CR_ACCESS:
2161		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2162		switch (qual & 0xf) {
2163		case 0:
2164			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2165			break;
2166		case 4:
2167			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2168			break;
2169		case 8:
2170			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2171			break;
2172		}
2173		break;
2174	case EXIT_REASON_RDMSR:
2175		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2176		retu = false;
2177		ecx = vmxctx->guest_rcx;
2178		VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2179		error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
2180		if (error) {
2181			vmexit->exitcode = VM_EXITCODE_RDMSR;
2182			vmexit->u.msr.code = ecx;
2183		} else if (!retu) {
2184			handled = HANDLED;
2185		} else {
2186			/* Return to userspace with a valid exitcode */
2187			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2188			    ("emulate_wrmsr retu with bogus exitcode"));
2189		}
2190		break;
2191	case EXIT_REASON_WRMSR:
2192		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2193		retu = false;
2194		eax = vmxctx->guest_rax;
2195		ecx = vmxctx->guest_rcx;
2196		edx = vmxctx->guest_rdx;
2197		VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2198		    ecx, (uint64_t)edx << 32 | eax);
2199		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
2200		    (uint64_t)edx << 32 | eax, &retu);
2201		if (error) {
2202			vmexit->exitcode = VM_EXITCODE_WRMSR;
2203			vmexit->u.msr.code = ecx;
2204			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2205		} else if (!retu) {
2206			handled = HANDLED;
2207		} else {
2208			/* Return to userspace with a valid exitcode */
2209			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2210			    ("emulate_wrmsr retu with bogus exitcode"));
2211		}
2212		break;
2213	case EXIT_REASON_HLT:
2214		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2215		vmexit->exitcode = VM_EXITCODE_HLT;
2216		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2217		break;
2218	case EXIT_REASON_MTF:
2219		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2220		vmexit->exitcode = VM_EXITCODE_MTRAP;
2221		break;
2222	case EXIT_REASON_PAUSE:
2223		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2224		vmexit->exitcode = VM_EXITCODE_PAUSE;
2225		break;
2226	case EXIT_REASON_INTR_WINDOW:
2227		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2228		vmx_clear_int_window_exiting(vmx, vcpu);
2229		return (1);
2230	case EXIT_REASON_EXT_INTR:
2231		/*
2232		 * External interrupts serve only to cause VM exits and allow
2233		 * the host interrupt handler to run.
2234		 *
2235		 * If this external interrupt triggers a virtual interrupt
2236		 * to a VM, then that state will be recorded by the
2237		 * host interrupt handler in the VM's softc. We will inject
2238		 * this virtual interrupt during the subsequent VM enter.
2239		 */
2240		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2241
2242		/*
2243		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2244		 * This appears to be a bug in VMware Fusion?
2245		 */
2246		if (!(intr_info & VMCS_INTR_VALID))
2247			return (1);
2248		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2249		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2250		    ("VM exit interruption info invalid: %#x", intr_info));
2251		vmx_trigger_hostintr(intr_info & 0xff);
2252
2253		/*
2254		 * This is special. We want to treat this as an 'handled'
2255		 * VM-exit but not increment the instruction pointer.
2256		 */
2257		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2258		return (1);
2259	case EXIT_REASON_NMI_WINDOW:
2260		/* Exit to allow the pending virtual NMI to be injected */
2261		if (vm_nmi_pending(vmx->vm, vcpu))
2262			vmx_inject_nmi(vmx, vcpu);
2263		vmx_clear_nmi_window_exiting(vmx, vcpu);
2264		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2265		return (1);
2266	case EXIT_REASON_INOUT:
2267		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2268		vmexit->exitcode = VM_EXITCODE_INOUT;
2269		vmexit->u.inout.bytes = (qual & 0x7) + 1;
2270		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2271		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2272		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2273		vmexit->u.inout.port = (uint16_t)(qual >> 16);
2274		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2275		if (vmexit->u.inout.string) {
2276			inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2277			vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2278			vis = &vmexit->u.inout_str;
2279			vmx_paging_info(&vis->paging);
2280			vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2281			vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2282			vis->index = inout_str_index(vmx, vcpu, in);
2283			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2284			vis->addrsize = inout_str_addrsize(inst_info);
2285			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2286		}
2287		break;
2288	case EXIT_REASON_CPUID:
2289		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2290		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2291		break;
2292	case EXIT_REASON_EXCEPTION:
2293		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2294		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2295		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2296		    ("VM exit interruption info invalid: %#x", intr_info));
2297
2298		/*
2299		 * If Virtual NMIs control is 1 and the VM-exit is due to a
2300		 * fault encountered during the execution of IRET then we must
2301		 * restore the state of "virtual-NMI blocking" before resuming
2302		 * the guest.
2303		 *
2304		 * See "Resuming Guest Software after Handling an Exception".
2305		 */
2306		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2307		    (intr_info & 0xff) != IDT_DF &&
2308		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2309			vmx_restore_nmi_blocking(vmx, vcpu);
2310
2311		/*
2312		 * The NMI has already been handled in vmx_exit_handle_nmi().
2313		 */
2314		if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI)
2315			return (1);
2316		break;
2317	case EXIT_REASON_EPT_FAULT:
2318		/*
2319		 * If 'gpa' lies within the address space allocated to
2320		 * memory then this must be a nested page fault otherwise
2321		 * this must be an instruction that accesses MMIO space.
2322		 */
2323		gpa = vmcs_gpa();
2324		if (vm_mem_allocated(vmx->vm, gpa) ||
2325		    apic_access_fault(vmx, vcpu, gpa)) {
2326			vmexit->exitcode = VM_EXITCODE_PAGING;
2327			vmexit->u.paging.gpa = gpa;
2328			vmexit->u.paging.fault_type = ept_fault_type(qual);
2329			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2330		} else if (ept_emulation_fault(qual)) {
2331			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2332			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2333		}
2334		/*
2335		 * If Virtual NMIs control is 1 and the VM-exit is due to an
2336		 * EPT fault during the execution of IRET then we must restore
2337		 * the state of "virtual-NMI blocking" before resuming.
2338		 *
2339		 * See description of "NMI unblocking due to IRET" in
2340		 * "Exit Qualification for EPT Violations".
2341		 */
2342		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2343		    (qual & EXIT_QUAL_NMIUDTI) != 0)
2344			vmx_restore_nmi_blocking(vmx, vcpu);
2345		break;
2346	case EXIT_REASON_VIRTUALIZED_EOI:
2347		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2348		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2349		vmexit->inst_length = 0;	/* trap-like */
2350		break;
2351	case EXIT_REASON_APIC_ACCESS:
2352		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2353		break;
2354	case EXIT_REASON_APIC_WRITE:
2355		/*
2356		 * APIC-write VM exit is trap-like so the %rip is already
2357		 * pointing to the next instruction.
2358		 */
2359		vmexit->inst_length = 0;
2360		vlapic = vm_lapic(vmx->vm, vcpu);
2361		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2362		break;
2363	case EXIT_REASON_XSETBV:
2364		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2365		break;
2366	default:
2367		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2368		break;
2369	}
2370
2371	if (handled) {
2372		/*
2373		 * It is possible that control is returned to userland
2374		 * even though we were able to handle the VM exit in the
2375		 * kernel.
2376		 *
2377		 * In such a case we want to make sure that the userland
2378		 * restarts guest execution at the instruction *after*
2379		 * the one we just processed. Therefore we update the
2380		 * guest rip in the VMCS and in 'vmexit'.
2381		 */
2382		vmexit->rip += vmexit->inst_length;
2383		vmexit->inst_length = 0;
2384		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2385	} else {
2386		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2387			/*
2388			 * If this VM exit was not claimed by anybody then
2389			 * treat it as a generic VMX exit.
2390			 */
2391			vmexit->exitcode = VM_EXITCODE_VMX;
2392			vmexit->u.vmx.status = VM_SUCCESS;
2393			vmexit->u.vmx.inst_type = 0;
2394			vmexit->u.vmx.inst_error = 0;
2395		} else {
2396			/*
2397			 * The exitcode and collateral have been populated.
2398			 * The VM exit will be processed further in userland.
2399			 */
2400		}
2401	}
2402	return (handled);
2403}
2404
2405static __inline void
2406vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2407{
2408
2409	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2410	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
2411	    vmxctx->inst_fail_status));
2412
2413	vmexit->inst_length = 0;
2414	vmexit->exitcode = VM_EXITCODE_VMX;
2415	vmexit->u.vmx.status = vmxctx->inst_fail_status;
2416	vmexit->u.vmx.inst_error = vmcs_instruction_error();
2417	vmexit->u.vmx.exit_reason = ~0;
2418	vmexit->u.vmx.exit_qualification = ~0;
2419
2420	switch (rc) {
2421	case VMX_VMRESUME_ERROR:
2422	case VMX_VMLAUNCH_ERROR:
2423	case VMX_INVEPT_ERROR:
2424		vmexit->u.vmx.inst_type = rc;
2425		break;
2426	default:
2427		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2428	}
2429}
2430
2431/*
2432 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2433 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2434 * sufficient to simply vector to the NMI handler via a software interrupt.
2435 * However, this must be done before maskable interrupts are enabled
2436 * otherwise the "iret" issued by an interrupt handler will incorrectly
2437 * clear NMI blocking.
2438 */
2439static __inline void
2440vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2441{
2442	uint32_t intr_info;
2443
2444	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2445
2446	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2447		return;
2448
2449	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2450	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2451	    ("VM exit interruption info invalid: %#x", intr_info));
2452
2453	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2454		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2455		    "to NMI has invalid vector: %#x", intr_info));
2456		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2457		__asm __volatile("int $2");
2458	}
2459}
2460
2461static int
2462vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
2463    void *rendezvous_cookie, void *suspend_cookie)
2464{
2465	int rc, handled, launched;
2466	struct vmx *vmx;
2467	struct vm *vm;
2468	struct vmxctx *vmxctx;
2469	struct vmcs *vmcs;
2470	struct vm_exit *vmexit;
2471	struct vlapic *vlapic;
2472	uint64_t rip;
2473	uint32_t exit_reason;
2474
2475	vmx = arg;
2476	vm = vmx->vm;
2477	vmcs = &vmx->vmcs[vcpu];
2478	vmxctx = &vmx->ctx[vcpu];
2479	vlapic = vm_lapic(vm, vcpu);
2480	vmexit = vm_exitinfo(vm, vcpu);
2481	launched = 0;
2482
2483	KASSERT(vmxctx->pmap == pmap,
2484	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2485
2486	VMPTRLD(vmcs);
2487
2488	/*
2489	 * XXX
2490	 * We do this every time because we may setup the virtual machine
2491	 * from a different process than the one that actually runs it.
2492	 *
2493	 * If the life of a virtual machine was spent entirely in the context
2494	 * of a single process we could do this once in vmx_vminit().
2495	 */
2496	vmcs_write(VMCS_HOST_CR3, rcr3());
2497
2498	vmcs_write(VMCS_GUEST_RIP, startrip);
2499	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2500	do {
2501		handled = UNHANDLED;
2502
2503		/*
2504		 * Interrupts are disabled from this point on until the
2505		 * guest starts executing. This is done for the following
2506		 * reasons:
2507		 *
2508		 * If an AST is asserted on this thread after the check below,
2509		 * then the IPI_AST notification will not be lost, because it
2510		 * will cause a VM exit due to external interrupt as soon as
2511		 * the guest state is loaded.
2512		 *
2513		 * A posted interrupt after 'vmx_inject_interrupts()' will
2514		 * not be "lost" because it will be held pending in the host
2515		 * APIC because interrupts are disabled. The pending interrupt
2516		 * will be recognized as soon as the guest state is loaded.
2517		 *
2518		 * The same reasoning applies to the IPI generated by
2519		 * pmap_invalidate_ept().
2520		 */
2521		disable_intr();
2522		if (vcpu_suspended(suspend_cookie)) {
2523			enable_intr();
2524			vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
2525			break;
2526		}
2527
2528		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2529			enable_intr();
2530			vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
2531			break;
2532		}
2533
2534		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
2535			enable_intr();
2536			vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
2537			vmx_astpending_trace(vmx, vcpu, vmexit->rip);
2538			handled = HANDLED;
2539			break;
2540		}
2541
2542		vmx_inject_interrupts(vmx, vcpu, vlapic);
2543		vmx_run_trace(vmx, vcpu);
2544		rc = vmx_enter_guest(vmxctx, vmx, launched);
2545
2546		/* Collect some information for VM exit processing */
2547		vmexit->rip = rip = vmcs_guest_rip();
2548		vmexit->inst_length = vmexit_instruction_length();
2549		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2550		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2551
2552		if (rc == VMX_GUEST_VMEXIT) {
2553			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2554			enable_intr();
2555			handled = vmx_exit_process(vmx, vcpu, vmexit);
2556		} else {
2557			enable_intr();
2558			vmx_exit_inst_error(vmxctx, rc, vmexit);
2559		}
2560		launched = 1;
2561		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2562	} while (handled);
2563
2564	/*
2565	 * If a VM exit has been handled then the exitcode must be BOGUS
2566	 * If a VM exit is not handled then the exitcode must not be BOGUS
2567	 */
2568	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2569	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2570		panic("Mismatch between handled (%d) and exitcode (%d)",
2571		      handled, vmexit->exitcode);
2572	}
2573
2574	if (!handled)
2575		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2576
2577	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2578	    vmexit->exitcode);
2579
2580	VMCLEAR(vmcs);
2581	return (0);
2582}
2583
2584static void
2585vmx_vmcleanup(void *arg)
2586{
2587	int i;
2588	struct vmx *vmx = arg;
2589
2590	if (apic_access_virtualization(vmx, 0))
2591		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2592
2593	for (i = 0; i < VM_MAXCPU; i++)
2594		vpid_free(vmx->state[i].vpid);
2595
2596	free(vmx, M_VMX);
2597
2598	return;
2599}
2600
2601static register_t *
2602vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2603{
2604
2605	switch (reg) {
2606	case VM_REG_GUEST_RAX:
2607		return (&vmxctx->guest_rax);
2608	case VM_REG_GUEST_RBX:
2609		return (&vmxctx->guest_rbx);
2610	case VM_REG_GUEST_RCX:
2611		return (&vmxctx->guest_rcx);
2612	case VM_REG_GUEST_RDX:
2613		return (&vmxctx->guest_rdx);
2614	case VM_REG_GUEST_RSI:
2615		return (&vmxctx->guest_rsi);
2616	case VM_REG_GUEST_RDI:
2617		return (&vmxctx->guest_rdi);
2618	case VM_REG_GUEST_RBP:
2619		return (&vmxctx->guest_rbp);
2620	case VM_REG_GUEST_R8:
2621		return (&vmxctx->guest_r8);
2622	case VM_REG_GUEST_R9:
2623		return (&vmxctx->guest_r9);
2624	case VM_REG_GUEST_R10:
2625		return (&vmxctx->guest_r10);
2626	case VM_REG_GUEST_R11:
2627		return (&vmxctx->guest_r11);
2628	case VM_REG_GUEST_R12:
2629		return (&vmxctx->guest_r12);
2630	case VM_REG_GUEST_R13:
2631		return (&vmxctx->guest_r13);
2632	case VM_REG_GUEST_R14:
2633		return (&vmxctx->guest_r14);
2634	case VM_REG_GUEST_R15:
2635		return (&vmxctx->guest_r15);
2636	case VM_REG_GUEST_CR2:
2637		return (&vmxctx->guest_cr2);
2638	default:
2639		break;
2640	}
2641	return (NULL);
2642}
2643
2644static int
2645vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2646{
2647	register_t *regp;
2648
2649	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2650		*retval = *regp;
2651		return (0);
2652	} else
2653		return (EINVAL);
2654}
2655
2656static int
2657vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2658{
2659	register_t *regp;
2660
2661	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2662		*regp = val;
2663		return (0);
2664	} else
2665		return (EINVAL);
2666}
2667
2668static int
2669vmx_shadow_reg(int reg)
2670{
2671	int shreg;
2672
2673	shreg = -1;
2674
2675	switch (reg) {
2676	case VM_REG_GUEST_CR0:
2677		shreg = VMCS_CR0_SHADOW;
2678                break;
2679        case VM_REG_GUEST_CR4:
2680		shreg = VMCS_CR4_SHADOW;
2681		break;
2682	default:
2683		break;
2684	}
2685
2686	return (shreg);
2687}
2688
2689static int
2690vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2691{
2692	int running, hostcpu;
2693	struct vmx *vmx = arg;
2694
2695	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2696	if (running && hostcpu != curcpu)
2697		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2698
2699	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2700		return (0);
2701
2702	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2703}
2704
2705static int
2706vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2707{
2708	int error, hostcpu, running, shadow;
2709	uint64_t ctls;
2710	pmap_t pmap;
2711	struct vmx *vmx = arg;
2712
2713	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2714	if (running && hostcpu != curcpu)
2715		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2716
2717	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2718		return (0);
2719
2720	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2721
2722	if (error == 0) {
2723		/*
2724		 * If the "load EFER" VM-entry control is 1 then the
2725		 * value of EFER.LMA must be identical to "IA-32e mode guest"
2726		 * bit in the VM-entry control.
2727		 */
2728		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2729		    (reg == VM_REG_GUEST_EFER)) {
2730			vmcs_getreg(&vmx->vmcs[vcpu], running,
2731				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2732			if (val & EFER_LMA)
2733				ctls |= VM_ENTRY_GUEST_LMA;
2734			else
2735				ctls &= ~VM_ENTRY_GUEST_LMA;
2736			vmcs_setreg(&vmx->vmcs[vcpu], running,
2737				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2738		}
2739
2740		shadow = vmx_shadow_reg(reg);
2741		if (shadow > 0) {
2742			/*
2743			 * Store the unmodified value in the shadow
2744			 */
2745			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2746				    VMCS_IDENT(shadow), val);
2747		}
2748
2749		if (reg == VM_REG_GUEST_CR3) {
2750			/*
2751			 * Invalidate the guest vcpu's TLB mappings to emulate
2752			 * the behavior of updating %cr3.
2753			 *
2754			 * XXX the processor retains global mappings when %cr3
2755			 * is updated but vmx_invvpid() does not.
2756			 */
2757			pmap = vmx->ctx[vcpu].pmap;
2758			vmx_invvpid(vmx, vcpu, pmap, running);
2759		}
2760	}
2761
2762	return (error);
2763}
2764
2765static int
2766vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2767{
2768	int hostcpu, running;
2769	struct vmx *vmx = arg;
2770
2771	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2772	if (running && hostcpu != curcpu)
2773		panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2774
2775	return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2776}
2777
2778static int
2779vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2780{
2781	int hostcpu, running;
2782	struct vmx *vmx = arg;
2783
2784	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2785	if (running && hostcpu != curcpu)
2786		panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2787
2788	return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
2789}
2790
2791static int
2792vmx_getcap(void *arg, int vcpu, int type, int *retval)
2793{
2794	struct vmx *vmx = arg;
2795	int vcap;
2796	int ret;
2797
2798	ret = ENOENT;
2799
2800	vcap = vmx->cap[vcpu].set;
2801
2802	switch (type) {
2803	case VM_CAP_HALT_EXIT:
2804		if (cap_halt_exit)
2805			ret = 0;
2806		break;
2807	case VM_CAP_PAUSE_EXIT:
2808		if (cap_pause_exit)
2809			ret = 0;
2810		break;
2811	case VM_CAP_MTRAP_EXIT:
2812		if (cap_monitor_trap)
2813			ret = 0;
2814		break;
2815	case VM_CAP_UNRESTRICTED_GUEST:
2816		if (cap_unrestricted_guest)
2817			ret = 0;
2818		break;
2819	case VM_CAP_ENABLE_INVPCID:
2820		if (cap_invpcid)
2821			ret = 0;
2822		break;
2823	default:
2824		break;
2825	}
2826
2827	if (ret == 0)
2828		*retval = (vcap & (1 << type)) ? 1 : 0;
2829
2830	return (ret);
2831}
2832
2833static int
2834vmx_setcap(void *arg, int vcpu, int type, int val)
2835{
2836	struct vmx *vmx = arg;
2837	struct vmcs *vmcs = &vmx->vmcs[vcpu];
2838	uint32_t baseval;
2839	uint32_t *pptr;
2840	int error;
2841	int flag;
2842	int reg;
2843	int retval;
2844
2845	retval = ENOENT;
2846	pptr = NULL;
2847
2848	switch (type) {
2849	case VM_CAP_HALT_EXIT:
2850		if (cap_halt_exit) {
2851			retval = 0;
2852			pptr = &vmx->cap[vcpu].proc_ctls;
2853			baseval = *pptr;
2854			flag = PROCBASED_HLT_EXITING;
2855			reg = VMCS_PRI_PROC_BASED_CTLS;
2856		}
2857		break;
2858	case VM_CAP_MTRAP_EXIT:
2859		if (cap_monitor_trap) {
2860			retval = 0;
2861			pptr = &vmx->cap[vcpu].proc_ctls;
2862			baseval = *pptr;
2863			flag = PROCBASED_MTF;
2864			reg = VMCS_PRI_PROC_BASED_CTLS;
2865		}
2866		break;
2867	case VM_CAP_PAUSE_EXIT:
2868		if (cap_pause_exit) {
2869			retval = 0;
2870			pptr = &vmx->cap[vcpu].proc_ctls;
2871			baseval = *pptr;
2872			flag = PROCBASED_PAUSE_EXITING;
2873			reg = VMCS_PRI_PROC_BASED_CTLS;
2874		}
2875		break;
2876	case VM_CAP_UNRESTRICTED_GUEST:
2877		if (cap_unrestricted_guest) {
2878			retval = 0;
2879			pptr = &vmx->cap[vcpu].proc_ctls2;
2880			baseval = *pptr;
2881			flag = PROCBASED2_UNRESTRICTED_GUEST;
2882			reg = VMCS_SEC_PROC_BASED_CTLS;
2883		}
2884		break;
2885	case VM_CAP_ENABLE_INVPCID:
2886		if (cap_invpcid) {
2887			retval = 0;
2888			pptr = &vmx->cap[vcpu].proc_ctls2;
2889			baseval = *pptr;
2890			flag = PROCBASED2_ENABLE_INVPCID;
2891			reg = VMCS_SEC_PROC_BASED_CTLS;
2892		}
2893		break;
2894	default:
2895		break;
2896	}
2897
2898	if (retval == 0) {
2899		if (val) {
2900			baseval |= flag;
2901		} else {
2902			baseval &= ~flag;
2903		}
2904		VMPTRLD(vmcs);
2905		error = vmwrite(reg, baseval);
2906		VMCLEAR(vmcs);
2907
2908		if (error) {
2909			retval = error;
2910		} else {
2911			/*
2912			 * Update optional stored flags, and record
2913			 * setting
2914			 */
2915			if (pptr != NULL) {
2916				*pptr = baseval;
2917			}
2918
2919			if (val) {
2920				vmx->cap[vcpu].set |= (1 << type);
2921			} else {
2922				vmx->cap[vcpu].set &= ~(1 << type);
2923			}
2924		}
2925	}
2926
2927        return (retval);
2928}
2929
2930struct vlapic_vtx {
2931	struct vlapic	vlapic;
2932	struct pir_desc	*pir_desc;
2933	struct vmx	*vmx;
2934};
2935
2936#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
2937do {									\
2938	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
2939	    level ? "level" : "edge", vector);				\
2940	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
2941	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
2942	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
2943	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
2944	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
2945} while (0)
2946
2947/*
2948 * vlapic->ops handlers that utilize the APICv hardware assist described in
2949 * Chapter 29 of the Intel SDM.
2950 */
2951static int
2952vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
2953{
2954	struct vlapic_vtx *vlapic_vtx;
2955	struct pir_desc *pir_desc;
2956	uint64_t mask;
2957	int idx, notify;
2958
2959	vlapic_vtx = (struct vlapic_vtx *)vlapic;
2960	pir_desc = vlapic_vtx->pir_desc;
2961
2962	/*
2963	 * Keep track of interrupt requests in the PIR descriptor. This is
2964	 * because the virtual APIC page pointed to by the VMCS cannot be
2965	 * modified if the vcpu is running.
2966	 */
2967	idx = vector / 64;
2968	mask = 1UL << (vector % 64);
2969	atomic_set_long(&pir_desc->pir[idx], mask);
2970	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
2971
2972	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
2973	    level, "vmx_set_intr_ready");
2974	return (notify);
2975}
2976
2977static int
2978vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
2979{
2980	struct vlapic_vtx *vlapic_vtx;
2981	struct pir_desc *pir_desc;
2982	struct LAPIC *lapic;
2983	uint64_t pending, pirval;
2984	uint32_t ppr, vpr;
2985	int i;
2986
2987	/*
2988	 * This function is only expected to be called from the 'HLT' exit
2989	 * handler which does not care about the vector that is pending.
2990	 */
2991	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
2992
2993	vlapic_vtx = (struct vlapic_vtx *)vlapic;
2994	pir_desc = vlapic_vtx->pir_desc;
2995
2996	pending = atomic_load_acq_long(&pir_desc->pending);
2997	if (!pending)
2998		return (0);	/* common case */
2999
3000	/*
3001	 * If there is an interrupt pending then it will be recognized only
3002	 * if its priority is greater than the processor priority.
3003	 *
3004	 * Special case: if the processor priority is zero then any pending
3005	 * interrupt will be recognized.
3006	 */
3007	lapic = vlapic->apic_page;
3008	ppr = lapic->ppr & 0xf0;
3009	if (ppr == 0)
3010		return (1);
3011
3012	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3013	    lapic->ppr);
3014
3015	for (i = 3; i >= 0; i--) {
3016		pirval = pir_desc->pir[i];
3017		if (pirval != 0) {
3018			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3019			return (vpr > ppr);
3020		}
3021	}
3022	return (0);
3023}
3024
3025static void
3026vmx_intr_accepted(struct vlapic *vlapic, int vector)
3027{
3028
3029	panic("vmx_intr_accepted: not expected to be called");
3030}
3031
3032static void
3033vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3034{
3035	struct vlapic_vtx *vlapic_vtx;
3036	struct vmx *vmx;
3037	struct vmcs *vmcs;
3038	uint64_t mask, val;
3039
3040	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3041	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3042	    ("vmx_set_tmr: vcpu cannot be running"));
3043
3044	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3045	vmx = vlapic_vtx->vmx;
3046	vmcs = &vmx->vmcs[vlapic->vcpuid];
3047	mask = 1UL << (vector % 64);
3048
3049	VMPTRLD(vmcs);
3050	val = vmcs_read(VMCS_EOI_EXIT(vector));
3051	if (level)
3052		val |= mask;
3053	else
3054		val &= ~mask;
3055	vmcs_write(VMCS_EOI_EXIT(vector), val);
3056	VMCLEAR(vmcs);
3057}
3058
3059static void
3060vmx_enable_x2apic_mode(struct vlapic *vlapic)
3061{
3062	struct vmx *vmx;
3063	struct vmcs *vmcs;
3064	uint32_t proc_ctls2;
3065	int vcpuid, error;
3066
3067	vcpuid = vlapic->vcpuid;
3068	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3069	vmcs = &vmx->vmcs[vcpuid];
3070
3071	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3072	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3073	    ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3074
3075	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3076	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3077	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3078
3079	VMPTRLD(vmcs);
3080	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3081	VMCLEAR(vmcs);
3082
3083	if (vlapic->vcpuid == 0) {
3084		/*
3085		 * The nested page table mappings are shared by all vcpus
3086		 * so unmap the APIC access page just once.
3087		 */
3088		error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3089		KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3090		    __func__, error));
3091
3092		/*
3093		 * The MSR bitmap is shared by all vcpus so modify it only
3094		 * once in the context of vcpu 0.
3095		 */
3096		error = vmx_allow_x2apic_msrs(vmx);
3097		KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3098		    __func__, error));
3099	}
3100}
3101
3102static void
3103vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3104{
3105
3106	ipi_cpu(hostcpu, pirvec);
3107}
3108
3109/*
3110 * Transfer the pending interrupts in the PIR descriptor to the IRR
3111 * in the virtual APIC page.
3112 */
3113static void
3114vmx_inject_pir(struct vlapic *vlapic)
3115{
3116	struct vlapic_vtx *vlapic_vtx;
3117	struct pir_desc *pir_desc;
3118	struct LAPIC *lapic;
3119	uint64_t val, pirval;
3120	int rvi, pirbase = -1;
3121	uint16_t intr_status_old, intr_status_new;
3122
3123	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3124	pir_desc = vlapic_vtx->pir_desc;
3125	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3126		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3127		    "no posted interrupt pending");
3128		return;
3129	}
3130
3131	pirval = 0;
3132	pirbase = -1;
3133	lapic = vlapic->apic_page;
3134
3135	val = atomic_readandclear_long(&pir_desc->pir[0]);
3136	if (val != 0) {
3137		lapic->irr0 |= val;
3138		lapic->irr1 |= val >> 32;
3139		pirbase = 0;
3140		pirval = val;
3141	}
3142
3143	val = atomic_readandclear_long(&pir_desc->pir[1]);
3144	if (val != 0) {
3145		lapic->irr2 |= val;
3146		lapic->irr3 |= val >> 32;
3147		pirbase = 64;
3148		pirval = val;
3149	}
3150
3151	val = atomic_readandclear_long(&pir_desc->pir[2]);
3152	if (val != 0) {
3153		lapic->irr4 |= val;
3154		lapic->irr5 |= val >> 32;
3155		pirbase = 128;
3156		pirval = val;
3157	}
3158
3159	val = atomic_readandclear_long(&pir_desc->pir[3]);
3160	if (val != 0) {
3161		lapic->irr6 |= val;
3162		lapic->irr7 |= val >> 32;
3163		pirbase = 192;
3164		pirval = val;
3165	}
3166
3167	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3168
3169	/*
3170	 * Update RVI so the processor can evaluate pending virtual
3171	 * interrupts on VM-entry.
3172	 *
3173	 * It is possible for pirval to be 0 here, even though the
3174	 * pending bit has been set. The scenario is:
3175	 * CPU-Y is sending a posted interrupt to CPU-X, which
3176	 * is running a guest and processing posted interrupts in h/w.
3177	 * CPU-X will eventually exit and the state seen in s/w is
3178	 * the pending bit set, but no PIR bits set.
3179	 *
3180	 *      CPU-X                      CPU-Y
3181	 *   (vm running)                (host running)
3182	 *   rx posted interrupt
3183	 *   CLEAR pending bit
3184	 *				 SET PIR bit
3185	 *   READ/CLEAR PIR bits
3186	 *				 SET pending bit
3187	 *   (vm exit)
3188	 *   pending bit set, PIR 0
3189	 */
3190	if (pirval != 0) {
3191		rvi = pirbase + flsl(pirval) - 1;
3192		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3193		intr_status_new = (intr_status_old & 0xFF00) | rvi;
3194		if (intr_status_new > intr_status_old) {
3195			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3196			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3197			    "guest_intr_status changed from 0x%04x to 0x%04x",
3198			    intr_status_old, intr_status_new);
3199		}
3200	}
3201}
3202
3203static struct vlapic *
3204vmx_vlapic_init(void *arg, int vcpuid)
3205{
3206	struct vmx *vmx;
3207	struct vlapic *vlapic;
3208	struct vlapic_vtx *vlapic_vtx;
3209
3210	vmx = arg;
3211
3212	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3213	vlapic->vm = vmx->vm;
3214	vlapic->vcpuid = vcpuid;
3215	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3216
3217	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3218	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3219	vlapic_vtx->vmx = vmx;
3220
3221	if (virtual_interrupt_delivery) {
3222		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3223		vlapic->ops.pending_intr = vmx_pending_intr;
3224		vlapic->ops.intr_accepted = vmx_intr_accepted;
3225		vlapic->ops.set_tmr = vmx_set_tmr;
3226		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3227	}
3228
3229	if (posted_interrupts)
3230		vlapic->ops.post_intr = vmx_post_intr;
3231
3232	vlapic_init(vlapic);
3233
3234	return (vlapic);
3235}
3236
3237static void
3238vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3239{
3240
3241	vlapic_cleanup(vlapic);
3242	free(vlapic, M_VLAPIC);
3243}
3244
3245struct vmm_ops vmm_ops_intel = {
3246	vmx_init,
3247	vmx_cleanup,
3248	vmx_restore,
3249	vmx_vminit,
3250	vmx_run,
3251	vmx_vmcleanup,
3252	vmx_getreg,
3253	vmx_setreg,
3254	vmx_getdesc,
3255	vmx_setdesc,
3256	vmx_getcap,
3257	vmx_setcap,
3258	ept_vmspace_alloc,
3259	ept_vmspace_free,
3260	vmx_vlapic_init,
3261	vmx_vlapic_cleanup,
3262};
3263