vmx.c revision 260167
1/*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 260167 2014-01-01 21:17:08Z neel $
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 260167 2014-01-01 21:17:08Z neel $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/smp.h>
35#include <sys/kernel.h>
36#include <sys/malloc.h>
37#include <sys/pcpu.h>
38#include <sys/proc.h>
39#include <sys/sysctl.h>
40
41#include <vm/vm.h>
42#include <vm/pmap.h>
43
44#include <machine/psl.h>
45#include <machine/cpufunc.h>
46#include <machine/md_var.h>
47#include <machine/segments.h>
48#include <machine/specialreg.h>
49#include <machine/vmparam.h>
50
51#include <machine/vmm.h>
52#include "vmm_host.h"
53#include "vmm_msr.h"
54#include "vmm_ktr.h"
55#include "vmm_stat.h"
56#include "vlapic.h"
57#include "vlapic_priv.h"
58
59#include "vmx_msr.h"
60#include "ept.h"
61#include "vmx_cpufunc.h"
62#include "vmx.h"
63#include "x86.h"
64#include "vmx_controls.h"
65
66#define	PINBASED_CTLS_ONE_SETTING					\
67	(PINBASED_EXTINT_EXITING	|				\
68	 PINBASED_NMI_EXITING		|				\
69	 PINBASED_VIRTUAL_NMI)
70#define	PINBASED_CTLS_ZERO_SETTING	0
71
72#define PROCBASED_CTLS_WINDOW_SETTING					\
73	(PROCBASED_INT_WINDOW_EXITING	|				\
74	 PROCBASED_NMI_WINDOW_EXITING)
75
76#define	PROCBASED_CTLS_ONE_SETTING 					\
77	(PROCBASED_SECONDARY_CONTROLS	|				\
78	 PROCBASED_IO_EXITING		|				\
79	 PROCBASED_MSR_BITMAPS		|				\
80	 PROCBASED_CTLS_WINDOW_SETTING)
81#define	PROCBASED_CTLS_ZERO_SETTING	\
82	(PROCBASED_CR3_LOAD_EXITING |	\
83	PROCBASED_CR3_STORE_EXITING |	\
84	PROCBASED_IO_BITMAPS)
85
86#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
87#define	PROCBASED_CTLS2_ZERO_SETTING	0
88
89#define VM_EXIT_CTLS_ONE_SETTING_NO_PAT					\
90	(VM_EXIT_HOST_LMA			|			\
91	VM_EXIT_SAVE_EFER			|			\
92	VM_EXIT_LOAD_EFER)
93
94#define	VM_EXIT_CTLS_ONE_SETTING					\
95	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT       	|			\
96	VM_EXIT_SAVE_PAT			|			\
97	VM_EXIT_LOAD_PAT)
98#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS
99
100#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER
101
102#define	VM_ENTRY_CTLS_ONE_SETTING					\
103	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT     	|			\
104	VM_ENTRY_LOAD_PAT)
105#define	VM_ENTRY_CTLS_ZERO_SETTING					\
106	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
107	VM_ENTRY_INTO_SMM			|			\
108	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
109
110#define	guest_msr_rw(vmx, msr) \
111	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
112
113#define	HANDLED		1
114#define	UNHANDLED	0
115
116static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
117static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
118
119SYSCTL_DECL(_hw_vmm);
120SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
121
122int vmxon_enabled[MAXCPU];
123static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
124
125static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
126static uint32_t exit_ctls, entry_ctls;
127
128static uint64_t cr0_ones_mask, cr0_zeros_mask;
129SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
130	     &cr0_ones_mask, 0, NULL);
131SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
132	     &cr0_zeros_mask, 0, NULL);
133
134static uint64_t cr4_ones_mask, cr4_zeros_mask;
135SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
136	     &cr4_ones_mask, 0, NULL);
137SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
138	     &cr4_zeros_mask, 0, NULL);
139
140static int vmx_no_patmsr;
141
142static int vmx_initialized;
143SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
144	   &vmx_initialized, 0, "Intel VMX initialized");
145
146/*
147 * Virtual NMI blocking conditions.
148 *
149 * Some processor implementations also require NMI to be blocked if
150 * the STI_BLOCKING bit is set. It is possible to detect this at runtime
151 * based on the (exit_reason,exit_qual) tuple being set to
152 * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
153 *
154 * We take the easy way out and also include STI_BLOCKING as one of the
155 * gating items for vNMI injection.
156 */
157static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
158				    VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
159				    VMCS_INTERRUPTIBILITY_STI_BLOCKING;
160
161/*
162 * Optional capabilities
163 */
164static int cap_halt_exit;
165static int cap_pause_exit;
166static int cap_unrestricted_guest;
167static int cap_monitor_trap;
168static int cap_invpcid;
169
170static struct unrhdr *vpid_unr;
171static u_int vpid_alloc_failed;
172SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
173	    &vpid_alloc_failed, 0, NULL);
174
175#ifdef KTR
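/*
 * Translate a VM-exit reason into a short string for KTR tracing.
 * Unknown reasons are formatted as their numeric value in a static buffer.
 */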
176static const char *
177exit_reason_to_str(int reason)
178{
179	static char reasonbuf[32];
180
181	switch (reason) {
182	case EXIT_REASON_EXCEPTION:
183		return "exception";
184	case EXIT_REASON_EXT_INTR:
185		return "extint";
186	case EXIT_REASON_TRIPLE_FAULT:
187		return "triplefault";
188	case EXIT_REASON_INIT:
189		return "init";
190	case EXIT_REASON_SIPI:
191		return "sipi";
192	case EXIT_REASON_IO_SMI:
193		return "iosmi";
194	case EXIT_REASON_SMI:
195		return "smi";
196	case EXIT_REASON_INTR_WINDOW:
197		return "intrwindow";
198	case EXIT_REASON_NMI_WINDOW:
199		return "nmiwindow";
200	case EXIT_REASON_TASK_SWITCH:
201		return "taskswitch";
202	case EXIT_REASON_CPUID:
203		return "cpuid";
204	case EXIT_REASON_GETSEC:
205		return "getsec";
206	case EXIT_REASON_HLT:
207		return "hlt";
208	case EXIT_REASON_INVD:
209		return "invd";
210	case EXIT_REASON_INVLPG:
211		return "invlpg";
212	case EXIT_REASON_RDPMC:
213		return "rdpmc";
214	case EXIT_REASON_RDTSC:
215		return "rdtsc";
216	case EXIT_REASON_RSM:
217		return "rsm";
218	case EXIT_REASON_VMCALL:
219		return "vmcall";
220	case EXIT_REASON_VMCLEAR:
221		return "vmclear";
222	case EXIT_REASON_VMLAUNCH:
223		return "vmlaunch";
224	case EXIT_REASON_VMPTRLD:
225		return "vmptrld";
226	case EXIT_REASON_VMPTRST:
227		return "vmptrst";
228	case EXIT_REASON_VMREAD:
229		return "vmread";
230	case EXIT_REASON_VMRESUME:
231		return "vmresume";
232	case EXIT_REASON_VMWRITE:
233		return "vmwrite";
234	case EXIT_REASON_VMXOFF:
235		return "vmxoff";
236	case EXIT_REASON_VMXON:
237		return "vmxon";
238	case EXIT_REASON_CR_ACCESS:
239		return "craccess";
240	case EXIT_REASON_DR_ACCESS:
241		return "draccess";
242	case EXIT_REASON_INOUT:
243		return "inout";
244	case EXIT_REASON_RDMSR:
245		return "rdmsr";
246	case EXIT_REASON_WRMSR:
247		return "wrmsr";
248	case EXIT_REASON_INVAL_VMCS:
249		return "invalvmcs";
250	case EXIT_REASON_INVAL_MSR:
251		return "invalmsr";
252	case EXIT_REASON_MWAIT:
253		return "mwait";
254	case EXIT_REASON_MTF:
255		return "mtf";
256	case EXIT_REASON_MONITOR:
257		return "monitor";
258	case EXIT_REASON_PAUSE:
259		return "pause";
260	case EXIT_REASON_MCE:
261		return "mce";
262	case EXIT_REASON_TPR:
263		return "tpr";
264	case EXIT_REASON_APIC:
265		return "apic";
266	case EXIT_REASON_GDTR_IDTR:
267		return "gdtridtr";
268	case EXIT_REASON_LDTR_TR:
269		return "ldtrtr";
270	case EXIT_REASON_EPT_FAULT:
271		return "eptfault";
272	case EXIT_REASON_EPT_MISCONFIG:
273		return "eptmisconfig";
274	case EXIT_REASON_INVEPT:
275		return "invept";
276	case EXIT_REASON_RDTSCP:
277		return "rdtscp";
278	case EXIT_REASON_VMX_PREEMPT:
279		return "vmxpreempt";
280	case EXIT_REASON_INVVPID:
281		return "invvpid";
282	case EXIT_REASON_WBINVD:
283		return "wbinvd";
284	case EXIT_REASON_XSETBV:
285		return "xsetbv";
286	default:
287		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
288		return (reasonbuf);
289	}
290}
291#endif	/* KTR */
292
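/*
 * Force the CR0 bits that VMX operation requires to be fixed to 1 or 0
 * (captured in cr0_ones_mask/cr0_zeros_mask during vmx_init) into the
 * supplied value. vmx_fix_cr4() below does the same for CR4.
 */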
293u_long
294vmx_fix_cr0(u_long cr0)
295{
296
297	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
298}
299
300u_long
301vmx_fix_cr4(u_long cr4)
302{
303
304	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
305}
306
307static void
308vpid_free(int vpid)
309{
310	if (vpid < 0 || vpid > 0xffff)
311		panic("vpid_free: invalid vpid %d", vpid);
312
313	/*
314	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
315	 * the unit number allocator.
316	 */
317
318	if (vpid > VM_MAXCPU)
319		free_unr(vpid_unr, vpid);
320}
321
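/*
 * Allocate a VPID for each of the 'num' vcpus. If the "enable VPID"
 * execution control is unavailable all vcpus get VPID 0; if the unit
 * number allocator is exhausted the shared [1,VM_MAXCPU] range is used
 * instead.
 */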
322static void
323vpid_alloc(uint16_t *vpid, int num)
324{
325	int i, x;
326
327	if (num <= 0 || num > VM_MAXCPU)
328		panic("invalid number of vpids requested: %d", num);
329
330	/*
331	 * If the "enable vpid" execution control is not enabled then the
332	 * VPID is required to be 0 for all vcpus.
333	 */
334	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
335		for (i = 0; i < num; i++)
336			vpid[i] = 0;
337		return;
338	}
339
340	/*
341	 * Allocate a unique VPID for each vcpu from the unit number allocator.
342	 */
343	for (i = 0; i < num; i++) {
344		x = alloc_unr(vpid_unr);
345		if (x == -1)
346			break;
347		else
348			vpid[i] = x;
349	}
350
351	if (i < num) {
352		atomic_add_int(&vpid_alloc_failed, 1);
353
354		/*
355		 * If the unit number allocator does not have enough unique
356		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
357		 *
		 * These VPIDs are not unique across VMs but this does not
359		 * affect correctness because the combined mappings are also
360		 * tagged with the EP4TA which is unique for each VM.
361		 *
362		 * It is still sub-optimal because the invvpid will invalidate
363		 * combined mappings for a particular VPID across all EP4TAs.
364		 */
365		while (i-- > 0)
366			vpid_free(vpid[i]);
367
368		for (i = 0; i < num; i++)
369			vpid[i] = i + 1;
370	}
371}
372
373static void
374vpid_init(void)
375{
376	/*
377	 * VPID 0 is required when the "enable VPID" execution control is
378	 * disabled.
379	 *
380	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
381	 * unit number allocator does not have sufficient unique VPIDs to
382	 * satisfy the allocation.
383	 *
384	 * The remaining VPIDs are managed by the unit number allocator.
385	 */
386	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
387}
388
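/*
 * Build the list of guest MSRs (currently just MSR_KGSBASE) that are saved
 * on VM exit and restored on VM entry via the guest MSR save area.
 */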
389static void
390msr_save_area_init(struct msr_entry *g_area, int *g_count)
391{
392	int cnt;
393
394	static struct msr_entry guest_msrs[] = {
395		{ MSR_KGSBASE, 0, 0 },
396	};
397
398	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
399	if (cnt > GUEST_MSR_MAX_ENTRIES)
400		panic("guest msr save area overrun");
401	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
402	*g_count = cnt;
403}
404
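/*
 * smp_rendezvous() callback: if VMX operation was entered on this cpu,
 * invalidate all EPT- and VPID-derived TLB state and execute VMXOFF;
 * finally clear CR4.VMXE.
 */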
405static void
406vmx_disable(void *arg __unused)
407{
408	struct invvpid_desc invvpid_desc = { 0 };
409	struct invept_desc invept_desc = { 0 };
410
411	if (vmxon_enabled[curcpu]) {
412		/*
413		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
414		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB caching
		 * structures, so invalidate them explicitly here to prevent the
		 * retention of cached information between distinct VMX episodes.
418		 */
419		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
420		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
421		vmxoff();
422	}
423	load_cr4(rcr4() & ~CR4_VMXE);
424}
425
426static int
427vmx_cleanup(void)
428{
429
430	if (vpid_unr != NULL) {
431		delete_unrhdr(vpid_unr);
432		vpid_unr = NULL;
433	}
434
435	smp_rendezvous(NULL, vmx_disable, NULL, NULL);
436
437	return (0);
438}
439
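/*
 * smp_rendezvous() callback: set CR4.VMXE and enter VMX operation by
 * executing VMXON on this cpu's VMXON region.
 */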
440static void
441vmx_enable(void *arg __unused)
442{
443	int error;
444
445	load_cr4(rcr4() | CR4_VMXE);
446
447	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
448	error = vmxon(vmxon_region[curcpu]);
449	if (error == 0)
450		vmxon_enabled[curcpu] = 1;
451}
452
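/*
 * Re-enter VMX operation on this cpu if it had been enabled previously,
 * e.g. when the host is resuming after a suspend.
 */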
453static void
454vmx_restore(void)
455{
456
457	if (vmxon_enabled[curcpu])
458		vmxon(vmxon_region[curcpu]);
459}
460
461static int
462vmx_init(void)
463{
464	int error;
465	uint64_t fixed0, fixed1, feature_control;
466	uint32_t tmp;
467
468	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
469	if (!(cpu_feature2 & CPUID2_VMX)) {
470		printf("vmx_init: processor does not support VMX operation\n");
471		return (ENXIO);
472	}
473
474	/*
475	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
476	 * are set (bits 0 and 2 respectively).
477	 */
478	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
479	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
480	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
481		printf("vmx_init: VMX operation disabled by BIOS\n");
482		return (ENXIO);
483	}
484
485	/* Check support for primary processor-based VM-execution controls */
486	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
487			       MSR_VMX_TRUE_PROCBASED_CTLS,
488			       PROCBASED_CTLS_ONE_SETTING,
489			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
490	if (error) {
491		printf("vmx_init: processor does not support desired primary "
492		       "processor-based controls\n");
493		return (error);
494	}
495
496	/* Clear the processor-based ctl bits that are set on demand */
497	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
498
499	/* Check support for secondary processor-based VM-execution controls */
500	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
501			       MSR_VMX_PROCBASED_CTLS2,
502			       PROCBASED_CTLS2_ONE_SETTING,
503			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
504	if (error) {
505		printf("vmx_init: processor does not support desired secondary "
506		       "processor-based controls\n");
507		return (error);
508	}
509
510	/* Check support for VPID */
511	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
512			       PROCBASED2_ENABLE_VPID, 0, &tmp);
513	if (error == 0)
514		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
515
516	/* Check support for pin-based VM-execution controls */
517	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
518			       MSR_VMX_TRUE_PINBASED_CTLS,
519			       PINBASED_CTLS_ONE_SETTING,
520			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
521	if (error) {
522		printf("vmx_init: processor does not support desired "
523		       "pin-based controls\n");
524		return (error);
525	}
526
527	/* Check support for VM-exit controls */
528	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
529			       VM_EXIT_CTLS_ONE_SETTING,
530			       VM_EXIT_CTLS_ZERO_SETTING,
531			       &exit_ctls);
532	if (error) {
533		/* Try again without the PAT MSR bits */
534		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
535				       MSR_VMX_TRUE_EXIT_CTLS,
536				       VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
537				       VM_EXIT_CTLS_ZERO_SETTING,
538				       &exit_ctls);
539		if (error) {
540			printf("vmx_init: processor does not support desired "
541			       "exit controls\n");
542			return (error);
543		} else {
544			if (bootverbose)
545				printf("vmm: PAT MSR access not supported\n");
546			guest_msr_valid(MSR_PAT);
547			vmx_no_patmsr = 1;
548		}
549	}
550
551	/* Check support for VM-entry controls */
552	if (!vmx_no_patmsr) {
553		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
554				       MSR_VMX_TRUE_ENTRY_CTLS,
555				       VM_ENTRY_CTLS_ONE_SETTING,
556				       VM_ENTRY_CTLS_ZERO_SETTING,
557				       &entry_ctls);
558	} else {
559		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
560				       MSR_VMX_TRUE_ENTRY_CTLS,
561				       VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
562				       VM_ENTRY_CTLS_ZERO_SETTING,
563				       &entry_ctls);
564	}
565
566	if (error) {
567		printf("vmx_init: processor does not support desired "
568		       "entry controls\n");
		return (error);
570	}
571
572	/*
573	 * Check support for optional features by testing them
574	 * as individual bits
575	 */
576	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
577					MSR_VMX_TRUE_PROCBASED_CTLS,
578					PROCBASED_HLT_EXITING, 0,
579					&tmp) == 0);
580
581	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
582					MSR_VMX_PROCBASED_CTLS,
583					PROCBASED_MTF, 0,
584					&tmp) == 0);
585
586	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
587					 MSR_VMX_TRUE_PROCBASED_CTLS,
588					 PROCBASED_PAUSE_EXITING, 0,
589					 &tmp) == 0);
590
591	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
592					MSR_VMX_PROCBASED_CTLS2,
593					PROCBASED2_UNRESTRICTED_GUEST, 0,
594				        &tmp) == 0);
595
596	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
597	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
598	    &tmp) == 0);
599
600
601	/* Initialize EPT */
602	error = ept_init();
603	if (error) {
604		printf("vmx_init: ept initialization failed (%d)\n", error);
605		return (error);
606	}
607
608	/*
609	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
610	 */
611	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
612	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
613	cr0_ones_mask = fixed0 & fixed1;
614	cr0_zeros_mask = ~fixed0 & ~fixed1;
615
616	/*
617	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
618	 * if unrestricted guest execution is allowed.
619	 */
620	if (cap_unrestricted_guest)
621		cr0_ones_mask &= ~(CR0_PG | CR0_PE);
622
623	/*
624	 * Do not allow the guest to set CR0_NW or CR0_CD.
625	 */
626	cr0_zeros_mask |= (CR0_NW | CR0_CD);
627
628	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
629	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
630	cr4_ones_mask = fixed0 & fixed1;
631	cr4_zeros_mask = ~fixed0 & ~fixed1;
632
633	vpid_init();
634
635	/* enable VMX operation */
636	smp_rendezvous(NULL, vmx_enable, NULL, NULL);
637
638	vmx_initialized = 1;
639
640	return (0);
641}
642
643static int
644vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
645{
646	int error, mask_ident, shadow_ident;
647	uint64_t mask_value;
648
649	if (which != 0 && which != 4)
650		panic("vmx_setup_cr_shadow: unknown cr%d", which);
651
652	if (which == 0) {
653		mask_ident = VMCS_CR0_MASK;
654		mask_value = cr0_ones_mask | cr0_zeros_mask;
655		shadow_ident = VMCS_CR0_SHADOW;
656	} else {
657		mask_ident = VMCS_CR4_MASK;
658		mask_value = cr4_ones_mask | cr4_zeros_mask;
659		shadow_ident = VMCS_CR4_SHADOW;
660	}
661
662	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
663	if (error)
664		return (error);
665
666	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
667	if (error)
668		return (error);
669
670	return (0);
671}
672#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
673#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
674
675static void *
676vmx_vminit(struct vm *vm, pmap_t pmap)
677{
678	uint16_t vpid[VM_MAXCPU];
679	int i, error, guest_msr_count;
680	struct vmx *vmx;
681
682	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
683	if ((uintptr_t)vmx & PAGE_MASK) {
684		panic("malloc of struct vmx not aligned on %d byte boundary",
685		      PAGE_SIZE);
686	}
687	vmx->vm = vm;
688
689	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
690
691	/*
692	 * Clean up EPTP-tagged guest physical and combined mappings
693	 *
694	 * VMX transitions are not required to invalidate any guest physical
695	 * mappings. So, it may be possible for stale guest physical mappings
696	 * to be present in the processor TLBs.
697	 *
698	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
699	 */
700	ept_invalidate_mappings(vmx->eptp);
701
702	msr_bitmap_initialize(vmx->msr_bitmap);
703
704	/*
705	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
706	 * The guest FSBASE and GSBASE are saved and restored during
707	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
708	 * always restored from the vmcs host state area on vm-exit.
709	 *
710	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored, so they can be directly accessed by
	 * the guest.
713	 *
714	 * Guest KGSBASE is saved and restored in the guest MSR save area.
715	 * Host KGSBASE is restored before returning to userland from the pcb.
716	 * There will be a window of time when we are executing in the host
717	 * kernel context with a value of KGSBASE from the guest. This is ok
718	 * because the value of KGSBASE is inconsequential in kernel context.
719	 *
720	 * MSR_EFER is saved and restored in the guest VMCS area on a
721	 * VM exit and entry respectively. It is also restored from the
722	 * host VMCS area on a VM exit.
723	 */
724	if (guest_msr_rw(vmx, MSR_GSBASE) ||
725	    guest_msr_rw(vmx, MSR_FSBASE) ||
726	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
727	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
728	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
729	    guest_msr_rw(vmx, MSR_KGSBASE) ||
730	    guest_msr_rw(vmx, MSR_EFER))
731		panic("vmx_vminit: error setting guest msr access");
732
733	/*
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
735	 * and entry respectively. It is also restored from the host VMCS
736	 * area on a VM exit. However, if running on a system with no
737	 * MSR_PAT save/restore support, leave access disabled so accesses
738	 * will be trapped.
739	 */
740	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
741		panic("vmx_vminit: error setting guest pat msr access");
742
743	vpid_alloc(vpid, VM_MAXCPU);
744
745	for (i = 0; i < VM_MAXCPU; i++) {
746		vmx->vmcs[i].identifier = vmx_revision();
747		error = vmclear(&vmx->vmcs[i]);
748		if (error != 0) {
749			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
750			      error, i);
751		}
752
753		error = vmcs_set_defaults(&vmx->vmcs[i],
754					  (u_long)vmx_exit_guest,
755					  (u_long)&vmx->ctx[i],
756					  vmx->eptp,
757					  pinbased_ctls,
758					  procbased_ctls,
759					  procbased_ctls2,
760					  exit_ctls, entry_ctls,
761					  vtophys(vmx->msr_bitmap),
762					  vpid[i]);
763
764		if (error != 0)
765			panic("vmx_vminit: vmcs_set_defaults error %d", error);
766
767		vmx->cap[i].set = 0;
768		vmx->cap[i].proc_ctls = procbased_ctls;
769		vmx->cap[i].proc_ctls2 = procbased_ctls2;
770
771		vmx->state[i].lastcpu = -1;
772		vmx->state[i].vpid = vpid[i];
773
774		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
775
776		error = vmcs_set_msr_save(&vmx->vmcs[i],
777					  vtophys(vmx->guest_msrs[i]),
778					  guest_msr_count);
779		if (error != 0)
780			panic("vmcs_set_msr_save error %d", error);
781
782		/*
783		 * Set up the CR0/4 shadows, and init the read shadow
784		 * to the power-on register value from the Intel Sys Arch.
785		 *  CR0 - 0x60000010
786		 *  CR4 - 0
787		 */
788		error = vmx_setup_cr0_shadow(&vmx->vmcs[i], 0x60000010);
789		if (error != 0)
790			panic("vmx_setup_cr0_shadow %d", error);
791
792		error = vmx_setup_cr4_shadow(&vmx->vmcs[i], 0);
793		if (error != 0)
794			panic("vmx_setup_cr4_shadow %d", error);
795
796		vmx->ctx[i].pmap = pmap;
797		vmx->ctx[i].eptp = vmx->eptp;
798	}
799
800	return (vmx);
801}
802
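/*
 * Emulate the CPUID leaf/subleaf requested in the guest's %rax/%rcx and
 * store the results back into the guest GPR context.
 */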
803static int
804vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
805{
806	int handled, func;
807
808	func = vmxctx->guest_rax;
809
810	handled = x86_emulate_cpuid(vm, vcpu,
811				    (uint32_t*)(&vmxctx->guest_rax),
812				    (uint32_t*)(&vmxctx->guest_rbx),
813				    (uint32_t*)(&vmxctx->guest_rcx),
814				    (uint32_t*)(&vmxctx->guest_rdx));
815	return (handled);
816}
817
818static __inline void
819vmx_run_trace(struct vmx *vmx, int vcpu)
820{
821#ifdef KTR
822	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
823#endif
824}
825
826static __inline void
827vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
828	       int handled)
829{
830#ifdef KTR
831	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
832		 handled ? "handled" : "unhandled",
833		 exit_reason_to_str(exit_reason), rip);
834#endif
835}
836
837static __inline void
838vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
839{
840#ifdef KTR
841	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
842#endif
843}
844
845static void
846vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
847{
848	int lastcpu;
849	struct vmxstate *vmxstate;
850	struct invvpid_desc invvpid_desc = { 0 };
851
852	vmxstate = &vmx->state[vcpu];
853	lastcpu = vmxstate->lastcpu;
854	vmxstate->lastcpu = curcpu;
855
856	if (lastcpu == curcpu)
857		return;
858
859	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
860
861	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
862	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
863	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
864
865	/*
866	 * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
867	 *
868	 * We do this because this vcpu was executing on a different host
869	 * cpu when it last ran. We do not track whether it invalidated
870	 * mappings associated with its 'vpid' during that run. So we must
871	 * assume that the mappings associated with 'vpid' on 'curcpu' are
872	 * stale and invalidate them.
873	 *
874	 * Note that we incur this penalty only when the scheduler chooses to
875	 * move the thread associated with this vcpu between host cpus.
876	 *
877	 * Note also that this will invalidate mappings tagged with 'vpid'
878	 * for "all" EP4TAs.
879	 */
880	if (vmxstate->vpid != 0) {
881		invvpid_desc.vpid = vmxstate->vpid;
882		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
883	}
884}
885
886/*
887 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
888 */
889CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
890
891static void __inline
892vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
893{
894
895	vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
896	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
897}
898
899static void __inline
900vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
901{
902
903	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
904	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
905}
906
907static void __inline
908vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
909{
910
911	vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
912	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
913}
914
915static void __inline
916vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
917{
918
919	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
920	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
921}
922
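/*
 * Attempt to inject a pending NMI into the guest. Returns 0 if no NMI is
 * pending. Returns 1 if the NMI was injected or, when NMI injection is
 * blocked, after arming NMI-window exiting so it can be injected later.
 */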
923static int
924vmx_inject_nmi(struct vmx *vmx, int vcpu)
925{
926	uint64_t info, interruptibility;
927
928	/* Bail out if no NMI requested */
929	if (!vm_nmi_pending(vmx->vm, vcpu))
930		return (0);
931
932	interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
933	if (interruptibility & nmi_blocking_bits)
934		goto nmiblocked;
935
936	/*
937	 * Inject the virtual NMI. The vector must be the NMI IDT entry
938	 * or the VMCS entry check will fail.
939	 */
940	info = VMCS_INTERRUPTION_INFO_NMI | VMCS_INTERRUPTION_INFO_VALID;
941	info |= IDT_NMI;
942	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
943
944	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
945
946	/* Clear the request */
947	vm_nmi_clear(vmx->vm, vcpu);
948	return (1);
949
950nmiblocked:
951	/*
952	 * Set the NMI Window Exiting execution control so we can inject
	 * the virtual NMI as soon as the blocking condition goes away.
954	 */
955	vmx_set_nmi_window_exiting(vmx, vcpu);
956
957	VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
958	return (1);
959}
960
961static void
962vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
963{
964	int vector;
965	uint64_t info, rflags, interruptibility;
966
967	const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
968				   VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;
969
970	/*
971	 * If there is already an interrupt pending then just return.
972	 *
973	 * This could happen if an interrupt was injected on a prior
974	 * VM entry but the actual entry into guest mode was aborted
975	 * because of a pending AST.
976	 */
977	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
978	if (info & VMCS_INTERRUPTION_INFO_VALID)
979		return;
980
981	/*
982	 * NMI injection has priority so deal with those first
983	 */
984	if (vmx_inject_nmi(vmx, vcpu))
985		return;
986
987	/* Ask the local apic for a vector to inject */
988	vector = vlapic_pending_intr(vlapic);
989	if (vector < 0)
990		return;
991
992	if (vector < 32 || vector > 255)
993		panic("vmx_inject_interrupts: invalid vector %d\n", vector);
994
995	/* Check RFLAGS.IF and the interruptibility state of the guest */
996	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
997	if ((rflags & PSL_I) == 0)
998		goto cantinject;
999
1000	interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1001	if (interruptibility & HWINTR_BLOCKED)
1002		goto cantinject;
1003
1004	/* Inject the interrupt */
1005	info = VMCS_INTERRUPTION_INFO_HW_INTR | VMCS_INTERRUPTION_INFO_VALID;
1006	info |= vector;
1007	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1008
1009	/* Update the Local APIC ISR */
1010	vlapic_intr_accepted(vlapic, vector);
1011
1012	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1013
1014	return;
1015
1016cantinject:
1017	/*
1018	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as the blocking condition goes away.
1020	 */
1021	vmx_set_int_window_exiting(vmx, vcpu);
1022
1023	VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1024}
1025
1026static int
1027vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1028{
1029	int cr, vmcs_guest_cr, vmcs_shadow_cr;
1030	uint64_t crval, regval, ones_mask, zeros_mask;
1031	const struct vmxctx *vmxctx;
1032
1033	/* We only handle mov to %cr0 or %cr4 at this time */
1034	if ((exitqual & 0xf0) != 0x00)
1035		return (UNHANDLED);
1036
1037	cr = exitqual & 0xf;
1038	if (cr != 0 && cr != 4)
1039		return (UNHANDLED);
1040
1041	regval = 0; /* silence gcc */
1042	vmxctx = &vmx->ctx[vcpu];
1043
1044	/*
1045	 * We must use vmcs_write() directly here because vmcs_setreg() will
1046	 * call vmclear(vmcs) as a side-effect which we certainly don't want.
1047	 */
1048	switch ((exitqual >> 8) & 0xf) {
1049	case 0:
1050		regval = vmxctx->guest_rax;
1051		break;
1052	case 1:
1053		regval = vmxctx->guest_rcx;
1054		break;
1055	case 2:
1056		regval = vmxctx->guest_rdx;
1057		break;
1058	case 3:
1059		regval = vmxctx->guest_rbx;
1060		break;
1061	case 4:
1062		regval = vmcs_read(VMCS_GUEST_RSP);
1063		break;
1064	case 5:
1065		regval = vmxctx->guest_rbp;
1066		break;
1067	case 6:
1068		regval = vmxctx->guest_rsi;
1069		break;
1070	case 7:
1071		regval = vmxctx->guest_rdi;
1072		break;
1073	case 8:
1074		regval = vmxctx->guest_r8;
1075		break;
1076	case 9:
1077		regval = vmxctx->guest_r9;
1078		break;
1079	case 10:
1080		regval = vmxctx->guest_r10;
1081		break;
1082	case 11:
1083		regval = vmxctx->guest_r11;
1084		break;
1085	case 12:
1086		regval = vmxctx->guest_r12;
1087		break;
1088	case 13:
1089		regval = vmxctx->guest_r13;
1090		break;
1091	case 14:
1092		regval = vmxctx->guest_r14;
1093		break;
1094	case 15:
1095		regval = vmxctx->guest_r15;
1096		break;
1097	}
1098
1099	if (cr == 0) {
1100		ones_mask = cr0_ones_mask;
1101		zeros_mask = cr0_zeros_mask;
1102		vmcs_guest_cr = VMCS_GUEST_CR0;
1103		vmcs_shadow_cr = VMCS_CR0_SHADOW;
1104	} else {
1105		ones_mask = cr4_ones_mask;
1106		zeros_mask = cr4_zeros_mask;
1107		vmcs_guest_cr = VMCS_GUEST_CR4;
1108		vmcs_shadow_cr = VMCS_CR4_SHADOW;
1109	}
1110	vmcs_write(vmcs_shadow_cr, regval);
1111
1112	crval = regval | ones_mask;
1113	crval &= ~zeros_mask;
1114	vmcs_write(vmcs_guest_cr, crval);
1115
1116	if (cr == 0 && regval & CR0_PG) {
1117		uint64_t efer, entry_ctls;
1118
1119		/*
1120		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1121		 * the "IA-32e mode guest" bit in VM-entry control must be
1122		 * equal.
1123		 */
1124		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1125		if (efer & EFER_LME) {
1126			efer |= EFER_LMA;
1127			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1128			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1129			entry_ctls |= VM_ENTRY_GUEST_LMA;
1130			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1131		}
1132	}
1133
1134	return (HANDLED);
1135}
1136
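/*
 * Derive the VM_PROT_* fault type from the EPT violation exit qualification.
 */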
1137static int
1138ept_fault_type(uint64_t ept_qual)
1139{
1140	int fault_type;
1141
1142	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1143		fault_type = VM_PROT_WRITE;
1144	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1145		fault_type = VM_PROT_EXECUTE;
1146	else
		fault_type = VM_PROT_READ;
1148
1149	return (fault_type);
1150}
1151
1152static boolean_t
1153ept_emulation_fault(uint64_t ept_qual)
1154{
1155	int read, write;
1156
1157	/* EPT fault on an instruction fetch doesn't make sense here */
1158	if (ept_qual & EPT_VIOLATION_INST_FETCH)
1159		return (FALSE);
1160
1161	/* EPT fault must be a read fault or a write fault */
1162	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1163	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1164	if ((read | write) == 0)
1165		return (FALSE);
1166
1167	/*
1168	 * The EPT violation must have been caused by accessing a
1169	 * guest-physical address that is a translation of a guest-linear
1170	 * address.
1171	 */
1172	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1173	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1174		return (FALSE);
1175	}
1176
1177	return (TRUE);
1178}
1179
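/*
 * Process a VM exit: capture the exit state from the VMCS, re-inject any
 * event whose delivery was cut short by the exit, and either handle the
 * exit in the kernel (returning 1) or fill in 'vmexit' for further
 * processing in userland (returning 0).
 */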
1180static int
1181vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1182{
1183	int error, handled;
1184	struct vmxctx *vmxctx;
1185	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, reason;
1186	uint64_t qual, gpa;
1187	bool retu;
1188
1189	handled = 0;
1190	vmxctx = &vmx->ctx[vcpu];
1191
1192	/* Collect some information for VM exit processing */
1193	vmexit->rip = vmcs_guest_rip();
1194	vmexit->inst_length = vmexit_instruction_length();
1195	vmexit->u.vmx.exit_reason = vmcs_exit_reason();
1196	vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
1197
1198	qual = vmexit->u.vmx.exit_qualification;
1199	reason = vmexit->u.vmx.exit_reason;
1200	vmexit->exitcode = VM_EXITCODE_BOGUS;
1201
1202	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
1203
1204	/*
1205	 * VM exits that could be triggered during event injection on the
1206	 * previous VM entry need to be handled specially by re-injecting
1207	 * the event.
1208	 *
1209	 * See "Information for VM Exits During Event Delivery" in Intel SDM
1210	 * for details.
1211	 */
1212	switch (reason) {
1213	case EXIT_REASON_EPT_FAULT:
1214	case EXIT_REASON_EPT_MISCONFIG:
1215	case EXIT_REASON_APIC:
1216	case EXIT_REASON_TASK_SWITCH:
1217	case EXIT_REASON_EXCEPTION:
1218		idtvec_info = vmcs_idt_vectoring_info();
1219		if (idtvec_info & VMCS_IDT_VEC_VALID) {
1220			idtvec_info &= ~(1 << 12); /* clear undefined bit */
1221			vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
1222			if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
1223				idtvec_err = vmcs_idt_vectoring_err();
1224				vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
1225				    idtvec_err);
1226			}
1227			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
1228		}
1229	default:
1230		break;
1231	}
1232
1233	switch (reason) {
1234	case EXIT_REASON_CR_ACCESS:
1235		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
1236		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
1237		break;
1238	case EXIT_REASON_RDMSR:
1239		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
1240		retu = false;
1241		ecx = vmxctx->guest_rcx;
1242		error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
1243		if (error) {
1244			vmexit->exitcode = VM_EXITCODE_RDMSR;
1245			vmexit->u.msr.code = ecx;
1246		} else if (!retu) {
1247			handled = 1;
1248		} else {
1249			/* Return to userspace with a valid exitcode */
1250			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_rdmsr retu with bogus exitcode"));
1252		}
1253		break;
1254	case EXIT_REASON_WRMSR:
1255		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
1256		retu = false;
1257		eax = vmxctx->guest_rax;
1258		ecx = vmxctx->guest_rcx;
1259		edx = vmxctx->guest_rdx;
1260		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
1261		    (uint64_t)edx << 32 | eax, &retu);
1262		if (error) {
1263			vmexit->exitcode = VM_EXITCODE_WRMSR;
1264			vmexit->u.msr.code = ecx;
1265			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
1266		} else if (!retu) {
1267			handled = 1;
1268		} else {
1269			/* Return to userspace with a valid exitcode */
1270			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1271			    ("emulate_wrmsr retu with bogus exitcode"));
1272		}
1273		break;
1274	case EXIT_REASON_HLT:
1275		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
1276		vmexit->exitcode = VM_EXITCODE_HLT;
1277		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1278		break;
1279	case EXIT_REASON_MTF:
1280		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
1281		vmexit->exitcode = VM_EXITCODE_MTRAP;
1282		break;
1283	case EXIT_REASON_PAUSE:
1284		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
1285		vmexit->exitcode = VM_EXITCODE_PAUSE;
1286		break;
1287	case EXIT_REASON_INTR_WINDOW:
1288		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
1289		vmx_clear_int_window_exiting(vmx, vcpu);
1290		VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1291		return (1);
1292	case EXIT_REASON_EXT_INTR:
1293		/*
1294		 * External interrupts serve only to cause VM exits and allow
1295		 * the host interrupt handler to run.
1296		 *
1297		 * If this external interrupt triggers a virtual interrupt
1298		 * to a VM, then that state will be recorded by the
1299		 * host interrupt handler in the VM's softc. We will inject
1300		 * this virtual interrupt during the subsequent VM enter.
1301		 */
1302
1303		/*
		 * This is special. We want to treat this as a 'handled'
1305		 * VM-exit but not increment the instruction pointer.
1306		 */
1307		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
1308		return (1);
1309	case EXIT_REASON_NMI_WINDOW:
1310		/* Exit to allow the pending virtual NMI to be injected */
1311		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
1312		vmx_clear_nmi_window_exiting(vmx, vcpu);
1313		VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1314		return (1);
1315	case EXIT_REASON_INOUT:
1316		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
1317		vmexit->exitcode = VM_EXITCODE_INOUT;
1318		vmexit->u.inout.bytes = (qual & 0x7) + 1;
1319		vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
1320		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
1321		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
1322		vmexit->u.inout.port = (uint16_t)(qual >> 16);
1323		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
1324		break;
1325	case EXIT_REASON_CPUID:
1326		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
1327		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
1328		break;
1329	case EXIT_REASON_EPT_FAULT:
1330		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
1331		/*
		 * If 'gpa' lies within the address space allocated to guest
		 * memory then this must be a nested page fault; otherwise it
		 * must be an instruction that accesses MMIO space.
1335		 */
1336		gpa = vmcs_gpa();
1337		if (vm_mem_allocated(vmx->vm, gpa)) {
1338			vmexit->exitcode = VM_EXITCODE_PAGING;
1339			vmexit->u.paging.gpa = gpa;
1340			vmexit->u.paging.fault_type = ept_fault_type(qual);
1341		} else if (ept_emulation_fault(qual)) {
1342			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1343			vmexit->u.inst_emul.gpa = gpa;
1344			vmexit->u.inst_emul.gla = vmcs_gla();
1345			vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
1346		}
1347		break;
1348	default:
1349		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
1350		break;
1351	}
1352
1353	if (handled) {
1354		/*
1355		 * It is possible that control is returned to userland
1356		 * even though we were able to handle the VM exit in the
1357		 * kernel.
1358		 *
1359		 * In such a case we want to make sure that the userland
1360		 * restarts guest execution at the instruction *after*
1361		 * the one we just processed. Therefore we update the
1362		 * guest rip in the VMCS and in 'vmexit'.
1363		 */
1364		vmexit->rip += vmexit->inst_length;
1365		vmexit->inst_length = 0;
1366		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
1367	} else {
1368		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1369			/*
1370			 * If this VM exit was not claimed by anybody then
1371			 * treat it as a generic VMX exit.
1372			 */
1373			vmexit->exitcode = VM_EXITCODE_VMX;
1374			vmexit->u.vmx.status = VM_SUCCESS;
1375		} else {
1376			/*
1377			 * The exitcode and collateral have been populated.
1378			 * The VM exit will be processed further in userland.
1379			 */
1380		}
1381	}
1382	return (handled);
1383}
1384
1385static __inline int
1386vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1387{
1388
1389	vmexit->rip = vmcs_guest_rip();
1390	vmexit->inst_length = 0;
1391	vmexit->exitcode = VM_EXITCODE_BOGUS;
1392	vmx_astpending_trace(vmx, vcpu, vmexit->rip);
1393	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
1394
1395	return (HANDLED);
1396}
1397
1398static __inline int
1399vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
1400{
1401
1402	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
1403	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
1404	    vmxctx->inst_fail_status));
1405
1406	vmexit->rip = vmcs_guest_rip();
1407	vmexit->inst_length = 0;
1408
1409	vmexit->exitcode = VM_EXITCODE_VMX;
1410	vmexit->u.vmx.status = vmxctx->inst_fail_status;
1411	vmexit->u.vmx.inst_error = vmcs_instruction_error();
1412	vmexit->u.vmx.exit_reason = ~0;
1413	vmexit->u.vmx.exit_qualification = ~0;
1414
1415	switch (rc) {
1416	case VMX_VMRESUME_ERROR:
1417	case VMX_VMLAUNCH_ERROR:
1418	case VMX_INVEPT_ERROR:
1419		vmexit->u.vmx.inst_type = rc;
1420		break;
1421	default:
1422		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
1423	}
1424
1425	return (UNHANDLED);
1426}
1427
1428static int
1429vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap)
1430{
1431	int rc, handled, launched;
1432	struct vmx *vmx;
1433	struct vmxctx *vmxctx;
1434	struct vmcs *vmcs;
1435	struct vm_exit *vmexit;
1436	struct vlapic *vlapic;
1437
1438	vmx = arg;
1439	vmcs = &vmx->vmcs[vcpu];
1440	vmxctx = &vmx->ctx[vcpu];
1441	vlapic = vm_lapic(vmx->vm, vcpu);
1442	vmexit = vm_exitinfo(vmx->vm, vcpu);
1443	launched = 0;
1444
1445	KASSERT(vmxctx->pmap == pmap,
1446	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
1447	KASSERT(vmxctx->eptp == vmx->eptp,
	    ("eptp %#lx different than ctx eptp %#lx", vmx->eptp, vmxctx->eptp));
1449
1450	VMPTRLD(vmcs);
1451
1452	/*
1453	 * XXX
1454	 * We do this every time because we may setup the virtual machine
1455	 * from a different process than the one that actually runs it.
1456	 *
1457	 * If the life of a virtual machine was spent entirely in the context
1458	 * of a single process we could do this once in vmcs_set_defaults().
1459	 */
1460	vmcs_write(VMCS_HOST_CR3, rcr3());
1461
1462	vmcs_write(VMCS_GUEST_RIP, startrip);
1463	vmx_set_pcpu_defaults(vmx, vcpu);
1464	do {
1465		/*
1466		 * Interrupts are disabled from this point on until the
1467		 * guest starts executing. This is done for the following
1468		 * reasons:
1469		 *
1470		 * If an AST is asserted on this thread after the check below,
1471		 * then the IPI_AST notification will not be lost, because it
1472		 * will cause a VM exit due to external interrupt as soon as
1473		 * the guest state is loaded.
1474		 *
1475		 * A posted interrupt after 'vmx_inject_interrupts()' will
1476		 * not be "lost" because it will be held pending in the host
1477		 * APIC because interrupts are disabled. The pending interrupt
1478		 * will be recognized as soon as the guest state is loaded.
1479		 *
1480		 * The same reasoning applies to the IPI generated by
1481		 * pmap_invalidate_ept().
1482		 */
1483		disable_intr();
1484		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
1485			enable_intr();
1486			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
1487			break;
1488		}
1489
1490		vmx_inject_interrupts(vmx, vcpu, vlapic);
1491		vmx_run_trace(vmx, vcpu);
1492		rc = vmx_enter_guest(vmxctx, launched);
1493		enable_intr();
1494		if (rc == VMX_GUEST_VMEXIT) {
1495			launched = 1;
1496			handled = vmx_exit_process(vmx, vcpu, vmexit);
1497		} else {
1498			handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
1499		}
1500
1501		vmx_exit_trace(vmx, vcpu, vmexit->rip,
1502		    vmexit->u.vmx.exit_reason, handled);
1503	} while (handled);
1504
1505	/*
	 * If a VM exit has been handled then the exitcode must be BOGUS.
	 * If a VM exit is not handled then the exitcode must not be BOGUS.
1508	 */
1509	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
1510	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
1511		panic("Mismatch between handled (%d) and exitcode (%d)",
1512		      handled, vmexit->exitcode);
1513	}
1514
1515	if (!handled)
1516		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);
1517
1518	VCPU_CTR1(vmx->vm, vcpu, "returning from vmx_run: exitcode %d",
1519	    vmexit->exitcode);
1520
1521	VMCLEAR(vmcs);
1522	return (0);
1523}
1524
1525static void
1526vmx_vmcleanup(void *arg)
1527{
1528	int i, error;
1529	struct vmx *vmx = arg;
1530
1531	for (i = 0; i < VM_MAXCPU; i++)
1532		vpid_free(vmx->state[i].vpid);
1533
1534	/*
1535	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
1536	 */
1537	error = vmclear(&vmx->vmcs[0]);
1538	if (error != 0)
1539		panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);
1540
1541	free(vmx, M_VMX);
1542
1543	return;
1544}
1545
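/*
 * Return a pointer to the saved copy of the guest general purpose register
 * 'reg' in the vmxctx, or NULL if the register is not kept there.
 */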
1546static register_t *
1547vmxctx_regptr(struct vmxctx *vmxctx, int reg)
1548{
1549
1550	switch (reg) {
1551	case VM_REG_GUEST_RAX:
1552		return (&vmxctx->guest_rax);
1553	case VM_REG_GUEST_RBX:
1554		return (&vmxctx->guest_rbx);
1555	case VM_REG_GUEST_RCX:
1556		return (&vmxctx->guest_rcx);
1557	case VM_REG_GUEST_RDX:
1558		return (&vmxctx->guest_rdx);
1559	case VM_REG_GUEST_RSI:
1560		return (&vmxctx->guest_rsi);
1561	case VM_REG_GUEST_RDI:
1562		return (&vmxctx->guest_rdi);
1563	case VM_REG_GUEST_RBP:
1564		return (&vmxctx->guest_rbp);
1565	case VM_REG_GUEST_R8:
1566		return (&vmxctx->guest_r8);
1567	case VM_REG_GUEST_R9:
1568		return (&vmxctx->guest_r9);
1569	case VM_REG_GUEST_R10:
1570		return (&vmxctx->guest_r10);
1571	case VM_REG_GUEST_R11:
1572		return (&vmxctx->guest_r11);
1573	case VM_REG_GUEST_R12:
1574		return (&vmxctx->guest_r12);
1575	case VM_REG_GUEST_R13:
1576		return (&vmxctx->guest_r13);
1577	case VM_REG_GUEST_R14:
1578		return (&vmxctx->guest_r14);
1579	case VM_REG_GUEST_R15:
1580		return (&vmxctx->guest_r15);
1581	default:
1582		break;
1583	}
1584	return (NULL);
1585}
1586
1587static int
1588vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
1589{
1590	register_t *regp;
1591
1592	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1593		*retval = *regp;
1594		return (0);
1595	} else
1596		return (EINVAL);
1597}
1598
1599static int
1600vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
1601{
1602	register_t *regp;
1603
1604	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1605		*regp = val;
1606		return (0);
1607	} else
1608		return (EINVAL);
1609}
1610
1611static int
1612vmx_shadow_reg(int reg)
1613{
1614	int shreg;
1615
1616	shreg = -1;
1617
1618	switch (reg) {
1619	case VM_REG_GUEST_CR0:
1620		shreg = VMCS_CR0_SHADOW;
		break;
	case VM_REG_GUEST_CR4:
1623		shreg = VMCS_CR4_SHADOW;
1624		break;
1625	default:
1626		break;
1627	}
1628
1629	return (shreg);
1630}
1631
1632static int
1633vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
1634{
1635	int running, hostcpu;
1636	struct vmx *vmx = arg;
1637
1638	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
1639	if (running && hostcpu != curcpu)
1640		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
1641
1642	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
1643		return (0);
1644
1645	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
1646}
1647
1648static int
1649vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
1650{
1651	int error, hostcpu, running, shadow;
1652	uint64_t ctls;
1653	struct vmx *vmx = arg;
1654
1655	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
1656	if (running && hostcpu != curcpu)
1657		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
1658
1659	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
1660		return (0);
1661
1662	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
1663
1664	if (error == 0) {
1665		/*
1666		 * If the "load EFER" VM-entry control is 1 then the
1667		 * value of EFER.LMA must be identical to "IA-32e mode guest"
1668		 * bit in the VM-entry control.
1669		 */
1670		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
1671		    (reg == VM_REG_GUEST_EFER)) {
1672			vmcs_getreg(&vmx->vmcs[vcpu], running,
1673				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
1674			if (val & EFER_LMA)
1675				ctls |= VM_ENTRY_GUEST_LMA;
1676			else
1677				ctls &= ~VM_ENTRY_GUEST_LMA;
1678			vmcs_setreg(&vmx->vmcs[vcpu], running,
1679				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
1680		}
1681
1682		shadow = vmx_shadow_reg(reg);
1683		if (shadow > 0) {
1684			/*
1685			 * Store the unmodified value in the shadow
1686			 */
1687			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
1688				    VMCS_IDENT(shadow), val);
1689		}
1690	}
1691
1692	return (error);
1693}
1694
1695static int
1696vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1697{
1698	struct vmx *vmx = arg;
1699
1700	return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
1701}
1702
1703static int
1704vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1705{
1706	struct vmx *vmx = arg;
1707
1708	return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
1709}
1710
1711static int
1712vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
1713	   int code_valid)
1714{
1715	int error;
1716	uint64_t info;
1717	struct vmx *vmx = arg;
1718	struct vmcs *vmcs = &vmx->vmcs[vcpu];
1719
1720	static uint32_t type_map[VM_EVENT_MAX] = {
1721		0x1,		/* VM_EVENT_NONE */
1722		0x0,		/* VM_HW_INTR */
1723		0x2,		/* VM_NMI */
1724		0x3,		/* VM_HW_EXCEPTION */
1725		0x4,		/* VM_SW_INTR */
1726		0x5,		/* VM_PRIV_SW_EXCEPTION */
1727		0x6,		/* VM_SW_EXCEPTION */
1728	};
1729
1730	/*
1731	 * If there is already an exception pending to be delivered to the
1732	 * vcpu then just return.
1733	 */
1734	error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
1735	if (error)
1736		return (error);
1737
1738	if (info & VMCS_INTERRUPTION_INFO_VALID)
1739		return (EAGAIN);
1740
1741	info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
1742	info |= VMCS_INTERRUPTION_INFO_VALID;
1743	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
1744	if (error != 0)
1745		return (error);
1746
1747	if (code_valid) {
1748		error = vmcs_setreg(vmcs, 0,
1749				    VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
1750				    code);
1751	}
1752	return (error);
1753}
1754
1755static int
1756vmx_getcap(void *arg, int vcpu, int type, int *retval)
1757{
1758	struct vmx *vmx = arg;
1759	int vcap;
1760	int ret;
1761
1762	ret = ENOENT;
1763
1764	vcap = vmx->cap[vcpu].set;
1765
1766	switch (type) {
1767	case VM_CAP_HALT_EXIT:
1768		if (cap_halt_exit)
1769			ret = 0;
1770		break;
1771	case VM_CAP_PAUSE_EXIT:
1772		if (cap_pause_exit)
1773			ret = 0;
1774		break;
1775	case VM_CAP_MTRAP_EXIT:
1776		if (cap_monitor_trap)
1777			ret = 0;
1778		break;
1779	case VM_CAP_UNRESTRICTED_GUEST:
1780		if (cap_unrestricted_guest)
1781			ret = 0;
1782		break;
1783	case VM_CAP_ENABLE_INVPCID:
1784		if (cap_invpcid)
1785			ret = 0;
1786		break;
1787	default:
1788		break;
1789	}
1790
1791	if (ret == 0)
1792		*retval = (vcap & (1 << type)) ? 1 : 0;
1793
1794	return (ret);
1795}
1796
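/*
 * Enable or disable the optional capability 'type' for a vcpu by toggling
 * the corresponding bit in the primary or secondary processor-based
 * VM-execution controls and recording the setting in vmx->cap[vcpu].
 */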
1797static int
1798vmx_setcap(void *arg, int vcpu, int type, int val)
1799{
1800	struct vmx *vmx = arg;
1801	struct vmcs *vmcs = &vmx->vmcs[vcpu];
1802	uint32_t baseval;
1803	uint32_t *pptr;
1804	int error;
1805	int flag;
1806	int reg;
1807	int retval;
1808
1809	retval = ENOENT;
1810	pptr = NULL;
1811
1812	switch (type) {
1813	case VM_CAP_HALT_EXIT:
1814		if (cap_halt_exit) {
1815			retval = 0;
1816			pptr = &vmx->cap[vcpu].proc_ctls;
1817			baseval = *pptr;
1818			flag = PROCBASED_HLT_EXITING;
1819			reg = VMCS_PRI_PROC_BASED_CTLS;
1820		}
1821		break;
1822	case VM_CAP_MTRAP_EXIT:
1823		if (cap_monitor_trap) {
1824			retval = 0;
1825			pptr = &vmx->cap[vcpu].proc_ctls;
1826			baseval = *pptr;
1827			flag = PROCBASED_MTF;
1828			reg = VMCS_PRI_PROC_BASED_CTLS;
1829		}
1830		break;
1831	case VM_CAP_PAUSE_EXIT:
1832		if (cap_pause_exit) {
1833			retval = 0;
1834			pptr = &vmx->cap[vcpu].proc_ctls;
1835			baseval = *pptr;
1836			flag = PROCBASED_PAUSE_EXITING;
1837			reg = VMCS_PRI_PROC_BASED_CTLS;
1838		}
1839		break;
1840	case VM_CAP_UNRESTRICTED_GUEST:
1841		if (cap_unrestricted_guest) {
1842			retval = 0;
1843			pptr = &vmx->cap[vcpu].proc_ctls2;
1844			baseval = *pptr;
1845			flag = PROCBASED2_UNRESTRICTED_GUEST;
1846			reg = VMCS_SEC_PROC_BASED_CTLS;
1847		}
1848		break;
1849	case VM_CAP_ENABLE_INVPCID:
1850		if (cap_invpcid) {
1851			retval = 0;
1852			pptr = &vmx->cap[vcpu].proc_ctls2;
1853			baseval = *pptr;
1854			flag = PROCBASED2_ENABLE_INVPCID;
1855			reg = VMCS_SEC_PROC_BASED_CTLS;
1856		}
1857		break;
1858	default:
1859		break;
1860	}
1861
1862	if (retval == 0) {
1863		if (val) {
1864			baseval |= flag;
1865		} else {
1866			baseval &= ~flag;
1867		}
1868		VMPTRLD(vmcs);
1869		error = vmwrite(reg, baseval);
1870		VMCLEAR(vmcs);
1871
1872		if (error) {
1873			retval = error;
1874		} else {
1875			/*
1876			 * Update optional stored flags, and record
1877			 * setting
1878			 */
1879			if (pptr != NULL) {
1880				*pptr = baseval;
1881			}
1882
1883			if (val) {
1884				vmx->cap[vcpu].set |= (1 << type);
1885			} else {
1886				vmx->cap[vcpu].set &= ~(1 << type);
1887			}
1888		}
1889	}
1890
	return (retval);
1892}
1893
1894static struct vlapic *
1895vmx_vlapic_init(void *arg, int vcpuid)
1896{
1897	struct vmx *vmx;
1898	struct vlapic *vlapic;
1899
1900	vmx = arg;
1901
1902	vlapic = malloc(sizeof(struct vlapic), M_VLAPIC, M_WAITOK | M_ZERO);
1903	vlapic->vm = vmx->vm;
1904	vlapic->vcpuid = vcpuid;
1905	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
1906
1907	vlapic_init(vlapic);
1908
1909	return (vlapic);
1910}
1911
1912static void
1913vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
1914{
1915
1916	vlapic_cleanup(vlapic);
1917	free(vlapic, M_VLAPIC);
1918}
1919
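/*
 * Table of Intel/VMX callbacks consumed by the machine-independent vmm code.
 */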
1920struct vmm_ops vmm_ops_intel = {
1921	vmx_init,
1922	vmx_cleanup,
1923	vmx_restore,
1924	vmx_vminit,
1925	vmx_run,
1926	vmx_vmcleanup,
1927	vmx_getreg,
1928	vmx_setreg,
1929	vmx_getdesc,
1930	vmx_setdesc,
1931	vmx_inject,
1932	vmx_getcap,
1933	vmx_setcap,
1934	ept_vmspace_alloc,
1935	ept_vmspace_free,
1936	vmx_vlapic_init,
1937	vmx_vlapic_cleanup,
1938};
1939