vmx.c revision 249351
1/*-
2 * Copyright (c) 2011 NetApp, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 249351 2013-04-11 04:29:45Z neel $
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 249351 2013-04-11 04:29:45Z neel $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/smp.h>
35#include <sys/kernel.h>
36#include <sys/malloc.h>
37#include <sys/pcpu.h>
38#include <sys/proc.h>
39
40#include <vm/vm.h>
41#include <vm/pmap.h>
42
43#include <machine/psl.h>
44#include <machine/cpufunc.h>
45#include <machine/md_var.h>
46#include <machine/pmap.h>
47#include <machine/segments.h>
48#include <machine/specialreg.h>
49#include <machine/vmparam.h>
50
51#include <x86/apicreg.h>
52
53#include <machine/vmm.h>
54#include "vmm_host.h"
55#include "vmm_lapic.h"
56#include "vmm_msr.h"
57#include "vmm_ktr.h"
58#include "vmm_stat.h"
59
60#include "vmx_msr.h"
61#include "ept.h"
62#include "vmx_cpufunc.h"
63#include "vmx.h"
64#include "x86.h"
65#include "vmx_controls.h"
66
67#define	PINBASED_CTLS_ONE_SETTING					\
68	(PINBASED_EXTINT_EXITING	|				\
69	 PINBASED_NMI_EXITING		|				\
70	 PINBASED_VIRTUAL_NMI)
71#define	PINBASED_CTLS_ZERO_SETTING	0
72
73#define PROCBASED_CTLS_WINDOW_SETTING					\
74	(PROCBASED_INT_WINDOW_EXITING	|				\
75	 PROCBASED_NMI_WINDOW_EXITING)
76
77#define	PROCBASED_CTLS_ONE_SETTING 					\
78	(PROCBASED_SECONDARY_CONTROLS	|				\
79	 PROCBASED_IO_EXITING		|				\
80	 PROCBASED_MSR_BITMAPS		|				\
81	 PROCBASED_CTLS_WINDOW_SETTING)
82#define	PROCBASED_CTLS_ZERO_SETTING	\
83	(PROCBASED_CR3_LOAD_EXITING |	\
84	PROCBASED_CR3_STORE_EXITING |	\
85	PROCBASED_IO_BITMAPS)
86
87#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
88#define	PROCBASED_CTLS2_ZERO_SETTING	0
89
90#define VM_EXIT_CTLS_ONE_SETTING_NO_PAT					\
91	(VM_EXIT_HOST_LMA			|			\
92	VM_EXIT_SAVE_EFER			|			\
93	VM_EXIT_LOAD_EFER)
94
95#define	VM_EXIT_CTLS_ONE_SETTING					\
96	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT       	|			\
97	VM_EXIT_SAVE_PAT			|			\
98	VM_EXIT_LOAD_PAT)
99#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS
100
101#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER
102
103#define	VM_ENTRY_CTLS_ONE_SETTING					\
104	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT     	|			\
105	VM_ENTRY_LOAD_PAT)
106#define	VM_ENTRY_CTLS_ZERO_SETTING					\
107	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
108	VM_ENTRY_INTO_SMM			|			\
109	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
110
111#define	guest_msr_rw(vmx, msr) \
112	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
113
114#define	HANDLED		1
115#define	UNHANDLED	0
116
117MALLOC_DEFINE(M_VMX, "vmx", "vmx");
118
119int vmxon_enabled[MAXCPU];
120static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
121
122static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
123static uint32_t exit_ctls, entry_ctls;
124
125static uint64_t cr0_ones_mask, cr0_zeros_mask;
126static uint64_t cr4_ones_mask, cr4_zeros_mask;
127
128static volatile u_int nextvpid;
129
130static int vmx_no_patmsr;
131
132/*
133 * Virtual NMI blocking conditions.
134 *
135 * Some processor implementations also require NMI to be blocked if
136 * the STI_BLOCKING bit is set. It is possible to detect this at runtime
137 * based on the (exit_reason,exit_qual) tuple being set to
138 * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
139 *
140 * We take the easy way out and also include STI_BLOCKING as one of the
141 * gating items for vNMI injection.
142 */
143static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
144				    VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
145				    VMCS_INTERRUPTIBILITY_STI_BLOCKING;
146
147/*
148 * Optional capabilities
149 */
150static int cap_halt_exit;
151static int cap_pause_exit;
152static int cap_unrestricted_guest;
153static int cap_monitor_trap;
154
155/* statistics */
156static VMM_STAT_INTEL(VMEXIT_HLT_IGNORED, "number of times hlt was ignored");
157
158#ifdef KTR
159static const char *
160exit_reason_to_str(int reason)
161{
162	static char reasonbuf[32];
163
164	switch (reason) {
165	case EXIT_REASON_EXCEPTION:
166		return "exception";
167	case EXIT_REASON_EXT_INTR:
168		return "extint";
169	case EXIT_REASON_TRIPLE_FAULT:
170		return "triplefault";
171	case EXIT_REASON_INIT:
172		return "init";
173	case EXIT_REASON_SIPI:
174		return "sipi";
175	case EXIT_REASON_IO_SMI:
176		return "iosmi";
177	case EXIT_REASON_SMI:
178		return "smi";
179	case EXIT_REASON_INTR_WINDOW:
180		return "intrwindow";
181	case EXIT_REASON_NMI_WINDOW:
182		return "nmiwindow";
183	case EXIT_REASON_TASK_SWITCH:
184		return "taskswitch";
185	case EXIT_REASON_CPUID:
186		return "cpuid";
187	case EXIT_REASON_GETSEC:
188		return "getsec";
189	case EXIT_REASON_HLT:
190		return "hlt";
191	case EXIT_REASON_INVD:
192		return "invd";
193	case EXIT_REASON_INVLPG:
194		return "invlpg";
195	case EXIT_REASON_RDPMC:
196		return "rdpmc";
197	case EXIT_REASON_RDTSC:
198		return "rdtsc";
199	case EXIT_REASON_RSM:
200		return "rsm";
201	case EXIT_REASON_VMCALL:
202		return "vmcall";
203	case EXIT_REASON_VMCLEAR:
204		return "vmclear";
205	case EXIT_REASON_VMLAUNCH:
206		return "vmlaunch";
207	case EXIT_REASON_VMPTRLD:
208		return "vmptrld";
209	case EXIT_REASON_VMPTRST:
210		return "vmptrst";
211	case EXIT_REASON_VMREAD:
212		return "vmread";
213	case EXIT_REASON_VMRESUME:
214		return "vmresume";
215	case EXIT_REASON_VMWRITE:
216		return "vmwrite";
217	case EXIT_REASON_VMXOFF:
218		return "vmxoff";
219	case EXIT_REASON_VMXON:
220		return "vmxon";
221	case EXIT_REASON_CR_ACCESS:
222		return "craccess";
223	case EXIT_REASON_DR_ACCESS:
224		return "draccess";
225	case EXIT_REASON_INOUT:
226		return "inout";
227	case EXIT_REASON_RDMSR:
228		return "rdmsr";
229	case EXIT_REASON_WRMSR:
230		return "wrmsr";
231	case EXIT_REASON_INVAL_VMCS:
232		return "invalvmcs";
233	case EXIT_REASON_INVAL_MSR:
234		return "invalmsr";
235	case EXIT_REASON_MWAIT:
236		return "mwait";
237	case EXIT_REASON_MTF:
238		return "mtf";
239	case EXIT_REASON_MONITOR:
240		return "monitor";
241	case EXIT_REASON_PAUSE:
242		return "pause";
243	case EXIT_REASON_MCE:
244		return "mce";
245	case EXIT_REASON_TPR:
246		return "tpr";
247	case EXIT_REASON_APIC:
248		return "apic";
249	case EXIT_REASON_GDTR_IDTR:
250		return "gdtridtr";
251	case EXIT_REASON_LDTR_TR:
252		return "ldtrtr";
253	case EXIT_REASON_EPT_FAULT:
254		return "eptfault";
255	case EXIT_REASON_EPT_MISCONFIG:
256		return "eptmisconfig";
257	case EXIT_REASON_INVEPT:
258		return "invept";
259	case EXIT_REASON_RDTSCP:
260		return "rdtscp";
261	case EXIT_REASON_VMX_PREEMPT:
262		return "vmxpreempt";
263	case EXIT_REASON_INVVPID:
264		return "invvpid";
265	case EXIT_REASON_WBINVD:
266		return "wbinvd";
267	case EXIT_REASON_XSETBV:
268		return "xsetbv";
269	default:
270		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
271		return (reasonbuf);
272	}
273}
274
275#ifdef SETJMP_TRACE
276static const char *
277vmx_setjmp_rc2str(int rc)
278{
279	switch (rc) {
280	case VMX_RETURN_DIRECT:
281		return "direct";
282	case VMX_RETURN_LONGJMP:
283		return "longjmp";
284	case VMX_RETURN_VMRESUME:
285		return "vmresume";
286	case VMX_RETURN_VMLAUNCH:
287		return "vmlaunch";
288	case VMX_RETURN_AST:
289		return "ast";
290	default:
291		return "unknown";
292	}
293}
294
295#define	SETJMP_TRACE(vmx, vcpu, vmxctx, regname)			  \
296	VMM_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx", \
297		 (vmxctx)->regname)
298
299static void
300vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
301{
302	uint64_t host_rip, host_rsp;
303
304	if (vmxctx != &vmx->ctx[vcpu])
305		panic("vmx_setjmp_trace: invalid vmxctx %p; should be %p",
306			vmxctx, &vmx->ctx[vcpu]);
307
308	VMM_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
309	VMM_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
310		 vmx_setjmp_rc2str(rc), rc);
311
312	host_rsp = host_rip = ~0;
313	vmread(VMCS_HOST_RIP, &host_rip);
314	vmread(VMCS_HOST_RSP, &host_rsp);
315	VMM_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp 0x%016lx",
316		 host_rip, host_rsp);
317
318	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r15);
319	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r14);
320	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r13);
321	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r12);
322	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbp);
323	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rsp);
324	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbx);
325	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rip);
326
327	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdi);
328	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rsi);
329	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdx);
330	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rcx);
331	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r8);
332	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r9);
333	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rax);
334	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbx);
335	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbp);
336	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r10);
337	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r11);
338	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r12);
339	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r13);
340	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r14);
341	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r15);
342	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_cr2);
343}
344#endif
345#else
346static void __inline
347vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
348{
349	return;
350}
351#endif	/* KTR */
352
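/*
 * vmx_fix_cr0() and vmx_fix_cr4() force a control register value to honor
 * the bits that VMX operation requires to be fixed to 1 or 0. The masks are
 * derived from the MSR_VMX_CR{0,4}_FIXED{0,1} MSRs in vmx_init().
 */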
353u_long
354vmx_fix_cr0(u_long cr0)
355{
356
357	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
358}
359
360u_long
361vmx_fix_cr4(u_long cr4)
362{
363
364	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
365}
366
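/*
 * Initialize the guest MSR save area that is later registered with the VMCS
 * via vmcs_set_msr_save(). Currently only MSR_KGSBASE is saved and restored
 * through this mechanism.
 */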
367static void
368msr_save_area_init(struct msr_entry *g_area, int *g_count)
369{
370	int cnt;
371
372	static struct msr_entry guest_msrs[] = {
373		{ MSR_KGSBASE, 0, 0 },
374	};
375
376	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
377	if (cnt > GUEST_MSR_MAX_ENTRIES)
378		panic("guest msr save area overrun");
379	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
380	*g_count = cnt;
381}
382
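/*
 * Per-cpu teardown, executed via smp_rendezvous() from vmx_cleanup(): if this
 * cpu entered VMX operation, flush VPID- and EPT-tagged TLB entries and
 * execute VMXOFF, then clear CR4.VMXE.
 */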
383static void
384vmx_disable(void *arg __unused)
385{
386	struct invvpid_desc invvpid_desc = { 0 };
387	struct invept_desc invept_desc = { 0 };
388
389	if (vmxon_enabled[curcpu]) {
390		/*
391		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
392		 *
393		 * VMXON and VMXOFF are not required to invalidate any TLB
394		 * caching structures, so cached translations could be retained
395		 * across distinct VMX episodes. Invalidate them here explicitly.
396		 */
397		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
398		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
399		vmxoff();
400	}
401	load_cr4(rcr4() & ~CR4_VMXE);
402}
403
404static int
405vmx_cleanup(void)
406{
407
408	smp_rendezvous(NULL, vmx_disable, NULL, NULL);
409
410	return (0);
411}
412
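/*
 * Per-cpu setup, executed via smp_rendezvous() from vmx_init(): set CR4.VMXE,
 * stamp the VMXON region with the VMX revision id and enter VMX root
 * operation via vmxon().
 */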
413static void
414vmx_enable(void *arg __unused)
415{
416	int error;
417
418	load_cr4(rcr4() | CR4_VMXE);
419
420	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
421	error = vmxon(vmxon_region[curcpu]);
422	if (error == 0)
423		vmxon_enabled[curcpu] = 1;
424}
425
426static int
427vmx_init(void)
428{
429	int error;
430	uint64_t fixed0, fixed1, feature_control;
431	uint32_t tmp;
432
433	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
434	if (!(cpu_feature2 & CPUID2_VMX)) {
435		printf("vmx_init: processor does not support VMX operation\n");
436		return (ENXIO);
437	}
438
439	/*
440	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
441	 * are set (bits 0 and 2 respectively).
442	 */
443	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
444	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
445	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
446		printf("vmx_init: VMX operation disabled by BIOS\n");
447		return (ENXIO);
448	}
449
450	/* Check support for primary processor-based VM-execution controls */
451	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
452			       MSR_VMX_TRUE_PROCBASED_CTLS,
453			       PROCBASED_CTLS_ONE_SETTING,
454			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
455	if (error) {
456		printf("vmx_init: processor does not support desired primary "
457		       "processor-based controls\n");
458		return (error);
459	}
460
461	/* Clear the processor-based ctl bits that are set on demand */
462	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
463
464	/* Check support for secondary processor-based VM-execution controls */
465	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
466			       MSR_VMX_PROCBASED_CTLS2,
467			       PROCBASED_CTLS2_ONE_SETTING,
468			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
469	if (error) {
470		printf("vmx_init: processor does not support desired secondary "
471		       "processor-based controls\n");
472		return (error);
473	}
474
475	/* Check support for VPID */
476	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
477			       PROCBASED2_ENABLE_VPID, 0, &tmp);
478	if (error == 0)
479		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
480
481	/* Check support for pin-based VM-execution controls */
482	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
483			       MSR_VMX_TRUE_PINBASED_CTLS,
484			       PINBASED_CTLS_ONE_SETTING,
485			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
486	if (error) {
487		printf("vmx_init: processor does not support desired "
488		       "pin-based controls\n");
489		return (error);
490	}
491
492	/* Check support for VM-exit controls */
493	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
494			       VM_EXIT_CTLS_ONE_SETTING,
495			       VM_EXIT_CTLS_ZERO_SETTING,
496			       &exit_ctls);
497	if (error) {
498		/* Try again without the PAT MSR bits */
499		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
500				       MSR_VMX_TRUE_EXIT_CTLS,
501				       VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
502				       VM_EXIT_CTLS_ZERO_SETTING,
503				       &exit_ctls);
504		if (error) {
505			printf("vmx_init: processor does not support desired "
506			       "exit controls\n");
507			return (error);
508		} else {
509			if (bootverbose)
510				printf("vmm: PAT MSR access not supported\n");
511			guest_msr_valid(MSR_PAT);
512			vmx_no_patmsr = 1;
513		}
514	}
515
516	/* Check support for VM-entry controls */
517	if (!vmx_no_patmsr) {
518		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
519				       MSR_VMX_TRUE_ENTRY_CTLS,
520				       VM_ENTRY_CTLS_ONE_SETTING,
521				       VM_ENTRY_CTLS_ZERO_SETTING,
522				       &entry_ctls);
523	} else {
524		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
525				       MSR_VMX_TRUE_ENTRY_CTLS,
526				       VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
527				       VM_ENTRY_CTLS_ZERO_SETTING,
528				       &entry_ctls);
529	}
530
531	if (error) {
532		printf("vmx_init: processor does not support desired "
533		       "entry controls\n");
534		return (error);
535	}
536
537	/*
538	 * Check support for optional features by testing them
539	 * as individual bits
540	 */
541	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
542					MSR_VMX_TRUE_PROCBASED_CTLS,
543					PROCBASED_HLT_EXITING, 0,
544					&tmp) == 0);
545
546	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
547					MSR_VMX_PROCBASED_CTLS,
548					PROCBASED_MTF, 0,
549					&tmp) == 0);
550
551	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
552					 MSR_VMX_TRUE_PROCBASED_CTLS,
553					 PROCBASED_PAUSE_EXITING, 0,
554					 &tmp) == 0);
555
556	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
557					MSR_VMX_PROCBASED_CTLS2,
558					PROCBASED2_UNRESTRICTED_GUEST, 0,
559				        &tmp) == 0);
560
561	/* Initialize EPT */
562	error = ept_init();
563	if (error) {
564		printf("vmx_init: ept initialization failed (%d)\n", error);
565		return (error);
566	}
567
568	/*
569	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
570	 */
571	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
572	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
573	cr0_ones_mask = fixed0 & fixed1;
574	cr0_zeros_mask = ~fixed0 & ~fixed1;
575
576	/*
577	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
578	 * if unrestricted guest execution is allowed.
579	 */
580	if (cap_unrestricted_guest)
581		cr0_ones_mask &= ~(CR0_PG | CR0_PE);
582
583	/*
584	 * Do not allow the guest to set CR0_NW or CR0_CD.
585	 */
586	cr0_zeros_mask |= (CR0_NW | CR0_CD);
587
588	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
589	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
590	cr4_ones_mask = fixed0 & fixed1;
591	cr4_zeros_mask = ~fixed0 & ~fixed1;
592
593	/* enable VMX operation */
594	smp_rendezvous(NULL, vmx_enable, NULL, NULL);
595
596	return (0);
597}
598
599/*
600 * If this processor does not support VPIDs then simply return 0.
601 *
602 * Otherwise generate the next value of VPID to use. Any non-zero
603 * value is acceptable.
604 *
605 * We always execute in VMX non-root context with EPT enabled. Thus all
606 * combined mappings are tagged with the (EP4TA, VPID, PCID) tuple. This
607 * in turn means that multiple VMs can share the same VPID as long as
608 * they have distinct EPT page tables.
609 *
610 * XXX
611 * We should optimize this so that it returns VPIDs that are not in
612 * use. Then we will not unnecessarily invalidate mappings in
613 * vmx_set_pcpu_defaults() just because two or more vcpus happen to
614 * use the same 'vpid'.
615 */
616static uint16_t
617vmx_vpid(void)
618{
619	uint16_t vpid = 0;
620
621	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) != 0) {
622		do {
623			vpid = atomic_fetchadd_int(&nextvpid, 1);
624		} while (vpid == 0);
625	}
626
627	return (vpid);
628}
629
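/*
 * Program the CR0/CR4 guest/host mask and read shadow in the VMCS so that
 * guest modifications of the bits covered by the ones/zeros masks are
 * intercepted, while guest reads of those bits return the shadow value.
 */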
630static int
631vmx_setup_cr_shadow(int which, struct vmcs *vmcs)
632{
633	int error, mask_ident, shadow_ident;
634	uint64_t mask_value, shadow_value;
635
636	if (which != 0 && which != 4)
637		panic("vmx_setup_cr_shadow: unknown cr%d", which);
638
639	if (which == 0) {
640		mask_ident = VMCS_CR0_MASK;
641		mask_value = cr0_ones_mask | cr0_zeros_mask;
642		shadow_ident = VMCS_CR0_SHADOW;
643		shadow_value = cr0_ones_mask;
644	} else {
645		mask_ident = VMCS_CR4_MASK;
646		mask_value = cr4_ones_mask | cr4_zeros_mask;
647		shadow_ident = VMCS_CR4_SHADOW;
648		shadow_value = cr4_ones_mask;
649	}
650
651	error = vmcs_setreg(vmcs, VMCS_IDENT(mask_ident), mask_value);
652	if (error)
653		return (error);
654
655	error = vmcs_setreg(vmcs, VMCS_IDENT(shadow_ident), shadow_value);
656	if (error)
657		return (error);
658
659	return (0);
660}
661#define	vmx_setup_cr0_shadow(vmcs)	vmx_setup_cr_shadow(0, (vmcs))
662#define	vmx_setup_cr4_shadow(vmcs)	vmx_setup_cr_shadow(4, (vmcs))
663
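/*
 * Allocate and initialize the per-VM state. The 'struct vmx', which must be
 * page aligned, holds the per-vcpu VMCSs and register contexts, the MSR
 * bitmap and the EPT PML4 table; each vcpu's VMCS is programmed with the
 * execution controls computed in vmx_init().
 */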
664static void *
665vmx_vminit(struct vm *vm)
666{
667	uint16_t vpid;
668	int i, error, guest_msr_count;
669	struct vmx *vmx;
670
671	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
672	if ((uintptr_t)vmx & PAGE_MASK) {
673		panic("malloc of struct vmx not aligned on %d byte boundary",
674		      PAGE_SIZE);
675	}
676	vmx->vm = vm;
677
678	/*
679	 * Clean up EPTP-tagged guest physical and combined mappings
680	 *
681	 * VMX transitions are not required to invalidate any guest physical
682	 * mappings. So, it may be possible for stale guest physical mappings
683	 * to be present in the processor TLBs.
684	 *
685	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
686	 */
687	ept_invalidate_mappings(vtophys(vmx->pml4ept));
688
689	msr_bitmap_initialize(vmx->msr_bitmap);
690
691	/*
692	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
693	 * The guest FSBASE and GSBASE are saved and restored during
694	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
695	 * always restored from the vmcs host state area on vm-exit.
696	 *
697	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
698	 * how they are saved/restored, so they can be directly accessed by the
699	 * guest.
700	 *
701	 * Guest KGSBASE is saved and restored in the guest MSR save area.
702	 * Host KGSBASE is restored before returning to userland from the pcb.
703	 * There will be a window of time when we are executing in the host
704	 * kernel context with a value of KGSBASE from the guest. This is ok
705	 * because the value of KGSBASE is inconsequential in kernel context.
706	 *
707	 * MSR_EFER is saved and restored in the guest VMCS area on a
708	 * VM exit and entry respectively. It is also restored from the
709	 * host VMCS area on a VM exit.
710	 */
711	if (guest_msr_rw(vmx, MSR_GSBASE) ||
712	    guest_msr_rw(vmx, MSR_FSBASE) ||
713	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
714	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
715	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
716	    guest_msr_rw(vmx, MSR_KGSBASE) ||
717	    guest_msr_rw(vmx, MSR_EFER))
718		panic("vmx_vminit: error setting guest msr access");
719
720	/*
721	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
722	 * and entry respectively. It is also restored from the host VMCS
723	 * area on a VM exit. However, if running on a system with no
724	 * MSR_PAT save/restore support, leave access disabled so accesses
725	 * will be trapped.
726	 */
727	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
728		panic("vmx_vminit: error setting guest pat msr access");
729
730	for (i = 0; i < VM_MAXCPU; i++) {
731		vmx->vmcs[i].identifier = vmx_revision();
732		error = vmclear(&vmx->vmcs[i]);
733		if (error != 0) {
734			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
735			      error, i);
736		}
737
738		vpid = vmx_vpid();
739
740		error = vmcs_set_defaults(&vmx->vmcs[i],
741					  (u_long)vmx_longjmp,
742					  (u_long)&vmx->ctx[i],
743					  vtophys(vmx->pml4ept),
744					  pinbased_ctls,
745					  procbased_ctls,
746					  procbased_ctls2,
747					  exit_ctls, entry_ctls,
748					  vtophys(vmx->msr_bitmap),
749					  vpid);
750
751		if (error != 0)
752			panic("vmx_vminit: vmcs_set_defaults error %d", error);
753
754		vmx->cap[i].set = 0;
755		vmx->cap[i].proc_ctls = procbased_ctls;
756
757		vmx->state[i].lastcpu = -1;
758		vmx->state[i].vpid = vpid;
759
760		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
761
762		error = vmcs_set_msr_save(&vmx->vmcs[i],
763					  vtophys(vmx->guest_msrs[i]),
764					  guest_msr_count);
765		if (error != 0)
766			panic("vmcs_set_msr_save error %d", error);
767
768		error = vmx_setup_cr0_shadow(&vmx->vmcs[i]);
769		if (error != 0)
770			panic("vmx_setup_cr0_shadow %d", error);
771
772		error = vmx_setup_cr4_shadow(&vmx->vmcs[i]);
773		if (error != 0)
774			panic("vmx_setup_cr4_shadow %d", error);
775	}
776
777	return (vmx);
778}
779
780static int
781vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
782{
783	int handled, func;
784
785	func = vmxctx->guest_rax;
786
787	handled = x86_emulate_cpuid(vm, vcpu,
788				    (uint32_t*)(&vmxctx->guest_rax),
789				    (uint32_t*)(&vmxctx->guest_rbx),
790				    (uint32_t*)(&vmxctx->guest_rcx),
791				    (uint32_t*)(&vmxctx->guest_rdx));
792	return (handled);
793}
794
795static __inline void
796vmx_run_trace(struct vmx *vmx, int vcpu)
797{
798#ifdef KTR
799	VMM_CTR1(vmx->vm, vcpu, "Resume execution at 0x%0lx", vmcs_guest_rip());
800#endif
801}
802
803static __inline void
804vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
805	       int handled)
806{
807#ifdef KTR
808	VMM_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
809		 handled ? "handled" : "unhandled",
810		 exit_reason_to_str(exit_reason), rip);
811#endif
812}
813
814static __inline void
815vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
816{
817#ifdef KTR
818	VMM_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
819#endif
820}
821
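/*
 * Refresh per-cpu state in the VMCS when a vcpu runs on a host cpu other
 * than the one it last ran on: reload the host TR, GDTR and GS bases and
 * invalidate any stale mappings tagged with the vcpu's VPID.
 */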
822static int
823vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
824{
825	int error, lastcpu;
826	struct vmxstate *vmxstate;
827	struct invvpid_desc invvpid_desc = { 0 };
828
829	vmxstate = &vmx->state[vcpu];
830	lastcpu = vmxstate->lastcpu;
831	vmxstate->lastcpu = curcpu;
832
833	if (lastcpu == curcpu) {
834		error = 0;
835		goto done;
836	}
837
838	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
839
840	error = vmwrite(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
841	if (error != 0)
842		goto done;
843
844	error = vmwrite(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
845	if (error != 0)
846		goto done;
847
848	error = vmwrite(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
849	if (error != 0)
850		goto done;
851
852	/*
853	 * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
854	 *
855	 * We do this because this vcpu was executing on a different host
856	 * cpu when it last ran. We do not track whether it invalidated
857	 * mappings associated with its 'vpid' during that run. So we must
858	 * assume that the mappings associated with 'vpid' on 'curcpu' are
859	 * stale and invalidate them.
860	 *
861	 * Note that we incur this penalty only when the scheduler chooses to
862	 * move the thread associated with this vcpu between host cpus.
863	 *
864	 * Note also that this will invalidate mappings tagged with 'vpid'
865	 * for "all" EP4TAs.
866	 */
867	if (vmxstate->vpid != 0) {
868		invvpid_desc.vpid = vmxstate->vpid;
869		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
870	}
871done:
872	return (error);
873}
874
875static void
876vm_exit_update_rip(struct vm_exit *vmexit)
877{
878	int error;
879
880	error = vmwrite(VMCS_GUEST_RIP, vmexit->rip + vmexit->inst_length);
881	if (error)
882		panic("vmx_run: error %d writing to VMCS_GUEST_RIP", error);
883}
884
885/*
886 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
887 */
888CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
889
890static void __inline
891vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
892{
893	int error;
894
895	vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
896
897	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
898	if (error)
899		panic("vmx_set_int_window_exiting: vmwrite error %d", error);
900}
901
902static void __inline
903vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
904{
905	int error;
906
907	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
908
909	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
910	if (error)
911		panic("vmx_clear_int_window_exiting: vmwrite error %d", error);
912}
913
914static void __inline
915vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
916{
917	int error;
918
919	vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
920
921	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
922	if (error)
923		panic("vmx_set_nmi_window_exiting: vmwrite error %d", error);
924}
925
926static void __inline
927vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
928{
929	int error;
930
931	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
932
933	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
934	if (error)
935		panic("vmx_clear_nmi_window_exiting: vmwrite error %d", error);
936}
937
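/*
 * Try to inject a pending NMI. Returns 1 if the NMI was injected or if
 * NMI-window exiting was enabled because injection is currently blocked,
 * and 0 if no NMI is pending.
 */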
938static int
939vmx_inject_nmi(struct vmx *vmx, int vcpu)
940{
941	int error;
942	uint64_t info, interruptibility;
943
944	/* Bail out if no NMI requested */
945	if (!vm_nmi_pending(vmx->vm, vcpu))
946		return (0);
947
948	error = vmread(VMCS_GUEST_INTERRUPTIBILITY, &interruptibility);
949	if (error) {
950		panic("vmx_inject_nmi: vmread(interruptibility) %d",
951			error);
952	}
953	if (interruptibility & nmi_blocking_bits)
954		goto nmiblocked;
955
956	/*
957	 * Inject the virtual NMI. The vector must be the NMI IDT entry
958	 * or the VMCS entry check will fail.
959	 */
960	info = VMCS_INTERRUPTION_INFO_NMI | VMCS_INTERRUPTION_INFO_VALID;
961	info |= IDT_NMI;
962
963	error = vmwrite(VMCS_ENTRY_INTR_INFO, info);
964	if (error)
965		panic("vmx_inject_nmi: vmwrite(intrinfo) %d", error);
966
967	VMM_CTR0(vmx->vm, vcpu, "Injecting vNMI");
968
969	/* Clear the request */
970	vm_nmi_clear(vmx->vm, vcpu);
971	return (1);
972
973nmiblocked:
974	/*
975	 * Set the NMI Window Exiting execution control so we can inject
976	 * the virtual NMI as soon as the blocking condition goes away.
977	 */
978	vmx_set_nmi_window_exiting(vmx, vcpu);
979
980	VMM_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
981	return (1);
982}
983
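/*
 * Inject pending events before resuming the guest. A pending NMI takes
 * priority; otherwise the highest priority vector is taken from the virtual
 * local APIC, subject to RFLAGS.IF and the guest's interruptibility state.
 */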
984static void
985vmx_inject_interrupts(struct vmx *vmx, int vcpu)
986{
987	int error, vector;
988	uint64_t info, rflags, interruptibility;
989
990	const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
991				   VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;
992
993	/*
994	 * If there is already an interrupt pending then just return.
995	 *
996	 * This could happen if an interrupt was injected on a prior
997	 * VM entry but the actual entry into guest mode was aborted
998	 * because of a pending AST.
999	 */
1000	error = vmread(VMCS_ENTRY_INTR_INFO, &info);
1001	if (error)
1002		panic("vmx_inject_interrupts: vmread(intrinfo) %d", error);
1003	if (info & VMCS_INTERRUPTION_INFO_VALID)
1004		return;
1005
1006	/*
1007	 * NMI injection has priority so deal with those first
1008	 */
1009	if (vmx_inject_nmi(vmx, vcpu))
1010		return;
1011
1012	/* Ask the local apic for a vector to inject */
1013	vector = lapic_pending_intr(vmx->vm, vcpu);
1014	if (vector < 0)
1015		return;
1016
1017	if (vector < 32 || vector > 255)
1018		panic("vmx_inject_interrupts: invalid vector %d\n", vector);
1019
1020	/* Check RFLAGS.IF and the interruptibility state of the guest */
1021	error = vmread(VMCS_GUEST_RFLAGS, &rflags);
1022	if (error)
1023		panic("vmx_inject_interrupts: vmread(rflags) %d", error);
1024
1025	if ((rflags & PSL_I) == 0)
1026		goto cantinject;
1027
1028	error = vmread(VMCS_GUEST_INTERRUPTIBILITY, &interruptibility);
1029	if (error) {
1030		panic("vmx_inject_interrupts: vmread(interruptibility) %d",
1031			error);
1032	}
1033	if (interruptibility & HWINTR_BLOCKED)
1034		goto cantinject;
1035
1036	/* Inject the interrupt */
1037	info = VMCS_INTERRUPTION_INFO_HW_INTR | VMCS_INTERRUPTION_INFO_VALID;
1038	info |= vector;
1039	error = vmwrite(VMCS_ENTRY_INTR_INFO, info);
1040	if (error)
1041		panic("vmx_inject_interrupts: vmwrite(intrinfo) %d", error);
1042
1043	/* Update the Local APIC ISR */
1044	lapic_intr_accepted(vmx->vm, vcpu, vector);
1045
1046	VMM_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1047
1048	return;
1049
1050cantinject:
1051	/*
1052	 * Set the Interrupt Window Exiting execution control so we can inject
1053	 * the interrupt as soon as the blocking condition goes away.
1054	 */
1055	vmx_set_int_window_exiting(vmx, vcpu);
1056
1057	VMM_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1058}
1059
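/*
 * Emulate a 'mov to %cr0/%cr4' exit: fetch the source register value encoded
 * in the exit qualification, apply the fixed-bit masks and write the result
 * into the guest CR field of the VMCS.
 */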
1060static int
1061vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1062{
1063	int error, cr, vmcs_guest_cr;
1064	uint64_t regval, ones_mask, zeros_mask;
1065	const struct vmxctx *vmxctx;
1066
1067	/* We only handle mov to %cr0 or %cr4 at this time */
1068	if ((exitqual & 0xf0) != 0x00)
1069		return (UNHANDLED);
1070
1071	cr = exitqual & 0xf;
1072	if (cr != 0 && cr != 4)
1073		return (UNHANDLED);
1074
1075	vmxctx = &vmx->ctx[vcpu];
1076
1077	/*
1078	 * We must use vmwrite() directly here because vmcs_setreg() will
1079	 * call vmclear(vmcs) as a side effect, which we certainly don't want.
1080	 */
1081	switch ((exitqual >> 8) & 0xf) {
1082	case 0:
1083		regval = vmxctx->guest_rax;
1084		break;
1085	case 1:
1086		regval = vmxctx->guest_rcx;
1087		break;
1088	case 2:
1089		regval = vmxctx->guest_rdx;
1090		break;
1091	case 3:
1092		regval = vmxctx->guest_rbx;
1093		break;
1094	case 4:
1095		error = vmread(VMCS_GUEST_RSP, &regval);
1096		if (error) {
1097			panic("vmx_emulate_cr_access: "
1098			      "error %d reading guest rsp", error);
1099		}
1100		break;
1101	case 5:
1102		regval = vmxctx->guest_rbp;
1103		break;
1104	case 6:
1105		regval = vmxctx->guest_rsi;
1106		break;
1107	case 7:
1108		regval = vmxctx->guest_rdi;
1109		break;
1110	case 8:
1111		regval = vmxctx->guest_r8;
1112		break;
1113	case 9:
1114		regval = vmxctx->guest_r9;
1115		break;
1116	case 10:
1117		regval = vmxctx->guest_r10;
1118		break;
1119	case 11:
1120		regval = vmxctx->guest_r11;
1121		break;
1122	case 12:
1123		regval = vmxctx->guest_r12;
1124		break;
1125	case 13:
1126		regval = vmxctx->guest_r13;
1127		break;
1128	case 14:
1129		regval = vmxctx->guest_r14;
1130		break;
1131	case 15:
1132		regval = vmxctx->guest_r15;
1133		break;
1134	}
1135
1136	if (cr == 0) {
1137		ones_mask = cr0_ones_mask;
1138		zeros_mask = cr0_zeros_mask;
1139		vmcs_guest_cr = VMCS_GUEST_CR0;
1140	} else {
1141		ones_mask = cr4_ones_mask;
1142		zeros_mask = cr4_zeros_mask;
1143		vmcs_guest_cr = VMCS_GUEST_CR4;
1144	}
1145	regval |= ones_mask;
1146	regval &= ~zeros_mask;
1147	error = vmwrite(vmcs_guest_cr, regval);
1148	if (error) {
1149		panic("vmx_emulate_cr_access: error %d writing cr%d",
1150		      error, cr);
1151	}
1152
1153	return (HANDLED);
1154}
1155
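/*
 * Handle an EPT violation. Data accesses to the virtual local APIC page are
 * emulated here by fetching and decoding the faulting instruction; all other
 * violations are returned as UNHANDLED so the caller can reflect them to
 * userland as a paging exit.
 */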
1156static int
1157vmx_ept_fault(struct vm *vm, int cpu,
1158	      uint64_t gla, uint64_t gpa, uint64_t rip, int inst_length,
1159	      uint64_t cr3, uint64_t ept_qual, struct vie *vie)
1160{
1161	int read, write, error;
1162
1163	/* EPT violation on an instruction fetch doesn't make sense here */
1164	if (ept_qual & EPT_VIOLATION_INST_FETCH)
1165		return (UNHANDLED);
1166
1167	/* EPT violation must be a read fault or a write fault */
1168	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1169	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1170	if ((read | write) == 0)
1171		return (UNHANDLED);
1172
1173	/*
1174	 * The EPT violation must have been caused by accessing a
1175	 * guest-physical address that is a translation of a guest-linear
1176	 * address.
1177	 */
1178	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1179	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1180		return (UNHANDLED);
1181	}
1182
1183	/* Fetch, decode and emulate the faulting instruction */
1184	if (vmm_fetch_instruction(vm, cpu, rip, inst_length, cr3, vie) != 0)
1185		return (UNHANDLED);
1186
1187	if (vmm_decode_instruction(vm, cpu, gla, vie) != 0)
1188		return (UNHANDLED);
1189
1190	/*
1191	 * Check if this is a local apic access
1192	 */
1193	if (gpa < DEFAULT_APIC_BASE || gpa >= DEFAULT_APIC_BASE + PAGE_SIZE)
1194		return (UNHANDLED);
1195
1196	error = vmm_emulate_instruction(vm, cpu, gpa, vie,
1197					lapic_mmio_read, lapic_mmio_write, 0);
1198
1199	return (error ? UNHANDLED : HANDLED);
1200}
1201
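/*
 * Process a VM exit. Returns 1 if the exit was handled in the kernel and the
 * guest can be resumed immediately, or 0 if 'vmexit' has been filled in and
 * the exit must be completed in userland.
 */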
1202static int
1203vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1204{
1205	int error, handled;
1206	struct vmcs *vmcs;
1207	struct vmxctx *vmxctx;
1208	uint32_t eax, ecx, edx;
1209	uint64_t qual, gla, gpa, cr3, intr_info;
1210
1211	handled = 0;
1212	vmcs = &vmx->vmcs[vcpu];
1213	vmxctx = &vmx->ctx[vcpu];
1214	qual = vmexit->u.vmx.exit_qualification;
1215	vmexit->exitcode = VM_EXITCODE_BOGUS;
1216
1217	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
1218
1219	switch (vmexit->u.vmx.exit_reason) {
1220	case EXIT_REASON_CR_ACCESS:
1221		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
1222		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
1223		break;
1224	case EXIT_REASON_RDMSR:
1225		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
1226		ecx = vmxctx->guest_rcx;
1227		error = emulate_rdmsr(vmx->vm, vcpu, ecx);
1228		if (error) {
1229			vmexit->exitcode = VM_EXITCODE_RDMSR;
1230			vmexit->u.msr.code = ecx;
1231		} else
1232			handled = 1;
1233		break;
1234	case EXIT_REASON_WRMSR:
1235		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
1236		eax = vmxctx->guest_rax;
1237		ecx = vmxctx->guest_rcx;
1238		edx = vmxctx->guest_rdx;
1239		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
1240					(uint64_t)edx << 32 | eax);
1241		if (error) {
1242			vmexit->exitcode = VM_EXITCODE_WRMSR;
1243			vmexit->u.msr.code = ecx;
1244			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
1245		} else
1246			handled = 1;
1247		break;
1248	case EXIT_REASON_HLT:
1249		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
1250		/*
1251		 * If there is an event waiting to be injected then there is
1252		 * no need to 'hlt'.
1253		 */
1254		error = vmread(VMCS_ENTRY_INTR_INFO, &intr_info);
1255		if (error)
1256			panic("vmx_exit_process: vmread(intrinfo) %d", error);
1257
1258		if (intr_info & VMCS_INTERRUPTION_INFO_VALID) {
1259			handled = 1;
1260			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT_IGNORED, 1);
1261		} else
1262			vmexit->exitcode = VM_EXITCODE_HLT;
1263		break;
1264	case EXIT_REASON_MTF:
1265		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
1266		vmexit->exitcode = VM_EXITCODE_MTRAP;
1267		break;
1268	case EXIT_REASON_PAUSE:
1269		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
1270		vmexit->exitcode = VM_EXITCODE_PAUSE;
1271		break;
1272	case EXIT_REASON_INTR_WINDOW:
1273		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
1274		vmx_clear_int_window_exiting(vmx, vcpu);
1275		VMM_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1276		return (1);
1277	case EXIT_REASON_EXT_INTR:
1278		/*
1279		 * External interrupts serve only to cause VM exits and allow
1280		 * the host interrupt handler to run.
1281		 *
1282		 * If this external interrupt triggers a virtual interrupt
1283		 * to a VM, then that state will be recorded by the
1284		 * host interrupt handler in the VM's softc. We will inject
1285		 * this virtual interrupt during the subsequent VM enter.
1286		 */
1287
1288		/*
1289		 * This is special. We want to treat this as a 'handled'
1290		 * VM-exit but not increment the instruction pointer.
1291		 */
1292		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
1293		return (1);
1294	case EXIT_REASON_NMI_WINDOW:
1295		/* Exit to allow the pending virtual NMI to be injected */
1296		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
1297		vmx_clear_nmi_window_exiting(vmx, vcpu);
1298		VMM_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1299		return (1);
1300	case EXIT_REASON_INOUT:
1301		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
1302		vmexit->exitcode = VM_EXITCODE_INOUT;
1303		vmexit->u.inout.bytes = (qual & 0x7) + 1;
1304		vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
1305		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
1306		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
1307		vmexit->u.inout.port = (uint16_t)(qual >> 16);
1308		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
1309		break;
1310	case EXIT_REASON_CPUID:
1311		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
1312		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
1313		break;
1314	case EXIT_REASON_EPT_FAULT:
1315		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
1316		gla = vmcs_gla();
1317		gpa = vmcs_gpa();
1318		cr3 = vmcs_guest_cr3();
1319		handled = vmx_ept_fault(vmx->vm, vcpu, gla, gpa,
1320					vmexit->rip, vmexit->inst_length,
1321					cr3, qual, &vmexit->u.paging.vie);
1322		if (!handled) {
1323			vmexit->exitcode = VM_EXITCODE_PAGING;
1324			vmexit->u.paging.gpa = gpa;
1325		}
1326		break;
1327	default:
1328		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
1329		break;
1330	}
1331
1332	if (handled) {
1333		/*
1334		 * It is possible that control is returned to userland
1335		 * even though we were able to handle the VM exit in the
1336		 * kernel.
1337		 *
1338		 * In such a case we want to make sure that the userland
1339		 * restarts guest execution at the instruction *after*
1340		 * the one we just processed. Therefore we update the
1341		 * guest rip in the VMCS and in 'vmexit'.
1342		 */
1343		vm_exit_update_rip(vmexit);
1344		vmexit->rip += vmexit->inst_length;
1345		vmexit->inst_length = 0;
1346
1347		/*
1348		 * Special case for spinning up an AP - exit to userspace to
1349		 * give the controlling process a chance to intercept and
1350		 * spin up a thread for the AP.
1351		 */
1352		if (vmexit->exitcode == VM_EXITCODE_SPINUP_AP)
1353			handled = 0;
1354	} else {
1355		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1356			/*
1357			 * If this VM exit was not claimed by anybody then
1358			 * treat it as a generic VMX exit.
1359			 */
1360			vmexit->exitcode = VM_EXITCODE_VMX;
1361			vmexit->u.vmx.error = 0;
1362		} else {
1363			/*
1364			 * The exitcode and collateral have been populated.
1365			 * The VM exit will be processed further in userland.
1366			 */
1367		}
1368	}
1369	return (handled);
1370}
1371
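/*
 * Run a vcpu: load its VMCS, inject pending events and enter the guest via
 * vmx_setjmp()/vmx_launch()/vmx_resume(). The loop continues for as long as
 * VM exits can be handled entirely in the kernel.
 */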
1372static int
1373vmx_run(void *arg, int vcpu, register_t rip)
1374{
1375	int error, vie, rc, handled, astpending;
1376	uint32_t exit_reason;
1377	struct vmx *vmx;
1378	struct vmxctx *vmxctx;
1379	struct vmcs *vmcs;
1380	struct vm_exit *vmexit;
1381
1382	vmx = arg;
1383	vmcs = &vmx->vmcs[vcpu];
1384	vmxctx = &vmx->ctx[vcpu];
1385	vmxctx->launched = 0;
1386
1387	astpending = 0;
1388	vmexit = vm_exitinfo(vmx->vm, vcpu);
1389
1390	/*
1391	 * XXX Can we avoid doing this every time we do a vm run?
1392	 */
1393	VMPTRLD(vmcs);
1394
1395	/*
1396	 * XXX
1397	 * We do this every time because we may setup the virtual machine
1398	 * from a different process than the one that actually runs it.
1399	 *
1400	 * If the life of a virtual machine was spent entirely in the context
1401	 * of a single process we could do this once in vmcs_set_defaults().
1402	 */
1403	if ((error = vmwrite(VMCS_HOST_CR3, rcr3())) != 0)
1404		panic("vmx_run: error %d writing to VMCS_HOST_CR3", error);
1405
1406	if ((error = vmwrite(VMCS_GUEST_RIP, rip)) != 0)
1407		panic("vmx_run: error %d writing to VMCS_GUEST_RIP", error);
1408
1409	if ((error = vmx_set_pcpu_defaults(vmx, vcpu)) != 0)
1410		panic("vmx_run: error %d setting up pcpu defaults", error);
1411
1412	do {
1413		lapic_timer_tick(vmx->vm, vcpu);
1414		vmx_inject_interrupts(vmx, vcpu);
1415		vmx_run_trace(vmx, vcpu);
1416		rc = vmx_setjmp(vmxctx);
1417#ifdef SETJMP_TRACE
1418		vmx_setjmp_trace(vmx, vcpu, vmxctx, rc);
1419#endif
1420		switch (rc) {
1421		case VMX_RETURN_DIRECT:
1422			if (vmxctx->launched == 0) {
1423				vmxctx->launched = 1;
1424				vmx_launch(vmxctx);
1425			} else
1426				vmx_resume(vmxctx);
1427			panic("vmx_launch/resume should not return");
1428			break;
1429		case VMX_RETURN_LONGJMP:
1430			break;			/* vm exit */
1431		case VMX_RETURN_AST:
1432			astpending = 1;
1433			break;
1434		case VMX_RETURN_VMRESUME:
1435			vie = vmcs_instruction_error();
1436			if (vmxctx->launch_error == VM_FAIL_INVALID ||
1437			    vie != VMRESUME_WITH_NON_LAUNCHED_VMCS) {
1438				printf("vmresume error %d vmcs inst error %d\n",
1439					vmxctx->launch_error, vie);
1440				goto err_exit;
1441			}
1442			vmx_launch(vmxctx);	/* try to launch the guest */
1443			panic("vmx_launch should not return");
1444			break;
1445		case VMX_RETURN_VMLAUNCH:
1446			vie = vmcs_instruction_error();
1447#if 1
1448			printf("vmlaunch error %d vmcs inst error %d\n",
1449				vmxctx->launch_error, vie);
1450#endif
1451			goto err_exit;
1452		default:
1453			panic("vmx_setjmp returned %d", rc);
1454		}
1455
1456		/* enable interrupts */
1457		enable_intr();
1458
1459		/* collect some basic information for VM exit processing */
1460		vmexit->rip = rip = vmcs_guest_rip();
1461		vmexit->inst_length = vmexit_instruction_length();
1462		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
1463		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
1464
1465		if (astpending) {
1466			handled = 1;
1467			vmexit->inst_length = 0;
1468			vmexit->exitcode = VM_EXITCODE_BOGUS;
1469			vmx_astpending_trace(vmx, vcpu, rip);
1470			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
1471			break;
1472		}
1473
1474		handled = vmx_exit_process(vmx, vcpu, vmexit);
1475		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
1476
1477	} while (handled);
1478
1479	/*
1480	 * If a VM exit has been handled then the exitcode must be BOGUS.
1481	 * If a VM exit is not handled then the exitcode must not be BOGUS.
1482	 */
1483	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
1484	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
1485		panic("Mismatch between handled (%d) and exitcode (%d)",
1486		      handled, vmexit->exitcode);
1487	}
1488
1489	if (!handled)
1490		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);
1491
1492	VMM_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d",vmexit->exitcode);
1493
1494	/*
1495	 * XXX
1496	 * We need to do this to ensure that any VMCS state cached by the
1497	 * processor is flushed to memory. We need to do this in case the
1498	 * VM moves to a different cpu the next time it runs.
1499	 *
1500	 * Can we avoid doing this?
1501	 */
1502	VMCLEAR(vmcs);
1503	return (0);
1504
1505err_exit:
1506	vmexit->exitcode = VM_EXITCODE_VMX;
1507	vmexit->u.vmx.exit_reason = (uint32_t)-1;
1508	vmexit->u.vmx.exit_qualification = (uint32_t)-1;
1509	vmexit->u.vmx.error = vie;
1510	VMCLEAR(vmcs);
1511	return (ENOEXEC);
1512}
1513
1514static void
1515vmx_vmcleanup(void *arg)
1516{
1517	int error;
1518	struct vmx *vmx = arg;
1519
1520	/*
1521	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
1522	 */
1523	error = vmclear(&vmx->vmcs[0]);
1524	if (error != 0)
1525		panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);
1526
1527	ept_vmcleanup(vmx);
1528	free(vmx, M_VMX);
1529
1530	return;
1531}
1532
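/*
 * Map a VM_REG_GUEST_* identifier to its slot in the software-saved register
 * context. Returns NULL for registers that live in the VMCS and must be
 * accessed via vmcs_getreg()/vmcs_setreg() instead.
 */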
1533static register_t *
1534vmxctx_regptr(struct vmxctx *vmxctx, int reg)
1535{
1536
1537	switch (reg) {
1538	case VM_REG_GUEST_RAX:
1539		return (&vmxctx->guest_rax);
1540	case VM_REG_GUEST_RBX:
1541		return (&vmxctx->guest_rbx);
1542	case VM_REG_GUEST_RCX:
1543		return (&vmxctx->guest_rcx);
1544	case VM_REG_GUEST_RDX:
1545		return (&vmxctx->guest_rdx);
1546	case VM_REG_GUEST_RSI:
1547		return (&vmxctx->guest_rsi);
1548	case VM_REG_GUEST_RDI:
1549		return (&vmxctx->guest_rdi);
1550	case VM_REG_GUEST_RBP:
1551		return (&vmxctx->guest_rbp);
1552	case VM_REG_GUEST_R8:
1553		return (&vmxctx->guest_r8);
1554	case VM_REG_GUEST_R9:
1555		return (&vmxctx->guest_r9);
1556	case VM_REG_GUEST_R10:
1557		return (&vmxctx->guest_r10);
1558	case VM_REG_GUEST_R11:
1559		return (&vmxctx->guest_r11);
1560	case VM_REG_GUEST_R12:
1561		return (&vmxctx->guest_r12);
1562	case VM_REG_GUEST_R13:
1563		return (&vmxctx->guest_r13);
1564	case VM_REG_GUEST_R14:
1565		return (&vmxctx->guest_r14);
1566	case VM_REG_GUEST_R15:
1567		return (&vmxctx->guest_r15);
1568	default:
1569		break;
1570	}
1571	return (NULL);
1572}
1573
1574static int
1575vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
1576{
1577	register_t *regp;
1578
1579	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1580		*retval = *regp;
1581		return (0);
1582	} else
1583		return (EINVAL);
1584}
1585
1586static int
1587vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
1588{
1589	register_t *regp;
1590
1591	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1592		*regp = val;
1593		return (0);
1594	} else
1595		return (EINVAL);
1596}
1597
1598static int
1599vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
1600{
1601	struct vmx *vmx = arg;
1602
1603	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
1604		return (0);
1605
1606	/*
1607	 * If the vcpu is running then don't mess with the VMCS.
1608	 *
1609	 * vmcs_getreg will VMCLEAR the vmcs when it is done, which will cause
1610	 * the subsequent vmlaunch/vmresume to fail.
1611	 */
1612	if (vcpu_is_running(vmx->vm, vcpu))
1613		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
1614
1615	return (vmcs_getreg(&vmx->vmcs[vcpu], reg, retval));
1616}
1617
1618static int
1619vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
1620{
1621	int error;
1622	uint64_t ctls;
1623	struct vmx *vmx = arg;
1624
1625	/*
1626	 * XXX Allow caller to set contents of the guest registers saved in
1627	 * the 'vmxctx' even though the vcpu might be running. We need this
1628	 * specifically to support the rdmsr emulation that will set the
1629	 * %eax and %edx registers during vm exit processing.
1630	 */
1631	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
1632		return (0);
1633
1634	/*
1635	 * If the vcpu is running then don't mess with the VMCS.
1636	 *
1637	 * vmcs_setreg will VMCLEAR the vmcs when it is done, which will cause
1638	 * the subsequent vmlaunch/vmresume to fail.
1639	 */
1640	if (vcpu_is_running(vmx->vm, vcpu))
1641		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
1642
1643	error = vmcs_setreg(&vmx->vmcs[vcpu], reg, val);
1644
1645	if (error == 0) {
1646		/*
1647		 * If the "load EFER" VM-entry control is 1 then the
1648		 * value of EFER.LMA must be identical to "IA-32e mode guest"
1649		 * bit in the VM-entry control.
1650		 */
1651		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
1652		    (reg == VM_REG_GUEST_EFER)) {
1653			vmcs_getreg(&vmx->vmcs[vcpu],
1654				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
1655			if (val & EFER_LMA)
1656				ctls |= VM_ENTRY_GUEST_LMA;
1657			else
1658				ctls &= ~VM_ENTRY_GUEST_LMA;
1659			vmcs_setreg(&vmx->vmcs[vcpu],
1660				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
1661		}
1662	}
1663
1664	return (error);
1665}
1666
1667static int
1668vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1669{
1670	struct vmx *vmx = arg;
1671
1672	return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
1673}
1674
1675static int
1676vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1677{
1678	struct vmx *vmx = arg;
1679
1680	return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
1681}
1682
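/*
 * Inject an arbitrary event (hardware interrupt, NMI, exception or software
 * interrupt) by writing the VM-entry interruption-information field of the
 * VMCS directly. Fails with EAGAIN if another event is already pending.
 */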
1683static int
1684vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
1685	   int code_valid)
1686{
1687	int error;
1688	uint64_t info;
1689	struct vmx *vmx = arg;
1690	struct vmcs *vmcs = &vmx->vmcs[vcpu];
1691
1692	static uint32_t type_map[VM_EVENT_MAX] = {
1693		0x1,		/* VM_EVENT_NONE */
1694		0x0,		/* VM_HW_INTR */
1695		0x2,		/* VM_NMI */
1696		0x3,		/* VM_HW_EXCEPTION */
1697		0x4,		/* VM_SW_INTR */
1698		0x5,		/* VM_PRIV_SW_EXCEPTION */
1699		0x6,		/* VM_SW_EXCEPTION */
1700	};
1701
1702	/*
1703	 * If there is already an exception pending to be delivered to the
1704	 * vcpu then just return.
1705	 */
1706	error = vmcs_getreg(vmcs, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
1707	if (error)
1708		return (error);
1709
1710	if (info & VMCS_INTERRUPTION_INFO_VALID)
1711		return (EAGAIN);
1712
1713	info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
1714	info |= VMCS_INTERRUPTION_INFO_VALID;
1715	error = vmcs_setreg(vmcs, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
1716	if (error != 0)
1717		return (error);
1718
1719	if (code_valid) {
1720		error = vmcs_setreg(vmcs,
1721				    VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
1722				    code);
1723	}
1724	return (error);
1725}
1726
1727static int
1728vmx_getcap(void *arg, int vcpu, int type, int *retval)
1729{
1730	struct vmx *vmx = arg;
1731	int vcap;
1732	int ret;
1733
1734	ret = ENOENT;
1735
1736	vcap = vmx->cap[vcpu].set;
1737
1738	switch (type) {
1739	case VM_CAP_HALT_EXIT:
1740		if (cap_halt_exit)
1741			ret = 0;
1742		break;
1743	case VM_CAP_PAUSE_EXIT:
1744		if (cap_pause_exit)
1745			ret = 0;
1746		break;
1747	case VM_CAP_MTRAP_EXIT:
1748		if (cap_monitor_trap)
1749			ret = 0;
1750		break;
1751	case VM_CAP_UNRESTRICTED_GUEST:
1752		if (cap_unrestricted_guest)
1753			ret = 0;
1754		break;
1755	default:
1756		break;
1757	}
1758
1759	if (ret == 0)
1760		*retval = (vcap & (1 << type)) ? 1 : 0;
1761
1762	return (ret);
1763}
1764
1765static int
1766vmx_setcap(void *arg, int vcpu, int type, int val)
1767{
1768	struct vmx *vmx = arg;
1769	struct vmcs *vmcs = &vmx->vmcs[vcpu];
1770	uint32_t baseval;
1771	uint32_t *pptr;
1772	int error;
1773	int flag;
1774	int reg;
1775	int retval;
1776
1777	retval = ENOENT;
1778	pptr = NULL;
1779
1780	switch (type) {
1781	case VM_CAP_HALT_EXIT:
1782		if (cap_halt_exit) {
1783			retval = 0;
1784			pptr = &vmx->cap[vcpu].proc_ctls;
1785			baseval = *pptr;
1786			flag = PROCBASED_HLT_EXITING;
1787			reg = VMCS_PRI_PROC_BASED_CTLS;
1788		}
1789		break;
1790	case VM_CAP_MTRAP_EXIT:
1791		if (cap_monitor_trap) {
1792			retval = 0;
1793			pptr = &vmx->cap[vcpu].proc_ctls;
1794			baseval = *pptr;
1795			flag = PROCBASED_MTF;
1796			reg = VMCS_PRI_PROC_BASED_CTLS;
1797		}
1798		break;
1799	case VM_CAP_PAUSE_EXIT:
1800		if (cap_pause_exit) {
1801			retval = 0;
1802			pptr = &vmx->cap[vcpu].proc_ctls;
1803			baseval = *pptr;
1804			flag = PROCBASED_PAUSE_EXITING;
1805			reg = VMCS_PRI_PROC_BASED_CTLS;
1806		}
1807		break;
1808	case VM_CAP_UNRESTRICTED_GUEST:
1809		if (cap_unrestricted_guest) {
1810			retval = 0;
1811			baseval = procbased_ctls2;
1812			flag = PROCBASED2_UNRESTRICTED_GUEST;
1813			reg = VMCS_SEC_PROC_BASED_CTLS;
1814		}
1815		break;
1816	default:
1817		break;
1818	}
1819
1820	if (retval == 0) {
1821		if (val) {
1822			baseval |= flag;
1823		} else {
1824			baseval &= ~flag;
1825		}
1826		VMPTRLD(vmcs);
1827		error = vmwrite(reg, baseval);
1828		VMCLEAR(vmcs);
1829
1830		if (error) {
1831			retval = error;
1832		} else {
1833			/*
1834			 * Update optional stored flags, and record
1835			 * setting
1836			 */
1837			if (pptr != NULL) {
1838				*pptr = baseval;
1839			}
1840
1841			if (val) {
1842				vmx->cap[vcpu].set |= (1 << type);
1843			} else {
1844				vmx->cap[vcpu].set &= ~(1 << type);
1845			}
1846		}
1847	}
1848
1849	return (retval);
1850}
1851
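/*
 * Dispatch table handed to the machine-independent vmm layer. The positional
 * initializers must match the field order of 'struct vmm_ops'.
 */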
1852struct vmm_ops vmm_ops_intel = {
1853	vmx_init,
1854	vmx_cleanup,
1855	vmx_vminit,
1856	vmx_run,
1857	vmx_vmcleanup,
1858	ept_vmmmap_set,
1859	ept_vmmmap_get,
1860	vmx_getreg,
1861	vmx_setreg,
1862	vmx_getdesc,
1863	vmx_setdesc,
1864	vmx_inject,
1865	vmx_getcap,
1866	vmx_setcap
1867};
1868