vmx.c revision 264179
11573Srgrimes/*- 21573Srgrimes * Copyright (c) 2011 NetApp, Inc. 31573Srgrimes * All rights reserved. 41573Srgrimes * 51573Srgrimes * Redistribution and use in source and binary forms, with or without 61573Srgrimes * modification, are permitted provided that the following conditions 71573Srgrimes * are met: 81573Srgrimes * 1. Redistributions of source code must retain the above copyright 91573Srgrimes * notice, this list of conditions and the following disclaimer. 101573Srgrimes * 2. Redistributions in binary form must reproduce the above copyright 111573Srgrimes * notice, this list of conditions and the following disclaimer in the 121573Srgrimes * documentation and/or other materials provided with the distribution. 131573Srgrimes * 141573Srgrimes * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 151573Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16148834Sstefanf * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 171573Srgrimes * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 181573Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 191573Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 201573Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 211573Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 221573Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 231573Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 241573Srgrimes * SUCH DAMAGE. 251573Srgrimes * 261573Srgrimes * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 264179 2014-04-05 22:43:23Z imp $ 271573Srgrimes */ 281573Srgrimes 291573Srgrimes#include <sys/cdefs.h> 301573Srgrimes__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 264179 2014-04-05 22:43:23Z imp $"); 311573Srgrimes 321573Srgrimes#include <sys/param.h> 3384260Sobrien#include <sys/systm.h> 341573Srgrimes#include <sys/smp.h> 3584260Sobrien#include <sys/kernel.h> 3684260Sobrien#include <sys/malloc.h> 371573Srgrimes#include <sys/pcpu.h> 381573Srgrimes#include <sys/proc.h> 391573Srgrimes#include <sys/sysctl.h> 401573Srgrimes 411573Srgrimes#include <vm/vm.h> 42268502Spfg#include <vm/pmap.h> 4384260Sobrien 441573Srgrimes#include <machine/psl.h> 451573Srgrimes#include <machine/cpufunc.h> 461573Srgrimes#include <machine/md_var.h> 471573Srgrimes#include <machine/segments.h> 481573Srgrimes#include <machine/smp.h> 491573Srgrimes#include <machine/specialreg.h> 501573Srgrimes#include <machine/vmparam.h> 511573Srgrimes 521573Srgrimes#include <machine/vmm.h> 531573Srgrimes#include <machine/vmm_dev.h> 541573Srgrimes#include "vmm_host.h" 551573Srgrimes#include "vmm_ioport.h" 561573Srgrimes#include "vmm_ipi.h" 571573Srgrimes#include "vmm_msr.h" 581573Srgrimes#include "vmm_ktr.h" 591573Srgrimes#include "vmm_stat.h" 601573Srgrimes#include "vatpic.h" 61148834Sstefanf#include "vlapic.h" 621573Srgrimes#include "vlapic_priv.h" 63148834Sstefanf 6484260Sobrien#include "vmx_msr.h" 6584260Sobrien#include "ept.h" 6684260Sobrien#include "vmx_cpufunc.h" 6784260Sobrien#include "vmx.h" 681573Srgrimes#include "x86.h" 6984260Sobrien#include "vmx_controls.h" 701573Srgrimes 71268502Spfg#define PINBASED_CTLS_ONE_SETTING \ 72148834Sstefanf (PINBASED_EXTINT_EXITING | \ 7384260Sobrien PINBASED_NMI_EXITING | \ 7484260Sobrien PINBASED_VIRTUAL_NMI) 751573Srgrimes#define PINBASED_CTLS_ZERO_SETTING 0 761573Srgrimes 771573Srgrimes#define PROCBASED_CTLS_WINDOW_SETTING \ 7884260Sobrien (PROCBASED_INT_WINDOW_EXITING | \ 791573Srgrimes PROCBASED_NMI_WINDOW_EXITING) 80148834Sstefanf 811573Srgrimes#define PROCBASED_CTLS_ONE_SETTING \ 821573Srgrimes (PROCBASED_SECONDARY_CONTROLS | \ 831573Srgrimes PROCBASED_IO_EXITING | \ 8484260Sobrien PROCBASED_MSR_BITMAPS | \ 851573Srgrimes PROCBASED_CTLS_WINDOW_SETTING) 8684260Sobrien#define PROCBASED_CTLS_ZERO_SETTING \ 8784260Sobrien (PROCBASED_CR3_LOAD_EXITING | \ 8884260Sobrien PROCBASED_CR3_STORE_EXITING | \ 8984260Sobrien PROCBASED_IO_BITMAPS) 9084260Sobrien 911573Srgrimes#define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT 9284260Sobrien#define PROCBASED_CTLS2_ZERO_SETTING 0 9384260Sobrien 9484260Sobrien#define VM_EXIT_CTLS_ONE_SETTING_NO_PAT \ 95148834Sstefanf (VM_EXIT_HOST_LMA | \ 96148834Sstefanf VM_EXIT_SAVE_EFER | \ 9784260Sobrien VM_EXIT_LOAD_EFER) 9884260Sobrien 991573Srgrimes#define VM_EXIT_CTLS_ONE_SETTING \ 10084260Sobrien (VM_EXIT_CTLS_ONE_SETTING_NO_PAT | \ 10184260Sobrien VM_EXIT_ACKNOWLEDGE_INTERRUPT | \ 10284260Sobrien VM_EXIT_SAVE_PAT | \ 10384260Sobrien VM_EXIT_LOAD_PAT) 10484260Sobrien#define VM_EXIT_CTLS_ZERO_SETTING VM_EXIT_SAVE_DEBUG_CONTROLS 10584260Sobrien 10684260Sobrien#define VM_ENTRY_CTLS_ONE_SETTING_NO_PAT VM_ENTRY_LOAD_EFER 10784260Sobrien 10884260Sobrien#define VM_ENTRY_CTLS_ONE_SETTING \ 10984260Sobrien (VM_ENTRY_CTLS_ONE_SETTING_NO_PAT | \ 1101573Srgrimes VM_ENTRY_LOAD_PAT) 1111573Srgrimes#define VM_ENTRY_CTLS_ZERO_SETTING \ 11284260Sobrien (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 11384260Sobrien VM_ENTRY_INTO_SMM | \ 1141573Srgrimes VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 1151573Srgrimes 1161573Srgrimes#define guest_msr_rw(vmx, msr) \ 11784260Sobrien msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW) 1181573Srgrimes 119148834Sstefanf#define guest_msr_ro(vmx, msr) \ 12084260Sobrien msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ) 12184260Sobrien 12284260Sobrien#define HANDLED 1 123148834Sstefanf#define UNHANDLED 0 124148834Sstefanf 125148834Sstefanfstatic MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 126148834Sstefanfstatic MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 12784260Sobrien 12884260SobrienSYSCTL_DECL(_hw_vmm); 1291573SrgrimesSYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL); 13084260Sobrien 13184260Sobrienint vmxon_enabled[MAXCPU]; 13284260Sobrienstatic char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 13384260Sobrien 1341573Srgrimesstatic uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 13584260Sobrienstatic uint32_t exit_ctls, entry_ctls; 13684260Sobrien 13784260Sobrienstatic uint64_t cr0_ones_mask, cr0_zeros_mask; 1381573SrgrimesSYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 13984260Sobrien &cr0_ones_mask, 0, NULL); 1401573SrgrimesSYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 14184260Sobrien &cr0_zeros_mask, 0, NULL); 14284260Sobrien 1431573Srgrimesstatic uint64_t cr4_ones_mask, cr4_zeros_mask; 14484260SobrienSYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 14584260Sobrien &cr4_ones_mask, 0, NULL); 146268502SpfgSYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 1471573Srgrimes &cr4_zeros_mask, 0, NULL); 14884260Sobrien 14984260Sobrienstatic int vmx_no_patmsr; 1501573Srgrimes 15184260Sobrienstatic int vmx_initialized; 15284260SobrienSYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 1531573Srgrimes &vmx_initialized, 0, "Intel VMX initialized"); 15484260Sobrien 15584260Sobrien/* 1561573Srgrimes * Optional capabilities 15784260Sobrien */ 15884260Sobrienstatic int cap_halt_exit; 15984260Sobrienstatic int cap_pause_exit; 16084260Sobrienstatic int cap_unrestricted_guest; 16184260Sobrienstatic int cap_monitor_trap; 16284260Sobrienstatic int cap_invpcid; 1631573Srgrimes 16484260Sobrienstatic int virtual_interrupt_delivery; 16584260SobrienSYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD, 16684260Sobrien &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support"); 16784260Sobrien 1681573Srgrimesstatic int posted_interrupts; 16984260SobrienSYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD, 170148834Sstefanf &posted_interrupts, 0, "APICv posted interrupt support"); 1711573Srgrimes 172148834Sstefanfstatic int pirvec; 1731573SrgrimesSYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 174148834Sstefanf &pirvec, 0, "APICv posted interrupt vector"); 175148834Sstefanf 176148834Sstefanfstatic struct unrhdr *vpid_unr; 177148834Sstefanfstatic u_int vpid_alloc_failed; 178148834SstefanfSYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 179148834Sstefanf &vpid_alloc_failed, 0, NULL); 180148834Sstefanf 181148834Sstefanf/* 182148834Sstefanf * Use the last page below 4GB as the APIC access address. This address is 183148834Sstefanf * occupied by the boot firmware so it is guaranteed that it will not conflict 184148834Sstefanf * with a page in system memory. 185148834Sstefanf */ 186268502Spfg#define APIC_ACCESS_ADDRESS 0xFFFFF000 187148834Sstefanf 188148834Sstefanfstatic void vmx_inject_pir(struct vlapic *vlapic); 189148834Sstefanf 190148834Sstefanf#ifdef KTR 19184260Sobrienstatic const char * 19284260Sobrienexit_reason_to_str(int reason) 1931573Srgrimes{ 194148834Sstefanf static char reasonbuf[32]; 195148834Sstefanf 196148834Sstefanf switch (reason) { 197148834Sstefanf case EXIT_REASON_EXCEPTION: 198148834Sstefanf return "exception"; 199148834Sstefanf case EXIT_REASON_EXT_INTR: 200148834Sstefanf return "extint"; 201148834Sstefanf case EXIT_REASON_TRIPLE_FAULT: 202148834Sstefanf return "triplefault"; 203148834Sstefanf case EXIT_REASON_INIT: 204148834Sstefanf return "init"; 20584260Sobrien case EXIT_REASON_SIPI: 20684260Sobrien return "sipi"; 20784260Sobrien case EXIT_REASON_IO_SMI: 20884260Sobrien return "iosmi"; 20984260Sobrien case EXIT_REASON_SMI: 21084260Sobrien return "smi"; 21184260Sobrien case EXIT_REASON_INTR_WINDOW: 212148834Sstefanf return "intrwindow"; 21384260Sobrien case EXIT_REASON_NMI_WINDOW: 21484260Sobrien return "nmiwindow"; 21584260Sobrien case EXIT_REASON_TASK_SWITCH: 21684260Sobrien return "taskswitch"; 21784260Sobrien case EXIT_REASON_CPUID: 21884260Sobrien return "cpuid"; 21984260Sobrien case EXIT_REASON_GETSEC: 22084260Sobrien return "getsec"; 22184260Sobrien case EXIT_REASON_HLT: 2221573Srgrimes return "hlt"; 22384260Sobrien case EXIT_REASON_INVD: 22484260Sobrien return "invd"; 225148834Sstefanf case EXIT_REASON_INVLPG: 226148834Sstefanf return "invlpg"; 227148834Sstefanf case EXIT_REASON_RDPMC: 228148834Sstefanf return "rdpmc"; 229148834Sstefanf case EXIT_REASON_RDTSC: 230148834Sstefanf return "rdtsc"; 231148834Sstefanf case EXIT_REASON_RSM: 232148834Sstefanf return "rsm"; 233148834Sstefanf case EXIT_REASON_VMCALL: 234148834Sstefanf return "vmcall"; 235148834Sstefanf case EXIT_REASON_VMCLEAR: 236148834Sstefanf return "vmclear"; 2371573Srgrimes case EXIT_REASON_VMLAUNCH: 23884260Sobrien return "vmlaunch"; 23984260Sobrien case EXIT_REASON_VMPTRLD: 24026927Smsmith return "vmptrld"; 24184260Sobrien case EXIT_REASON_VMPTRST: 24284260Sobrien return "vmptrst"; 24384260Sobrien case EXIT_REASON_VMREAD: 24484260Sobrien return "vmread"; 24584260Sobrien case EXIT_REASON_VMRESUME: 24684260Sobrien return "vmresume"; 24784260Sobrien case EXIT_REASON_VMWRITE: 24826927Smsmith return "vmwrite"; 24984260Sobrien case EXIT_REASON_VMXOFF: 25084260Sobrien return "vmxoff"; 25184260Sobrien case EXIT_REASON_VMXON: 25284260Sobrien return "vmxon"; 25384260Sobrien case EXIT_REASON_CR_ACCESS: 25484260Sobrien return "craccess"; 25526927Smsmith case EXIT_REASON_DR_ACCESS: 25684260Sobrien return "draccess"; 25784260Sobrien case EXIT_REASON_INOUT: 25884260Sobrien return "inout"; 25984260Sobrien case EXIT_REASON_RDMSR: 26084260Sobrien return "rdmsr"; 26184260Sobrien case EXIT_REASON_WRMSR: 26226927Smsmith return "wrmsr"; 26384260Sobrien case EXIT_REASON_INVAL_VMCS: 26484260Sobrien return "invalvmcs"; 26584260Sobrien case EXIT_REASON_INVAL_MSR: 26684260Sobrien return "invalmsr"; 26784260Sobrien case EXIT_REASON_MWAIT: 26884260Sobrien return "mwait"; 26984260Sobrien case EXIT_REASON_MTF: 27084260Sobrien return "mtf"; 27184260Sobrien case EXIT_REASON_MONITOR: 272269107Spfg return "monitor"; 27384260Sobrien case EXIT_REASON_PAUSE: 27484260Sobrien return "pause"; 27584260Sobrien case EXIT_REASON_MCE: 27684260Sobrien return "mce"; 27784260Sobrien case EXIT_REASON_TPR: 27884260Sobrien return "tpr"; 27984260Sobrien case EXIT_REASON_APIC_ACCESS: 28084260Sobrien return "apic-access"; 28184260Sobrien case EXIT_REASON_GDTR_IDTR: 28284260Sobrien return "gdtridtr"; 28384260Sobrien case EXIT_REASON_LDTR_TR: 28484260Sobrien return "ldtrtr"; 28584260Sobrien case EXIT_REASON_EPT_FAULT: 28684260Sobrien return "eptfault"; 28784260Sobrien case EXIT_REASON_EPT_MISCONFIG: 28884260Sobrien return "eptmisconfig"; 28984260Sobrien case EXIT_REASON_INVEPT: 29084260Sobrien return "invept"; 29126927Smsmith case EXIT_REASON_RDTSCP: 2921573Srgrimes return "rdtscp"; 29384260Sobrien case EXIT_REASON_VMX_PREEMPT: 29484260Sobrien return "vmxpreempt"; 29584260Sobrien case EXIT_REASON_INVVPID: 2961573Srgrimes return "invvpid"; 29784260Sobrien case EXIT_REASON_WBINVD: 2981573Srgrimes return "wbinvd"; 299 case EXIT_REASON_XSETBV: 300 return "xsetbv"; 301 case EXIT_REASON_APIC_WRITE: 302 return "apic-write"; 303 default: 304 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 305 return (reasonbuf); 306 } 307} 308#endif /* KTR */ 309 310static int 311vmx_allow_x2apic_msrs(struct vmx *vmx) 312{ 313 int i, error; 314 315 error = 0; 316 317 /* 318 * Allow readonly access to the following x2APIC MSRs from the guest. 319 */ 320 error += guest_msr_ro(vmx, MSR_APIC_ID); 321 error += guest_msr_ro(vmx, MSR_APIC_VERSION); 322 error += guest_msr_ro(vmx, MSR_APIC_LDR); 323 error += guest_msr_ro(vmx, MSR_APIC_SVR); 324 325 for (i = 0; i < 8; i++) 326 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i); 327 328 for (i = 0; i < 8; i++) 329 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i); 330 331 for (i = 0; i < 8; i++) 332 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i); 333 334 error += guest_msr_ro(vmx, MSR_APIC_ESR); 335 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER); 336 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL); 337 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT); 338 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0); 339 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1); 340 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR); 341 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER); 342 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER); 343 error += guest_msr_ro(vmx, MSR_APIC_ICR); 344 345 /* 346 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest. 347 * 348 * These registers get special treatment described in the section 349 * "Virtualizing MSR-Based APIC Accesses". 350 */ 351 error += guest_msr_rw(vmx, MSR_APIC_TPR); 352 error += guest_msr_rw(vmx, MSR_APIC_EOI); 353 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI); 354 355 return (error); 356} 357 358u_long 359vmx_fix_cr0(u_long cr0) 360{ 361 362 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask); 363} 364 365u_long 366vmx_fix_cr4(u_long cr4) 367{ 368 369 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask); 370} 371 372static void 373vpid_free(int vpid) 374{ 375 if (vpid < 0 || vpid > 0xffff) 376 panic("vpid_free: invalid vpid %d", vpid); 377 378 /* 379 * VPIDs [0,VM_MAXCPU] are special and are not allocated from 380 * the unit number allocator. 381 */ 382 383 if (vpid > VM_MAXCPU) 384 free_unr(vpid_unr, vpid); 385} 386 387static void 388vpid_alloc(uint16_t *vpid, int num) 389{ 390 int i, x; 391 392 if (num <= 0 || num > VM_MAXCPU) 393 panic("invalid number of vpids requested: %d", num); 394 395 /* 396 * If the "enable vpid" execution control is not enabled then the 397 * VPID is required to be 0 for all vcpus. 398 */ 399 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) { 400 for (i = 0; i < num; i++) 401 vpid[i] = 0; 402 return; 403 } 404 405 /* 406 * Allocate a unique VPID for each vcpu from the unit number allocator. 407 */ 408 for (i = 0; i < num; i++) { 409 x = alloc_unr(vpid_unr); 410 if (x == -1) 411 break; 412 else 413 vpid[i] = x; 414 } 415 416 if (i < num) { 417 atomic_add_int(&vpid_alloc_failed, 1); 418 419 /* 420 * If the unit number allocator does not have enough unique 421 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range. 422 * 423 * These VPIDs are not be unique across VMs but this does not 424 * affect correctness because the combined mappings are also 425 * tagged with the EP4TA which is unique for each VM. 426 * 427 * It is still sub-optimal because the invvpid will invalidate 428 * combined mappings for a particular VPID across all EP4TAs. 429 */ 430 while (i-- > 0) 431 vpid_free(vpid[i]); 432 433 for (i = 0; i < num; i++) 434 vpid[i] = i + 1; 435 } 436} 437 438static void 439vpid_init(void) 440{ 441 /* 442 * VPID 0 is required when the "enable VPID" execution control is 443 * disabled. 444 * 445 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the 446 * unit number allocator does not have sufficient unique VPIDs to 447 * satisfy the allocation. 448 * 449 * The remaining VPIDs are managed by the unit number allocator. 450 */ 451 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL); 452} 453 454static void 455msr_save_area_init(struct msr_entry *g_area, int *g_count) 456{ 457 int cnt; 458 459 static struct msr_entry guest_msrs[] = { 460 { MSR_KGSBASE, 0, 0 }, 461 }; 462 463 cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]); 464 if (cnt > GUEST_MSR_MAX_ENTRIES) 465 panic("guest msr save area overrun"); 466 bcopy(guest_msrs, g_area, sizeof(guest_msrs)); 467 *g_count = cnt; 468} 469 470static void 471vmx_disable(void *arg __unused) 472{ 473 struct invvpid_desc invvpid_desc = { 0 }; 474 struct invept_desc invept_desc = { 0 }; 475 476 if (vmxon_enabled[curcpu]) { 477 /* 478 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b. 479 * 480 * VMXON or VMXOFF are not required to invalidate any TLB 481 * caching structures. This prevents potential retention of 482 * cached information in the TLB between distinct VMX episodes. 483 */ 484 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc); 485 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc); 486 vmxoff(); 487 } 488 load_cr4(rcr4() & ~CR4_VMXE); 489} 490 491static int 492vmx_cleanup(void) 493{ 494 495 if (pirvec != 0) 496 vmm_ipi_free(pirvec); 497 498 if (vpid_unr != NULL) { 499 delete_unrhdr(vpid_unr); 500 vpid_unr = NULL; 501 } 502 503 smp_rendezvous(NULL, vmx_disable, NULL, NULL); 504 505 return (0); 506} 507 508static void 509vmx_enable(void *arg __unused) 510{ 511 int error; 512 513 load_cr4(rcr4() | CR4_VMXE); 514 515 *(uint32_t *)vmxon_region[curcpu] = vmx_revision(); 516 error = vmxon(vmxon_region[curcpu]); 517 if (error == 0) 518 vmxon_enabled[curcpu] = 1; 519} 520 521static void 522vmx_restore(void) 523{ 524 525 if (vmxon_enabled[curcpu]) 526 vmxon(vmxon_region[curcpu]); 527} 528 529static int 530vmx_init(int ipinum) 531{ 532 int error, use_tpr_shadow; 533 uint64_t fixed0, fixed1, feature_control; 534 uint32_t tmp, procbased2_vid_bits; 535 536 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ 537 if (!(cpu_feature2 & CPUID2_VMX)) { 538 printf("vmx_init: processor does not support VMX operation\n"); 539 return (ENXIO); 540 } 541 542 /* 543 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits 544 * are set (bits 0 and 2 respectively). 545 */ 546 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 547 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 || 548 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 549 printf("vmx_init: VMX operation disabled by BIOS\n"); 550 return (ENXIO); 551 } 552 553 /* Check support for primary processor-based VM-execution controls */ 554 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 555 MSR_VMX_TRUE_PROCBASED_CTLS, 556 PROCBASED_CTLS_ONE_SETTING, 557 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 558 if (error) { 559 printf("vmx_init: processor does not support desired primary " 560 "processor-based controls\n"); 561 return (error); 562 } 563 564 /* Clear the processor-based ctl bits that are set on demand */ 565 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 566 567 /* Check support for secondary processor-based VM-execution controls */ 568 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 569 MSR_VMX_PROCBASED_CTLS2, 570 PROCBASED_CTLS2_ONE_SETTING, 571 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 572 if (error) { 573 printf("vmx_init: processor does not support desired secondary " 574 "processor-based controls\n"); 575 return (error); 576 } 577 578 /* Check support for VPID */ 579 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 580 PROCBASED2_ENABLE_VPID, 0, &tmp); 581 if (error == 0) 582 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 583 584 /* Check support for pin-based VM-execution controls */ 585 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 586 MSR_VMX_TRUE_PINBASED_CTLS, 587 PINBASED_CTLS_ONE_SETTING, 588 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 589 if (error) { 590 printf("vmx_init: processor does not support desired " 591 "pin-based controls\n"); 592 return (error); 593 } 594 595 /* Check support for VM-exit controls */ 596 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 597 VM_EXIT_CTLS_ONE_SETTING, 598 VM_EXIT_CTLS_ZERO_SETTING, 599 &exit_ctls); 600 if (error) { 601 /* Try again without the PAT MSR bits */ 602 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, 603 MSR_VMX_TRUE_EXIT_CTLS, 604 VM_EXIT_CTLS_ONE_SETTING_NO_PAT, 605 VM_EXIT_CTLS_ZERO_SETTING, 606 &exit_ctls); 607 if (error) { 608 printf("vmx_init: processor does not support desired " 609 "exit controls\n"); 610 return (error); 611 } else { 612 if (bootverbose) 613 printf("vmm: PAT MSR access not supported\n"); 614 guest_msr_valid(MSR_PAT); 615 vmx_no_patmsr = 1; 616 } 617 } 618 619 /* Check support for VM-entry controls */ 620 if (!vmx_no_patmsr) { 621 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 622 MSR_VMX_TRUE_ENTRY_CTLS, 623 VM_ENTRY_CTLS_ONE_SETTING, 624 VM_ENTRY_CTLS_ZERO_SETTING, 625 &entry_ctls); 626 } else { 627 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 628 MSR_VMX_TRUE_ENTRY_CTLS, 629 VM_ENTRY_CTLS_ONE_SETTING_NO_PAT, 630 VM_ENTRY_CTLS_ZERO_SETTING, 631 &entry_ctls); 632 } 633 634 if (error) { 635 printf("vmx_init: processor does not support desired " 636 "entry controls\n"); 637 return (error); 638 } 639 640 /* 641 * Check support for optional features by testing them 642 * as individual bits 643 */ 644 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 645 MSR_VMX_TRUE_PROCBASED_CTLS, 646 PROCBASED_HLT_EXITING, 0, 647 &tmp) == 0); 648 649 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 650 MSR_VMX_PROCBASED_CTLS, 651 PROCBASED_MTF, 0, 652 &tmp) == 0); 653 654 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 655 MSR_VMX_TRUE_PROCBASED_CTLS, 656 PROCBASED_PAUSE_EXITING, 0, 657 &tmp) == 0); 658 659 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 660 MSR_VMX_PROCBASED_CTLS2, 661 PROCBASED2_UNRESTRICTED_GUEST, 0, 662 &tmp) == 0); 663 664 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 665 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 666 &tmp) == 0); 667 668 /* 669 * Check support for virtual interrupt delivery. 670 */ 671 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 672 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 673 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 674 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 675 676 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 677 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, 678 &tmp) == 0); 679 680 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 681 procbased2_vid_bits, 0, &tmp); 682 if (error == 0 && use_tpr_shadow) { 683 virtual_interrupt_delivery = 1; 684 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", 685 &virtual_interrupt_delivery); 686 } 687 688 if (virtual_interrupt_delivery) { 689 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 690 procbased_ctls2 |= procbased2_vid_bits; 691 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; 692 693 /* 694 * Check for Posted Interrupts only if Virtual Interrupt 695 * Delivery is enabled. 696 */ 697 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 698 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, 699 &tmp); 700 if (error == 0) { 701 pirvec = vmm_ipi_alloc(); 702 if (pirvec == 0) { 703 if (bootverbose) { 704 printf("vmx_init: unable to allocate " 705 "posted interrupt vector\n"); 706 } 707 } else { 708 posted_interrupts = 1; 709 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir", 710 &posted_interrupts); 711 } 712 } 713 } 714 715 if (posted_interrupts) 716 pinbased_ctls |= PINBASED_POSTED_INTERRUPT; 717 718 /* Initialize EPT */ 719 error = ept_init(ipinum); 720 if (error) { 721 printf("vmx_init: ept initialization failed (%d)\n", error); 722 return (error); 723 } 724 725 /* 726 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 727 */ 728 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 729 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 730 cr0_ones_mask = fixed0 & fixed1; 731 cr0_zeros_mask = ~fixed0 & ~fixed1; 732 733 /* 734 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation 735 * if unrestricted guest execution is allowed. 736 */ 737 if (cap_unrestricted_guest) 738 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 739 740 /* 741 * Do not allow the guest to set CR0_NW or CR0_CD. 742 */ 743 cr0_zeros_mask |= (CR0_NW | CR0_CD); 744 745 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 746 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 747 cr4_ones_mask = fixed0 & fixed1; 748 cr4_zeros_mask = ~fixed0 & ~fixed1; 749 750 vpid_init(); 751 752 /* enable VMX operation */ 753 smp_rendezvous(NULL, vmx_enable, NULL, NULL); 754 755 vmx_initialized = 1; 756 757 return (0); 758} 759 760static void 761vmx_trigger_hostintr(int vector) 762{ 763 uintptr_t func; 764 struct gate_descriptor *gd; 765 766 gd = &idt[vector]; 767 768 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 769 "invalid vector %d", vector)); 770 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 771 vector)); 772 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 773 "has invalid type %d", vector, gd->gd_type)); 774 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 775 "has invalid dpl %d", vector, gd->gd_dpl)); 776 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 777 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 778 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 779 "IST %d", vector, gd->gd_ist)); 780 781 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 782 vmx_call_isr(func); 783} 784 785static int 786vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) 787{ 788 int error, mask_ident, shadow_ident; 789 uint64_t mask_value; 790 791 if (which != 0 && which != 4) 792 panic("vmx_setup_cr_shadow: unknown cr%d", which); 793 794 if (which == 0) { 795 mask_ident = VMCS_CR0_MASK; 796 mask_value = cr0_ones_mask | cr0_zeros_mask; 797 shadow_ident = VMCS_CR0_SHADOW; 798 } else { 799 mask_ident = VMCS_CR4_MASK; 800 mask_value = cr4_ones_mask | cr4_zeros_mask; 801 shadow_ident = VMCS_CR4_SHADOW; 802 } 803 804 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); 805 if (error) 806 return (error); 807 808 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); 809 if (error) 810 return (error); 811 812 return (0); 813} 814#define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) 815#define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) 816 817static void * 818vmx_vminit(struct vm *vm, pmap_t pmap) 819{ 820 uint16_t vpid[VM_MAXCPU]; 821 int i, error, guest_msr_count; 822 struct vmx *vmx; 823 struct vmcs *vmcs; 824 825 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 826 if ((uintptr_t)vmx & PAGE_MASK) { 827 panic("malloc of struct vmx not aligned on %d byte boundary", 828 PAGE_SIZE); 829 } 830 vmx->vm = vm; 831 832 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4)); 833 834 /* 835 * Clean up EPTP-tagged guest physical and combined mappings 836 * 837 * VMX transitions are not required to invalidate any guest physical 838 * mappings. So, it may be possible for stale guest physical mappings 839 * to be present in the processor TLBs. 840 * 841 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 842 */ 843 ept_invalidate_mappings(vmx->eptp); 844 845 msr_bitmap_initialize(vmx->msr_bitmap); 846 847 /* 848 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 849 * The guest FSBASE and GSBASE are saved and restored during 850 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are 851 * always restored from the vmcs host state area on vm-exit. 852 * 853 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in 854 * how they are saved/restored so can be directly accessed by the 855 * guest. 856 * 857 * Guest KGSBASE is saved and restored in the guest MSR save area. 858 * Host KGSBASE is restored before returning to userland from the pcb. 859 * There will be a window of time when we are executing in the host 860 * kernel context with a value of KGSBASE from the guest. This is ok 861 * because the value of KGSBASE is inconsequential in kernel context. 862 * 863 * MSR_EFER is saved and restored in the guest VMCS area on a 864 * VM exit and entry respectively. It is also restored from the 865 * host VMCS area on a VM exit. 866 */ 867 if (guest_msr_rw(vmx, MSR_GSBASE) || 868 guest_msr_rw(vmx, MSR_FSBASE) || 869 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 870 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 871 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 872 guest_msr_rw(vmx, MSR_KGSBASE) || 873 guest_msr_rw(vmx, MSR_EFER)) 874 panic("vmx_vminit: error setting guest msr access"); 875 876 /* 877 * MSR_PAT is saved and restored in the guest VMCS are on a VM exit 878 * and entry respectively. It is also restored from the host VMCS 879 * area on a VM exit. However, if running on a system with no 880 * MSR_PAT save/restore support, leave access disabled so accesses 881 * will be trapped. 882 */ 883 if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT)) 884 panic("vmx_vminit: error setting guest pat msr access"); 885 886 vpid_alloc(vpid, VM_MAXCPU); 887 888 if (virtual_interrupt_delivery) { 889 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 890 APIC_ACCESS_ADDRESS); 891 /* XXX this should really return an error to the caller */ 892 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 893 } 894 895 for (i = 0; i < VM_MAXCPU; i++) { 896 vmcs = &vmx->vmcs[i]; 897 vmcs->identifier = vmx_revision(); 898 error = vmclear(vmcs); 899 if (error != 0) { 900 panic("vmx_vminit: vmclear error %d on vcpu %d\n", 901 error, i); 902 } 903 904 error = vmcs_init(vmcs); 905 KASSERT(error == 0, ("vmcs_init error %d", error)); 906 907 VMPTRLD(vmcs); 908 error = 0; 909 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]); 910 error += vmwrite(VMCS_EPTP, vmx->eptp); 911 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls); 912 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls); 913 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2); 914 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); 915 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); 916 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); 917 error += vmwrite(VMCS_VPID, vpid[i]); 918 if (virtual_interrupt_delivery) { 919 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 920 error += vmwrite(VMCS_VIRTUAL_APIC, 921 vtophys(&vmx->apic_page[i])); 922 error += vmwrite(VMCS_EOI_EXIT0, 0); 923 error += vmwrite(VMCS_EOI_EXIT1, 0); 924 error += vmwrite(VMCS_EOI_EXIT2, 0); 925 error += vmwrite(VMCS_EOI_EXIT3, 0); 926 } 927 if (posted_interrupts) { 928 error += vmwrite(VMCS_PIR_VECTOR, pirvec); 929 error += vmwrite(VMCS_PIR_DESC, 930 vtophys(&vmx->pir_desc[i])); 931 } 932 VMCLEAR(vmcs); 933 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs")); 934 935 vmx->cap[i].set = 0; 936 vmx->cap[i].proc_ctls = procbased_ctls; 937 vmx->cap[i].proc_ctls2 = procbased_ctls2; 938 939 vmx->state[i].lastcpu = -1; 940 vmx->state[i].vpid = vpid[i]; 941 942 msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count); 943 944 error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]), 945 guest_msr_count); 946 if (error != 0) 947 panic("vmcs_set_msr_save error %d", error); 948 949 /* 950 * Set up the CR0/4 shadows, and init the read shadow 951 * to the power-on register value from the Intel Sys Arch. 952 * CR0 - 0x60000010 953 * CR4 - 0 954 */ 955 error = vmx_setup_cr0_shadow(vmcs, 0x60000010); 956 if (error != 0) 957 panic("vmx_setup_cr0_shadow %d", error); 958 959 error = vmx_setup_cr4_shadow(vmcs, 0); 960 if (error != 0) 961 panic("vmx_setup_cr4_shadow %d", error); 962 963 vmx->ctx[i].pmap = pmap; 964 } 965 966 return (vmx); 967} 968 969static int 970vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 971{ 972 int handled, func; 973 974 func = vmxctx->guest_rax; 975 976 handled = x86_emulate_cpuid(vm, vcpu, 977 (uint32_t*)(&vmxctx->guest_rax), 978 (uint32_t*)(&vmxctx->guest_rbx), 979 (uint32_t*)(&vmxctx->guest_rcx), 980 (uint32_t*)(&vmxctx->guest_rdx)); 981 return (handled); 982} 983 984static __inline void 985vmx_run_trace(struct vmx *vmx, int vcpu) 986{ 987#ifdef KTR 988 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip()); 989#endif 990} 991 992static __inline void 993vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason, 994 int handled) 995{ 996#ifdef KTR 997 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx", 998 handled ? "handled" : "unhandled", 999 exit_reason_to_str(exit_reason), rip); 1000#endif 1001} 1002 1003static __inline void 1004vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 1005{ 1006#ifdef KTR 1007 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 1008#endif 1009} 1010 1011static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1012 1013static void 1014vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 1015{ 1016 struct vmxstate *vmxstate; 1017 struct invvpid_desc invvpid_desc; 1018 1019 vmxstate = &vmx->state[vcpu]; 1020 if (vmxstate->lastcpu == curcpu) 1021 return; 1022 1023 vmxstate->lastcpu = curcpu; 1024 1025 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 1026 1027 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1028 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1029 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1030 1031 /* 1032 * If we are using VPIDs then invalidate all mappings tagged with 'vpid' 1033 * 1034 * We do this because this vcpu was executing on a different host 1035 * cpu when it last ran. We do not track whether it invalidated 1036 * mappings associated with its 'vpid' during that run. So we must 1037 * assume that the mappings associated with 'vpid' on 'curcpu' are 1038 * stale and invalidate them. 1039 * 1040 * Note that we incur this penalty only when the scheduler chooses to 1041 * move the thread associated with this vcpu between host cpus. 1042 * 1043 * Note also that this will invalidate mappings tagged with 'vpid' 1044 * for "all" EP4TAs. 1045 */ 1046 if (vmxstate->vpid != 0) { 1047 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1048 invvpid_desc._res1 = 0; 1049 invvpid_desc._res2 = 0; 1050 invvpid_desc.vpid = vmxstate->vpid; 1051 invvpid_desc.linear_addr = 0; 1052 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1053 } else { 1054 /* 1055 * The invvpid can be skipped if an invept is going to 1056 * be performed before entering the guest. The invept 1057 * will invalidate combined mappings tagged with 1058 * 'vmx->eptp' for all vpids. 1059 */ 1060 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1061 } 1062 } 1063} 1064 1065/* 1066 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1067 */ 1068CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1069 1070static void __inline 1071vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1072{ 1073 1074 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1075 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1076 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1077 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1078 } 1079} 1080 1081static void __inline 1082vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1083{ 1084 1085 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1086 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls)); 1087 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1088 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1089 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1090} 1091 1092static void __inline 1093vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1094{ 1095 1096 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1097 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1098 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1099 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1100 } 1101} 1102 1103static void __inline 1104vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1105{ 1106 1107 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1108 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls)); 1109 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1110 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1111 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1112} 1113 1114#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1115 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1116#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1117 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1118 1119static void 1120vmx_inject_nmi(struct vmx *vmx, int vcpu) 1121{ 1122 uint32_t gi, info; 1123 1124 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1125 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1126 "interruptibility-state %#x", gi)); 1127 1128 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1129 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1130 "VM-entry interruption information %#x", info)); 1131 1132 /* 1133 * Inject the virtual NMI. The vector must be the NMI IDT entry 1134 * or the VMCS entry check will fail. 1135 */ 1136 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1137 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1138 1139 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1140 1141 /* Clear the request */ 1142 vm_nmi_clear(vmx->vm, vcpu); 1143} 1144 1145static void 1146vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic) 1147{ 1148 struct vm_exception exc; 1149 int vector, need_nmi_exiting, extint_pending; 1150 uint64_t rflags; 1151 uint32_t gi, info; 1152 1153 if (vm_exception_pending(vmx->vm, vcpu, &exc)) { 1154 KASSERT(exc.vector >= 0 && exc.vector < 32, 1155 ("%s: invalid exception vector %d", __func__, exc.vector)); 1156 1157 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1158 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1159 "pending exception %d: %#x", __func__, exc.vector, info)); 1160 1161 info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID; 1162 if (exc.error_code_valid) { 1163 info |= VMCS_INTR_DEL_ERRCODE; 1164 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code); 1165 } 1166 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1167 } 1168 1169 if (vm_nmi_pending(vmx->vm, vcpu)) { 1170 /* 1171 * If there are no conditions blocking NMI injection then 1172 * inject it directly here otherwise enable "NMI window 1173 * exiting" to inject it as soon as we can. 1174 * 1175 * We also check for STI_BLOCKING because some implementations 1176 * don't allow NMI injection in this case. If we are running 1177 * on a processor that doesn't have this restriction it will 1178 * immediately exit and the NMI will be injected in the 1179 * "NMI window exiting" handler. 1180 */ 1181 need_nmi_exiting = 1; 1182 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1183 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1184 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1185 if ((info & VMCS_INTR_VALID) == 0) { 1186 vmx_inject_nmi(vmx, vcpu); 1187 need_nmi_exiting = 0; 1188 } else { 1189 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI " 1190 "due to VM-entry intr info %#x", info); 1191 } 1192 } else { 1193 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to " 1194 "Guest Interruptibility-state %#x", gi); 1195 } 1196 1197 if (need_nmi_exiting) 1198 vmx_set_nmi_window_exiting(vmx, vcpu); 1199 } 1200 1201 extint_pending = vm_extint_pending(vmx->vm, vcpu); 1202 1203 if (!extint_pending && virtual_interrupt_delivery) { 1204 vmx_inject_pir(vlapic); 1205 return; 1206 } 1207 1208 /* 1209 * If interrupt-window exiting is already in effect then don't bother 1210 * checking for pending interrupts. This is just an optimization and 1211 * not needed for correctness. 1212 */ 1213 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 1214 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to " 1215 "pending int_window_exiting"); 1216 return; 1217 } 1218 1219 if (!extint_pending) { 1220 /* Ask the local apic for a vector to inject */ 1221 if (!vlapic_pending_intr(vlapic, &vector)) 1222 return; 1223 } else { 1224 /* Ask the legacy pic for a vector to inject */ 1225 vatpic_pending_intr(vmx->vm, &vector); 1226 } 1227 1228 KASSERT(vector >= 32 && vector <= 255, ("invalid vector %d", vector)); 1229 1230 /* Check RFLAGS.IF and the interruptibility state of the guest */ 1231 rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1232 if ((rflags & PSL_I) == 0) { 1233 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1234 "rflags %#lx", vector, rflags); 1235 goto cantinject; 1236 } 1237 1238 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1239 if (gi & HWINTR_BLOCKING) { 1240 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1241 "Guest Interruptibility-state %#x", vector, gi); 1242 goto cantinject; 1243 } 1244 1245 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1246 if (info & VMCS_INTR_VALID) { 1247 /* 1248 * This is expected and could happen for multiple reasons: 1249 * - A vectoring VM-entry was aborted due to astpending 1250 * - A VM-exit happened during event injection. 1251 * - An exception was injected above. 1252 * - An NMI was injected above or after "NMI window exiting" 1253 */ 1254 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1255 "VM-entry intr info %#x", vector, info); 1256 goto cantinject; 1257 } 1258 1259 /* Inject the interrupt */ 1260 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1261 info |= vector; 1262 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1263 1264 if (!extint_pending) { 1265 /* Update the Local APIC ISR */ 1266 vlapic_intr_accepted(vlapic, vector); 1267 } else { 1268 vm_extint_clear(vmx->vm, vcpu); 1269 vatpic_intr_accepted(vmx->vm, vector); 1270 1271 /* 1272 * After we accepted the current ExtINT the PIC may 1273 * have posted another one. If that is the case, set 1274 * the Interrupt Window Exiting execution control so 1275 * we can inject that one too. 1276 */ 1277 if (vm_extint_pending(vmx->vm, vcpu)) 1278 vmx_set_int_window_exiting(vmx, vcpu); 1279 } 1280 1281 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1282 1283 return; 1284 1285cantinject: 1286 /* 1287 * Set the Interrupt Window Exiting execution control so we can inject 1288 * the interrupt as soon as blocking condition goes away. 1289 */ 1290 vmx_set_int_window_exiting(vmx, vcpu); 1291} 1292 1293/* 1294 * If the Virtual NMIs execution control is '1' then the logical processor 1295 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1296 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1297 * virtual-NMI blocking. 1298 * 1299 * This unblocking occurs even if the IRET causes a fault. In this case the 1300 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1301 */ 1302static void 1303vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1304{ 1305 uint32_t gi; 1306 1307 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1308 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1309 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1310 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1311} 1312 1313static void 1314vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1315{ 1316 uint32_t gi; 1317 1318 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1319 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1320 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1321 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1322} 1323 1324static int 1325vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1326{ 1327 struct vmxctx *vmxctx; 1328 uint64_t xcrval; 1329 const struct xsave_limits *limits; 1330 1331 vmxctx = &vmx->ctx[vcpu]; 1332 limits = vmm_get_xsave_limits(); 1333 1334 /* 1335 * Note that the processor raises a GP# fault on its own if 1336 * xsetbv is executed for CPL != 0, so we do not have to 1337 * emulate that fault here. 1338 */ 1339 1340 /* Only xcr0 is supported. */ 1341 if (vmxctx->guest_rcx != 0) { 1342 vm_inject_gp(vmx->vm, vcpu); 1343 return (HANDLED); 1344 } 1345 1346 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1347 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1348 vm_inject_ud(vmx->vm, vcpu); 1349 return (HANDLED); 1350 } 1351 1352 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1353 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1354 vm_inject_gp(vmx->vm, vcpu); 1355 return (HANDLED); 1356 } 1357 1358 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1359 vm_inject_gp(vmx->vm, vcpu); 1360 return (HANDLED); 1361 } 1362 1363 if ((xcrval & (XFEATURE_ENABLED_AVX | XFEATURE_ENABLED_SSE)) == 1364 XFEATURE_ENABLED_AVX) { 1365 vm_inject_gp(vmx->vm, vcpu); 1366 return (HANDLED); 1367 } 1368 1369 /* 1370 * This runs "inside" vmrun() with the guest's FPU state, so 1371 * modifying xcr0 directly modifies the guest's xcr0, not the 1372 * host's. 1373 */ 1374 load_xcr(0, xcrval); 1375 return (HANDLED); 1376} 1377 1378static int 1379vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1380{ 1381 int cr, vmcs_guest_cr, vmcs_shadow_cr; 1382 uint64_t crval, regval, ones_mask, zeros_mask; 1383 const struct vmxctx *vmxctx; 1384 1385 /* We only handle mov to %cr0 or %cr4 at this time */ 1386 if ((exitqual & 0xf0) != 0x00) 1387 return (UNHANDLED); 1388 1389 cr = exitqual & 0xf; 1390 if (cr != 0 && cr != 4) 1391 return (UNHANDLED); 1392 1393 regval = 0; /* silence gcc */ 1394 vmxctx = &vmx->ctx[vcpu]; 1395 1396 /* 1397 * We must use vmcs_write() directly here because vmcs_setreg() will 1398 * call vmclear(vmcs) as a side-effect which we certainly don't want. 1399 */ 1400 switch ((exitqual >> 8) & 0xf) { 1401 case 0: 1402 regval = vmxctx->guest_rax; 1403 break; 1404 case 1: 1405 regval = vmxctx->guest_rcx; 1406 break; 1407 case 2: 1408 regval = vmxctx->guest_rdx; 1409 break; 1410 case 3: 1411 regval = vmxctx->guest_rbx; 1412 break; 1413 case 4: 1414 regval = vmcs_read(VMCS_GUEST_RSP); 1415 break; 1416 case 5: 1417 regval = vmxctx->guest_rbp; 1418 break; 1419 case 6: 1420 regval = vmxctx->guest_rsi; 1421 break; 1422 case 7: 1423 regval = vmxctx->guest_rdi; 1424 break; 1425 case 8: 1426 regval = vmxctx->guest_r8; 1427 break; 1428 case 9: 1429 regval = vmxctx->guest_r9; 1430 break; 1431 case 10: 1432 regval = vmxctx->guest_r10; 1433 break; 1434 case 11: 1435 regval = vmxctx->guest_r11; 1436 break; 1437 case 12: 1438 regval = vmxctx->guest_r12; 1439 break; 1440 case 13: 1441 regval = vmxctx->guest_r13; 1442 break; 1443 case 14: 1444 regval = vmxctx->guest_r14; 1445 break; 1446 case 15: 1447 regval = vmxctx->guest_r15; 1448 break; 1449 } 1450 1451 if (cr == 0) { 1452 ones_mask = cr0_ones_mask; 1453 zeros_mask = cr0_zeros_mask; 1454 vmcs_guest_cr = VMCS_GUEST_CR0; 1455 vmcs_shadow_cr = VMCS_CR0_SHADOW; 1456 } else { 1457 ones_mask = cr4_ones_mask; 1458 zeros_mask = cr4_zeros_mask; 1459 vmcs_guest_cr = VMCS_GUEST_CR4; 1460 vmcs_shadow_cr = VMCS_CR4_SHADOW; 1461 } 1462 vmcs_write(vmcs_shadow_cr, regval); 1463 1464 crval = regval | ones_mask; 1465 crval &= ~zeros_mask; 1466 vmcs_write(vmcs_guest_cr, crval); 1467 1468 if (cr == 0 && regval & CR0_PG) { 1469 uint64_t efer, entry_ctls; 1470 1471 /* 1472 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1473 * the "IA-32e mode guest" bit in VM-entry control must be 1474 * equal. 1475 */ 1476 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1477 if (efer & EFER_LME) { 1478 efer |= EFER_LMA; 1479 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1480 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1481 entry_ctls |= VM_ENTRY_GUEST_LMA; 1482 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1483 } 1484 } 1485 1486 return (HANDLED); 1487} 1488 1489static enum vie_cpu_mode 1490vmx_cpu_mode(void) 1491{ 1492 1493 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) 1494 return (CPU_MODE_64BIT); 1495 else 1496 return (CPU_MODE_COMPATIBILITY); 1497} 1498 1499static enum vie_paging_mode 1500vmx_paging_mode(void) 1501{ 1502 1503 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1504 return (PAGING_MODE_FLAT); 1505 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1506 return (PAGING_MODE_32); 1507 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1508 return (PAGING_MODE_64); 1509 else 1510 return (PAGING_MODE_PAE); 1511} 1512 1513static int 1514ept_fault_type(uint64_t ept_qual) 1515{ 1516 int fault_type; 1517 1518 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1519 fault_type = VM_PROT_WRITE; 1520 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1521 fault_type = VM_PROT_EXECUTE; 1522 else 1523 fault_type= VM_PROT_READ; 1524 1525 return (fault_type); 1526} 1527 1528static boolean_t 1529ept_emulation_fault(uint64_t ept_qual) 1530{ 1531 int read, write; 1532 1533 /* EPT fault on an instruction fetch doesn't make sense here */ 1534 if (ept_qual & EPT_VIOLATION_INST_FETCH) 1535 return (FALSE); 1536 1537 /* EPT fault must be a read fault or a write fault */ 1538 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 1539 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 1540 if ((read | write) == 0) 1541 return (FALSE); 1542 1543 /* 1544 * The EPT violation must have been caused by accessing a 1545 * guest-physical address that is a translation of a guest-linear 1546 * address. 1547 */ 1548 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 1549 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 1550 return (FALSE); 1551 } 1552 1553 return (TRUE); 1554} 1555 1556static __inline int 1557apic_access_virtualization(struct vmx *vmx, int vcpuid) 1558{ 1559 uint32_t proc_ctls2; 1560 1561 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1562 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); 1563} 1564 1565static __inline int 1566x2apic_virtualization(struct vmx *vmx, int vcpuid) 1567{ 1568 uint32_t proc_ctls2; 1569 1570 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1571 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 1572} 1573 1574static int 1575vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 1576 uint64_t qual) 1577{ 1578 int error, handled, offset; 1579 uint32_t *apic_regs, vector; 1580 bool retu; 1581 1582 handled = HANDLED; 1583 offset = APIC_WRITE_OFFSET(qual); 1584 1585 if (!apic_access_virtualization(vmx, vcpuid)) { 1586 /* 1587 * In general there should not be any APIC write VM-exits 1588 * unless APIC-access virtualization is enabled. 1589 * 1590 * However self-IPI virtualization can legitimately trigger 1591 * an APIC-write VM-exit so treat it specially. 1592 */ 1593 if (x2apic_virtualization(vmx, vcpuid) && 1594 offset == APIC_OFFSET_SELF_IPI) { 1595 apic_regs = (uint32_t *)(vlapic->apic_page); 1596 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1597 vlapic_self_ipi_handler(vlapic, vector); 1598 return (HANDLED); 1599 } else 1600 return (UNHANDLED); 1601 } 1602 1603 switch (offset) { 1604 case APIC_OFFSET_ID: 1605 vlapic_id_write_handler(vlapic); 1606 break; 1607 case APIC_OFFSET_LDR: 1608 vlapic_ldr_write_handler(vlapic); 1609 break; 1610 case APIC_OFFSET_DFR: 1611 vlapic_dfr_write_handler(vlapic); 1612 break; 1613 case APIC_OFFSET_SVR: 1614 vlapic_svr_write_handler(vlapic); 1615 break; 1616 case APIC_OFFSET_ESR: 1617 vlapic_esr_write_handler(vlapic); 1618 break; 1619 case APIC_OFFSET_ICR_LOW: 1620 retu = false; 1621 error = vlapic_icrlo_write_handler(vlapic, &retu); 1622 if (error != 0 || retu) 1623 handled = UNHANDLED; 1624 break; 1625 case APIC_OFFSET_CMCI_LVT: 1626 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1627 vlapic_lvt_write_handler(vlapic, offset); 1628 break; 1629 case APIC_OFFSET_TIMER_ICR: 1630 vlapic_icrtmr_write_handler(vlapic); 1631 break; 1632 case APIC_OFFSET_TIMER_DCR: 1633 vlapic_dcr_write_handler(vlapic); 1634 break; 1635 default: 1636 handled = UNHANDLED; 1637 break; 1638 } 1639 return (handled); 1640} 1641 1642static bool 1643apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 1644{ 1645 1646 if (apic_access_virtualization(vmx, vcpuid) && 1647 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 1648 return (true); 1649 else 1650 return (false); 1651} 1652 1653static int 1654vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 1655{ 1656 uint64_t qual; 1657 int access_type, offset, allowed; 1658 1659 if (!apic_access_virtualization(vmx, vcpuid)) 1660 return (UNHANDLED); 1661 1662 qual = vmexit->u.vmx.exit_qualification; 1663 access_type = APIC_ACCESS_TYPE(qual); 1664 offset = APIC_ACCESS_OFFSET(qual); 1665 1666 allowed = 0; 1667 if (access_type == 0) { 1668 /* 1669 * Read data access to the following registers is expected. 1670 */ 1671 switch (offset) { 1672 case APIC_OFFSET_APR: 1673 case APIC_OFFSET_PPR: 1674 case APIC_OFFSET_RRR: 1675 case APIC_OFFSET_CMCI_LVT: 1676 case APIC_OFFSET_TIMER_CCR: 1677 allowed = 1; 1678 break; 1679 default: 1680 break; 1681 } 1682 } else if (access_type == 1) { 1683 /* 1684 * Write data access to the following registers is expected. 1685 */ 1686 switch (offset) { 1687 case APIC_OFFSET_VER: 1688 case APIC_OFFSET_APR: 1689 case APIC_OFFSET_PPR: 1690 case APIC_OFFSET_RRR: 1691 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 1692 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 1693 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 1694 case APIC_OFFSET_CMCI_LVT: 1695 case APIC_OFFSET_TIMER_CCR: 1696 allowed = 1; 1697 break; 1698 default: 1699 break; 1700 } 1701 } 1702 1703 if (allowed) { 1704 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 1705 vmexit->u.inst_emul.gpa = DEFAULT_APIC_BASE + offset; 1706 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 1707 vmexit->u.inst_emul.cr3 = vmcs_guest_cr3(); 1708 vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode(); 1709 vmexit->u.inst_emul.paging_mode = vmx_paging_mode(); 1710 } 1711 1712 /* 1713 * Regardless of whether the APIC-access is allowed this handler 1714 * always returns UNHANDLED: 1715 * - if the access is allowed then it is handled by emulating the 1716 * instruction that caused the VM-exit (outside the critical section) 1717 * - if the access is not allowed then it will be converted to an 1718 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 1719 */ 1720 return (UNHANDLED); 1721} 1722 1723static int 1724vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1725{ 1726 int error, handled; 1727 struct vmxctx *vmxctx; 1728 struct vlapic *vlapic; 1729 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, reason; 1730 uint64_t qual, gpa; 1731 bool retu; 1732 1733 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 1734 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 1735 1736 handled = UNHANDLED; 1737 vmxctx = &vmx->ctx[vcpu]; 1738 1739 qual = vmexit->u.vmx.exit_qualification; 1740 reason = vmexit->u.vmx.exit_reason; 1741 vmexit->exitcode = VM_EXITCODE_BOGUS; 1742 1743 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 1744 1745 /* 1746 * VM exits that could be triggered during event injection on the 1747 * previous VM entry need to be handled specially by re-injecting 1748 * the event. 1749 * 1750 * See "Information for VM Exits During Event Delivery" in Intel SDM 1751 * for details. 1752 */ 1753 switch (reason) { 1754 case EXIT_REASON_EPT_FAULT: 1755 case EXIT_REASON_EPT_MISCONFIG: 1756 case EXIT_REASON_APIC_ACCESS: 1757 case EXIT_REASON_TASK_SWITCH: 1758 case EXIT_REASON_EXCEPTION: 1759 idtvec_info = vmcs_idt_vectoring_info(); 1760 if (idtvec_info & VMCS_IDT_VEC_VALID) { 1761 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 1762 vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info); 1763 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 1764 idtvec_err = vmcs_idt_vectoring_err(); 1765 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, 1766 idtvec_err); 1767 } 1768 /* 1769 * If 'virtual NMIs' are being used and the VM-exit 1770 * happened while injecting an NMI during the previous 1771 * VM-entry, then clear "blocking by NMI" in the Guest 1772 * Interruptibility-state. 1773 */ 1774 if ((idtvec_info & VMCS_INTR_T_MASK) == 1775 VMCS_INTR_T_NMI) { 1776 vmx_clear_nmi_blocking(vmx, vcpu); 1777 } 1778 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 1779 } 1780 default: 1781 idtvec_info = 0; 1782 break; 1783 } 1784 1785 switch (reason) { 1786 case EXIT_REASON_CR_ACCESS: 1787 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 1788 handled = vmx_emulate_cr_access(vmx, vcpu, qual); 1789 break; 1790 case EXIT_REASON_RDMSR: 1791 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 1792 retu = false; 1793 ecx = vmxctx->guest_rcx; 1794 error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu); 1795 if (error) { 1796 vmexit->exitcode = VM_EXITCODE_RDMSR; 1797 vmexit->u.msr.code = ecx; 1798 } else if (!retu) { 1799 handled = HANDLED; 1800 } else { 1801 /* Return to userspace with a valid exitcode */ 1802 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1803 ("emulate_wrmsr retu with bogus exitcode")); 1804 } 1805 break; 1806 case EXIT_REASON_WRMSR: 1807 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 1808 retu = false; 1809 eax = vmxctx->guest_rax; 1810 ecx = vmxctx->guest_rcx; 1811 edx = vmxctx->guest_rdx; 1812 error = emulate_wrmsr(vmx->vm, vcpu, ecx, 1813 (uint64_t)edx << 32 | eax, &retu); 1814 if (error) { 1815 vmexit->exitcode = VM_EXITCODE_WRMSR; 1816 vmexit->u.msr.code = ecx; 1817 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 1818 } else if (!retu) { 1819 handled = HANDLED; 1820 } else { 1821 /* Return to userspace with a valid exitcode */ 1822 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1823 ("emulate_wrmsr retu with bogus exitcode")); 1824 } 1825 break; 1826 case EXIT_REASON_HLT: 1827 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 1828 vmexit->exitcode = VM_EXITCODE_HLT; 1829 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1830 break; 1831 case EXIT_REASON_MTF: 1832 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 1833 vmexit->exitcode = VM_EXITCODE_MTRAP; 1834 break; 1835 case EXIT_REASON_PAUSE: 1836 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 1837 vmexit->exitcode = VM_EXITCODE_PAUSE; 1838 break; 1839 case EXIT_REASON_INTR_WINDOW: 1840 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 1841 vmx_clear_int_window_exiting(vmx, vcpu); 1842 return (1); 1843 case EXIT_REASON_EXT_INTR: 1844 /* 1845 * External interrupts serve only to cause VM exits and allow 1846 * the host interrupt handler to run. 1847 * 1848 * If this external interrupt triggers a virtual interrupt 1849 * to a VM, then that state will be recorded by the 1850 * host interrupt handler in the VM's softc. We will inject 1851 * this virtual interrupt during the subsequent VM enter. 1852 */ 1853 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 1854 1855 /* 1856 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 1857 * This appears to be a bug in VMware Fusion? 1858 */ 1859 if (!(intr_info & VMCS_INTR_VALID)) 1860 return (1); 1861 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 1862 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 1863 ("VM exit interruption info invalid: %#x", intr_info)); 1864 vmx_trigger_hostintr(intr_info & 0xff); 1865 1866 /* 1867 * This is special. We want to treat this as an 'handled' 1868 * VM-exit but not increment the instruction pointer. 1869 */ 1870 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 1871 return (1); 1872 case EXIT_REASON_NMI_WINDOW: 1873 /* Exit to allow the pending virtual NMI to be injected */ 1874 if (vm_nmi_pending(vmx->vm, vcpu)) 1875 vmx_inject_nmi(vmx, vcpu); 1876 vmx_clear_nmi_window_exiting(vmx, vcpu); 1877 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 1878 return (1); 1879 case EXIT_REASON_INOUT: 1880 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 1881 vmexit->exitcode = VM_EXITCODE_INOUT; 1882 vmexit->u.inout.bytes = (qual & 0x7) + 1; 1883 vmexit->u.inout.in = (qual & 0x8) ? 1 : 0; 1884 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; 1885 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0; 1886 vmexit->u.inout.port = (uint16_t)(qual >> 16); 1887 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); 1888 error = emulate_ioport(vmx->vm, vcpu, vmexit); 1889 if (error == 0) { 1890 handled = 1; 1891 vmxctx->guest_rax = vmexit->u.inout.eax; 1892 } 1893 break; 1894 case EXIT_REASON_CPUID: 1895 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 1896 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 1897 break; 1898 case EXIT_REASON_EXCEPTION: 1899 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 1900 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 1901 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 1902 ("VM exit interruption info invalid: %#x", intr_info)); 1903 1904 /* 1905 * If Virtual NMIs control is 1 and the VM-exit is due to a 1906 * fault encountered during the execution of IRET then we must 1907 * restore the state of "virtual-NMI blocking" before resuming 1908 * the guest. 1909 * 1910 * See "Resuming Guest Software after Handling an Exception". 1911 */ 1912 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 1913 (intr_info & 0xff) != IDT_DF && 1914 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 1915 vmx_restore_nmi_blocking(vmx, vcpu); 1916 1917 /* 1918 * The NMI has already been handled in vmx_exit_handle_nmi(). 1919 */ 1920 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) 1921 return (1); 1922 break; 1923 case EXIT_REASON_EPT_FAULT: 1924 /* 1925 * If 'gpa' lies within the address space allocated to 1926 * memory then this must be a nested page fault otherwise 1927 * this must be an instruction that accesses MMIO space. 1928 */ 1929 gpa = vmcs_gpa(); 1930 if (vm_mem_allocated(vmx->vm, gpa) || 1931 apic_access_fault(vmx, vcpu, gpa)) { 1932 vmexit->exitcode = VM_EXITCODE_PAGING; 1933 vmexit->u.paging.gpa = gpa; 1934 vmexit->u.paging.fault_type = ept_fault_type(qual); 1935 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1936 } else if (ept_emulation_fault(qual)) { 1937 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 1938 vmexit->u.inst_emul.gpa = gpa; 1939 vmexit->u.inst_emul.gla = vmcs_gla(); 1940 vmexit->u.inst_emul.cr3 = vmcs_guest_cr3(); 1941 vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode(); 1942 vmexit->u.inst_emul.paging_mode = vmx_paging_mode(); 1943 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); 1944 } 1945 /* 1946 * If Virtual NMIs control is 1 and the VM-exit is due to an 1947 * EPT fault during the execution of IRET then we must restore 1948 * the state of "virtual-NMI blocking" before resuming. 1949 * 1950 * See description of "NMI unblocking due to IRET" in 1951 * "Exit Qualification for EPT Violations". 1952 */ 1953 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 1954 (qual & EXIT_QUAL_NMIUDTI) != 0) 1955 vmx_restore_nmi_blocking(vmx, vcpu); 1956 break; 1957 case EXIT_REASON_VIRTUALIZED_EOI: 1958 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 1959 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 1960 vmexit->inst_length = 0; /* trap-like */ 1961 break; 1962 case EXIT_REASON_APIC_ACCESS: 1963 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 1964 break; 1965 case EXIT_REASON_APIC_WRITE: 1966 /* 1967 * APIC-write VM exit is trap-like so the %rip is already 1968 * pointing to the next instruction. 1969 */ 1970 vmexit->inst_length = 0; 1971 vlapic = vm_lapic(vmx->vm, vcpu); 1972 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 1973 break; 1974 case EXIT_REASON_XSETBV: 1975 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 1976 break; 1977 default: 1978 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 1979 break; 1980 } 1981 1982 if (handled) { 1983 /* 1984 * It is possible that control is returned to userland 1985 * even though we were able to handle the VM exit in the 1986 * kernel. 1987 * 1988 * In such a case we want to make sure that the userland 1989 * restarts guest execution at the instruction *after* 1990 * the one we just processed. Therefore we update the 1991 * guest rip in the VMCS and in 'vmexit'. 1992 */ 1993 vmexit->rip += vmexit->inst_length; 1994 vmexit->inst_length = 0; 1995 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 1996 } else { 1997 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1998 /* 1999 * If this VM exit was not claimed by anybody then 2000 * treat it as a generic VMX exit. 2001 */ 2002 vmexit->exitcode = VM_EXITCODE_VMX; 2003 vmexit->u.vmx.status = VM_SUCCESS; 2004 vmexit->u.vmx.inst_type = 0; 2005 vmexit->u.vmx.inst_error = 0; 2006 } else { 2007 /* 2008 * The exitcode and collateral have been populated. 2009 * The VM exit will be processed further in userland. 2010 */ 2011 } 2012 } 2013 return (handled); 2014} 2015 2016static __inline int 2017vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2018{ 2019 2020 vmexit->rip = vmcs_guest_rip(); 2021 vmexit->inst_length = 0; 2022 vmexit->exitcode = VM_EXITCODE_BOGUS; 2023 vmx_astpending_trace(vmx, vcpu, vmexit->rip); 2024 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1); 2025 2026 return (HANDLED); 2027} 2028 2029static __inline int 2030vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2031{ 2032 2033 vmexit->rip = vmcs_guest_rip(); 2034 vmexit->inst_length = 0; 2035 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; 2036 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1); 2037 2038 return (UNHANDLED); 2039} 2040 2041static __inline int 2042vmx_exit_suspended(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2043{ 2044 2045 vmexit->rip = vmcs_guest_rip(); 2046 vmexit->inst_length = 0; 2047 vmexit->exitcode = VM_EXITCODE_SUSPENDED; 2048 return (UNHANDLED); 2049} 2050 2051static __inline int 2052vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2053{ 2054 2055 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2056 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2057 vmxctx->inst_fail_status)); 2058 2059 vmexit->inst_length = 0; 2060 vmexit->exitcode = VM_EXITCODE_VMX; 2061 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2062 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2063 vmexit->u.vmx.exit_reason = ~0; 2064 vmexit->u.vmx.exit_qualification = ~0; 2065 2066 switch (rc) { 2067 case VMX_VMRESUME_ERROR: 2068 case VMX_VMLAUNCH_ERROR: 2069 case VMX_INVEPT_ERROR: 2070 vmexit->u.vmx.inst_type = rc; 2071 break; 2072 default: 2073 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2074 } 2075 2076 return (UNHANDLED); 2077} 2078 2079/* 2080 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2081 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2082 * sufficient to simply vector to the NMI handler via a software interrupt. 2083 * However, this must be done before maskable interrupts are enabled 2084 * otherwise the "iret" issued by an interrupt handler will incorrectly 2085 * clear NMI blocking. 2086 */ 2087static __inline void 2088vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2089{ 2090 uint32_t intr_info; 2091 2092 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2093 2094 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2095 return; 2096 2097 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2098 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2099 ("VM exit interruption info invalid: %#x", intr_info)); 2100 2101 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2102 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2103 "to NMI has invalid vector: %#x", intr_info)); 2104 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2105 __asm __volatile("int $2"); 2106 } 2107} 2108 2109static int 2110vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap, 2111 void *rendezvous_cookie, void *suspend_cookie) 2112{ 2113 int rc, handled, launched; 2114 struct vmx *vmx; 2115 struct vm *vm; 2116 struct vmxctx *vmxctx; 2117 struct vmcs *vmcs; 2118 struct vm_exit *vmexit; 2119 struct vlapic *vlapic; 2120 uint64_t rip; 2121 uint32_t exit_reason; 2122 2123 vmx = arg; 2124 vm = vmx->vm; 2125 vmcs = &vmx->vmcs[vcpu]; 2126 vmxctx = &vmx->ctx[vcpu]; 2127 vlapic = vm_lapic(vm, vcpu); 2128 vmexit = vm_exitinfo(vm, vcpu); 2129 launched = 0; 2130 2131 KASSERT(vmxctx->pmap == pmap, 2132 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2133 2134 VMPTRLD(vmcs); 2135 2136 /* 2137 * XXX 2138 * We do this every time because we may setup the virtual machine 2139 * from a different process than the one that actually runs it. 2140 * 2141 * If the life of a virtual machine was spent entirely in the context 2142 * of a single process we could do this once in vmx_vminit(). 2143 */ 2144 vmcs_write(VMCS_HOST_CR3, rcr3()); 2145 2146 vmcs_write(VMCS_GUEST_RIP, startrip); 2147 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2148 do { 2149 /* 2150 * Interrupts are disabled from this point on until the 2151 * guest starts executing. This is done for the following 2152 * reasons: 2153 * 2154 * If an AST is asserted on this thread after the check below, 2155 * then the IPI_AST notification will not be lost, because it 2156 * will cause a VM exit due to external interrupt as soon as 2157 * the guest state is loaded. 2158 * 2159 * A posted interrupt after 'vmx_inject_interrupts()' will 2160 * not be "lost" because it will be held pending in the host 2161 * APIC because interrupts are disabled. The pending interrupt 2162 * will be recognized as soon as the guest state is loaded. 2163 * 2164 * The same reasoning applies to the IPI generated by 2165 * pmap_invalidate_ept(). 2166 */ 2167 disable_intr(); 2168 if (vcpu_suspended(suspend_cookie)) { 2169 enable_intr(); 2170 handled = vmx_exit_suspended(vmx, vcpu, vmexit); 2171 break; 2172 } 2173 2174 if (vcpu_rendezvous_pending(rendezvous_cookie)) { 2175 enable_intr(); 2176 handled = vmx_exit_rendezvous(vmx, vcpu, vmexit); 2177 break; 2178 } 2179 2180 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 2181 enable_intr(); 2182 handled = vmx_exit_astpending(vmx, vcpu, vmexit); 2183 break; 2184 } 2185 2186 vmx_inject_interrupts(vmx, vcpu, vlapic); 2187 vmx_run_trace(vmx, vcpu); 2188 rc = vmx_enter_guest(vmxctx, vmx, launched); 2189 2190 /* Collect some information for VM exit processing */ 2191 vmexit->rip = rip = vmcs_guest_rip(); 2192 vmexit->inst_length = vmexit_instruction_length(); 2193 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2194 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2195 2196 if (rc == VMX_GUEST_VMEXIT) { 2197 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2198 enable_intr(); 2199 handled = vmx_exit_process(vmx, vcpu, vmexit); 2200 } else { 2201 enable_intr(); 2202 handled = vmx_exit_inst_error(vmxctx, rc, vmexit); 2203 } 2204 launched = 1; 2205 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 2206 } while (handled); 2207 2208 /* 2209 * If a VM exit has been handled then the exitcode must be BOGUS 2210 * If a VM exit is not handled then the exitcode must not be BOGUS 2211 */ 2212 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 2213 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 2214 panic("Mismatch between handled (%d) and exitcode (%d)", 2215 handled, vmexit->exitcode); 2216 } 2217 2218 if (!handled) 2219 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 2220 2221 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 2222 vmexit->exitcode); 2223 2224 VMCLEAR(vmcs); 2225 return (0); 2226} 2227 2228static void 2229vmx_vmcleanup(void *arg) 2230{ 2231 int i, error; 2232 struct vmx *vmx = arg; 2233 2234 if (apic_access_virtualization(vmx, 0)) 2235 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2236 2237 for (i = 0; i < VM_MAXCPU; i++) 2238 vpid_free(vmx->state[i].vpid); 2239 2240 /* 2241 * XXXSMP we also need to clear the VMCS active on the other vcpus. 2242 */ 2243 error = vmclear(&vmx->vmcs[0]); 2244 if (error != 0) 2245 panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error); 2246 2247 free(vmx, M_VMX); 2248 2249 return; 2250} 2251 2252static register_t * 2253vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2254{ 2255 2256 switch (reg) { 2257 case VM_REG_GUEST_RAX: 2258 return (&vmxctx->guest_rax); 2259 case VM_REG_GUEST_RBX: 2260 return (&vmxctx->guest_rbx); 2261 case VM_REG_GUEST_RCX: 2262 return (&vmxctx->guest_rcx); 2263 case VM_REG_GUEST_RDX: 2264 return (&vmxctx->guest_rdx); 2265 case VM_REG_GUEST_RSI: 2266 return (&vmxctx->guest_rsi); 2267 case VM_REG_GUEST_RDI: 2268 return (&vmxctx->guest_rdi); 2269 case VM_REG_GUEST_RBP: 2270 return (&vmxctx->guest_rbp); 2271 case VM_REG_GUEST_R8: 2272 return (&vmxctx->guest_r8); 2273 case VM_REG_GUEST_R9: 2274 return (&vmxctx->guest_r9); 2275 case VM_REG_GUEST_R10: 2276 return (&vmxctx->guest_r10); 2277 case VM_REG_GUEST_R11: 2278 return (&vmxctx->guest_r11); 2279 case VM_REG_GUEST_R12: 2280 return (&vmxctx->guest_r12); 2281 case VM_REG_GUEST_R13: 2282 return (&vmxctx->guest_r13); 2283 case VM_REG_GUEST_R14: 2284 return (&vmxctx->guest_r14); 2285 case VM_REG_GUEST_R15: 2286 return (&vmxctx->guest_r15); 2287 default: 2288 break; 2289 } 2290 return (NULL); 2291} 2292 2293static int 2294vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 2295{ 2296 register_t *regp; 2297 2298 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 2299 *retval = *regp; 2300 return (0); 2301 } else 2302 return (EINVAL); 2303} 2304 2305static int 2306vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 2307{ 2308 register_t *regp; 2309 2310 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 2311 *regp = val; 2312 return (0); 2313 } else 2314 return (EINVAL); 2315} 2316 2317static int 2318vmx_shadow_reg(int reg) 2319{ 2320 int shreg; 2321 2322 shreg = -1; 2323 2324 switch (reg) { 2325 case VM_REG_GUEST_CR0: 2326 shreg = VMCS_CR0_SHADOW; 2327 break; 2328 case VM_REG_GUEST_CR4: 2329 shreg = VMCS_CR4_SHADOW; 2330 break; 2331 default: 2332 break; 2333 } 2334 2335 return (shreg); 2336} 2337 2338static int 2339vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 2340{ 2341 int running, hostcpu; 2342 struct vmx *vmx = arg; 2343 2344 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2345 if (running && hostcpu != curcpu) 2346 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 2347 2348 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0) 2349 return (0); 2350 2351 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval)); 2352} 2353 2354static int 2355vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 2356{ 2357 int error, hostcpu, running, shadow; 2358 uint64_t ctls; 2359 struct vmx *vmx = arg; 2360 2361 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2362 if (running && hostcpu != curcpu) 2363 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 2364 2365 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0) 2366 return (0); 2367 2368 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val); 2369 2370 if (error == 0) { 2371 /* 2372 * If the "load EFER" VM-entry control is 1 then the 2373 * value of EFER.LMA must be identical to "IA-32e mode guest" 2374 * bit in the VM-entry control. 2375 */ 2376 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 2377 (reg == VM_REG_GUEST_EFER)) { 2378 vmcs_getreg(&vmx->vmcs[vcpu], running, 2379 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 2380 if (val & EFER_LMA) 2381 ctls |= VM_ENTRY_GUEST_LMA; 2382 else 2383 ctls &= ~VM_ENTRY_GUEST_LMA; 2384 vmcs_setreg(&vmx->vmcs[vcpu], running, 2385 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 2386 } 2387 2388 shadow = vmx_shadow_reg(reg); 2389 if (shadow > 0) { 2390 /* 2391 * Store the unmodified value in the shadow 2392 */ 2393 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 2394 VMCS_IDENT(shadow), val); 2395 } 2396 } 2397 2398 return (error); 2399} 2400 2401static int 2402vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2403{ 2404 struct vmx *vmx = arg; 2405 2406 return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc)); 2407} 2408 2409static int 2410vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2411{ 2412 struct vmx *vmx = arg; 2413 2414 return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc)); 2415} 2416 2417static int 2418vmx_getcap(void *arg, int vcpu, int type, int *retval) 2419{ 2420 struct vmx *vmx = arg; 2421 int vcap; 2422 int ret; 2423 2424 ret = ENOENT; 2425 2426 vcap = vmx->cap[vcpu].set; 2427 2428 switch (type) { 2429 case VM_CAP_HALT_EXIT: 2430 if (cap_halt_exit) 2431 ret = 0; 2432 break; 2433 case VM_CAP_PAUSE_EXIT: 2434 if (cap_pause_exit) 2435 ret = 0; 2436 break; 2437 case VM_CAP_MTRAP_EXIT: 2438 if (cap_monitor_trap) 2439 ret = 0; 2440 break; 2441 case VM_CAP_UNRESTRICTED_GUEST: 2442 if (cap_unrestricted_guest) 2443 ret = 0; 2444 break; 2445 case VM_CAP_ENABLE_INVPCID: 2446 if (cap_invpcid) 2447 ret = 0; 2448 break; 2449 default: 2450 break; 2451 } 2452 2453 if (ret == 0) 2454 *retval = (vcap & (1 << type)) ? 1 : 0; 2455 2456 return (ret); 2457} 2458 2459static int 2460vmx_setcap(void *arg, int vcpu, int type, int val) 2461{ 2462 struct vmx *vmx = arg; 2463 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 2464 uint32_t baseval; 2465 uint32_t *pptr; 2466 int error; 2467 int flag; 2468 int reg; 2469 int retval; 2470 2471 retval = ENOENT; 2472 pptr = NULL; 2473 2474 switch (type) { 2475 case VM_CAP_HALT_EXIT: 2476 if (cap_halt_exit) { 2477 retval = 0; 2478 pptr = &vmx->cap[vcpu].proc_ctls; 2479 baseval = *pptr; 2480 flag = PROCBASED_HLT_EXITING; 2481 reg = VMCS_PRI_PROC_BASED_CTLS; 2482 } 2483 break; 2484 case VM_CAP_MTRAP_EXIT: 2485 if (cap_monitor_trap) { 2486 retval = 0; 2487 pptr = &vmx->cap[vcpu].proc_ctls; 2488 baseval = *pptr; 2489 flag = PROCBASED_MTF; 2490 reg = VMCS_PRI_PROC_BASED_CTLS; 2491 } 2492 break; 2493 case VM_CAP_PAUSE_EXIT: 2494 if (cap_pause_exit) { 2495 retval = 0; 2496 pptr = &vmx->cap[vcpu].proc_ctls; 2497 baseval = *pptr; 2498 flag = PROCBASED_PAUSE_EXITING; 2499 reg = VMCS_PRI_PROC_BASED_CTLS; 2500 } 2501 break; 2502 case VM_CAP_UNRESTRICTED_GUEST: 2503 if (cap_unrestricted_guest) { 2504 retval = 0; 2505 pptr = &vmx->cap[vcpu].proc_ctls2; 2506 baseval = *pptr; 2507 flag = PROCBASED2_UNRESTRICTED_GUEST; 2508 reg = VMCS_SEC_PROC_BASED_CTLS; 2509 } 2510 break; 2511 case VM_CAP_ENABLE_INVPCID: 2512 if (cap_invpcid) { 2513 retval = 0; 2514 pptr = &vmx->cap[vcpu].proc_ctls2; 2515 baseval = *pptr; 2516 flag = PROCBASED2_ENABLE_INVPCID; 2517 reg = VMCS_SEC_PROC_BASED_CTLS; 2518 } 2519 break; 2520 default: 2521 break; 2522 } 2523 2524 if (retval == 0) { 2525 if (val) { 2526 baseval |= flag; 2527 } else { 2528 baseval &= ~flag; 2529 } 2530 VMPTRLD(vmcs); 2531 error = vmwrite(reg, baseval); 2532 VMCLEAR(vmcs); 2533 2534 if (error) { 2535 retval = error; 2536 } else { 2537 /* 2538 * Update optional stored flags, and record 2539 * setting 2540 */ 2541 if (pptr != NULL) { 2542 *pptr = baseval; 2543 } 2544 2545 if (val) { 2546 vmx->cap[vcpu].set |= (1 << type); 2547 } else { 2548 vmx->cap[vcpu].set &= ~(1 << type); 2549 } 2550 } 2551 } 2552 2553 return (retval); 2554} 2555 2556struct vlapic_vtx { 2557 struct vlapic vlapic; 2558 struct pir_desc *pir_desc; 2559 struct vmx *vmx; 2560}; 2561 2562#define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 2563do { \ 2564 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 2565 level ? "level" : "edge", vector); \ 2566 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 2567 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 2568 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 2569 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 2570 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 2571} while (0) 2572 2573/* 2574 * vlapic->ops handlers that utilize the APICv hardware assist described in 2575 * Chapter 29 of the Intel SDM. 2576 */ 2577static int 2578vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 2579{ 2580 struct vlapic_vtx *vlapic_vtx; 2581 struct pir_desc *pir_desc; 2582 uint64_t mask; 2583 int idx, notify; 2584 2585 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2586 pir_desc = vlapic_vtx->pir_desc; 2587 2588 /* 2589 * Keep track of interrupt requests in the PIR descriptor. This is 2590 * because the virtual APIC page pointed to by the VMCS cannot be 2591 * modified if the vcpu is running. 2592 */ 2593 idx = vector / 64; 2594 mask = 1UL << (vector % 64); 2595 atomic_set_long(&pir_desc->pir[idx], mask); 2596 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1); 2597 2598 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 2599 level, "vmx_set_intr_ready"); 2600 return (notify); 2601} 2602 2603static int 2604vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 2605{ 2606 struct vlapic_vtx *vlapic_vtx; 2607 struct pir_desc *pir_desc; 2608 struct LAPIC *lapic; 2609 uint64_t pending, pirval; 2610 uint32_t ppr, vpr; 2611 int i; 2612 2613 /* 2614 * This function is only expected to be called from the 'HLT' exit 2615 * handler which does not care about the vector that is pending. 2616 */ 2617 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 2618 2619 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2620 pir_desc = vlapic_vtx->pir_desc; 2621 2622 pending = atomic_load_acq_long(&pir_desc->pending); 2623 if (!pending) 2624 return (0); /* common case */ 2625 2626 /* 2627 * If there is an interrupt pending then it will be recognized only 2628 * if its priority is greater than the processor priority. 2629 * 2630 * Special case: if the processor priority is zero then any pending 2631 * interrupt will be recognized. 2632 */ 2633 lapic = vlapic->apic_page; 2634 ppr = lapic->ppr & 0xf0; 2635 if (ppr == 0) 2636 return (1); 2637 2638 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 2639 lapic->ppr); 2640 2641 for (i = 3; i >= 0; i--) { 2642 pirval = pir_desc->pir[i]; 2643 if (pirval != 0) { 2644 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0; 2645 return (vpr > ppr); 2646 } 2647 } 2648 return (0); 2649} 2650 2651static void 2652vmx_intr_accepted(struct vlapic *vlapic, int vector) 2653{ 2654 2655 panic("vmx_intr_accepted: not expected to be called"); 2656} 2657 2658static void 2659vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) 2660{ 2661 struct vlapic_vtx *vlapic_vtx; 2662 struct vmx *vmx; 2663 struct vmcs *vmcs; 2664 uint64_t mask, val; 2665 2666 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); 2667 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL), 2668 ("vmx_set_tmr: vcpu cannot be running")); 2669 2670 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2671 vmx = vlapic_vtx->vmx; 2672 vmcs = &vmx->vmcs[vlapic->vcpuid]; 2673 mask = 1UL << (vector % 64); 2674 2675 VMPTRLD(vmcs); 2676 val = vmcs_read(VMCS_EOI_EXIT(vector)); 2677 if (level) 2678 val |= mask; 2679 else 2680 val &= ~mask; 2681 vmcs_write(VMCS_EOI_EXIT(vector), val); 2682 VMCLEAR(vmcs); 2683} 2684 2685static void 2686vmx_enable_x2apic_mode(struct vlapic *vlapic) 2687{ 2688 struct vmx *vmx; 2689 struct vmcs *vmcs; 2690 uint32_t proc_ctls2; 2691 int vcpuid, error; 2692 2693 vcpuid = vlapic->vcpuid; 2694 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 2695 vmcs = &vmx->vmcs[vcpuid]; 2696 2697 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2698 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 2699 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 2700 2701 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 2702 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 2703 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 2704 2705 VMPTRLD(vmcs); 2706 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 2707 VMCLEAR(vmcs); 2708 2709 if (vlapic->vcpuid == 0) { 2710 /* 2711 * The nested page table mappings are shared by all vcpus 2712 * so unmap the APIC access page just once. 2713 */ 2714 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2715 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 2716 __func__, error)); 2717 2718 /* 2719 * The MSR bitmap is shared by all vcpus so modify it only 2720 * once in the context of vcpu 0. 2721 */ 2722 error = vmx_allow_x2apic_msrs(vmx); 2723 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 2724 __func__, error)); 2725 } 2726} 2727 2728static void 2729vmx_post_intr(struct vlapic *vlapic, int hostcpu) 2730{ 2731 2732 ipi_cpu(hostcpu, pirvec); 2733} 2734 2735/* 2736 * Transfer the pending interrupts in the PIR descriptor to the IRR 2737 * in the virtual APIC page. 2738 */ 2739static void 2740vmx_inject_pir(struct vlapic *vlapic) 2741{ 2742 struct vlapic_vtx *vlapic_vtx; 2743 struct pir_desc *pir_desc; 2744 struct LAPIC *lapic; 2745 uint64_t val, pirval; 2746 int rvi, pirbase = -1; 2747 uint16_t intr_status_old, intr_status_new; 2748 2749 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2750 pir_desc = vlapic_vtx->pir_desc; 2751 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 2752 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 2753 "no posted interrupt pending"); 2754 return; 2755 } 2756 2757 pirval = 0; 2758 lapic = vlapic->apic_page; 2759 2760 val = atomic_readandclear_long(&pir_desc->pir[0]); 2761 if (val != 0) { 2762 lapic->irr0 |= val; 2763 lapic->irr1 |= val >> 32; 2764 pirbase = 0; 2765 pirval = val; 2766 } 2767 2768 val = atomic_readandclear_long(&pir_desc->pir[1]); 2769 if (val != 0) { 2770 lapic->irr2 |= val; 2771 lapic->irr3 |= val >> 32; 2772 pirbase = 64; 2773 pirval = val; 2774 } 2775 2776 val = atomic_readandclear_long(&pir_desc->pir[2]); 2777 if (val != 0) { 2778 lapic->irr4 |= val; 2779 lapic->irr5 |= val >> 32; 2780 pirbase = 128; 2781 pirval = val; 2782 } 2783 2784 val = atomic_readandclear_long(&pir_desc->pir[3]); 2785 if (val != 0) { 2786 lapic->irr6 |= val; 2787 lapic->irr7 |= val >> 32; 2788 pirbase = 192; 2789 pirval = val; 2790 } 2791 if (pirbase == -1) { 2792 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 2793 "no posted interrupt found"); 2794 return; 2795 } 2796 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 2797 2798 /* 2799 * Update RVI so the processor can evaluate pending virtual 2800 * interrupts on VM-entry. 2801 */ 2802 if (pirval != 0) { 2803 rvi = pirbase + flsl(pirval) - 1; 2804 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 2805 intr_status_new = (intr_status_old & 0xFF00) | rvi; 2806 if (intr_status_new > intr_status_old) { 2807 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); 2808 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 2809 "guest_intr_status changed from 0x%04x to 0x%04x", 2810 intr_status_old, intr_status_new); 2811 } 2812 } 2813} 2814 2815static struct vlapic * 2816vmx_vlapic_init(void *arg, int vcpuid) 2817{ 2818 struct vmx *vmx; 2819 struct vlapic *vlapic; 2820 struct vlapic_vtx *vlapic_vtx; 2821 2822 vmx = arg; 2823 2824 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); 2825 vlapic->vm = vmx->vm; 2826 vlapic->vcpuid = vcpuid; 2827 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 2828 2829 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2830 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 2831 vlapic_vtx->vmx = vmx; 2832 2833 if (virtual_interrupt_delivery) { 2834 vlapic->ops.set_intr_ready = vmx_set_intr_ready; 2835 vlapic->ops.pending_intr = vmx_pending_intr; 2836 vlapic->ops.intr_accepted = vmx_intr_accepted; 2837 vlapic->ops.set_tmr = vmx_set_tmr; 2838 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode; 2839 } 2840 2841 if (posted_interrupts) 2842 vlapic->ops.post_intr = vmx_post_intr; 2843 2844 vlapic_init(vlapic); 2845 2846 return (vlapic); 2847} 2848 2849static void 2850vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2851{ 2852 2853 vlapic_cleanup(vlapic); 2854 free(vlapic, M_VLAPIC); 2855} 2856 2857struct vmm_ops vmm_ops_intel = { 2858 vmx_init, 2859 vmx_cleanup, 2860 vmx_restore, 2861 vmx_vminit, 2862 vmx_run, 2863 vmx_vmcleanup, 2864 vmx_getreg, 2865 vmx_setreg, 2866 vmx_getdesc, 2867 vmx_setdesc, 2868 vmx_getcap, 2869 vmx_setcap, 2870 ept_vmspace_alloc, 2871 ept_vmspace_free, 2872 vmx_vlapic_init, 2873 vmx_vlapic_cleanup, 2874}; 2875