/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 249879 2013-04-25 04:56:43Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 249879 2013-04-25 04:56:43Z grehan $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <x86/apicreg.h>

#include <machine/vmm.h>
#include "vmm_host.h"
#include "vmm_lapic.h"
#include "vmm_msr.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"

#include "vmx_msr.h"
#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING |		\
	 PINBASED_NMI_EXITING |			\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING |		\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS |		\
	 PROCBASED_IO_EXITING |			\
	 PROCBASED_MSR_BITMAPS |		\
	 PROCBASED_CTLS_WINDOW_SETTING)
#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING |		\
	 PROCBASED_CR3_STORE_EXITING |		\
	 PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING_NO_PAT		\
	(VM_EXIT_HOST_LMA |			\
	 VM_EXIT_SAVE_EFER |			\
	 VM_EXIT_LOAD_EFER)

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_EXIT_SAVE_PAT |			\
	 VM_EXIT_LOAD_PAT)
#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_ENTRY_LOAD_PAT)
#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |		\
	 VM_ENTRY_INTO_SMM |			\
	 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
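/*
 * A note on the ONE_SETTING/ZERO_SETTING pairs above: they are validated
 * against the VMX capability MSRs by vmx_set_ctlreg() in vmx_msr.c.  The
 * following is only a rough sketch of the rule being enforced, not the
 * actual helper: a capability MSR reports the control bits that must be 1
 * in its low 32 bits and the bits that are allowed to be 1 in its high
 * 32 bits.
 */
#if 0
	uint64_t cap = rdmsr(cap_msr);		/* e.g. MSR_VMX_PINBASED_CTLS */
	uint32_t must_be_one = (uint32_t)cap;		/* allowed 0-settings */
	uint32_t may_be_one = (uint32_t)(cap >> 32);	/* allowed 1-settings */

	if ((ones_setting & may_be_one) != ones_setting)
		return (EINVAL);	/* a required bit cannot be set */
	if ((zeros_setting & must_be_one) != 0)
		return (EINVAL);	/* a bit we need clear is forced on */
#endif
/*
 * The MSR_VMX_TRUE_* variants consulted below carry the same information
 * but allow certain default1 bits to be cleared on newer processors.
 */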
#define	guest_msr_rw(vmx, msr) \
	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)

#define	HANDLED		1
#define	UNHANDLED	0

MALLOC_DEFINE(M_VMX, "vmx", "vmx");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
    &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
    &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
    &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
    &cr4_zeros_mask, 0, NULL);

static volatile u_int nextvpid;

static int vmx_no_patmsr;

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
    &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Virtual NMI blocking conditions.
 *
 * Some processor implementations also require NMI to be blocked if
 * the STI_BLOCKING bit is set. It is possible to detect this at runtime
 * based on the (exit_reason,exit_qual) tuple being set to
 * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
 *
 * We take the easy way out and also include STI_BLOCKING as one of the
 * gating items for vNMI injection.
 */
static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
				    VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
				    VMCS_INTERRUPTIBILITY_STI_BLOCKING;

/*
 * Optional capabilities
 */
static int cap_halt_exit;
static int cap_pause_exit;
static int cap_unrestricted_guest;
static int cap_monitor_trap;

/* statistics */
static VMM_STAT_INTEL(VMEXIT_HLT_IGNORED, "number of times hlt was ignored");

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE:
		return "mce";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC:
		return "apic";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}

#ifdef SETJMP_TRACE
static const char *
vmx_setjmp_rc2str(int rc)
{
	switch (rc) {
	case VMX_RETURN_DIRECT:
		return "direct";
	case VMX_RETURN_LONGJMP:
		return "longjmp";
	case VMX_RETURN_VMRESUME:
		return "vmresume";
	case VMX_RETURN_VMLAUNCH:
		return "vmlaunch";
	case VMX_RETURN_AST:
		return "ast";
	default:
		return "unknown";
	}
}

#define	SETJMP_TRACE(vmx, vcpu, vmxctx, regname)			   \
	VMM_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx", \
		 (vmxctx)->regname)

static void
vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
{
	uint64_t host_rip, host_rsp;

	if (vmxctx != &vmx->ctx[vcpu])
		panic("vmx_setjmp_trace: invalid vmxctx %p; should be %p",
		    vmxctx, &vmx->ctx[vcpu]);

	VMM_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
	VMM_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
	    vmx_setjmp_rc2str(rc), rc);

	host_rsp = host_rip = ~0;
	vmread(VMCS_HOST_RIP, &host_rip);
	vmread(VMCS_HOST_RSP, &host_rsp);
	VMM_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp 0x%016lx",
	    host_rip, host_rsp);

	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r15);
	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r14);
	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r13);
	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r12);
	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbp);
	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rsp);
	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbx);
	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rip);

	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdi);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rsi);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdx);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rcx);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r8);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r9);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rax);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbx);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbp);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r10);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r11);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r12);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r13);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r14);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r15);
	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_cr2);
}
#endif
#else
static void __inline
vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
{
	return;
}
#endif	/* KTR */

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}
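/*
 * For reference, the fixed-bit semantics behind the cr0/cr4 masks used
 * above.  The masks themselves are computed in vmx_init() below from the
 * MSR_VMX_CR0/CR4_FIXED0 and FIXED1 MSRs, per the Intel SDM:
 *
 *	FIXED0	FIXED1	meaning
 *	   1	   1	bit must be 1 in VMX operation -> ones_mask
 *	   0	   0	bit must be 0 in VMX operation -> zeros_mask
 *	   0	   1	bit is flexible
 *
 * For example, CR4.VMXE is reported as 1 by both fixed MSRs, so it ends
 * up in cr4_ones_mask and vmx_fix_cr4() forces it on.
 */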
static void
msr_save_area_init(struct msr_entry *g_area, int *g_count)
{
	int cnt;

	static struct msr_entry guest_msrs[] = {
		{ MSR_KGSBASE, 0, 0 },
	};

	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
	if (cnt > GUEST_MSR_MAX_ENTRIES)
		panic("guest msr save area overrun");
	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
	*g_count = cnt;
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures, so do it explicitly here. This avoids
		 * potential retention of cached information in the TLB
		 * between distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static int
vmx_init(void)
{
	int error;
	uint64_t fixed0, fixed1, feature_control;
	uint32_t tmp;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		    "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		/* Try again without the PAT MSR bits */
		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
		    MSR_VMX_TRUE_EXIT_CTLS,
		    VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
		    VM_EXIT_CTLS_ZERO_SETTING,
		    &exit_ctls);
		if (error) {
			printf("vmx_init: processor does not support desired "
			    "exit controls\n");
			return (error);
		} else {
			if (bootverbose)
				printf("vmm: PAT MSR access not supported\n");
			guest_msr_valid(MSR_PAT);
			vmx_no_patmsr = 1;
		}
	}

	/* Check support for VM-entry controls */
	if (!vmx_no_patmsr) {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
		    MSR_VMX_TRUE_ENTRY_CTLS,
		    VM_ENTRY_CTLS_ONE_SETTING,
		    VM_ENTRY_CTLS_ZERO_SETTING,
		    &entry_ctls);
	} else {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
		    MSR_VMX_TRUE_ENTRY_CTLS,
		    VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
		    VM_ENTRY_CTLS_ZERO_SETTING,
		    &entry_ctls);
	}

	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_UNRESTRICTED_GUEST, 0,
	    &tmp) == 0);

	/* Initialize EPT */
	error = ept_init();
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

/*
 * If this processor does not support VPIDs then simply return 0.
 *
 * Otherwise generate the next value of VPID to use. Any value is alright
 * as long as it is non-zero.
 *
 * We always execute in VMX non-root context with EPT enabled. Thus all
 * combined mappings are tagged with the (EP4TA, VPID, PCID) tuple. This
 * in turn means that multiple VMs can share the same VPID as long as
 * they have distinct EPT page tables.
 *
 * XXX
 * We should optimize this so that it returns VPIDs that are not in
 * use. Then we will not unnecessarily invalidate mappings in
 * vmx_set_pcpu_defaults() just because two or more vcpus happen to
 * use the same 'vpid'.
 */
static uint16_t
vmx_vpid(void)
{
	uint16_t vpid = 0;

	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) != 0) {
		do {
			vpid = atomic_fetchadd_int(&nextvpid, 1);
		} while (vpid == 0);
	}

	return (vpid);
}

static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value, shadow_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
		shadow_value = cr0_ones_mask;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
		shadow_value = cr4_ones_mask;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), shadow_value);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs)	vmx_setup_cr_shadow(0, (vmcs))
#define	vmx_setup_cr4_shadow(vmcs)	vmx_setup_cr_shadow(4, (vmcs))

static void *
vmx_vminit(struct vm *vm)
{
	uint16_t vpid;
	int i, error, guest_msr_count;
	struct vmx *vmx;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vtophys(vmx->pml4ept));

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * Guest KGSBASE is saved and restored in the guest MSR save area.
	 * Host KGSBASE is restored before returning to userland from the pcb.
	 * There will be a window of time when we are executing in the host
	 * kernel context with a value of KGSBASE from the guest. This is ok
	 * because the value of KGSBASE is inconsequential in kernel context.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_KGSBASE) ||
	    guest_msr_rw(vmx, MSR_EFER))
		panic("vmx_vminit: error setting guest msr access");
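	/*
	 * Illustrative sketch of the bitmap manipulation hiding behind the
	 * guest_msr_rw() calls above.  The real helper is
	 * msr_bitmap_change_access() in vmx_msr.c; the offsets below follow
	 * the 4KB MSR-bitmap layout in the Intel SDM, where a clear bit
	 * means the access does not cause a VM exit:
	 */
#if 0
	/* read bitmap: low MSRs at offset 0, high MSRs at offset 0x400 */
	if (msr <= 0x00001fff)
		byte = msr / 8;
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		byte = 0x400 + (msr - 0xc0000000) / 8;
	else
		return (EINVAL);

	bitmap[byte] &= ~(1 << (msr & 0x7));		/* allow reads */
	bitmap[byte + 0x800] &= ~(1 << (msr & 0x7));	/* allow writes */
#endif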
	/*
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit. However, if running on a system with no
	 * MSR_PAT save/restore support, leave access disabled so accesses
	 * will be trapped.
	 */
	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
		panic("vmx_vminit: error setting guest pat msr access");

	for (i = 0; i < VM_MAXCPU; i++) {
		vmx->vmcs[i].identifier = vmx_revision();
		error = vmclear(&vmx->vmcs[i]);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			    error, i);
		}

		vpid = vmx_vpid();

		error = vmcs_set_defaults(&vmx->vmcs[i],
		    (u_long)vmx_longjmp,
		    (u_long)&vmx->ctx[i],
		    vtophys(vmx->pml4ept),
		    pinbased_ctls,
		    procbased_ctls,
		    procbased_ctls2,
		    exit_ctls, entry_ctls,
		    vtophys(vmx->msr_bitmap),
		    vpid);

		if (error != 0)
			panic("vmx_vminit: vmcs_set_defaults error %d", error);

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;

		vmx->state[i].lastcpu = -1;
		vmx->state[i].vpid = vpid;

		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);

		error = vmcs_set_msr_save(&vmx->vmcs[i],
		    vtophys(vmx->guest_msrs[i]),
		    guest_msr_count);
		if (error != 0)
			panic("vmcs_set_msr_save error %d", error);

		error = vmx_setup_cr0_shadow(&vmx->vmcs[i]);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(&vmx->vmcs[i]);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
	    (uint32_t*)(&vmxctx->guest_rax),
	    (uint32_t*)(&vmxctx->guest_rbx),
	    (uint32_t*)(&vmxctx->guest_rcx),
	    (uint32_t*)(&vmxctx->guest_rdx));
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VMM_CTR1(vmx->vm, vcpu, "Resume execution at 0x%0lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
	       int handled)
{
#ifdef KTR
	VMM_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
	    handled ? "handled" : "unhandled",
	    exit_reason_to_str(exit_reason), rip);
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VMM_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static int
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	int error, lastcpu;
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc = { 0 };

	vmxstate = &vmx->state[vcpu];
	lastcpu = vmxstate->lastcpu;
	vmxstate->lastcpu = curcpu;

	if (lastcpu == curcpu) {
		error = 0;
		goto done;
	}

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	error = vmwrite(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	if (error != 0)
		goto done;

	error = vmwrite(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	if (error != 0)
		goto done;

	error = vmwrite(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	if (error != 0)
		goto done;

	/*
	 * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run.
	 * So we must assume that the mappings associated with 'vpid' on
	 * 'curcpu' are stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
	 */
	if (vmxstate->vpid != 0) {
		invvpid_desc.vpid = vmxstate->vpid;
		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
	}
done:
	return (error);
}

static void
vm_exit_update_rip(struct vm_exit *vmexit)
{
	int error;

	error = vmwrite(VMCS_GUEST_RIP, vmexit->rip + vmexit->inst_length);
	if (error)
		panic("vmx_run: error %d writing to VMCS_GUEST_RIP", error);
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	int error;

	vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;

	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	if (error)
		panic("vmx_set_int_window_exiting: vmwrite error %d", error);
}

static void __inline
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	int error;

	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;

	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	if (error)
		panic("vmx_clear_int_window_exiting: vmwrite error %d", error);
}

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	int error;

	vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;

	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	if (error)
		panic("vmx_set_nmi_window_exiting: vmwrite error %d", error);
}

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	int error;

	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;

	error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	if (error)
		panic("vmx_clear_nmi_window_exiting: vmwrite error %d", error);
}

static int
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	int error;
	uint64_t info, interruptibility;

	/* Bail out if no NMI requested */
	if (!vm_nmi_pending(vmx->vm, vcpu))
		return (0);

	error = vmread(VMCS_GUEST_INTERRUPTIBILITY, &interruptibility);
	if (error) {
		panic("vmx_inject_nmi: vmread(interruptibility) %d",
		    error);
	}
	if (interruptibility & nmi_blocking_bits)
		goto nmiblocked;

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	info = VMCS_INTERRUPTION_INFO_NMI | VMCS_INTERRUPTION_INFO_VALID;
	info |= IDT_NMI;

	error = vmwrite(VMCS_ENTRY_INTR_INFO, info);
	if (error)
		panic("vmx_inject_nmi: vmwrite(intrinfo) %d", error);

	VMM_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
	return (1);

nmiblocked:
	/*
	 * Set the NMI Window Exiting execution control so we can inject
	 * the virtual NMI as soon as the blocking condition goes away.
	 */
	vmx_set_nmi_window_exiting(vmx, vcpu);

	VMM_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	return (1);
}
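/*
 * A note on the window-exiting pattern used by vmx_inject_nmi() above and
 * vmx_inject_interrupts() below: when an event cannot be injected
 * immediately, the corresponding "window exiting" execution control is set
 * so that the processor forces a VM exit as soon as the guest is able to
 * accept the event.  The EXIT_REASON_NMI_WINDOW and EXIT_REASON_INTR_WINDOW
 * cases in vmx_exit_process() clear the control again and the event is
 * injected on the subsequent VM entry.
 */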
static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu)
{
	int error, vector;
	uint64_t info, rflags, interruptibility;

	const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
				   VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;

	/*
	 * If there is already an interrupt pending then just return.
	 *
	 * This could happen if an interrupt was injected on a prior
	 * VM entry but the actual entry into guest mode was aborted
	 * because of a pending AST.
	 */
	error = vmread(VMCS_ENTRY_INTR_INFO, &info);
	if (error)
		panic("vmx_inject_interrupts: vmread(intrinfo) %d", error);
	if (info & VMCS_INTERRUPTION_INFO_VALID)
		return;

	/*
	 * NMI injection has priority so deal with those first
	 */
	if (vmx_inject_nmi(vmx, vcpu))
		return;

	/* Ask the local apic for a vector to inject */
	vector = lapic_pending_intr(vmx->vm, vcpu);
	if (vector < 0)
		return;

	if (vector < 32 || vector > 255)
		panic("vmx_inject_interrupts: invalid vector %d\n", vector);

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	error = vmread(VMCS_GUEST_RFLAGS, &rflags);
	if (error)
		panic("vmx_inject_interrupts: vmread(rflags) %d", error);

	if ((rflags & PSL_I) == 0)
		goto cantinject;

	error = vmread(VMCS_GUEST_INTERRUPTIBILITY, &interruptibility);
	if (error) {
		panic("vmx_inject_interrupts: vmread(interruptibility) %d",
		    error);
	}
	if (interruptibility & HWINTR_BLOCKED)
		goto cantinject;

	/* Inject the interrupt */
	info = VMCS_INTERRUPTION_INFO_HW_INTR | VMCS_INTERRUPTION_INFO_VALID;
	info |= vector;
	error = vmwrite(VMCS_ENTRY_INTR_INFO, info);
	if (error)
		panic("vmx_inject_interrupts: vmwrite(intrinfo) %d", error);

	/* Update the Local APIC ISR */
	lapic_intr_accepted(vmx->vm, vcpu, vector);

	VMM_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as the blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);

	VMM_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
}

static int
vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	int error, cr, vmcs_guest_cr;
	uint64_t regval, ones_mask, zeros_mask;
	const struct vmxctx *vmxctx;

	/* We only handle mov to %cr0 or %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	cr = exitqual & 0xf;
	if (cr != 0 && cr != 4)
		return (UNHANDLED);

	vmxctx = &vmx->ctx[vcpu];

	/*
	 * We must use vmwrite() directly here because vmcs_setreg() will
	 * call vmclear(vmcs) as a side-effect which we certainly don't want.
	 */
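	/*
	 * For reference, the exit qualification layout for control-register
	 * accesses (Intel SDM, Vol 3) that this function relies on:
	 *
	 *	bits 3:0	number of the control register
	 *	bits 5:4	access type (0 = MOV to CR)
	 *	bits 11:8	source general-purpose register
	 *
	 * Hence the (exitqual & 0xf0) test above and the (exitqual >> 8)
	 * switch below.
	 */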
	switch ((exitqual >> 8) & 0xf) {
	case 0:
		regval = vmxctx->guest_rax;
		break;
	case 1:
		regval = vmxctx->guest_rcx;
		break;
	case 2:
		regval = vmxctx->guest_rdx;
		break;
	case 3:
		regval = vmxctx->guest_rbx;
		break;
	case 4:
		error = vmread(VMCS_GUEST_RSP, &regval);
		if (error) {
			panic("vmx_emulate_cr_access: "
			    "error %d reading guest rsp", error);
		}
		break;
	case 5:
		regval = vmxctx->guest_rbp;
		break;
	case 6:
		regval = vmxctx->guest_rsi;
		break;
	case 7:
		regval = vmxctx->guest_rdi;
		break;
	case 8:
		regval = vmxctx->guest_r8;
		break;
	case 9:
		regval = vmxctx->guest_r9;
		break;
	case 10:
		regval = vmxctx->guest_r10;
		break;
	case 11:
		regval = vmxctx->guest_r11;
		break;
	case 12:
		regval = vmxctx->guest_r12;
		break;
	case 13:
		regval = vmxctx->guest_r13;
		break;
	case 14:
		regval = vmxctx->guest_r14;
		break;
	case 15:
		regval = vmxctx->guest_r15;
		break;
	}

	if (cr == 0) {
		ones_mask = cr0_ones_mask;
		zeros_mask = cr0_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR0;
	} else {
		ones_mask = cr4_ones_mask;
		zeros_mask = cr4_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR4;
	}
	regval |= ones_mask;
	regval &= ~zeros_mask;
	error = vmwrite(vmcs_guest_cr, regval);
	if (error) {
		panic("vmx_emulate_cr_access: error %d writing cr%d",
		    error, cr);
	}

	return (HANDLED);
}

static int
vmx_ept_fault(struct vm *vm, int cpu,
	      uint64_t gla, uint64_t gpa, uint64_t rip, int inst_length,
	      uint64_t cr3, uint64_t ept_qual, struct vie *vie)
{
	int read, write, error;

	/* EPT violation on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (UNHANDLED);

	/* EPT violation must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (UNHANDLED);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (UNHANDLED);
	}

	/* Fetch, decode and emulate the faulting instruction */
	if (vmm_fetch_instruction(vm, cpu, rip, inst_length, cr3, vie) != 0)
		return (UNHANDLED);

	if (vmm_decode_instruction(vm, cpu, gla, vie) != 0)
		return (UNHANDLED);

	/*
	 * Check if this is a local apic access
	 */
	if (gpa < DEFAULT_APIC_BASE || gpa >= DEFAULT_APIC_BASE + PAGE_SIZE)
		return (UNHANDLED);

	error = vmm_emulate_instruction(vm, cpu, gpa, vie,
	    lapic_mmio_read, lapic_mmio_write, 0);

	return (error ? UNHANDLED : HANDLED);
}
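/*
 * The EPT_VIOLATION_* qualification bits tested in vmx_ept_fault() follow
 * the Intel SDM encoding: bit 0 = data read, bit 1 = data write, bit 2 =
 * instruction fetch, bit 7 = guest-linear address valid, bit 8 = the access
 * was to the translation of a linear address.  Faults that pass the checks
 * are fetched, decoded and, if they target the local APIC page, emulated in
 * the kernel; anything else is reflected to userland as a VM_EXITCODE_PAGING
 * exit by vmx_exit_process() below.
 */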
static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	int error, handled;
	struct vmcs *vmcs;
	struct vmxctx *vmxctx;
	uint32_t eax, ecx, edx;
	uint64_t qual, gla, gpa, cr3, intr_info;

	handled = 0;
	vmcs = &vmx->vmcs[vcpu];
	vmxctx = &vmx->ctx[vcpu];
	qual = vmexit->u.vmx.exit_qualification;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);

	switch (vmexit->u.vmx.exit_reason) {
	case EXIT_REASON_CR_ACCESS:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
		break;
	case EXIT_REASON_RDMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
		ecx = vmxctx->guest_rcx;
		error = emulate_rdmsr(vmx->vm, vcpu, ecx);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
		} else
			handled = 1;
		break;
	case EXIT_REASON_WRMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
		eax = vmxctx->guest_rax;
		ecx = vmxctx->guest_rcx;
		edx = vmxctx->guest_rdx;
		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
		    (uint64_t)edx << 32 | eax);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
		} else
			handled = 1;
		break;
	case EXIT_REASON_HLT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
		/*
		 * If there is an event waiting to be injected then there is
		 * no need to 'hlt'.
		 */
		error = vmread(VMCS_ENTRY_INTR_INFO, &intr_info);
		if (error)
			panic("vmx_exit_process: vmread(intrinfo) %d", error);

		if (intr_info & VMCS_INTERRUPTION_INFO_VALID) {
			handled = 1;
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT_IGNORED, 1);
		} else
			vmexit->exitcode = VM_EXITCODE_HLT;
		break;
	case EXIT_REASON_MTF:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		break;
	case EXIT_REASON_PAUSE:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		break;
	case EXIT_REASON_INTR_WINDOW:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
		vmx_clear_int_window_exiting(vmx, vcpu);
		VMM_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
		return (1);
	case EXIT_REASON_EXT_INTR:
		/*
		 * External interrupts serve only to cause VM exits and allow
		 * the host interrupt handler to run.
		 *
		 * If this external interrupt triggers a virtual interrupt
		 * to a VM, then that state will be recorded by the
		 * host interrupt handler in the VM's softc. We will inject
		 * this virtual interrupt during the subsequent VM enter.
		 */

		/*
		 * This is special. We want to treat this as a 'handled'
		 * VM-exit but not increment the instruction pointer.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
		return (1);
	case EXIT_REASON_NMI_WINDOW:
		/* Exit to allow the pending virtual NMI to be injected */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
		vmx_clear_nmi_window_exiting(vmx, vcpu);
		VMM_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
		return (1);
	case EXIT_REASON_INOUT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
		vmexit->exitcode = VM_EXITCODE_INOUT;
		vmexit->u.inout.bytes = (qual & 0x7) + 1;
		vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
		vmexit->u.inout.port = (uint16_t)(qual >> 16);
		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
		break;
	case EXIT_REASON_CPUID:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
		break;
	case EXIT_REASON_EPT_FAULT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
		gla = vmcs_gla();
		gpa = vmcs_gpa();
		cr3 = vmcs_guest_cr3();
		handled = vmx_ept_fault(vmx->vm, vcpu, gla, gpa,
		    vmexit->rip, vmexit->inst_length,
		    cr3, qual, &vmexit->u.paging.vie);
		if (!handled) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = gpa;
		}
		break;
	default:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	if (handled) {
		/*
		 * It is possible that control is returned to userland
		 * even though we were able to handle the VM exit in the
		 * kernel.
		 *
		 * In such a case we want to make sure that the userland
		 * restarts guest execution at the instruction *after*
		 * the one we just processed. Therefore we update the
		 * guest rip in the VMCS and in 'vmexit'.
		 */
		vm_exit_update_rip(vmexit);
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;

		/*
		 * Special case for spinning up an AP - exit to userspace to
		 * give the controlling process a chance to intercept and
		 * spin up a thread for the AP.
		 */
		if (vmexit->exitcode == VM_EXITCODE_SPINUP_AP)
			handled = 0;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic VMX exit.
			 */
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.error = 0;
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static int
vmx_run(void *arg, int vcpu, register_t rip)
{
	int error, vie, rc, handled, astpending;
	uint32_t exit_reason;
	struct vmx *vmx;
	struct vmxctx *vmxctx;
	struct vmcs *vmcs;
	struct vm_exit *vmexit;

	vmx = arg;
	vmcs = &vmx->vmcs[vcpu];
	vmxctx = &vmx->ctx[vcpu];
	vmxctx->launched = 0;

	astpending = 0;
	vmexit = vm_exitinfo(vmx->vm, vcpu);

	/*
	 * XXX Can we avoid doing this every time we do a vm run?
	 */
	VMPTRLD(vmcs);

	/*
	 * XXX
	 * We do this every time because we may setup the virtual machine
	 * from a different process than the one that actually runs it.
	 *
	 * If the life of a virtual machine was spent entirely in the context
	 * of a single process we could do this once in vmcs_set_defaults().
	 */
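	/*
	 * A note on the control flow of the loop below: vmx_setjmp() stashes
	 * the host callee-saved registers in 'vmxctx' and returns
	 * VMX_RETURN_DIRECT, after which the guest is entered with
	 * vmx_launch() or vmx_resume().  Because the VMCS host state points
	 * back at vmx_longjmp() (see the vmcs_set_defaults() call in
	 * vmx_vminit()), a VM exit resumes here with vmx_setjmp() appearing
	 * to return a second time, now with VMX_RETURN_LONGJMP.
	 * VMX_RETURN_AST and the VMLAUNCH/VMRESUME failure codes are the
	 * exceptional paths out of that round trip.
	 */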
	if ((error = vmwrite(VMCS_HOST_CR3, rcr3())) != 0)
		panic("vmx_run: error %d writing to VMCS_HOST_CR3", error);

	if ((error = vmwrite(VMCS_GUEST_RIP, rip)) != 0)
		panic("vmx_run: error %d writing to VMCS_GUEST_RIP", error);

	if ((error = vmx_set_pcpu_defaults(vmx, vcpu)) != 0)
		panic("vmx_run: error %d setting up pcpu defaults", error);

	do {
		lapic_timer_tick(vmx->vm, vcpu);
		vmx_inject_interrupts(vmx, vcpu);
		vmx_run_trace(vmx, vcpu);
		rc = vmx_setjmp(vmxctx);
#ifdef SETJMP_TRACE
		vmx_setjmp_trace(vmx, vcpu, vmxctx, rc);
#endif
		switch (rc) {
		case VMX_RETURN_DIRECT:
			if (vmxctx->launched == 0) {
				vmxctx->launched = 1;
				vmx_launch(vmxctx);
			} else
				vmx_resume(vmxctx);
			panic("vmx_launch/resume should not return");
			break;
		case VMX_RETURN_LONGJMP:
			break;		/* vm exit */
		case VMX_RETURN_AST:
			astpending = 1;
			break;
		case VMX_RETURN_VMRESUME:
			vie = vmcs_instruction_error();
			if (vmxctx->launch_error == VM_FAIL_INVALID ||
			    vie != VMRESUME_WITH_NON_LAUNCHED_VMCS) {
				printf("vmresume error %d vmcs inst error %d\n",
				    vmxctx->launch_error, vie);
				goto err_exit;
			}
			vmx_launch(vmxctx);	/* try to launch the guest */
			panic("vmx_launch should not return");
			break;
		case VMX_RETURN_VMLAUNCH:
			vie = vmcs_instruction_error();
#if 1
			printf("vmlaunch error %d vmcs inst error %d\n",
			    vmxctx->launch_error, vie);
#endif
			goto err_exit;
		default:
			panic("vmx_setjmp returned %d", rc);
		}

		/* enable interrupts */
		enable_intr();

		/* collect some basic information for VM exit processing */
		vmexit->rip = rip = vmcs_guest_rip();
		vmexit->inst_length = vmexit_instruction_length();
		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();

		if (astpending) {
			handled = 1;
			vmexit->inst_length = 0;
			vmexit->exitcode = VM_EXITCODE_BOGUS;
			vmx_astpending_trace(vmx, vcpu, rip);
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
			break;
		}

		handled = vmx_exit_process(vmx, vcpu, vmexit);
		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);

	} while (handled);

	/*
	 * If a VM exit has been handled then the exitcode must be BOGUS.
	 * If a VM exit is not handled then the exitcode must not be BOGUS.
	 */
	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
		panic("Mismatch between handled (%d) and exitcode (%d)",
		    handled, vmexit->exitcode);
	}

	if (!handled)
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);

	VMM_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d", vmexit->exitcode);

	/*
	 * XXX
	 * We need to do this to ensure that any VMCS state cached by the
	 * processor is flushed to memory. We need to do this in case the
	 * VM moves to a different cpu the next time it runs.
	 *
	 * Can we avoid doing this?
	 */
	VMCLEAR(vmcs);
	return (0);

err_exit:
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.exit_reason = (uint32_t)-1;
	vmexit->u.vmx.exit_qualification = (uint32_t)-1;
	vmexit->u.vmx.error = vie;
	VMCLEAR(vmcs);
	return (ENOEXEC);
}

static void
vmx_vmcleanup(void *arg)
{
	int error;
	struct vmx *vmx = arg;

	/*
	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
	 */
	error = vmclear(&vmx->vmcs[0]);
	if (error != 0)
		panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);

	ept_vmcleanup(vmx);
	free(vmx, M_VMX);

	return;
}

static register_t *
vmxctx_regptr(struct vmxctx *vmxctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RAX:
		return (&vmxctx->guest_rax);
	case VM_REG_GUEST_RBX:
		return (&vmxctx->guest_rbx);
	case VM_REG_GUEST_RCX:
		return (&vmxctx->guest_rcx);
	case VM_REG_GUEST_RDX:
		return (&vmxctx->guest_rdx);
	case VM_REG_GUEST_RSI:
		return (&vmxctx->guest_rsi);
	case VM_REG_GUEST_RDI:
		return (&vmxctx->guest_rdi);
	case VM_REG_GUEST_RBP:
		return (&vmxctx->guest_rbp);
	case VM_REG_GUEST_R8:
		return (&vmxctx->guest_r8);
	case VM_REG_GUEST_R9:
		return (&vmxctx->guest_r9);
	case VM_REG_GUEST_R10:
		return (&vmxctx->guest_r10);
	case VM_REG_GUEST_R11:
		return (&vmxctx->guest_r11);
	case VM_REG_GUEST_R12:
		return (&vmxctx->guest_r12);
	case VM_REG_GUEST_R13:
		return (&vmxctx->guest_r13);
	case VM_REG_GUEST_R14:
		return (&vmxctx->guest_r14);
	case VM_REG_GUEST_R15:
		return (&vmxctx->guest_r15);
	default:
		break;
	}
	return (NULL);
}

static int
vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*retval = *regp;
		return (0);
	} else
		return (EINVAL);
}

static int
vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*regp = val;
		return (0);
	} else
		return (EINVAL);
}

static int
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
	int running, hostcpu;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
		return (0);

	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
}

static int
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
	int error, hostcpu, running;
	uint64_t ctls;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
		return (0);

	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);

	if (error == 0) {
		/*
		 * If the "load EFER" VM-entry control is 1 then the
		 * value of EFER.LMA must be identical to "IA-32e mode guest"
		 * bit in the VM-entry control.
		 */
		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
		    (reg == VM_REG_GUEST_EFER)) {
			vmcs_getreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
			if (val & EFER_LMA)
				ctls |= VM_ENTRY_GUEST_LMA;
			else
				ctls &= ~VM_ENTRY_GUEST_LMA;
			vmcs_setreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
		}
	}

	return (error);
}

static int
vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
	   int code_valid)
{
	int error;
	uint64_t info;
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];

	static uint32_t type_map[VM_EVENT_MAX] = {
		0x1,		/* VM_EVENT_NONE */
		0x0,		/* VM_HW_INTR */
		0x2,		/* VM_NMI */
		0x3,		/* VM_HW_EXCEPTION */
		0x4,		/* VM_SW_INTR */
		0x5,		/* VM_PRIV_SW_EXCEPTION */
		0x6,		/* VM_SW_EXCEPTION */
	};

	/*
	 * If there is already an exception pending to be delivered to the
	 * vcpu then just return.
	 */
	error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
	if (error)
		return (error);

	if (info & VMCS_INTERRUPTION_INFO_VALID)
		return (EAGAIN);

	info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
	info |= VMCS_INTERRUPTION_INFO_VALID;
	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
	if (error != 0)
		return (error);

	if (code_valid) {
		error = vmcs_setreg(vmcs, 0,
		    VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
		    code);
	}
	return (error);
}

static int
vmx_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct vmx *vmx = arg;
	int vcap;
	int ret;

	ret = ENOENT;

	vcap = vmx->cap[vcpu].set;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit)
			ret = 0;
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit)
			ret = 0;
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap)
			ret = 0;
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest)
			ret = 0;
		break;
	default:
		break;
	}

	if (ret == 0)
		*retval = (vcap & (1 << type)) ?
		    1 : 0;

	return (ret);
}

static int
vmx_setcap(void *arg, int vcpu, int type, int val)
{
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];
	uint32_t baseval;
	uint32_t *pptr;
	int error;
	int flag;
	int reg;
	int retval;

	retval = ENOENT;
	pptr = NULL;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_HLT_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_MTF;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_PAUSE_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest) {
			retval = 0;
			baseval = procbased_ctls2;
			flag = PROCBASED2_UNRESTRICTED_GUEST;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	default:
		break;
	}

	if (retval == 0) {
		if (val) {
			baseval |= flag;
		} else {
			baseval &= ~flag;
		}
		VMPTRLD(vmcs);
		error = vmwrite(reg, baseval);
		VMCLEAR(vmcs);

		if (error) {
			retval = error;
		} else {
			/*
			 * Update optional stored flags, and record
			 * setting
			 */
			if (pptr != NULL) {
				*pptr = baseval;
			}

			if (val) {
				vmx->cap[vcpu].set |= (1 << type);
			} else {
				vmx->cap[vcpu].set &= ~(1 << type);
			}
		}
	}

	return (retval);
}

struct vmm_ops vmm_ops_intel = {
	vmx_init,
	vmx_cleanup,
	vmx_vminit,
	vmx_run,
	vmx_vmcleanup,
	ept_vmmmap_set,
	ept_vmmmap_get,
	vmx_getreg,
	vmx_setreg,
	vmx_getdesc,
	vmx_setdesc,
	vmx_inject,
	vmx_getcap,
	vmx_setcap
};
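/*
 * Usage sketch: the generic vmm layer selects this ops vector on Intel
 * hardware and dispatches through it.  The sequence below is an
 * illustration only; the member names are assumed from the initializer
 * order above (see struct vmm_ops in vmm.h) and the real dispatch code
 * lives in vmm.c:
 */
#if 0
	struct vmm_ops *ops = &vmm_ops_intel;
	void *cookie;

	ops->init();			/* vmx_init(): module load */
	cookie = ops->vminit(vm);	/* vmx_vminit(): per-VM state */
	ops->vmrun(cookie, vcpu, rip);	/* vmx_run(): enter the guest */
	ops->vmcleanup(cookie);		/* vmx_vmcleanup(): per-VM teardown */
	ops->cleanup();			/* vmx_cleanup(): module unload */
#endif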