vmx.c revision 268701
1/*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 268701 2014-07-15 17:37:17Z neel $ 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 268701 2014-07-15 17:37:17Z neel $"); 31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/smp.h> 35#include <sys/kernel.h> 36#include <sys/malloc.h> 37#include <sys/pcpu.h> 38#include <sys/proc.h> 39#include <sys/sysctl.h> 40 41#include <vm/vm.h> 42#include <vm/pmap.h> 43 44#include <machine/psl.h> 45#include <machine/cpufunc.h> 46#include <machine/md_var.h> 47#include <machine/segments.h> 48#include <machine/smp.h> 49#include <machine/specialreg.h> 50#include <machine/vmparam.h> 51 52#include <machine/vmm.h> 53#include <machine/vmm_dev.h> 54#include <machine/vmm_instruction_emul.h> 55#include "vmm_host.h" 56#include "vmm_ioport.h" 57#include "vmm_ipi.h" 58#include "vmm_msr.h" 59#include "vmm_ktr.h" 60#include "vmm_stat.h" 61#include "vatpic.h" 62#include "vlapic.h" 63#include "vlapic_priv.h" 64 65#include "vmx_msr.h" 66#include "ept.h" 67#include "vmx_cpufunc.h" 68#include "vmx.h" 69#include "x86.h" 70#include "vmx_controls.h" 71 72#define PINBASED_CTLS_ONE_SETTING \ 73 (PINBASED_EXTINT_EXITING | \ 74 PINBASED_NMI_EXITING | \ 75 PINBASED_VIRTUAL_NMI) 76#define PINBASED_CTLS_ZERO_SETTING 0 77 78#define PROCBASED_CTLS_WINDOW_SETTING \ 79 (PROCBASED_INT_WINDOW_EXITING | \ 80 PROCBASED_NMI_WINDOW_EXITING) 81 82#define PROCBASED_CTLS_ONE_SETTING \ 83 (PROCBASED_SECONDARY_CONTROLS | \ 84 PROCBASED_IO_EXITING | \ 85 PROCBASED_MSR_BITMAPS | \ 86 PROCBASED_CTLS_WINDOW_SETTING | \ 87 PROCBASED_CR8_LOAD_EXITING | \ 88 PROCBASED_CR8_STORE_EXITING) 89#define PROCBASED_CTLS_ZERO_SETTING \ 90 (PROCBASED_CR3_LOAD_EXITING | \ 91 PROCBASED_CR3_STORE_EXITING | \ 92 PROCBASED_IO_BITMAPS) 93 94#define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT 95#define PROCBASED_CTLS2_ZERO_SETTING 0 96 97#define VM_EXIT_CTLS_ONE_SETTING_NO_PAT \ 98 (VM_EXIT_HOST_LMA | \ 99 VM_EXIT_SAVE_EFER | \ 100 VM_EXIT_LOAD_EFER) 101 102#define VM_EXIT_CTLS_ONE_SETTING \ 103 (VM_EXIT_CTLS_ONE_SETTING_NO_PAT | \ 104 VM_EXIT_ACKNOWLEDGE_INTERRUPT | \ 105 VM_EXIT_SAVE_PAT | \ 106 VM_EXIT_LOAD_PAT) 107#define 
VM_EXIT_CTLS_ZERO_SETTING VM_EXIT_SAVE_DEBUG_CONTROLS 108 109#define VM_ENTRY_CTLS_ONE_SETTING_NO_PAT VM_ENTRY_LOAD_EFER 110 111#define VM_ENTRY_CTLS_ONE_SETTING \ 112 (VM_ENTRY_CTLS_ONE_SETTING_NO_PAT | \ 113 VM_ENTRY_LOAD_PAT) 114#define VM_ENTRY_CTLS_ZERO_SETTING \ 115 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 116 VM_ENTRY_INTO_SMM | \ 117 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 118 119#define guest_msr_rw(vmx, msr) \ 120 msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW) 121 122#define guest_msr_ro(vmx, msr) \ 123 msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ) 124 125#define HANDLED 1 126#define UNHANDLED 0 127 128static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 129static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 130 131SYSCTL_DECL(_hw_vmm); 132SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL); 133 134int vmxon_enabled[MAXCPU]; 135static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 136 137static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 138static uint32_t exit_ctls, entry_ctls; 139 140static uint64_t cr0_ones_mask, cr0_zeros_mask; 141SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 142 &cr0_ones_mask, 0, NULL); 143SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 144 &cr0_zeros_mask, 0, NULL); 145 146static uint64_t cr4_ones_mask, cr4_zeros_mask; 147SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 148 &cr4_ones_mask, 0, NULL); 149SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 150 &cr4_zeros_mask, 0, NULL); 151 152static int vmx_no_patmsr; 153 154static int vmx_initialized; 155SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 156 &vmx_initialized, 0, "Intel VMX initialized"); 157 158/* 159 * Optional capabilities 160 */ 161static int cap_halt_exit; 162static int cap_pause_exit; 163static int cap_unrestricted_guest; 164static int cap_monitor_trap; 165static int cap_invpcid; 166 167static int virtual_interrupt_delivery; 168SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD, 169 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support"); 170 171static int posted_interrupts; 172SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD, 173 &posted_interrupts, 0, "APICv posted interrupt support"); 174 175static int pirvec; 176SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 177 &pirvec, 0, "APICv posted interrupt vector"); 178 179static struct unrhdr *vpid_unr; 180static u_int vpid_alloc_failed; 181SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 182 &vpid_alloc_failed, 0, NULL); 183 184/* 185 * Use the last page below 4GB as the APIC access address. This address is 186 * occupied by the boot firmware so it is guaranteed that it will not conflict 187 * with a page in system memory. 
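 * Guest accesses to this page never reach the firmware; they are either
 * virtualized by the processor or reported as APIC-access VM-exits.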
188 */ 189#define APIC_ACCESS_ADDRESS 0xFFFFF000 190 191static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); 192static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); 193static void vmx_inject_pir(struct vlapic *vlapic); 194 195#ifdef KTR 196static const char * 197exit_reason_to_str(int reason) 198{ 199 static char reasonbuf[32]; 200 201 switch (reason) { 202 case EXIT_REASON_EXCEPTION: 203 return "exception"; 204 case EXIT_REASON_EXT_INTR: 205 return "extint"; 206 case EXIT_REASON_TRIPLE_FAULT: 207 return "triplefault"; 208 case EXIT_REASON_INIT: 209 return "init"; 210 case EXIT_REASON_SIPI: 211 return "sipi"; 212 case EXIT_REASON_IO_SMI: 213 return "iosmi"; 214 case EXIT_REASON_SMI: 215 return "smi"; 216 case EXIT_REASON_INTR_WINDOW: 217 return "intrwindow"; 218 case EXIT_REASON_NMI_WINDOW: 219 return "nmiwindow"; 220 case EXIT_REASON_TASK_SWITCH: 221 return "taskswitch"; 222 case EXIT_REASON_CPUID: 223 return "cpuid"; 224 case EXIT_REASON_GETSEC: 225 return "getsec"; 226 case EXIT_REASON_HLT: 227 return "hlt"; 228 case EXIT_REASON_INVD: 229 return "invd"; 230 case EXIT_REASON_INVLPG: 231 return "invlpg"; 232 case EXIT_REASON_RDPMC: 233 return "rdpmc"; 234 case EXIT_REASON_RDTSC: 235 return "rdtsc"; 236 case EXIT_REASON_RSM: 237 return "rsm"; 238 case EXIT_REASON_VMCALL: 239 return "vmcall"; 240 case EXIT_REASON_VMCLEAR: 241 return "vmclear"; 242 case EXIT_REASON_VMLAUNCH: 243 return "vmlaunch"; 244 case EXIT_REASON_VMPTRLD: 245 return "vmptrld"; 246 case EXIT_REASON_VMPTRST: 247 return "vmptrst"; 248 case EXIT_REASON_VMREAD: 249 return "vmread"; 250 case EXIT_REASON_VMRESUME: 251 return "vmresume"; 252 case EXIT_REASON_VMWRITE: 253 return "vmwrite"; 254 case EXIT_REASON_VMXOFF: 255 return "vmxoff"; 256 case EXIT_REASON_VMXON: 257 return "vmxon"; 258 case EXIT_REASON_CR_ACCESS: 259 return "craccess"; 260 case EXIT_REASON_DR_ACCESS: 261 return "draccess"; 262 case EXIT_REASON_INOUT: 263 return "inout"; 264 case EXIT_REASON_RDMSR: 265 return "rdmsr"; 266 case EXIT_REASON_WRMSR: 267 return "wrmsr"; 268 case EXIT_REASON_INVAL_VMCS: 269 return "invalvmcs"; 270 case EXIT_REASON_INVAL_MSR: 271 return "invalmsr"; 272 case EXIT_REASON_MWAIT: 273 return "mwait"; 274 case EXIT_REASON_MTF: 275 return "mtf"; 276 case EXIT_REASON_MONITOR: 277 return "monitor"; 278 case EXIT_REASON_PAUSE: 279 return "pause"; 280 case EXIT_REASON_MCE: 281 return "mce"; 282 case EXIT_REASON_TPR: 283 return "tpr"; 284 case EXIT_REASON_APIC_ACCESS: 285 return "apic-access"; 286 case EXIT_REASON_GDTR_IDTR: 287 return "gdtridtr"; 288 case EXIT_REASON_LDTR_TR: 289 return "ldtrtr"; 290 case EXIT_REASON_EPT_FAULT: 291 return "eptfault"; 292 case EXIT_REASON_EPT_MISCONFIG: 293 return "eptmisconfig"; 294 case EXIT_REASON_INVEPT: 295 return "invept"; 296 case EXIT_REASON_RDTSCP: 297 return "rdtscp"; 298 case EXIT_REASON_VMX_PREEMPT: 299 return "vmxpreempt"; 300 case EXIT_REASON_INVVPID: 301 return "invvpid"; 302 case EXIT_REASON_WBINVD: 303 return "wbinvd"; 304 case EXIT_REASON_XSETBV: 305 return "xsetbv"; 306 case EXIT_REASON_APIC_WRITE: 307 return "apic-write"; 308 default: 309 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 310 return (reasonbuf); 311 } 312} 313#endif /* KTR */ 314 315static int 316vmx_allow_x2apic_msrs(struct vmx *vmx) 317{ 318 int i, error; 319 320 error = 0; 321 322 /* 323 * Allow readonly access to the following x2APIC MSRs from the guest. 
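	 * Writes to these registers are still intercepted through the MSR
	 * bitmap and are handled by the local APIC emulation.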
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
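	 *
	 * A non-zero VPID allows the processor to retain TLB entries across
	 * VMX transitions by tagging them with the address space they
	 * belong to.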
453 * 454 * The remaining VPIDs are managed by the unit number allocator. 455 */ 456 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL); 457} 458 459static void 460msr_save_area_init(struct msr_entry *g_area, int *g_count) 461{ 462 int cnt; 463 464 static struct msr_entry guest_msrs[] = { 465 { MSR_KGSBASE, 0, 0 }, 466 }; 467 468 cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]); 469 if (cnt > GUEST_MSR_MAX_ENTRIES) 470 panic("guest msr save area overrun"); 471 bcopy(guest_msrs, g_area, sizeof(guest_msrs)); 472 *g_count = cnt; 473} 474 475static void 476vmx_disable(void *arg __unused) 477{ 478 struct invvpid_desc invvpid_desc = { 0 }; 479 struct invept_desc invept_desc = { 0 }; 480 481 if (vmxon_enabled[curcpu]) { 482 /* 483 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b. 484 * 485 * VMXON or VMXOFF are not required to invalidate any TLB 486 * caching structures. This prevents potential retention of 487 * cached information in the TLB between distinct VMX episodes. 488 */ 489 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc); 490 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc); 491 vmxoff(); 492 } 493 load_cr4(rcr4() & ~CR4_VMXE); 494} 495 496static int 497vmx_cleanup(void) 498{ 499 500 if (pirvec != 0) 501 vmm_ipi_free(pirvec); 502 503 if (vpid_unr != NULL) { 504 delete_unrhdr(vpid_unr); 505 vpid_unr = NULL; 506 } 507 508 smp_rendezvous(NULL, vmx_disable, NULL, NULL); 509 510 return (0); 511} 512 513static void 514vmx_enable(void *arg __unused) 515{ 516 int error; 517 uint64_t feature_control; 518 519 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 520 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 || 521 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 522 wrmsr(MSR_IA32_FEATURE_CONTROL, 523 feature_control | IA32_FEATURE_CONTROL_VMX_EN | 524 IA32_FEATURE_CONTROL_LOCK); 525 } 526 527 load_cr4(rcr4() | CR4_VMXE); 528 529 *(uint32_t *)vmxon_region[curcpu] = vmx_revision(); 530 error = vmxon(vmxon_region[curcpu]); 531 if (error == 0) 532 vmxon_enabled[curcpu] = 1; 533} 534 535static void 536vmx_restore(void) 537{ 538 539 if (vmxon_enabled[curcpu]) 540 vmxon(vmxon_region[curcpu]); 541} 542 543static int 544vmx_init(int ipinum) 545{ 546 int error, use_tpr_shadow; 547 uint64_t basic, fixed0, fixed1, feature_control; 548 uint32_t tmp, procbased2_vid_bits; 549 550 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ 551 if (!(cpu_feature2 & CPUID2_VMX)) { 552 printf("vmx_init: processor does not support VMX operation\n"); 553 return (ENXIO); 554 } 555 556 /* 557 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits 558 * are set (bits 0 and 2 respectively). 
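	 * If the MSR is not locked then vmx_enable() will set both bits on
	 * each cpu later; the only fatal case is a BIOS that has locked the
	 * MSR with the VMX enable bit clear.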
559 */ 560 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 561 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 && 562 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 563 printf("vmx_init: VMX operation disabled by BIOS\n"); 564 return (ENXIO); 565 } 566 567 /* 568 * Verify capabilities MSR_VMX_BASIC: 569 * - bit 54 indicates support for INS/OUTS decoding 570 */ 571 basic = rdmsr(MSR_VMX_BASIC); 572 if ((basic & (1UL << 54)) == 0) { 573 printf("vmx_init: processor does not support desired basic " 574 "capabilities\n"); 575 return (EINVAL); 576 } 577 578 /* Check support for primary processor-based VM-execution controls */ 579 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 580 MSR_VMX_TRUE_PROCBASED_CTLS, 581 PROCBASED_CTLS_ONE_SETTING, 582 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 583 if (error) { 584 printf("vmx_init: processor does not support desired primary " 585 "processor-based controls\n"); 586 return (error); 587 } 588 589 /* Clear the processor-based ctl bits that are set on demand */ 590 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 591 592 /* Check support for secondary processor-based VM-execution controls */ 593 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 594 MSR_VMX_PROCBASED_CTLS2, 595 PROCBASED_CTLS2_ONE_SETTING, 596 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 597 if (error) { 598 printf("vmx_init: processor does not support desired secondary " 599 "processor-based controls\n"); 600 return (error); 601 } 602 603 /* Check support for VPID */ 604 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 605 PROCBASED2_ENABLE_VPID, 0, &tmp); 606 if (error == 0) 607 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 608 609 /* Check support for pin-based VM-execution controls */ 610 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 611 MSR_VMX_TRUE_PINBASED_CTLS, 612 PINBASED_CTLS_ONE_SETTING, 613 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 614 if (error) { 615 printf("vmx_init: processor does not support desired " 616 "pin-based controls\n"); 617 return (error); 618 } 619 620 /* Check support for VM-exit controls */ 621 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 622 VM_EXIT_CTLS_ONE_SETTING, 623 VM_EXIT_CTLS_ZERO_SETTING, 624 &exit_ctls); 625 if (error) { 626 /* Try again without the PAT MSR bits */ 627 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, 628 MSR_VMX_TRUE_EXIT_CTLS, 629 VM_EXIT_CTLS_ONE_SETTING_NO_PAT, 630 VM_EXIT_CTLS_ZERO_SETTING, 631 &exit_ctls); 632 if (error) { 633 printf("vmx_init: processor does not support desired " 634 "exit controls\n"); 635 return (error); 636 } else { 637 if (bootverbose) 638 printf("vmm: PAT MSR access not supported\n"); 639 guest_msr_valid(MSR_PAT); 640 vmx_no_patmsr = 1; 641 } 642 } 643 644 /* Check support for VM-entry controls */ 645 if (!vmx_no_patmsr) { 646 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 647 MSR_VMX_TRUE_ENTRY_CTLS, 648 VM_ENTRY_CTLS_ONE_SETTING, 649 VM_ENTRY_CTLS_ZERO_SETTING, 650 &entry_ctls); 651 } else { 652 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 653 MSR_VMX_TRUE_ENTRY_CTLS, 654 VM_ENTRY_CTLS_ONE_SETTING_NO_PAT, 655 VM_ENTRY_CTLS_ZERO_SETTING, 656 &entry_ctls); 657 } 658 659 if (error) { 660 printf("vmx_init: processor does not support desired " 661 "entry controls\n"); 662 return (error); 663 } 664 665 /* 666 * Check support for optional features by testing them 667 * as individual bits 668 */ 669 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 670 MSR_VMX_TRUE_PROCBASED_CTLS, 671 PROCBASED_HLT_EXITING, 0, 672 &tmp) == 0); 673 674 
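	/*
	 * The monitor trap flag (MTF) causes a VM-exit after each guest
	 * instruction and can be used to single-step the guest.
	 */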
cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 675 MSR_VMX_PROCBASED_CTLS, 676 PROCBASED_MTF, 0, 677 &tmp) == 0); 678 679 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 680 MSR_VMX_TRUE_PROCBASED_CTLS, 681 PROCBASED_PAUSE_EXITING, 0, 682 &tmp) == 0); 683 684 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 685 MSR_VMX_PROCBASED_CTLS2, 686 PROCBASED2_UNRESTRICTED_GUEST, 0, 687 &tmp) == 0); 688 689 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 690 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 691 &tmp) == 0); 692 693 /* 694 * Check support for virtual interrupt delivery. 695 */ 696 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 697 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 698 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 699 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 700 701 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 702 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, 703 &tmp) == 0); 704 705 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 706 procbased2_vid_bits, 0, &tmp); 707 if (error == 0 && use_tpr_shadow) { 708 virtual_interrupt_delivery = 1; 709 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", 710 &virtual_interrupt_delivery); 711 } 712 713 if (virtual_interrupt_delivery) { 714 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 715 procbased_ctls2 |= procbased2_vid_bits; 716 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; 717 718 /* 719 * No need to emulate accesses to %CR8 if virtual 720 * interrupt delivery is enabled. 721 */ 722 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING; 723 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING; 724 725 /* 726 * Check for Posted Interrupts only if Virtual Interrupt 727 * Delivery is enabled. 728 */ 729 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 730 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, 731 &tmp); 732 if (error == 0) { 733 pirvec = vmm_ipi_alloc(); 734 if (pirvec == 0) { 735 if (bootverbose) { 736 printf("vmx_init: unable to allocate " 737 "posted interrupt vector\n"); 738 } 739 } else { 740 posted_interrupts = 1; 741 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir", 742 &posted_interrupts); 743 } 744 } 745 } 746 747 if (posted_interrupts) 748 pinbased_ctls |= PINBASED_POSTED_INTERRUPT; 749 750 /* Initialize EPT */ 751 error = ept_init(ipinum); 752 if (error) { 753 printf("vmx_init: ept initialization failed (%d)\n", error); 754 return (error); 755 } 756 757 /* 758 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 759 */ 760 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 761 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 762 cr0_ones_mask = fixed0 & fixed1; 763 cr0_zeros_mask = ~fixed0 & ~fixed1; 764 765 /* 766 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation 767 * if unrestricted guest execution is allowed. 768 */ 769 if (cap_unrestricted_guest) 770 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 771 772 /* 773 * Do not allow the guest to set CR0_NW or CR0_CD. 
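	 * Caching on the physical cpu is shared with the host, so the guest
	 * is never allowed to actually disable it.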
774 */ 775 cr0_zeros_mask |= (CR0_NW | CR0_CD); 776 777 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 778 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 779 cr4_ones_mask = fixed0 & fixed1; 780 cr4_zeros_mask = ~fixed0 & ~fixed1; 781 782 vpid_init(); 783 784 /* enable VMX operation */ 785 smp_rendezvous(NULL, vmx_enable, NULL, NULL); 786 787 vmx_initialized = 1; 788 789 return (0); 790} 791 792static void 793vmx_trigger_hostintr(int vector) 794{ 795 uintptr_t func; 796 struct gate_descriptor *gd; 797 798 gd = &idt[vector]; 799 800 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 801 "invalid vector %d", vector)); 802 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 803 vector)); 804 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 805 "has invalid type %d", vector, gd->gd_type)); 806 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 807 "has invalid dpl %d", vector, gd->gd_dpl)); 808 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 809 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 810 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 811 "IST %d", vector, gd->gd_ist)); 812 813 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 814 vmx_call_isr(func); 815} 816 817static int 818vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) 819{ 820 int error, mask_ident, shadow_ident; 821 uint64_t mask_value; 822 823 if (which != 0 && which != 4) 824 panic("vmx_setup_cr_shadow: unknown cr%d", which); 825 826 if (which == 0) { 827 mask_ident = VMCS_CR0_MASK; 828 mask_value = cr0_ones_mask | cr0_zeros_mask; 829 shadow_ident = VMCS_CR0_SHADOW; 830 } else { 831 mask_ident = VMCS_CR4_MASK; 832 mask_value = cr4_ones_mask | cr4_zeros_mask; 833 shadow_ident = VMCS_CR4_SHADOW; 834 } 835 836 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); 837 if (error) 838 return (error); 839 840 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); 841 if (error) 842 return (error); 843 844 return (0); 845} 846#define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) 847#define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) 848 849static void * 850vmx_vminit(struct vm *vm, pmap_t pmap) 851{ 852 uint16_t vpid[VM_MAXCPU]; 853 int i, error, guest_msr_count; 854 struct vmx *vmx; 855 struct vmcs *vmcs; 856 857 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 858 if ((uintptr_t)vmx & PAGE_MASK) { 859 panic("malloc of struct vmx not aligned on %d byte boundary", 860 PAGE_SIZE); 861 } 862 vmx->vm = vm; 863 864 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4)); 865 866 /* 867 * Clean up EPTP-tagged guest physical and combined mappings 868 * 869 * VMX transitions are not required to invalidate any guest physical 870 * mappings. So, it may be possible for stale guest physical mappings 871 * to be present in the processor TLBs. 872 * 873 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 874 */ 875 ept_invalidate_mappings(vmx->eptp); 876 877 msr_bitmap_initialize(vmx->msr_bitmap); 878 879 /* 880 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 881 * The guest FSBASE and GSBASE are saved and restored during 882 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are 883 * always restored from the vmcs host state area on vm-exit. 
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * Guest KGSBASE is saved and restored in the guest MSR save area.
	 * Host KGSBASE is restored before returning to userland from the pcb.
	 * There will be a window of time when we are executing in the host
	 * kernel context with a value of KGSBASE from the guest. This is ok
	 * because the value of KGSBASE is inconsequential in kernel context.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 *
	 * The TSC MSR is exposed read-only. Writes are disallowed as that
	 * will impact the host TSC.
	 * XXX Writes would be implemented with a wrmsr trap, and
	 * then modifying the TSC offset in the VMCS.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_KGSBASE) ||
	    guest_msr_rw(vmx, MSR_EFER) ||
	    guest_msr_ro(vmx, MSR_TSC))
		panic("vmx_vminit: error setting guest msr access");

	/*
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit. However, if running on a system with no
	 * MSR_PAT save/restore support, leave access disabled so accesses
	 * will be trapped.
	 */
	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
		panic("vmx_vminit: error setting guest pat msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			    error, i);
		}

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);
		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
vmx->cap[i].proc_ctls2 = procbased_ctls2; 976 977 vmx->state[i].lastcpu = NOCPU; 978 vmx->state[i].vpid = vpid[i]; 979 980 msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count); 981 982 error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]), 983 guest_msr_count); 984 if (error != 0) 985 panic("vmcs_set_msr_save error %d", error); 986 987 /* 988 * Set up the CR0/4 shadows, and init the read shadow 989 * to the power-on register value from the Intel Sys Arch. 990 * CR0 - 0x60000010 991 * CR4 - 0 992 */ 993 error = vmx_setup_cr0_shadow(vmcs, 0x60000010); 994 if (error != 0) 995 panic("vmx_setup_cr0_shadow %d", error); 996 997 error = vmx_setup_cr4_shadow(vmcs, 0); 998 if (error != 0) 999 panic("vmx_setup_cr4_shadow %d", error); 1000 1001 vmx->ctx[i].pmap = pmap; 1002 } 1003 1004 return (vmx); 1005} 1006 1007static int 1008vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 1009{ 1010 int handled, func; 1011 1012 func = vmxctx->guest_rax; 1013 1014 handled = x86_emulate_cpuid(vm, vcpu, 1015 (uint32_t*)(&vmxctx->guest_rax), 1016 (uint32_t*)(&vmxctx->guest_rbx), 1017 (uint32_t*)(&vmxctx->guest_rcx), 1018 (uint32_t*)(&vmxctx->guest_rdx)); 1019 return (handled); 1020} 1021 1022static __inline void 1023vmx_run_trace(struct vmx *vmx, int vcpu) 1024{ 1025#ifdef KTR 1026 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip()); 1027#endif 1028} 1029 1030static __inline void 1031vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason, 1032 int handled) 1033{ 1034#ifdef KTR 1035 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx", 1036 handled ? "handled" : "unhandled", 1037 exit_reason_to_str(exit_reason), rip); 1038#endif 1039} 1040 1041static __inline void 1042vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 1043{ 1044#ifdef KTR 1045 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 1046#endif 1047} 1048 1049static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1050static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); 1051 1052/* 1053 * Invalidate guest mappings identified by its vpid from the TLB. 1054 */ 1055static __inline void 1056vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running) 1057{ 1058 struct vmxstate *vmxstate; 1059 struct invvpid_desc invvpid_desc; 1060 1061 vmxstate = &vmx->state[vcpu]; 1062 if (vmxstate->vpid == 0) 1063 return; 1064 1065 if (!running) { 1066 /* 1067 * Set the 'lastcpu' to an invalid host cpu. 1068 * 1069 * This will invalidate TLB entries tagged with the vcpu's 1070 * vpid the next time it runs via vmx_set_pcpu_defaults(). 1071 */ 1072 vmxstate->lastcpu = NOCPU; 1073 return; 1074 } 1075 1076 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " 1077 "critical section", __func__, vcpu)); 1078 1079 /* 1080 * Invalidate all mappings tagged with 'vpid' 1081 * 1082 * We do this because this vcpu was executing on a different host 1083 * cpu when it last ran. We do not track whether it invalidated 1084 * mappings associated with its 'vpid' during that run. So we must 1085 * assume that the mappings associated with 'vpid' on 'curcpu' are 1086 * stale and invalidate them. 1087 * 1088 * Note that we incur this penalty only when the scheduler chooses to 1089 * move the thread associated with this vcpu between host cpus. 1090 * 1091 * Note also that this will invalidate mappings tagged with 'vpid' 1092 * for "all" EP4TAs. 
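	 *
	 * 'vmx->eptgen[curcpu]' tracks the EPT generation last made visible
	 * on this cpu. If it matches 'pmap->pm_eptgen' then no invept is
	 * pending and the invvpid below cannot be skipped.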
1093 */ 1094 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1095 invvpid_desc._res1 = 0; 1096 invvpid_desc._res2 = 0; 1097 invvpid_desc.vpid = vmxstate->vpid; 1098 invvpid_desc.linear_addr = 0; 1099 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1100 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1); 1101 } else { 1102 /* 1103 * The invvpid can be skipped if an invept is going to 1104 * be performed before entering the guest. The invept 1105 * will invalidate combined mappings tagged with 1106 * 'vmx->eptp' for all vpids. 1107 */ 1108 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1109 } 1110} 1111 1112static void 1113vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 1114{ 1115 struct vmxstate *vmxstate; 1116 1117 vmxstate = &vmx->state[vcpu]; 1118 if (vmxstate->lastcpu == curcpu) 1119 return; 1120 1121 vmxstate->lastcpu = curcpu; 1122 1123 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 1124 1125 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1126 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1127 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1128 vmx_invvpid(vmx, vcpu, pmap, 1); 1129} 1130 1131/* 1132 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1133 */ 1134CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1135 1136static void __inline 1137vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1138{ 1139 1140 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1141 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1142 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1143 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1144 } 1145} 1146 1147static void __inline 1148vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1149{ 1150 1151 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1152 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls)); 1153 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1154 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1155 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1156} 1157 1158static void __inline 1159vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1160{ 1161 1162 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1163 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1164 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1165 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1166 } 1167} 1168 1169static void __inline 1170vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1171{ 1172 1173 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1174 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls)); 1175 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1176 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1177 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1178} 1179 1180#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1181 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1182#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1183 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1184 1185static void 1186vmx_inject_nmi(struct vmx *vmx, int vcpu) 1187{ 1188 uint32_t gi, info; 1189 1190 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1191 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1192 "interruptibility-state %#x", gi)); 1193 1194 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 
1195 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1196 "VM-entry interruption information %#x", info)); 1197 1198 /* 1199 * Inject the virtual NMI. The vector must be the NMI IDT entry 1200 * or the VMCS entry check will fail. 1201 */ 1202 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1203 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1204 1205 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1206 1207 /* Clear the request */ 1208 vm_nmi_clear(vmx->vm, vcpu); 1209} 1210 1211static void 1212vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic) 1213{ 1214 struct vm_exception exc; 1215 int vector, need_nmi_exiting, extint_pending; 1216 uint64_t rflags; 1217 uint32_t gi, info; 1218 1219 if (vm_exception_pending(vmx->vm, vcpu, &exc)) { 1220 KASSERT(exc.vector >= 0 && exc.vector < 32, 1221 ("%s: invalid exception vector %d", __func__, exc.vector)); 1222 1223 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1224 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1225 "pending exception %d: %#x", __func__, exc.vector, info)); 1226 1227 info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID; 1228 if (exc.error_code_valid) { 1229 info |= VMCS_INTR_DEL_ERRCODE; 1230 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code); 1231 } 1232 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1233 } 1234 1235 if (vm_nmi_pending(vmx->vm, vcpu)) { 1236 /* 1237 * If there are no conditions blocking NMI injection then 1238 * inject it directly here otherwise enable "NMI window 1239 * exiting" to inject it as soon as we can. 1240 * 1241 * We also check for STI_BLOCKING because some implementations 1242 * don't allow NMI injection in this case. If we are running 1243 * on a processor that doesn't have this restriction it will 1244 * immediately exit and the NMI will be injected in the 1245 * "NMI window exiting" handler. 1246 */ 1247 need_nmi_exiting = 1; 1248 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1249 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1250 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1251 if ((info & VMCS_INTR_VALID) == 0) { 1252 vmx_inject_nmi(vmx, vcpu); 1253 need_nmi_exiting = 0; 1254 } else { 1255 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI " 1256 "due to VM-entry intr info %#x", info); 1257 } 1258 } else { 1259 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to " 1260 "Guest Interruptibility-state %#x", gi); 1261 } 1262 1263 if (need_nmi_exiting) 1264 vmx_set_nmi_window_exiting(vmx, vcpu); 1265 } 1266 1267 extint_pending = vm_extint_pending(vmx->vm, vcpu); 1268 1269 if (!extint_pending && virtual_interrupt_delivery) { 1270 vmx_inject_pir(vlapic); 1271 return; 1272 } 1273 1274 /* 1275 * If interrupt-window exiting is already in effect then don't bother 1276 * checking for pending interrupts. This is just an optimization and 1277 * not needed for correctness. 1278 */ 1279 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 1280 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to " 1281 "pending int_window_exiting"); 1282 return; 1283 } 1284 1285 if (!extint_pending) { 1286 /* Ask the local apic for a vector to inject */ 1287 if (!vlapic_pending_intr(vlapic, &vector)) 1288 return; 1289 1290 /* 1291 * From the Intel SDM, Volume 3, Section "Maskable 1292 * Hardware Interrupts": 1293 * - maskable interrupt vectors [16,255] can be delivered 1294 * through the local APIC. 
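		 * - vectors [0,15] are treated as illegal by the local APIC,
		 *   hence the assertion below.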
1295 */ 1296 KASSERT(vector >= 16 && vector <= 255, 1297 ("invalid vector %d from local APIC", vector)); 1298 } else { 1299 /* Ask the legacy pic for a vector to inject */ 1300 vatpic_pending_intr(vmx->vm, &vector); 1301 1302 /* 1303 * From the Intel SDM, Volume 3, Section "Maskable 1304 * Hardware Interrupts": 1305 * - maskable interrupt vectors [0,255] can be delivered 1306 * through the INTR pin. 1307 */ 1308 KASSERT(vector >= 0 && vector <= 255, 1309 ("invalid vector %d from INTR", vector)); 1310 } 1311 1312 /* Check RFLAGS.IF and the interruptibility state of the guest */ 1313 rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1314 if ((rflags & PSL_I) == 0) { 1315 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1316 "rflags %#lx", vector, rflags); 1317 goto cantinject; 1318 } 1319 1320 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1321 if (gi & HWINTR_BLOCKING) { 1322 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1323 "Guest Interruptibility-state %#x", vector, gi); 1324 goto cantinject; 1325 } 1326 1327 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1328 if (info & VMCS_INTR_VALID) { 1329 /* 1330 * This is expected and could happen for multiple reasons: 1331 * - A vectoring VM-entry was aborted due to astpending 1332 * - A VM-exit happened during event injection. 1333 * - An exception was injected above. 1334 * - An NMI was injected above or after "NMI window exiting" 1335 */ 1336 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1337 "VM-entry intr info %#x", vector, info); 1338 goto cantinject; 1339 } 1340 1341 /* Inject the interrupt */ 1342 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1343 info |= vector; 1344 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1345 1346 if (!extint_pending) { 1347 /* Update the Local APIC ISR */ 1348 vlapic_intr_accepted(vlapic, vector); 1349 } else { 1350 vm_extint_clear(vmx->vm, vcpu); 1351 vatpic_intr_accepted(vmx->vm, vector); 1352 1353 /* 1354 * After we accepted the current ExtINT the PIC may 1355 * have posted another one. If that is the case, set 1356 * the Interrupt Window Exiting execution control so 1357 * we can inject that one too. 1358 * 1359 * Also, interrupt window exiting allows us to inject any 1360 * pending APIC vector that was preempted by the ExtINT 1361 * as soon as possible. This applies both for the software 1362 * emulated vlapic and the hardware assisted virtual APIC. 1363 */ 1364 vmx_set_int_window_exiting(vmx, vcpu); 1365 } 1366 1367 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1368 1369 return; 1370 1371cantinject: 1372 /* 1373 * Set the Interrupt Window Exiting execution control so we can inject 1374 * the interrupt as soon as blocking condition goes away. 1375 */ 1376 vmx_set_int_window_exiting(vmx, vcpu); 1377} 1378 1379/* 1380 * If the Virtual NMIs execution control is '1' then the logical processor 1381 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1382 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1383 * virtual-NMI blocking. 1384 * 1385 * This unblocking occurs even if the IRET causes a fault. In this case the 1386 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 
1387 */ 1388static void 1389vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1390{ 1391 uint32_t gi; 1392 1393 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1394 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1395 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1396 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1397} 1398 1399static void 1400vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1401{ 1402 uint32_t gi; 1403 1404 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1405 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1406 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1407 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1408} 1409 1410static int 1411vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1412{ 1413 struct vmxctx *vmxctx; 1414 uint64_t xcrval; 1415 const struct xsave_limits *limits; 1416 1417 vmxctx = &vmx->ctx[vcpu]; 1418 limits = vmm_get_xsave_limits(); 1419 1420 /* 1421 * Note that the processor raises a GP# fault on its own if 1422 * xsetbv is executed for CPL != 0, so we do not have to 1423 * emulate that fault here. 1424 */ 1425 1426 /* Only xcr0 is supported. */ 1427 if (vmxctx->guest_rcx != 0) { 1428 vm_inject_gp(vmx->vm, vcpu); 1429 return (HANDLED); 1430 } 1431 1432 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1433 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1434 vm_inject_ud(vmx->vm, vcpu); 1435 return (HANDLED); 1436 } 1437 1438 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1439 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1440 vm_inject_gp(vmx->vm, vcpu); 1441 return (HANDLED); 1442 } 1443 1444 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1445 vm_inject_gp(vmx->vm, vcpu); 1446 return (HANDLED); 1447 } 1448 1449 /* AVX (YMM_Hi128) requires SSE. */ 1450 if (xcrval & XFEATURE_ENABLED_AVX && 1451 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1452 vm_inject_gp(vmx->vm, vcpu); 1453 return (HANDLED); 1454 } 1455 1456 /* 1457 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1458 * ZMM_Hi256, and Hi16_ZMM. 1459 */ 1460 if (xcrval & XFEATURE_AVX512 && 1461 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1462 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1463 vm_inject_gp(vmx->vm, vcpu); 1464 return (HANDLED); 1465 } 1466 1467 /* 1468 * Intel MPX requires both bound register state flags to be 1469 * set. 1470 */ 1471 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1472 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1473 vm_inject_gp(vmx->vm, vcpu); 1474 return (HANDLED); 1475 } 1476 1477 /* 1478 * This runs "inside" vmrun() with the guest's FPU state, so 1479 * modifying xcr0 directly modifies the guest's xcr0, not the 1480 * host's. 
1481 */ 1482 load_xcr(0, xcrval); 1483 return (HANDLED); 1484} 1485 1486static uint64_t 1487vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident) 1488{ 1489 const struct vmxctx *vmxctx; 1490 1491 vmxctx = &vmx->ctx[vcpu]; 1492 1493 switch (ident) { 1494 case 0: 1495 return (vmxctx->guest_rax); 1496 case 1: 1497 return (vmxctx->guest_rcx); 1498 case 2: 1499 return (vmxctx->guest_rdx); 1500 case 3: 1501 return (vmxctx->guest_rbx); 1502 case 4: 1503 return (vmcs_read(VMCS_GUEST_RSP)); 1504 case 5: 1505 return (vmxctx->guest_rbp); 1506 case 6: 1507 return (vmxctx->guest_rsi); 1508 case 7: 1509 return (vmxctx->guest_rdi); 1510 case 8: 1511 return (vmxctx->guest_r8); 1512 case 9: 1513 return (vmxctx->guest_r9); 1514 case 10: 1515 return (vmxctx->guest_r10); 1516 case 11: 1517 return (vmxctx->guest_r11); 1518 case 12: 1519 return (vmxctx->guest_r12); 1520 case 13: 1521 return (vmxctx->guest_r13); 1522 case 14: 1523 return (vmxctx->guest_r14); 1524 case 15: 1525 return (vmxctx->guest_r15); 1526 default: 1527 panic("invalid vmx register %d", ident); 1528 } 1529} 1530 1531static void 1532vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval) 1533{ 1534 struct vmxctx *vmxctx; 1535 1536 vmxctx = &vmx->ctx[vcpu]; 1537 1538 switch (ident) { 1539 case 0: 1540 vmxctx->guest_rax = regval; 1541 break; 1542 case 1: 1543 vmxctx->guest_rcx = regval; 1544 break; 1545 case 2: 1546 vmxctx->guest_rdx = regval; 1547 break; 1548 case 3: 1549 vmxctx->guest_rbx = regval; 1550 break; 1551 case 4: 1552 vmcs_write(VMCS_GUEST_RSP, regval); 1553 break; 1554 case 5: 1555 vmxctx->guest_rbp = regval; 1556 break; 1557 case 6: 1558 vmxctx->guest_rsi = regval; 1559 break; 1560 case 7: 1561 vmxctx->guest_rdi = regval; 1562 break; 1563 case 8: 1564 vmxctx->guest_r8 = regval; 1565 break; 1566 case 9: 1567 vmxctx->guest_r9 = regval; 1568 break; 1569 case 10: 1570 vmxctx->guest_r10 = regval; 1571 break; 1572 case 11: 1573 vmxctx->guest_r11 = regval; 1574 break; 1575 case 12: 1576 vmxctx->guest_r12 = regval; 1577 break; 1578 case 13: 1579 vmxctx->guest_r13 = regval; 1580 break; 1581 case 14: 1582 vmxctx->guest_r14 = regval; 1583 break; 1584 case 15: 1585 vmxctx->guest_r15 = regval; 1586 break; 1587 default: 1588 panic("invalid vmx register %d", ident); 1589 } 1590} 1591 1592static int 1593vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1594{ 1595 uint64_t crval, regval; 1596 1597 /* We only handle mov to %cr0 at this time */ 1598 if ((exitqual & 0xf0) != 0x00) 1599 return (UNHANDLED); 1600 1601 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1602 1603 vmcs_write(VMCS_CR0_SHADOW, regval); 1604 1605 crval = regval | cr0_ones_mask; 1606 crval &= ~cr0_zeros_mask; 1607 vmcs_write(VMCS_GUEST_CR0, crval); 1608 1609 if (regval & CR0_PG) { 1610 uint64_t efer, entry_ctls; 1611 1612 /* 1613 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1614 * the "IA-32e mode guest" bit in VM-entry control must be 1615 * equal. 
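		 *
		 * Note that only the transition into IA-32e mode is handled
		 * here; clearing CR0.PG while EFER.LME is set would require
		 * the reverse update and is not handled by this function.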
1616 */ 1617 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1618 if (efer & EFER_LME) { 1619 efer |= EFER_LMA; 1620 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1621 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1622 entry_ctls |= VM_ENTRY_GUEST_LMA; 1623 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1624 } 1625 } 1626 1627 return (HANDLED); 1628} 1629 1630static int 1631vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1632{ 1633 uint64_t crval, regval; 1634 1635 /* We only handle mov to %cr4 at this time */ 1636 if ((exitqual & 0xf0) != 0x00) 1637 return (UNHANDLED); 1638 1639 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1640 1641 vmcs_write(VMCS_CR4_SHADOW, regval); 1642 1643 crval = regval | cr4_ones_mask; 1644 crval &= ~cr4_zeros_mask; 1645 vmcs_write(VMCS_GUEST_CR4, crval); 1646 1647 return (HANDLED); 1648} 1649 1650static int 1651vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1652{ 1653 struct vlapic *vlapic; 1654 uint64_t cr8; 1655 int regnum; 1656 1657 /* We only handle mov %cr8 to/from a register at this time. */ 1658 if ((exitqual & 0xe0) != 0x00) { 1659 return (UNHANDLED); 1660 } 1661 1662 vlapic = vm_lapic(vmx->vm, vcpu); 1663 regnum = (exitqual >> 8) & 0xf; 1664 if (exitqual & 0x10) { 1665 cr8 = vlapic_get_cr8(vlapic); 1666 vmx_set_guest_reg(vmx, vcpu, regnum, cr8); 1667 } else { 1668 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum); 1669 vlapic_set_cr8(vlapic, cr8); 1670 } 1671 1672 return (HANDLED); 1673} 1674 1675/* 1676 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1677 */ 1678static int 1679vmx_cpl(void) 1680{ 1681 uint32_t ssar; 1682 1683 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1684 return ((ssar >> 5) & 0x3); 1685} 1686 1687static enum vm_cpu_mode 1688vmx_cpu_mode(void) 1689{ 1690 uint32_t csar; 1691 1692 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1693 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1694 if (csar & 0x2000) 1695 return (CPU_MODE_64BIT); /* CS.L = 1 */ 1696 else 1697 return (CPU_MODE_COMPATIBILITY); 1698 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1699 return (CPU_MODE_PROTECTED); 1700 } else { 1701 return (CPU_MODE_REAL); 1702 } 1703} 1704 1705static enum vm_paging_mode 1706vmx_paging_mode(void) 1707{ 1708 1709 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1710 return (PAGING_MODE_FLAT); 1711 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1712 return (PAGING_MODE_32); 1713 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1714 return (PAGING_MODE_64); 1715 else 1716 return (PAGING_MODE_PAE); 1717} 1718 1719static uint64_t 1720inout_str_index(struct vmx *vmx, int vcpuid, int in) 1721{ 1722 uint64_t val; 1723 int error; 1724 enum vm_reg_name reg; 1725 1726 reg = in ? 
VM_REG_GUEST_RDI : VM_REG_GUEST_RSI; 1727 error = vmx_getreg(vmx, vcpuid, reg, &val); 1728 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error)); 1729 return (val); 1730} 1731 1732static uint64_t 1733inout_str_count(struct vmx *vmx, int vcpuid, int rep) 1734{ 1735 uint64_t val; 1736 int error; 1737 1738 if (rep) { 1739 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val); 1740 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error)); 1741 } else { 1742 val = 1; 1743 } 1744 return (val); 1745} 1746 1747static int 1748inout_str_addrsize(uint32_t inst_info) 1749{ 1750 uint32_t size; 1751 1752 size = (inst_info >> 7) & 0x7; 1753 switch (size) { 1754 case 0: 1755 return (2); /* 16 bit */ 1756 case 1: 1757 return (4); /* 32 bit */ 1758 case 2: 1759 return (8); /* 64 bit */ 1760 default: 1761 panic("%s: invalid size encoding %d", __func__, size); 1762 } 1763} 1764 1765static void 1766inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in, 1767 struct vm_inout_str *vis) 1768{ 1769 int error, s; 1770 1771 if (in) { 1772 vis->seg_name = VM_REG_GUEST_ES; 1773 } else { 1774 s = (inst_info >> 15) & 0x7; 1775 vis->seg_name = vm_segment_name(s); 1776 } 1777 1778 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc); 1779 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error)); 1780 1781 /* XXX modify svm.c to update bit 16 of seg_desc.access (unusable) */ 1782} 1783 1784static void 1785vmx_paging_info(struct vm_guest_paging *paging) 1786{ 1787 paging->cr3 = vmcs_guest_cr3(); 1788 paging->cpl = vmx_cpl(); 1789 paging->cpu_mode = vmx_cpu_mode(); 1790 paging->paging_mode = vmx_paging_mode(); 1791} 1792 1793static void 1794vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla) 1795{ 1796 struct vm_guest_paging *paging; 1797 uint32_t csar; 1798 1799 paging = &vmexit->u.inst_emul.paging; 1800 1801 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 1802 vmexit->u.inst_emul.gpa = gpa; 1803 vmexit->u.inst_emul.gla = gla; 1804 vmx_paging_info(paging); 1805 switch (paging->cpu_mode) { 1806 case CPU_MODE_PROTECTED: 1807 case CPU_MODE_COMPATIBILITY: 1808 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1809 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar); 1810 break; 1811 default: 1812 vmexit->u.inst_emul.cs_d = 0; 1813 break; 1814 } 1815} 1816 1817static int 1818ept_fault_type(uint64_t ept_qual) 1819{ 1820 int fault_type; 1821 1822 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1823 fault_type = VM_PROT_WRITE; 1824 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1825 fault_type = VM_PROT_EXECUTE; 1826 else 1827 fault_type= VM_PROT_READ; 1828 1829 return (fault_type); 1830} 1831 1832static boolean_t 1833ept_emulation_fault(uint64_t ept_qual) 1834{ 1835 int read, write; 1836 1837 /* EPT fault on an instruction fetch doesn't make sense here */ 1838 if (ept_qual & EPT_VIOLATION_INST_FETCH) 1839 return (FALSE); 1840 1841 /* EPT fault must be a read fault or a write fault */ 1842 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 1843 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 1844 if ((read | write) == 0) 1845 return (FALSE); 1846 1847 /* 1848 * The EPT violation must have been caused by accessing a 1849 * guest-physical address that is a translation of a guest-linear 1850 * address. 
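	 * Violations without a valid guest-linear address, or that were
	 * caused by the processor accessing the guest's own paging
	 * structures, are not candidates for instruction emulation.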
1851 */ 1852 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 1853 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 1854 return (FALSE); 1855 } 1856 1857 return (TRUE); 1858} 1859 1860static __inline int 1861apic_access_virtualization(struct vmx *vmx, int vcpuid) 1862{ 1863 uint32_t proc_ctls2; 1864 1865 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1866 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); 1867} 1868 1869static __inline int 1870x2apic_virtualization(struct vmx *vmx, int vcpuid) 1871{ 1872 uint32_t proc_ctls2; 1873 1874 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 1875 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 1876} 1877 1878static int 1879vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 1880 uint64_t qual) 1881{ 1882 int error, handled, offset; 1883 uint32_t *apic_regs, vector; 1884 bool retu; 1885 1886 handled = HANDLED; 1887 offset = APIC_WRITE_OFFSET(qual); 1888 1889 if (!apic_access_virtualization(vmx, vcpuid)) { 1890 /* 1891 * In general there should not be any APIC write VM-exits 1892 * unless APIC-access virtualization is enabled. 1893 * 1894 * However self-IPI virtualization can legitimately trigger 1895 * an APIC-write VM-exit so treat it specially. 1896 */ 1897 if (x2apic_virtualization(vmx, vcpuid) && 1898 offset == APIC_OFFSET_SELF_IPI) { 1899 apic_regs = (uint32_t *)(vlapic->apic_page); 1900 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1901 vlapic_self_ipi_handler(vlapic, vector); 1902 return (HANDLED); 1903 } else 1904 return (UNHANDLED); 1905 } 1906 1907 switch (offset) { 1908 case APIC_OFFSET_ID: 1909 vlapic_id_write_handler(vlapic); 1910 break; 1911 case APIC_OFFSET_LDR: 1912 vlapic_ldr_write_handler(vlapic); 1913 break; 1914 case APIC_OFFSET_DFR: 1915 vlapic_dfr_write_handler(vlapic); 1916 break; 1917 case APIC_OFFSET_SVR: 1918 vlapic_svr_write_handler(vlapic); 1919 break; 1920 case APIC_OFFSET_ESR: 1921 vlapic_esr_write_handler(vlapic); 1922 break; 1923 case APIC_OFFSET_ICR_LOW: 1924 retu = false; 1925 error = vlapic_icrlo_write_handler(vlapic, &retu); 1926 if (error != 0 || retu) 1927 handled = UNHANDLED; 1928 break; 1929 case APIC_OFFSET_CMCI_LVT: 1930 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1931 vlapic_lvt_write_handler(vlapic, offset); 1932 break; 1933 case APIC_OFFSET_TIMER_ICR: 1934 vlapic_icrtmr_write_handler(vlapic); 1935 break; 1936 case APIC_OFFSET_TIMER_DCR: 1937 vlapic_dcr_write_handler(vlapic); 1938 break; 1939 default: 1940 handled = UNHANDLED; 1941 break; 1942 } 1943 return (handled); 1944} 1945 1946static bool 1947apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 1948{ 1949 1950 if (apic_access_virtualization(vmx, vcpuid) && 1951 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 1952 return (true); 1953 else 1954 return (false); 1955} 1956 1957static int 1958vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 1959{ 1960 uint64_t qual; 1961 int access_type, offset, allowed; 1962 1963 if (!apic_access_virtualization(vmx, vcpuid)) 1964 return (UNHANDLED); 1965 1966 qual = vmexit->u.vmx.exit_qualification; 1967 access_type = APIC_ACCESS_TYPE(qual); 1968 offset = APIC_ACCESS_OFFSET(qual); 1969 1970 allowed = 0; 1971 if (access_type == 0) { 1972 /* 1973 * Read data access to the following registers is expected. 
1974 */ 1975 switch (offset) { 1976 case APIC_OFFSET_APR: 1977 case APIC_OFFSET_PPR: 1978 case APIC_OFFSET_RRR: 1979 case APIC_OFFSET_CMCI_LVT: 1980 case APIC_OFFSET_TIMER_CCR: 1981 allowed = 1; 1982 break; 1983 default: 1984 break; 1985 } 1986 } else if (access_type == 1) { 1987 /* 1988 * Write data access to the following registers is expected. 1989 */ 1990 switch (offset) { 1991 case APIC_OFFSET_VER: 1992 case APIC_OFFSET_APR: 1993 case APIC_OFFSET_PPR: 1994 case APIC_OFFSET_RRR: 1995 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 1996 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 1997 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 1998 case APIC_OFFSET_CMCI_LVT: 1999 case APIC_OFFSET_TIMER_CCR: 2000 allowed = 1; 2001 break; 2002 default: 2003 break; 2004 } 2005 } 2006 2007 if (allowed) { 2008 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset, 2009 VIE_INVALID_GLA); 2010 } 2011 2012 /* 2013 * Regardless of whether the APIC-access is allowed this handler 2014 * always returns UNHANDLED: 2015 * - if the access is allowed then it is handled by emulating the 2016 * instruction that caused the VM-exit (outside the critical section) 2017 * - if the access is not allowed then it will be converted to an 2018 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2019 */ 2020 return (UNHANDLED); 2021} 2022 2023static int 2024vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2025{ 2026 int error, handled, in; 2027 struct vmxctx *vmxctx; 2028 struct vlapic *vlapic; 2029 struct vm_inout_str *vis; 2030 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 2031 uint32_t reason; 2032 uint64_t qual, gpa; 2033 bool retu; 2034 2035 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2036 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2037 2038 handled = UNHANDLED; 2039 vmxctx = &vmx->ctx[vcpu]; 2040 2041 qual = vmexit->u.vmx.exit_qualification; 2042 reason = vmexit->u.vmx.exit_reason; 2043 vmexit->exitcode = VM_EXITCODE_BOGUS; 2044 2045 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2046 2047 /* 2048 * VM exits that could be triggered during event injection on the 2049 * previous VM entry need to be handled specially by re-injecting 2050 * the event. 2051 * 2052 * See "Information for VM Exits During Event Delivery" in Intel SDM 2053 * for details. 2054 */ 2055 switch (reason) { 2056 case EXIT_REASON_EPT_FAULT: 2057 case EXIT_REASON_EPT_MISCONFIG: 2058 case EXIT_REASON_APIC_ACCESS: 2059 case EXIT_REASON_TASK_SWITCH: 2060 case EXIT_REASON_EXCEPTION: 2061 idtvec_info = vmcs_idt_vectoring_info(); 2062 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2063 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2064 vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info); 2065 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2066 idtvec_err = vmcs_idt_vectoring_err(); 2067 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, 2068 idtvec_err); 2069 } 2070 /* 2071 * If 'virtual NMIs' are being used and the VM-exit 2072 * happened while injecting an NMI during the previous 2073 * VM-entry, then clear "blocking by NMI" in the Guest 2074 * Interruptibility-state. 
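 *
 * [Editorial note: as a concrete example of the re-injection above, if
 *  delivery of a guest page fault (IDT-vectoring info valid, type
 *  hardware exception, vector 14) itself causes an EPT violation, the
 *  vectoring information and error code are copied into the VM-entry
 *  interruption-information and error-code fields, so the #PF is
 *  delivered again on the next VM entry once the EPT fault has been
 *  resolved.]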
2075 */ 2076 if ((idtvec_info & VMCS_INTR_T_MASK) == 2077 VMCS_INTR_T_NMI) { 2078 vmx_clear_nmi_blocking(vmx, vcpu); 2079 } 2080 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2081 } 2082 default: 2083 idtvec_info = 0; 2084 break; 2085 } 2086 2087 switch (reason) { 2088 case EXIT_REASON_CR_ACCESS: 2089 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2090 switch (qual & 0xf) { 2091 case 0: 2092 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2093 break; 2094 case 4: 2095 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2096 break; 2097 case 8: 2098 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2099 break; 2100 } 2101 break; 2102 case EXIT_REASON_RDMSR: 2103 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2104 retu = false; 2105 ecx = vmxctx->guest_rcx; 2106 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2107 error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu); 2108 if (error) { 2109 vmexit->exitcode = VM_EXITCODE_RDMSR; 2110 vmexit->u.msr.code = ecx; 2111 } else if (!retu) { 2112 handled = HANDLED; 2113 } else { 2114 /* Return to userspace with a valid exitcode */ 2115 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2116 ("emulate_wrmsr retu with bogus exitcode")); 2117 } 2118 break; 2119 case EXIT_REASON_WRMSR: 2120 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2121 retu = false; 2122 eax = vmxctx->guest_rax; 2123 ecx = vmxctx->guest_rcx; 2124 edx = vmxctx->guest_rdx; 2125 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2126 ecx, (uint64_t)edx << 32 | eax); 2127 error = emulate_wrmsr(vmx->vm, vcpu, ecx, 2128 (uint64_t)edx << 32 | eax, &retu); 2129 if (error) { 2130 vmexit->exitcode = VM_EXITCODE_WRMSR; 2131 vmexit->u.msr.code = ecx; 2132 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2133 } else if (!retu) { 2134 handled = HANDLED; 2135 } else { 2136 /* Return to userspace with a valid exitcode */ 2137 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2138 ("emulate_wrmsr retu with bogus exitcode")); 2139 } 2140 break; 2141 case EXIT_REASON_HLT: 2142 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2143 vmexit->exitcode = VM_EXITCODE_HLT; 2144 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2145 break; 2146 case EXIT_REASON_MTF: 2147 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2148 vmexit->exitcode = VM_EXITCODE_MTRAP; 2149 break; 2150 case EXIT_REASON_PAUSE: 2151 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2152 vmexit->exitcode = VM_EXITCODE_PAUSE; 2153 break; 2154 case EXIT_REASON_INTR_WINDOW: 2155 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2156 vmx_clear_int_window_exiting(vmx, vcpu); 2157 return (1); 2158 case EXIT_REASON_EXT_INTR: 2159 /* 2160 * External interrupts serve only to cause VM exits and allow 2161 * the host interrupt handler to run. 2162 * 2163 * If this external interrupt triggers a virtual interrupt 2164 * to a VM, then that state will be recorded by the 2165 * host interrupt handler in the VM's softc. We will inject 2166 * this virtual interrupt during the subsequent VM enter. 2167 */ 2168 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2169 2170 /* 2171 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2172 * This appears to be a bug in VMware Fusion? 2173 */ 2174 if (!(intr_info & VMCS_INTR_VALID)) 2175 return (1); 2176 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2177 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2178 ("VM exit interruption info invalid: %#x", intr_info)); 2179 vmx_trigger_hostintr(intr_info & 0xff); 2180 2181 /* 2182 * This is special. 
We want to treat this as an 'handled' 2183 * VM-exit but not increment the instruction pointer. 2184 */ 2185 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2186 return (1); 2187 case EXIT_REASON_NMI_WINDOW: 2188 /* Exit to allow the pending virtual NMI to be injected */ 2189 if (vm_nmi_pending(vmx->vm, vcpu)) 2190 vmx_inject_nmi(vmx, vcpu); 2191 vmx_clear_nmi_window_exiting(vmx, vcpu); 2192 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2193 return (1); 2194 case EXIT_REASON_INOUT: 2195 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2196 vmexit->exitcode = VM_EXITCODE_INOUT; 2197 vmexit->u.inout.bytes = (qual & 0x7) + 1; 2198 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; 2199 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; 2200 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0; 2201 vmexit->u.inout.port = (uint16_t)(qual >> 16); 2202 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); 2203 if (vmexit->u.inout.string) { 2204 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 2205 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 2206 vis = &vmexit->u.inout_str; 2207 vmx_paging_info(&vis->paging); 2208 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2209 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); 2210 vis->index = inout_str_index(vmx, vcpu, in); 2211 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep); 2212 vis->addrsize = inout_str_addrsize(inst_info); 2213 inout_str_seginfo(vmx, vcpu, inst_info, in, vis); 2214 } 2215 break; 2216 case EXIT_REASON_CPUID: 2217 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2218 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2219 break; 2220 case EXIT_REASON_EXCEPTION: 2221 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2222 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2223 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2224 ("VM exit interruption info invalid: %#x", intr_info)); 2225 2226 /* 2227 * If Virtual NMIs control is 1 and the VM-exit is due to a 2228 * fault encountered during the execution of IRET then we must 2229 * restore the state of "virtual-NMI blocking" before resuming 2230 * the guest. 2231 * 2232 * See "Resuming Guest Software after Handling an Exception". 2233 */ 2234 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2235 (intr_info & 0xff) != IDT_DF && 2236 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2237 vmx_restore_nmi_blocking(vmx, vcpu); 2238 2239 /* 2240 * The NMI has already been handled in vmx_exit_handle_nmi(). 2241 */ 2242 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) 2243 return (1); 2244 break; 2245 case EXIT_REASON_EPT_FAULT: 2246 /* 2247 * If 'gpa' lies within the address space allocated to 2248 * memory then this must be a nested page fault otherwise 2249 * this must be an instruction that accesses MMIO space. 2250 */ 2251 gpa = vmcs_gpa(); 2252 if (vm_mem_allocated(vmx->vm, gpa) || 2253 apic_access_fault(vmx, vcpu, gpa)) { 2254 vmexit->exitcode = VM_EXITCODE_PAGING; 2255 vmexit->u.paging.gpa = gpa; 2256 vmexit->u.paging.fault_type = ept_fault_type(qual); 2257 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2258 } else if (ept_emulation_fault(qual)) { 2259 vmexit_inst_emul(vmexit, gpa, vmcs_gla()); 2260 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); 2261 } 2262 /* 2263 * If Virtual NMIs control is 1 and the VM-exit is due to an 2264 * EPT fault during the execution of IRET then we must restore 2265 * the state of "virtual-NMI blocking" before resuming. 2266 * 2267 * See description of "NMI unblocking due to IRET" in 2268 * "Exit Qualification for EPT Violations". 
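 *
 * [Editorial note: EXIT_QUAL_NMIUDTI is assumed to be the "NMI unblocking
 *  due to IRET" bit (bit 12) of the exit qualification.  It is honored
 *  only when the exit did not occur during event delivery (the
 *  VMCS_IDT_VEC_VALID check below); in the event-delivery case the
 *  pending event is re-injected instead and NMI blocking is handled by
 *  the re-injection logic at the top of this function.]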
2269 */ 2270 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2271 (qual & EXIT_QUAL_NMIUDTI) != 0) 2272 vmx_restore_nmi_blocking(vmx, vcpu); 2273 break; 2274 case EXIT_REASON_VIRTUALIZED_EOI: 2275 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2276 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2277 vmexit->inst_length = 0; /* trap-like */ 2278 break; 2279 case EXIT_REASON_APIC_ACCESS: 2280 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2281 break; 2282 case EXIT_REASON_APIC_WRITE: 2283 /* 2284 * APIC-write VM exit is trap-like so the %rip is already 2285 * pointing to the next instruction. 2286 */ 2287 vmexit->inst_length = 0; 2288 vlapic = vm_lapic(vmx->vm, vcpu); 2289 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2290 break; 2291 case EXIT_REASON_XSETBV: 2292 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2293 break; 2294 default: 2295 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2296 break; 2297 } 2298 2299 if (handled) { 2300 /* 2301 * It is possible that control is returned to userland 2302 * even though we were able to handle the VM exit in the 2303 * kernel. 2304 * 2305 * In such a case we want to make sure that the userland 2306 * restarts guest execution at the instruction *after* 2307 * the one we just processed. Therefore we update the 2308 * guest rip in the VMCS and in 'vmexit'. 2309 */ 2310 vmexit->rip += vmexit->inst_length; 2311 vmexit->inst_length = 0; 2312 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2313 } else { 2314 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2315 /* 2316 * If this VM exit was not claimed by anybody then 2317 * treat it as a generic VMX exit. 2318 */ 2319 vmexit->exitcode = VM_EXITCODE_VMX; 2320 vmexit->u.vmx.status = VM_SUCCESS; 2321 vmexit->u.vmx.inst_type = 0; 2322 vmexit->u.vmx.inst_error = 0; 2323 } else { 2324 /* 2325 * The exitcode and collateral have been populated. 2326 * The VM exit will be processed further in userland. 2327 */ 2328 } 2329 } 2330 return (handled); 2331} 2332 2333static __inline void 2334vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2335{ 2336 2337 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2338 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2339 vmxctx->inst_fail_status)); 2340 2341 vmexit->inst_length = 0; 2342 vmexit->exitcode = VM_EXITCODE_VMX; 2343 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2344 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2345 vmexit->u.vmx.exit_reason = ~0; 2346 vmexit->u.vmx.exit_qualification = ~0; 2347 2348 switch (rc) { 2349 case VMX_VMRESUME_ERROR: 2350 case VMX_VMLAUNCH_ERROR: 2351 case VMX_INVEPT_ERROR: 2352 vmexit->u.vmx.inst_type = rc; 2353 break; 2354 default: 2355 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2356 } 2357} 2358 2359/* 2360 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2361 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2362 * sufficient to simply vector to the NMI handler via a software interrupt. 2363 * However, this must be done before maskable interrupts are enabled 2364 * otherwise the "iret" issued by an interrupt handler will incorrectly 2365 * clear NMI blocking. 
2366 */ 2367static __inline void 2368vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2369{ 2370 uint32_t intr_info; 2371 2372 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2373 2374 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2375 return; 2376 2377 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2378 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2379 ("VM exit interruption info invalid: %#x", intr_info)); 2380 2381 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2382 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2383 "to NMI has invalid vector: %#x", intr_info)); 2384 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2385 __asm __volatile("int $2"); 2386 } 2387} 2388 2389static int 2390vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap, 2391 void *rendezvous_cookie, void *suspend_cookie) 2392{ 2393 int rc, handled, launched; 2394 struct vmx *vmx; 2395 struct vm *vm; 2396 struct vmxctx *vmxctx; 2397 struct vmcs *vmcs; 2398 struct vm_exit *vmexit; 2399 struct vlapic *vlapic; 2400 uint64_t rip; 2401 uint32_t exit_reason; 2402 2403 vmx = arg; 2404 vm = vmx->vm; 2405 vmcs = &vmx->vmcs[vcpu]; 2406 vmxctx = &vmx->ctx[vcpu]; 2407 vlapic = vm_lapic(vm, vcpu); 2408 vmexit = vm_exitinfo(vm, vcpu); 2409 launched = 0; 2410 2411 KASSERT(vmxctx->pmap == pmap, 2412 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2413 2414 VMPTRLD(vmcs); 2415 2416 /* 2417 * XXX 2418 * We do this every time because we may setup the virtual machine 2419 * from a different process than the one that actually runs it. 2420 * 2421 * If the life of a virtual machine was spent entirely in the context 2422 * of a single process we could do this once in vmx_vminit(). 2423 */ 2424 vmcs_write(VMCS_HOST_CR3, rcr3()); 2425 2426 vmcs_write(VMCS_GUEST_RIP, startrip); 2427 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2428 do { 2429 handled = UNHANDLED; 2430 2431 /* 2432 * Interrupts are disabled from this point on until the 2433 * guest starts executing. This is done for the following 2434 * reasons: 2435 * 2436 * If an AST is asserted on this thread after the check below, 2437 * then the IPI_AST notification will not be lost, because it 2438 * will cause a VM exit due to external interrupt as soon as 2439 * the guest state is loaded. 2440 * 2441 * A posted interrupt after 'vmx_inject_interrupts()' will 2442 * not be "lost" because it will be held pending in the host 2443 * APIC because interrupts are disabled. The pending interrupt 2444 * will be recognized as soon as the guest state is loaded. 2445 * 2446 * The same reasoning applies to the IPI generated by 2447 * pmap_invalidate_ept(). 
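 *
 * [Editorial illustration of the window being closed, in the style of
 *  the PIR race diagram later in this file (hypothetical timeline):
 *
 *	vcpu thread (intrs off)		other host thread
 *	check td_flags: clear
 *					set TDF_ASTPENDING, send IPI_AST
 *	(IPI held pending by the CPU)
 *	VM entry
 *	immediate VM exit (ext intr)	-> AST noticed on next loop pass
 *
 *  With interrupts enabled between the check and the VM entry, the IPI
 *  could be serviced on the host before the entry and the AST would not
 *  be noticed until some later, unrelated VM exit.]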
2448 */ 2449 disable_intr(); 2450 if (vcpu_suspended(suspend_cookie)) { 2451 enable_intr(); 2452 vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip()); 2453 break; 2454 } 2455 2456 if (vcpu_rendezvous_pending(rendezvous_cookie)) { 2457 enable_intr(); 2458 vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip()); 2459 break; 2460 } 2461 2462 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 2463 enable_intr(); 2464 vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip()); 2465 vmx_astpending_trace(vmx, vcpu, vmexit->rip); 2466 handled = HANDLED; 2467 break; 2468 } 2469 2470 vmx_inject_interrupts(vmx, vcpu, vlapic); 2471 vmx_run_trace(vmx, vcpu); 2472 rc = vmx_enter_guest(vmxctx, vmx, launched); 2473 2474 /* Collect some information for VM exit processing */ 2475 vmexit->rip = rip = vmcs_guest_rip(); 2476 vmexit->inst_length = vmexit_instruction_length(); 2477 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2478 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2479 2480 if (rc == VMX_GUEST_VMEXIT) { 2481 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2482 enable_intr(); 2483 handled = vmx_exit_process(vmx, vcpu, vmexit); 2484 } else { 2485 enable_intr(); 2486 vmx_exit_inst_error(vmxctx, rc, vmexit); 2487 } 2488 launched = 1; 2489 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 2490 } while (handled); 2491 2492 /* 2493 * If a VM exit has been handled then the exitcode must be BOGUS 2494 * If a VM exit is not handled then the exitcode must not be BOGUS 2495 */ 2496 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 2497 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 2498 panic("Mismatch between handled (%d) and exitcode (%d)", 2499 handled, vmexit->exitcode); 2500 } 2501 2502 if (!handled) 2503 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 2504 2505 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 2506 vmexit->exitcode); 2507 2508 VMCLEAR(vmcs); 2509 return (0); 2510} 2511 2512static void 2513vmx_vmcleanup(void *arg) 2514{ 2515 int i; 2516 struct vmx *vmx = arg; 2517 2518 if (apic_access_virtualization(vmx, 0)) 2519 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2520 2521 for (i = 0; i < VM_MAXCPU; i++) 2522 vpid_free(vmx->state[i].vpid); 2523 2524 free(vmx, M_VMX); 2525 2526 return; 2527} 2528 2529static register_t * 2530vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2531{ 2532 2533 switch (reg) { 2534 case VM_REG_GUEST_RAX: 2535 return (&vmxctx->guest_rax); 2536 case VM_REG_GUEST_RBX: 2537 return (&vmxctx->guest_rbx); 2538 case VM_REG_GUEST_RCX: 2539 return (&vmxctx->guest_rcx); 2540 case VM_REG_GUEST_RDX: 2541 return (&vmxctx->guest_rdx); 2542 case VM_REG_GUEST_RSI: 2543 return (&vmxctx->guest_rsi); 2544 case VM_REG_GUEST_RDI: 2545 return (&vmxctx->guest_rdi); 2546 case VM_REG_GUEST_RBP: 2547 return (&vmxctx->guest_rbp); 2548 case VM_REG_GUEST_R8: 2549 return (&vmxctx->guest_r8); 2550 case VM_REG_GUEST_R9: 2551 return (&vmxctx->guest_r9); 2552 case VM_REG_GUEST_R10: 2553 return (&vmxctx->guest_r10); 2554 case VM_REG_GUEST_R11: 2555 return (&vmxctx->guest_r11); 2556 case VM_REG_GUEST_R12: 2557 return (&vmxctx->guest_r12); 2558 case VM_REG_GUEST_R13: 2559 return (&vmxctx->guest_r13); 2560 case VM_REG_GUEST_R14: 2561 return (&vmxctx->guest_r14); 2562 case VM_REG_GUEST_R15: 2563 return (&vmxctx->guest_r15); 2564 case VM_REG_GUEST_CR2: 2565 return (&vmxctx->guest_cr2); 2566 default: 2567 break; 2568 } 2569 return (NULL); 2570} 2571 2572static int 2573vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 2574{ 2575 register_t 
*regp; 2576 2577 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 2578 *retval = *regp; 2579 return (0); 2580 } else 2581 return (EINVAL); 2582} 2583 2584static int 2585vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 2586{ 2587 register_t *regp; 2588 2589 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 2590 *regp = val; 2591 return (0); 2592 } else 2593 return (EINVAL); 2594} 2595 2596static int 2597vmx_shadow_reg(int reg) 2598{ 2599 int shreg; 2600 2601 shreg = -1; 2602 2603 switch (reg) { 2604 case VM_REG_GUEST_CR0: 2605 shreg = VMCS_CR0_SHADOW; 2606 break; 2607 case VM_REG_GUEST_CR4: 2608 shreg = VMCS_CR4_SHADOW; 2609 break; 2610 default: 2611 break; 2612 } 2613 2614 return (shreg); 2615} 2616 2617static int 2618vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 2619{ 2620 int running, hostcpu; 2621 struct vmx *vmx = arg; 2622 2623 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2624 if (running && hostcpu != curcpu) 2625 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 2626 2627 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0) 2628 return (0); 2629 2630 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval)); 2631} 2632 2633static int 2634vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 2635{ 2636 int error, hostcpu, running, shadow; 2637 uint64_t ctls; 2638 pmap_t pmap; 2639 struct vmx *vmx = arg; 2640 2641 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2642 if (running && hostcpu != curcpu) 2643 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 2644 2645 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0) 2646 return (0); 2647 2648 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val); 2649 2650 if (error == 0) { 2651 /* 2652 * If the "load EFER" VM-entry control is 1 then the 2653 * value of EFER.LMA must be identical to "IA-32e mode guest" 2654 * bit in the VM-entry control. 2655 */ 2656 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 2657 (reg == VM_REG_GUEST_EFER)) { 2658 vmcs_getreg(&vmx->vmcs[vcpu], running, 2659 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 2660 if (val & EFER_LMA) 2661 ctls |= VM_ENTRY_GUEST_LMA; 2662 else 2663 ctls &= ~VM_ENTRY_GUEST_LMA; 2664 vmcs_setreg(&vmx->vmcs[vcpu], running, 2665 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 2666 } 2667 2668 shadow = vmx_shadow_reg(reg); 2669 if (shadow > 0) { 2670 /* 2671 * Store the unmodified value in the shadow 2672 */ 2673 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 2674 VMCS_IDENT(shadow), val); 2675 } 2676 2677 if (reg == VM_REG_GUEST_CR3) { 2678 /* 2679 * Invalidate the guest vcpu's TLB mappings to emulate 2680 * the behavior of updating %cr3. 2681 * 2682 * XXX the processor retains global mappings when %cr3 2683 * is updated but vmx_invvpid() does not. 
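 *
 * [Editorial note: over-invalidation is safe here.  Assuming
 *  vmx_invvpid() performs a single-context INVVPID for the vcpu's VPID,
 *  flushing global mappings as well merely costs extra guest TLB
 *  refills; it cannot leave stale translations, so the XXX above is a
 *  performance caveat rather than a correctness issue.]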
2684 */ 2685 pmap = vmx->ctx[vcpu].pmap; 2686 vmx_invvpid(vmx, vcpu, pmap, running); 2687 } 2688 } 2689 2690 return (error); 2691} 2692 2693static int 2694vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2695{ 2696 int hostcpu, running; 2697 struct vmx *vmx = arg; 2698 2699 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2700 if (running && hostcpu != curcpu) 2701 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 2702 2703 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc)); 2704} 2705 2706static int 2707vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2708{ 2709 int hostcpu, running; 2710 struct vmx *vmx = arg; 2711 2712 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 2713 if (running && hostcpu != curcpu) 2714 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 2715 2716 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc)); 2717} 2718 2719static int 2720vmx_getcap(void *arg, int vcpu, int type, int *retval) 2721{ 2722 struct vmx *vmx = arg; 2723 int vcap; 2724 int ret; 2725 2726 ret = ENOENT; 2727 2728 vcap = vmx->cap[vcpu].set; 2729 2730 switch (type) { 2731 case VM_CAP_HALT_EXIT: 2732 if (cap_halt_exit) 2733 ret = 0; 2734 break; 2735 case VM_CAP_PAUSE_EXIT: 2736 if (cap_pause_exit) 2737 ret = 0; 2738 break; 2739 case VM_CAP_MTRAP_EXIT: 2740 if (cap_monitor_trap) 2741 ret = 0; 2742 break; 2743 case VM_CAP_UNRESTRICTED_GUEST: 2744 if (cap_unrestricted_guest) 2745 ret = 0; 2746 break; 2747 case VM_CAP_ENABLE_INVPCID: 2748 if (cap_invpcid) 2749 ret = 0; 2750 break; 2751 default: 2752 break; 2753 } 2754 2755 if (ret == 0) 2756 *retval = (vcap & (1 << type)) ? 1 : 0; 2757 2758 return (ret); 2759} 2760 2761static int 2762vmx_setcap(void *arg, int vcpu, int type, int val) 2763{ 2764 struct vmx *vmx = arg; 2765 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 2766 uint32_t baseval; 2767 uint32_t *pptr; 2768 int error; 2769 int flag; 2770 int reg; 2771 int retval; 2772 2773 retval = ENOENT; 2774 pptr = NULL; 2775 2776 switch (type) { 2777 case VM_CAP_HALT_EXIT: 2778 if (cap_halt_exit) { 2779 retval = 0; 2780 pptr = &vmx->cap[vcpu].proc_ctls; 2781 baseval = *pptr; 2782 flag = PROCBASED_HLT_EXITING; 2783 reg = VMCS_PRI_PROC_BASED_CTLS; 2784 } 2785 break; 2786 case VM_CAP_MTRAP_EXIT: 2787 if (cap_monitor_trap) { 2788 retval = 0; 2789 pptr = &vmx->cap[vcpu].proc_ctls; 2790 baseval = *pptr; 2791 flag = PROCBASED_MTF; 2792 reg = VMCS_PRI_PROC_BASED_CTLS; 2793 } 2794 break; 2795 case VM_CAP_PAUSE_EXIT: 2796 if (cap_pause_exit) { 2797 retval = 0; 2798 pptr = &vmx->cap[vcpu].proc_ctls; 2799 baseval = *pptr; 2800 flag = PROCBASED_PAUSE_EXITING; 2801 reg = VMCS_PRI_PROC_BASED_CTLS; 2802 } 2803 break; 2804 case VM_CAP_UNRESTRICTED_GUEST: 2805 if (cap_unrestricted_guest) { 2806 retval = 0; 2807 pptr = &vmx->cap[vcpu].proc_ctls2; 2808 baseval = *pptr; 2809 flag = PROCBASED2_UNRESTRICTED_GUEST; 2810 reg = VMCS_SEC_PROC_BASED_CTLS; 2811 } 2812 break; 2813 case VM_CAP_ENABLE_INVPCID: 2814 if (cap_invpcid) { 2815 retval = 0; 2816 pptr = &vmx->cap[vcpu].proc_ctls2; 2817 baseval = *pptr; 2818 flag = PROCBASED2_ENABLE_INVPCID; 2819 reg = VMCS_SEC_PROC_BASED_CTLS; 2820 } 2821 break; 2822 default: 2823 break; 2824 } 2825 2826 if (retval == 0) { 2827 if (val) { 2828 baseval |= flag; 2829 } else { 2830 baseval &= ~flag; 2831 } 2832 VMPTRLD(vmcs); 2833 error = vmwrite(reg, baseval); 2834 VMCLEAR(vmcs); 2835 2836 if (error) { 2837 retval = error; 2838 } else { 2839 /* 2840 * Update optional stored flags, and record 2841 * setting 2842 */ 2843 if (pptr != 
NULL) { 2844 *pptr = baseval; 2845 } 2846 2847 if (val) { 2848 vmx->cap[vcpu].set |= (1 << type); 2849 } else { 2850 vmx->cap[vcpu].set &= ~(1 << type); 2851 } 2852 } 2853 } 2854 2855 return (retval); 2856} 2857 2858struct vlapic_vtx { 2859 struct vlapic vlapic; 2860 struct pir_desc *pir_desc; 2861 struct vmx *vmx; 2862}; 2863 2864#define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 2865do { \ 2866 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 2867 level ? "level" : "edge", vector); \ 2868 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 2869 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 2870 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 2871 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 2872 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 2873} while (0) 2874 2875/* 2876 * vlapic->ops handlers that utilize the APICv hardware assist described in 2877 * Chapter 29 of the Intel SDM. 2878 */ 2879static int 2880vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 2881{ 2882 struct vlapic_vtx *vlapic_vtx; 2883 struct pir_desc *pir_desc; 2884 uint64_t mask; 2885 int idx, notify; 2886 2887 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2888 pir_desc = vlapic_vtx->pir_desc; 2889 2890 /* 2891 * Keep track of interrupt requests in the PIR descriptor. This is 2892 * because the virtual APIC page pointed to by the VMCS cannot be 2893 * modified if the vcpu is running. 2894 */ 2895 idx = vector / 64; 2896 mask = 1UL << (vector % 64); 2897 atomic_set_long(&pir_desc->pir[idx], mask); 2898 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1); 2899 2900 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 2901 level, "vmx_set_intr_ready"); 2902 return (notify); 2903} 2904 2905static int 2906vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 2907{ 2908 struct vlapic_vtx *vlapic_vtx; 2909 struct pir_desc *pir_desc; 2910 struct LAPIC *lapic; 2911 uint64_t pending, pirval; 2912 uint32_t ppr, vpr; 2913 int i; 2914 2915 /* 2916 * This function is only expected to be called from the 'HLT' exit 2917 * handler which does not care about the vector that is pending. 2918 */ 2919 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 2920 2921 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2922 pir_desc = vlapic_vtx->pir_desc; 2923 2924 pending = atomic_load_acq_long(&pir_desc->pending); 2925 if (!pending) 2926 return (0); /* common case */ 2927 2928 /* 2929 * If there is an interrupt pending then it will be recognized only 2930 * if its priority is greater than the processor priority. 2931 * 2932 * Special case: if the processor priority is zero then any pending 2933 * interrupt will be recognized. 
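 *
 * [Editorial note: a worked example of the comparison done below.  With
 *  ppr = 0x40 (priority class 4) and the highest bit set in the PIR
 *  corresponding to vector 0x61 (i.e. in pir[1]), the loop computes
 *  vpr = (64 + flsl(pirval) - 1) & 0xf0 = 0x60; since 0x60 > 0x40 a
 *  pending interrupt is reported and the vcpu should not halt.]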
2934 */ 2935 lapic = vlapic->apic_page; 2936 ppr = lapic->ppr & 0xf0; 2937 if (ppr == 0) 2938 return (1); 2939 2940 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 2941 lapic->ppr); 2942 2943 for (i = 3; i >= 0; i--) { 2944 pirval = pir_desc->pir[i]; 2945 if (pirval != 0) { 2946 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0; 2947 return (vpr > ppr); 2948 } 2949 } 2950 return (0); 2951} 2952 2953static void 2954vmx_intr_accepted(struct vlapic *vlapic, int vector) 2955{ 2956 2957 panic("vmx_intr_accepted: not expected to be called"); 2958} 2959 2960static void 2961vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) 2962{ 2963 struct vlapic_vtx *vlapic_vtx; 2964 struct vmx *vmx; 2965 struct vmcs *vmcs; 2966 uint64_t mask, val; 2967 2968 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); 2969 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL), 2970 ("vmx_set_tmr: vcpu cannot be running")); 2971 2972 vlapic_vtx = (struct vlapic_vtx *)vlapic; 2973 vmx = vlapic_vtx->vmx; 2974 vmcs = &vmx->vmcs[vlapic->vcpuid]; 2975 mask = 1UL << (vector % 64); 2976 2977 VMPTRLD(vmcs); 2978 val = vmcs_read(VMCS_EOI_EXIT(vector)); 2979 if (level) 2980 val |= mask; 2981 else 2982 val &= ~mask; 2983 vmcs_write(VMCS_EOI_EXIT(vector), val); 2984 VMCLEAR(vmcs); 2985} 2986 2987static void 2988vmx_enable_x2apic_mode(struct vlapic *vlapic) 2989{ 2990 struct vmx *vmx; 2991 struct vmcs *vmcs; 2992 uint32_t proc_ctls2; 2993 int vcpuid, error; 2994 2995 vcpuid = vlapic->vcpuid; 2996 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 2997 vmcs = &vmx->vmcs[vcpuid]; 2998 2999 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3000 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3001 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 3002 3003 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3004 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3005 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3006 3007 VMPTRLD(vmcs); 3008 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3009 VMCLEAR(vmcs); 3010 3011 if (vlapic->vcpuid == 0) { 3012 /* 3013 * The nested page table mappings are shared by all vcpus 3014 * so unmap the APIC access page just once. 3015 */ 3016 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3017 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3018 __func__, error)); 3019 3020 /* 3021 * The MSR bitmap is shared by all vcpus so modify it only 3022 * once in the context of vcpu 0. 3023 */ 3024 error = vmx_allow_x2apic_msrs(vmx); 3025 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3026 __func__, error)); 3027 } 3028} 3029 3030static void 3031vmx_post_intr(struct vlapic *vlapic, int hostcpu) 3032{ 3033 3034 ipi_cpu(hostcpu, pirvec); 3035} 3036 3037/* 3038 * Transfer the pending interrupts in the PIR descriptor to the IRR 3039 * in the virtual APIC page. 
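 *
 * [Editorial note: the PIR descriptor is assumed to hold a 256-bit bitmap
 *  (pir[0..3], one bit per vector) plus a 'pending' flag, mirroring the
 *  posted-interrupt descriptor consumed by hardware.  The transfer done
 *  below is:
 *
 *	pir[0] -> IRR0/IRR1  (vectors   0- 63)
 *	pir[1] -> IRR2/IRR3  (vectors  64-127)
 *	pir[2] -> IRR4/IRR5  (vectors 128-191)
 *	pir[3] -> IRR6/IRR7  (vectors 192-255)
 *
 *  Hardware performs the same transfer when a posted-interrupt
 *  notification arrives while the vcpu is in guest mode; this function
 *  covers the case where the vcpu is about to (re)enter the guest.]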
3040 */ 3041static void 3042vmx_inject_pir(struct vlapic *vlapic) 3043{ 3044 struct vlapic_vtx *vlapic_vtx; 3045 struct pir_desc *pir_desc; 3046 struct LAPIC *lapic; 3047 uint64_t val, pirval; 3048 int rvi, pirbase = -1; 3049 uint16_t intr_status_old, intr_status_new; 3050 3051 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3052 pir_desc = vlapic_vtx->pir_desc; 3053 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3054 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3055 "no posted interrupt pending"); 3056 return; 3057 } 3058 3059 pirval = 0; 3060 pirbase = -1; 3061 lapic = vlapic->apic_page; 3062 3063 val = atomic_readandclear_long(&pir_desc->pir[0]); 3064 if (val != 0) { 3065 lapic->irr0 |= val; 3066 lapic->irr1 |= val >> 32; 3067 pirbase = 0; 3068 pirval = val; 3069 } 3070 3071 val = atomic_readandclear_long(&pir_desc->pir[1]); 3072 if (val != 0) { 3073 lapic->irr2 |= val; 3074 lapic->irr3 |= val >> 32; 3075 pirbase = 64; 3076 pirval = val; 3077 } 3078 3079 val = atomic_readandclear_long(&pir_desc->pir[2]); 3080 if (val != 0) { 3081 lapic->irr4 |= val; 3082 lapic->irr5 |= val >> 32; 3083 pirbase = 128; 3084 pirval = val; 3085 } 3086 3087 val = atomic_readandclear_long(&pir_desc->pir[3]); 3088 if (val != 0) { 3089 lapic->irr6 |= val; 3090 lapic->irr7 |= val >> 32; 3091 pirbase = 192; 3092 pirval = val; 3093 } 3094 3095 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 3096 3097 /* 3098 * Update RVI so the processor can evaluate pending virtual 3099 * interrupts on VM-entry. 3100 * 3101 * It is possible for pirval to be 0 here, even though the 3102 * pending bit has been set. The scenario is: 3103 * CPU-Y is sending a posted interrupt to CPU-X, which 3104 * is running a guest and processing posted interrupts in h/w. 3105 * CPU-X will eventually exit and the state seen in s/w is 3106 * the pending bit set, but no PIR bits set. 
3107 * 3108 * CPU-X CPU-Y 3109 * (vm running) (host running) 3110 * rx posted interrupt 3111 * CLEAR pending bit 3112 * SET PIR bit 3113 * READ/CLEAR PIR bits 3114 * SET pending bit 3115 * (vm exit) 3116 * pending bit set, PIR 0 3117 */ 3118 if (pirval != 0) { 3119 rvi = pirbase + flsl(pirval) - 1; 3120 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 3121 intr_status_new = (intr_status_old & 0xFF00) | rvi; 3122 if (intr_status_new > intr_status_old) { 3123 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); 3124 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3125 "guest_intr_status changed from 0x%04x to 0x%04x", 3126 intr_status_old, intr_status_new); 3127 } 3128 } 3129} 3130 3131static struct vlapic * 3132vmx_vlapic_init(void *arg, int vcpuid) 3133{ 3134 struct vmx *vmx; 3135 struct vlapic *vlapic; 3136 struct vlapic_vtx *vlapic_vtx; 3137 3138 vmx = arg; 3139 3140 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); 3141 vlapic->vm = vmx->vm; 3142 vlapic->vcpuid = vcpuid; 3143 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3144 3145 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3146 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3147 vlapic_vtx->vmx = vmx; 3148 3149 if (virtual_interrupt_delivery) { 3150 vlapic->ops.set_intr_ready = vmx_set_intr_ready; 3151 vlapic->ops.pending_intr = vmx_pending_intr; 3152 vlapic->ops.intr_accepted = vmx_intr_accepted; 3153 vlapic->ops.set_tmr = vmx_set_tmr; 3154 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode; 3155 } 3156 3157 if (posted_interrupts) 3158 vlapic->ops.post_intr = vmx_post_intr; 3159 3160 vlapic_init(vlapic); 3161 3162 return (vlapic); 3163} 3164 3165static void 3166vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3167{ 3168 3169 vlapic_cleanup(vlapic); 3170 free(vlapic, M_VLAPIC); 3171} 3172 3173struct vmm_ops vmm_ops_intel = { 3174 vmx_init, 3175 vmx_cleanup, 3176 vmx_restore, 3177 vmx_vminit, 3178 vmx_run, 3179 vmx_vmcleanup, 3180 vmx_getreg, 3181 vmx_setreg, 3182 vmx_getdesc, 3183 vmx_setdesc, 3184 vmx_getcap, 3185 vmx_setcap, 3186 ept_vmspace_alloc, 3187 ept_vmspace_free, 3188 vmx_vlapic_init, 3189 vmx_vlapic_cleanup, 3190}; 3191
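/*
 * [Editorial appendix -- not part of vmx.c r268701.  A user-space analogue
 * of the posted-interrupt handshake implemented by vmx_set_intr_ready() and
 * vmx_inject_pir() above, using C11 atomics in place of the kernel's
 * atomic_set_long()/atomic_cmpset_long()/atomic_readandclear_long().  The
 * type and function names (struct pir, pir_post, pir_drain) are invented
 * for this illustration; the point is simply why only the first vector
 * posted after an injection generates a notification.]
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pir {
	_Atomic uint64_t bits[4];	/* one bit per vector 0..255 */
	_Atomic uint64_t pending;	/* notification outstanding? */
};

/* Post 'vector'; returns true if the target vcpu must be notified. */
static bool
pir_post(struct pir *p, int vector)
{
	uint64_t expect = 0;

	atomic_fetch_or(&p->bits[vector / 64], (uint64_t)1 << (vector % 64));
	return (atomic_compare_exchange_strong(&p->pending, &expect, 1));
}

/* Drain posted vectors into a 256-bit IRR image (4 x 64 bits). */
static void
pir_drain(struct pir *p, uint64_t irr[4])
{
	int i;

	if (atomic_exchange(&p->pending, 0) == 0)
		return;			/* nothing was posted */
	for (i = 0; i < 4; i++)
		irr[i] |= atomic_exchange(&p->bits[i], 0);
}

int
main(void)
{
	struct pir p = { 0 };
	uint64_t irr[4] = { 0 };

	printf("post 32: notify=%d\n", pir_post(&p, 32));	/* notify=1 */
	printf("post 48: notify=%d\n", pir_post(&p, 48));	/* notify=0 */
	pir_drain(&p, irr);
	printf("irr[0]=%#jx\n", (uintmax_t)irr[0]);		/* bits 32, 48 */
	return (0);
}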