vmx.c revision 347416
1/*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * Copyright (c) 2018 Joyent, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: stable/11/sys/amd64/vmm/intel/vmx.c 347416 2019-05-09 23:57:02Z jhb $ 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: stable/11/sys/amd64/vmm/intel/vmx.c 347416 2019-05-09 23:57:02Z jhb $"); 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/smp.h> 36#include <sys/kernel.h> 37#include <sys/malloc.h> 38#include <sys/pcpu.h> 39#include <sys/proc.h> 40#include <sys/sysctl.h> 41 42#include <vm/vm.h> 43#include <vm/pmap.h> 44 45#include <machine/psl.h> 46#include <machine/cpufunc.h> 47#include <machine/md_var.h> 48#include <machine/reg.h> 49#include <machine/segments.h> 50#include <machine/smp.h> 51#include <machine/specialreg.h> 52#include <machine/vmparam.h> 53 54#include <machine/vmm.h> 55#include <machine/vmm_dev.h> 56#include <machine/vmm_instruction_emul.h> 57#include "vmm_lapic.h" 58#include "vmm_host.h" 59#include "vmm_ioport.h" 60#include "vmm_ktr.h" 61#include "vmm_stat.h" 62#include "vatpic.h" 63#include "vlapic.h" 64#include "vlapic_priv.h" 65 66#include "ept.h" 67#include "vmx_cpufunc.h" 68#include "vmx.h" 69#include "vmx_msr.h" 70#include "x86.h" 71#include "vmx_controls.h" 72 73#define PINBASED_CTLS_ONE_SETTING \ 74 (PINBASED_EXTINT_EXITING | \ 75 PINBASED_NMI_EXITING | \ 76 PINBASED_VIRTUAL_NMI) 77#define PINBASED_CTLS_ZERO_SETTING 0 78 79#define PROCBASED_CTLS_WINDOW_SETTING \ 80 (PROCBASED_INT_WINDOW_EXITING | \ 81 PROCBASED_NMI_WINDOW_EXITING) 82 83#define PROCBASED_CTLS_ONE_SETTING \ 84 (PROCBASED_SECONDARY_CONTROLS | \ 85 PROCBASED_MWAIT_EXITING | \ 86 PROCBASED_MONITOR_EXITING | \ 87 PROCBASED_IO_EXITING | \ 88 PROCBASED_MSR_BITMAPS | \ 89 PROCBASED_CTLS_WINDOW_SETTING | \ 90 PROCBASED_CR8_LOAD_EXITING | \ 91 PROCBASED_CR8_STORE_EXITING) 92#define PROCBASED_CTLS_ZERO_SETTING \ 93 (PROCBASED_CR3_LOAD_EXITING | \ 94 PROCBASED_CR3_STORE_EXITING | \ 95 PROCBASED_IO_BITMAPS) 96 97#define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT 98#define PROCBASED_CTLS2_ZERO_SETTING 0 99 100#define VM_EXIT_CTLS_ONE_SETTING \ 101 (VM_EXIT_SAVE_DEBUG_CONTROLS | \ 102 VM_EXIT_HOST_LMA | \ 103 VM_EXIT_SAVE_EFER | \ 104 VM_EXIT_LOAD_EFER | \ 105 
VM_EXIT_ACKNOWLEDGE_INTERRUPT) 106 107#define VM_EXIT_CTLS_ZERO_SETTING 0 108 109#define VM_ENTRY_CTLS_ONE_SETTING \ 110 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 111 VM_ENTRY_LOAD_EFER) 112 113#define VM_ENTRY_CTLS_ZERO_SETTING \ 114 (VM_ENTRY_INTO_SMM | \ 115 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 116 117#define HANDLED 1 118#define UNHANDLED 0 119 120static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 121static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 122 123SYSCTL_DECL(_hw_vmm); 124SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL); 125 126int vmxon_enabled[MAXCPU]; 127static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 128 129static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 130static uint32_t exit_ctls, entry_ctls; 131 132static uint64_t cr0_ones_mask, cr0_zeros_mask; 133SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 134 &cr0_ones_mask, 0, NULL); 135SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 136 &cr0_zeros_mask, 0, NULL); 137 138static uint64_t cr4_ones_mask, cr4_zeros_mask; 139SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 140 &cr4_ones_mask, 0, NULL); 141SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 142 &cr4_zeros_mask, 0, NULL); 143 144static int vmx_initialized; 145SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 146 &vmx_initialized, 0, "Intel VMX initialized"); 147 148/* 149 * Optional capabilities 150 */ 151static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL); 152 153static int cap_halt_exit; 154SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0, 155 "HLT triggers a VM-exit"); 156 157static int cap_pause_exit; 158SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit, 159 0, "PAUSE triggers a VM-exit"); 160 161static int cap_unrestricted_guest; 162SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD, 163 &cap_unrestricted_guest, 0, "Unrestricted guests"); 164 165static int cap_monitor_trap; 166SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD, 167 &cap_monitor_trap, 0, "Monitor trap flag"); 168 169static int cap_invpcid; 170SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid, 171 0, "Guests are allowed to use INVPCID"); 172 173static int virtual_interrupt_delivery; 174SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD, 175 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support"); 176 177static int posted_interrupts; 178SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD, 179 &posted_interrupts, 0, "APICv posted interrupt support"); 180 181static int pirvec = -1; 182SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 183 &pirvec, 0, "APICv posted interrupt vector"); 184 185static struct unrhdr *vpid_unr; 186static u_int vpid_alloc_failed; 187SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 188 &vpid_alloc_failed, 0, NULL); 189 190static int guest_l1d_flush; 191SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD, 192 &guest_l1d_flush, 0, NULL); 193static int guest_l1d_flush_sw; 194SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD, 195 &guest_l1d_flush_sw, 0, NULL); 196 197static struct msr_entry msr_load_list[1] __aligned(16); 198 199/* 200 * The definitions of SDT probes for VMX. 
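 * Every probe carries the vmx softc, the vcpu id and the vm_exit as its
 * first three arguments; exit-specific data (MSR number, fault address,
 * etc.) follows where applicable.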
201 */ 202 203SDT_PROBE_DEFINE3(vmm, vmx, exit, entry, 204 "struct vmx *", "int", "struct vm_exit *"); 205 206SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch, 207 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *"); 208 209SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess, 210 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 211 212SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr, 213 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 214 215SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr, 216 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t"); 217 218SDT_PROBE_DEFINE3(vmm, vmx, exit, halt, 219 "struct vmx *", "int", "struct vm_exit *"); 220 221SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap, 222 "struct vmx *", "int", "struct vm_exit *"); 223 224SDT_PROBE_DEFINE3(vmm, vmx, exit, pause, 225 "struct vmx *", "int", "struct vm_exit *"); 226 227SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow, 228 "struct vmx *", "int", "struct vm_exit *"); 229 230SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt, 231 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 232 233SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow, 234 "struct vmx *", "int", "struct vm_exit *"); 235 236SDT_PROBE_DEFINE3(vmm, vmx, exit, inout, 237 "struct vmx *", "int", "struct vm_exit *"); 238 239SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid, 240 "struct vmx *", "int", "struct vm_exit *"); 241 242SDT_PROBE_DEFINE5(vmm, vmx, exit, exception, 243 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int"); 244 245SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault, 246 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t"); 247 248SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault, 249 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 250 251SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi, 252 "struct vmx *", "int", "struct vm_exit *"); 253 254SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess, 255 "struct vmx *", "int", "struct vm_exit *"); 256 257SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite, 258 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *"); 259 260SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, 261 "struct vmx *", "int", "struct vm_exit *"); 262 263SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, 264 "struct vmx *", "int", "struct vm_exit *"); 265 266SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, 267 "struct vmx *", "int", "struct vm_exit *"); 268 269SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, 270 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 271 272SDT_PROBE_DEFINE4(vmm, vmx, exit, return, 273 "struct vmx *", "int", "struct vm_exit *", "int"); 274 275/* 276 * Use the last page below 4GB as the APIC access address. This address is 277 * occupied by the boot firmware so it is guaranteed that it will not conflict 278 * with a page in system memory. 
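 * (The x86 reset vector at 0xFFFFFFF0 lies within this page, which is why
 * firmware occupies it.)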
279 */ 280#define APIC_ACCESS_ADDRESS 0xFFFFF000 281 282static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); 283static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); 284static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val); 285static void vmx_inject_pir(struct vlapic *vlapic); 286 287#ifdef KTR 288static const char * 289exit_reason_to_str(int reason) 290{ 291 static char reasonbuf[32]; 292 293 switch (reason) { 294 case EXIT_REASON_EXCEPTION: 295 return "exception"; 296 case EXIT_REASON_EXT_INTR: 297 return "extint"; 298 case EXIT_REASON_TRIPLE_FAULT: 299 return "triplefault"; 300 case EXIT_REASON_INIT: 301 return "init"; 302 case EXIT_REASON_SIPI: 303 return "sipi"; 304 case EXIT_REASON_IO_SMI: 305 return "iosmi"; 306 case EXIT_REASON_SMI: 307 return "smi"; 308 case EXIT_REASON_INTR_WINDOW: 309 return "intrwindow"; 310 case EXIT_REASON_NMI_WINDOW: 311 return "nmiwindow"; 312 case EXIT_REASON_TASK_SWITCH: 313 return "taskswitch"; 314 case EXIT_REASON_CPUID: 315 return "cpuid"; 316 case EXIT_REASON_GETSEC: 317 return "getsec"; 318 case EXIT_REASON_HLT: 319 return "hlt"; 320 case EXIT_REASON_INVD: 321 return "invd"; 322 case EXIT_REASON_INVLPG: 323 return "invlpg"; 324 case EXIT_REASON_RDPMC: 325 return "rdpmc"; 326 case EXIT_REASON_RDTSC: 327 return "rdtsc"; 328 case EXIT_REASON_RSM: 329 return "rsm"; 330 case EXIT_REASON_VMCALL: 331 return "vmcall"; 332 case EXIT_REASON_VMCLEAR: 333 return "vmclear"; 334 case EXIT_REASON_VMLAUNCH: 335 return "vmlaunch"; 336 case EXIT_REASON_VMPTRLD: 337 return "vmptrld"; 338 case EXIT_REASON_VMPTRST: 339 return "vmptrst"; 340 case EXIT_REASON_VMREAD: 341 return "vmread"; 342 case EXIT_REASON_VMRESUME: 343 return "vmresume"; 344 case EXIT_REASON_VMWRITE: 345 return "vmwrite"; 346 case EXIT_REASON_VMXOFF: 347 return "vmxoff"; 348 case EXIT_REASON_VMXON: 349 return "vmxon"; 350 case EXIT_REASON_CR_ACCESS: 351 return "craccess"; 352 case EXIT_REASON_DR_ACCESS: 353 return "draccess"; 354 case EXIT_REASON_INOUT: 355 return "inout"; 356 case EXIT_REASON_RDMSR: 357 return "rdmsr"; 358 case EXIT_REASON_WRMSR: 359 return "wrmsr"; 360 case EXIT_REASON_INVAL_VMCS: 361 return "invalvmcs"; 362 case EXIT_REASON_INVAL_MSR: 363 return "invalmsr"; 364 case EXIT_REASON_MWAIT: 365 return "mwait"; 366 case EXIT_REASON_MTF: 367 return "mtf"; 368 case EXIT_REASON_MONITOR: 369 return "monitor"; 370 case EXIT_REASON_PAUSE: 371 return "pause"; 372 case EXIT_REASON_MCE_DURING_ENTRY: 373 return "mce-during-entry"; 374 case EXIT_REASON_TPR: 375 return "tpr"; 376 case EXIT_REASON_APIC_ACCESS: 377 return "apic-access"; 378 case EXIT_REASON_GDTR_IDTR: 379 return "gdtridtr"; 380 case EXIT_REASON_LDTR_TR: 381 return "ldtrtr"; 382 case EXIT_REASON_EPT_FAULT: 383 return "eptfault"; 384 case EXIT_REASON_EPT_MISCONFIG: 385 return "eptmisconfig"; 386 case EXIT_REASON_INVEPT: 387 return "invept"; 388 case EXIT_REASON_RDTSCP: 389 return "rdtscp"; 390 case EXIT_REASON_VMX_PREEMPT: 391 return "vmxpreempt"; 392 case EXIT_REASON_INVVPID: 393 return "invvpid"; 394 case EXIT_REASON_WBINVD: 395 return "wbinvd"; 396 case EXIT_REASON_XSETBV: 397 return "xsetbv"; 398 case EXIT_REASON_APIC_WRITE: 399 return "apic-write"; 400 default: 401 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 402 return (reasonbuf); 403 } 404} 405#endif /* KTR */ 406 407static int 408vmx_allow_x2apic_msrs(struct vmx *vmx) 409{ 410 int i, error; 411 412 error = 0; 413 414 /* 415 * Allow readonly access to the following x2APIC MSRs from the guest. 
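 * Reads of these MSRs bypass the hypervisor entirely; writes still trigger
 * a VM-exit and go through the normal WRMSR emulation path.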
416 */ 417 error += guest_msr_ro(vmx, MSR_APIC_ID); 418 error += guest_msr_ro(vmx, MSR_APIC_VERSION); 419 error += guest_msr_ro(vmx, MSR_APIC_LDR); 420 error += guest_msr_ro(vmx, MSR_APIC_SVR); 421 422 for (i = 0; i < 8; i++) 423 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i); 424 425 for (i = 0; i < 8; i++) 426 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i); 427 428 for (i = 0; i < 8; i++) 429 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i); 430 431 error += guest_msr_ro(vmx, MSR_APIC_ESR); 432 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER); 433 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL); 434 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT); 435 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0); 436 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1); 437 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR); 438 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER); 439 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER); 440 error += guest_msr_ro(vmx, MSR_APIC_ICR); 441 442 /* 443 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest. 444 * 445 * These registers get special treatment described in the section 446 * "Virtualizing MSR-Based APIC Accesses". 447 */ 448 error += guest_msr_rw(vmx, MSR_APIC_TPR); 449 error += guest_msr_rw(vmx, MSR_APIC_EOI); 450 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI); 451 452 return (error); 453} 454 455u_long 456vmx_fix_cr0(u_long cr0) 457{ 458 459 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask); 460} 461 462u_long 463vmx_fix_cr4(u_long cr4) 464{ 465 466 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask); 467} 468 469static void 470vpid_free(int vpid) 471{ 472 if (vpid < 0 || vpid > 0xffff) 473 panic("vpid_free: invalid vpid %d", vpid); 474 475 /* 476 * VPIDs [0,VM_MAXCPU] are special and are not allocated from 477 * the unit number allocator. 478 */ 479 480 if (vpid > VM_MAXCPU) 481 free_unr(vpid_unr, vpid); 482} 483 484static void 485vpid_alloc(uint16_t *vpid, int num) 486{ 487 int i, x; 488 489 if (num <= 0 || num > VM_MAXCPU) 490 panic("invalid number of vpids requested: %d", num); 491 492 /* 493 * If the "enable vpid" execution control is not enabled then the 494 * VPID is required to be 0 for all vcpus. 495 */ 496 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) { 497 for (i = 0; i < num; i++) 498 vpid[i] = 0; 499 return; 500 } 501 502 /* 503 * Allocate a unique VPID for each vcpu from the unit number allocator. 504 */ 505 for (i = 0; i < num; i++) { 506 x = alloc_unr(vpid_unr); 507 if (x == -1) 508 break; 509 else 510 vpid[i] = x; 511 } 512 513 if (i < num) { 514 atomic_add_int(&vpid_alloc_failed, 1); 515 516 /* 517 * If the unit number allocator does not have enough unique 518 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range. 519 * 520 * These VPIDs are not be unique across VMs but this does not 521 * affect correctness because the combined mappings are also 522 * tagged with the EP4TA which is unique for each VM. 523 * 524 * It is still sub-optimal because the invvpid will invalidate 525 * combined mappings for a particular VPID across all EP4TAs. 526 */ 527 while (i-- > 0) 528 vpid_free(vpid[i]); 529 530 for (i = 0; i < num; i++) 531 vpid[i] = i + 1; 532 } 533} 534 535static void 536vpid_init(void) 537{ 538 /* 539 * VPID 0 is required when the "enable VPID" execution control is 540 * disabled. 541 * 542 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the 543 * unit number allocator does not have sufficient unique VPIDs to 544 * satisfy the allocation. 
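 * (vpid_alloc() above falls back to this range when alloc_unr() cannot
 * supply enough unique VPIDs.)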
545 * 546 * The remaining VPIDs are managed by the unit number allocator. 547 */ 548 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL); 549} 550 551static void 552vmx_disable(void *arg __unused) 553{ 554 struct invvpid_desc invvpid_desc = { 0 }; 555 struct invept_desc invept_desc = { 0 }; 556 557 if (vmxon_enabled[curcpu]) { 558 /* 559 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b. 560 * 561 * VMXON or VMXOFF are not required to invalidate any TLB 562 * caching structures. This prevents potential retention of 563 * cached information in the TLB between distinct VMX episodes. 564 */ 565 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc); 566 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc); 567 vmxoff(); 568 } 569 load_cr4(rcr4() & ~CR4_VMXE); 570} 571 572static int 573vmx_cleanup(void) 574{ 575 576 if (pirvec >= 0) 577 lapic_ipi_free(pirvec); 578 579 if (vpid_unr != NULL) { 580 delete_unrhdr(vpid_unr); 581 vpid_unr = NULL; 582 } 583 584 if (nmi_flush_l1d_sw == 1) 585 nmi_flush_l1d_sw = 0; 586 587 smp_rendezvous(NULL, vmx_disable, NULL, NULL); 588 589 return (0); 590} 591 592static void 593vmx_enable(void *arg __unused) 594{ 595 int error; 596 uint64_t feature_control; 597 598 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 599 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 || 600 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 601 wrmsr(MSR_IA32_FEATURE_CONTROL, 602 feature_control | IA32_FEATURE_CONTROL_VMX_EN | 603 IA32_FEATURE_CONTROL_LOCK); 604 } 605 606 load_cr4(rcr4() | CR4_VMXE); 607 608 *(uint32_t *)vmxon_region[curcpu] = vmx_revision(); 609 error = vmxon(vmxon_region[curcpu]); 610 if (error == 0) 611 vmxon_enabled[curcpu] = 1; 612} 613 614static void 615vmx_restore(void) 616{ 617 618 if (vmxon_enabled[curcpu]) 619 vmxon(vmxon_region[curcpu]); 620} 621 622static int 623vmx_init(int ipinum) 624{ 625 int error, use_tpr_shadow; 626 uint64_t basic, fixed0, fixed1, feature_control; 627 uint32_t tmp, procbased2_vid_bits; 628 629 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ 630 if (!(cpu_feature2 & CPUID2_VMX)) { 631 printf("vmx_init: processor does not support VMX operation\n"); 632 return (ENXIO); 633 } 634 635 /* 636 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits 637 * are set (bits 0 and 2 respectively). 
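 * If the MSR is left unlocked by the firmware, vmx_enable() sets both bits
 * itself before executing VMXON.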
638 */ 639 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 640 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 && 641 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 642 printf("vmx_init: VMX operation disabled by BIOS\n"); 643 return (ENXIO); 644 } 645 646 /* 647 * Verify capabilities MSR_VMX_BASIC: 648 * - bit 54 indicates support for INS/OUTS decoding 649 */ 650 basic = rdmsr(MSR_VMX_BASIC); 651 if ((basic & (1UL << 54)) == 0) { 652 printf("vmx_init: processor does not support desired basic " 653 "capabilities\n"); 654 return (EINVAL); 655 } 656 657 /* Check support for primary processor-based VM-execution controls */ 658 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 659 MSR_VMX_TRUE_PROCBASED_CTLS, 660 PROCBASED_CTLS_ONE_SETTING, 661 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 662 if (error) { 663 printf("vmx_init: processor does not support desired primary " 664 "processor-based controls\n"); 665 return (error); 666 } 667 668 /* Clear the processor-based ctl bits that are set on demand */ 669 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 670 671 /* Check support for secondary processor-based VM-execution controls */ 672 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 673 MSR_VMX_PROCBASED_CTLS2, 674 PROCBASED_CTLS2_ONE_SETTING, 675 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 676 if (error) { 677 printf("vmx_init: processor does not support desired secondary " 678 "processor-based controls\n"); 679 return (error); 680 } 681 682 /* Check support for VPID */ 683 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 684 PROCBASED2_ENABLE_VPID, 0, &tmp); 685 if (error == 0) 686 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 687 688 /* Check support for pin-based VM-execution controls */ 689 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 690 MSR_VMX_TRUE_PINBASED_CTLS, 691 PINBASED_CTLS_ONE_SETTING, 692 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 693 if (error) { 694 printf("vmx_init: processor does not support desired " 695 "pin-based controls\n"); 696 return (error); 697 } 698 699 /* Check support for VM-exit controls */ 700 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 701 VM_EXIT_CTLS_ONE_SETTING, 702 VM_EXIT_CTLS_ZERO_SETTING, 703 &exit_ctls); 704 if (error) { 705 printf("vmx_init: processor does not support desired " 706 "exit controls\n"); 707 return (error); 708 } 709 710 /* Check support for VM-entry controls */ 711 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 712 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, 713 &entry_ctls); 714 if (error) { 715 printf("vmx_init: processor does not support desired " 716 "entry controls\n"); 717 return (error); 718 } 719 720 /* 721 * Check support for optional features by testing them 722 * as individual bits 723 */ 724 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 725 MSR_VMX_TRUE_PROCBASED_CTLS, 726 PROCBASED_HLT_EXITING, 0, 727 &tmp) == 0); 728 729 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 730 MSR_VMX_PROCBASED_CTLS, 731 PROCBASED_MTF, 0, 732 &tmp) == 0); 733 734 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 735 MSR_VMX_TRUE_PROCBASED_CTLS, 736 PROCBASED_PAUSE_EXITING, 0, 737 &tmp) == 0); 738 739 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 740 MSR_VMX_PROCBASED_CTLS2, 741 PROCBASED2_UNRESTRICTED_GUEST, 0, 742 &tmp) == 0); 743 744 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 745 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 746 &tmp) == 0); 747 748 /* 749 * Check support 
for virtual interrupt delivery. 750 */ 751 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 752 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 753 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 754 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 755 756 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 757 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, 758 &tmp) == 0); 759 760 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 761 procbased2_vid_bits, 0, &tmp); 762 if (error == 0 && use_tpr_shadow) { 763 virtual_interrupt_delivery = 1; 764 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", 765 &virtual_interrupt_delivery); 766 } 767 768 if (virtual_interrupt_delivery) { 769 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 770 procbased_ctls2 |= procbased2_vid_bits; 771 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; 772 773 /* 774 * No need to emulate accesses to %CR8 if virtual 775 * interrupt delivery is enabled. 776 */ 777 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING; 778 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING; 779 780 /* 781 * Check for Posted Interrupts only if Virtual Interrupt 782 * Delivery is enabled. 783 */ 784 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 785 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, 786 &tmp); 787 if (error == 0) { 788 pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) : 789 &IDTVEC(justreturn)); 790 if (pirvec < 0) { 791 if (bootverbose) { 792 printf("vmx_init: unable to allocate " 793 "posted interrupt vector\n"); 794 } 795 } else { 796 posted_interrupts = 1; 797 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir", 798 &posted_interrupts); 799 } 800 } 801 } 802 803 if (posted_interrupts) 804 pinbased_ctls |= PINBASED_POSTED_INTERRUPT; 805 806 /* Initialize EPT */ 807 error = ept_init(ipinum); 808 if (error) { 809 printf("vmx_init: ept initialization failed (%d)\n", error); 810 return (error); 811 } 812 813 guest_l1d_flush = (cpu_ia32_arch_caps & 814 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0; 815 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush); 816 817 /* 818 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when 819 * available. Otherwise fall back to the software flush 820 * method which loads enough data from the kernel text to 821 * flush existing L1D content, both on VMX entry and on NMI 822 * return. 823 */ 824 if (guest_l1d_flush) { 825 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) { 826 guest_l1d_flush_sw = 1; 827 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw", 828 &guest_l1d_flush_sw); 829 } 830 if (guest_l1d_flush_sw) { 831 if (nmi_flush_l1d_sw <= 1) 832 nmi_flush_l1d_sw = 1; 833 } else { 834 msr_load_list[0].index = MSR_IA32_FLUSH_CMD; 835 msr_load_list[0].val = IA32_FLUSH_CMD_L1D; 836 } 837 } 838 839 /* 840 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 841 */ 842 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 843 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 844 cr0_ones_mask = fixed0 & fixed1; 845 cr0_zeros_mask = ~fixed0 & ~fixed1; 846 847 /* 848 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation 849 * if unrestricted guest execution is allowed. 850 */ 851 if (cap_unrestricted_guest) 852 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 853 854 /* 855 * Do not allow the guest to set CR0_NW or CR0_CD. 
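 * Both bits are folded into cr0_zeros_mask below, so vmx_fix_cr0() always
 * clears them.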
856 */ 857 cr0_zeros_mask |= (CR0_NW | CR0_CD); 858 859 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 860 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 861 cr4_ones_mask = fixed0 & fixed1; 862 cr4_zeros_mask = ~fixed0 & ~fixed1; 863 864 vpid_init(); 865 866 vmx_msr_init(); 867 868 /* enable VMX operation */ 869 smp_rendezvous(NULL, vmx_enable, NULL, NULL); 870 871 vmx_initialized = 1; 872 873 return (0); 874} 875 876static void 877vmx_trigger_hostintr(int vector) 878{ 879 uintptr_t func; 880 struct gate_descriptor *gd; 881 882 gd = &idt[vector]; 883 884 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 885 "invalid vector %d", vector)); 886 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 887 vector)); 888 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 889 "has invalid type %d", vector, gd->gd_type)); 890 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 891 "has invalid dpl %d", vector, gd->gd_dpl)); 892 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 893 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 894 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 895 "IST %d", vector, gd->gd_ist)); 896 897 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 898 vmx_call_isr(func); 899} 900 901static int 902vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) 903{ 904 int error, mask_ident, shadow_ident; 905 uint64_t mask_value; 906 907 if (which != 0 && which != 4) 908 panic("vmx_setup_cr_shadow: unknown cr%d", which); 909 910 if (which == 0) { 911 mask_ident = VMCS_CR0_MASK; 912 mask_value = cr0_ones_mask | cr0_zeros_mask; 913 shadow_ident = VMCS_CR0_SHADOW; 914 } else { 915 mask_ident = VMCS_CR4_MASK; 916 mask_value = cr4_ones_mask | cr4_zeros_mask; 917 shadow_ident = VMCS_CR4_SHADOW; 918 } 919 920 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); 921 if (error) 922 return (error); 923 924 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); 925 if (error) 926 return (error); 927 928 return (0); 929} 930#define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) 931#define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) 932 933static void * 934vmx_vminit(struct vm *vm, pmap_t pmap) 935{ 936 uint16_t vpid[VM_MAXCPU]; 937 int i, error; 938 struct vmx *vmx; 939 struct vmcs *vmcs; 940 uint32_t exc_bitmap; 941 942 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 943 if ((uintptr_t)vmx & PAGE_MASK) { 944 panic("malloc of struct vmx not aligned on %d byte boundary", 945 PAGE_SIZE); 946 } 947 vmx->vm = vm; 948 949 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4)); 950 951 /* 952 * Clean up EPTP-tagged guest physical and combined mappings 953 * 954 * VMX transitions are not required to invalidate any guest physical 955 * mappings. So, it may be possible for stale guest physical mappings 956 * to be present in the processor TLBs. 957 * 958 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 959 */ 960 ept_invalidate_mappings(vmx->eptp); 961 962 msr_bitmap_initialize(vmx->msr_bitmap); 963 964 /* 965 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 966 * The guest FSBASE and GSBASE are saved and restored during 967 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are 968 * always restored from the vmcs host state area on vm-exit. 
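 * (A guest write to these MSRs therefore cannot leak into the host's
 * values.)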
969 * 970 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in 971 * how they are saved/restored so can be directly accessed by the 972 * guest. 973 * 974 * MSR_EFER is saved and restored in the guest VMCS area on a 975 * VM exit and entry respectively. It is also restored from the 976 * host VMCS area on a VM exit. 977 * 978 * The TSC MSR is exposed read-only. Writes are disallowed as 979 * that will impact the host TSC. If the guest does a write 980 * the "use TSC offsetting" execution control is enabled and the 981 * difference between the host TSC and the guest TSC is written 982 * into the TSC offset in the VMCS. 983 */ 984 if (guest_msr_rw(vmx, MSR_GSBASE) || 985 guest_msr_rw(vmx, MSR_FSBASE) || 986 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 987 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 988 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 989 guest_msr_rw(vmx, MSR_EFER) || 990 guest_msr_ro(vmx, MSR_TSC)) 991 panic("vmx_vminit: error setting guest msr access"); 992 993 vpid_alloc(vpid, VM_MAXCPU); 994 995 if (virtual_interrupt_delivery) { 996 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 997 APIC_ACCESS_ADDRESS); 998 /* XXX this should really return an error to the caller */ 999 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 1000 } 1001 1002 for (i = 0; i < VM_MAXCPU; i++) { 1003 vmcs = &vmx->vmcs[i]; 1004 vmcs->identifier = vmx_revision(); 1005 error = vmclear(vmcs); 1006 if (error != 0) { 1007 panic("vmx_vminit: vmclear error %d on vcpu %d\n", 1008 error, i); 1009 } 1010 1011 vmx_msr_guest_init(vmx, i); 1012 1013 error = vmcs_init(vmcs); 1014 KASSERT(error == 0, ("vmcs_init error %d", error)); 1015 1016 VMPTRLD(vmcs); 1017 error = 0; 1018 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]); 1019 error += vmwrite(VMCS_EPTP, vmx->eptp); 1020 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls); 1021 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls); 1022 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2); 1023 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); 1024 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); 1025 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); 1026 error += vmwrite(VMCS_VPID, vpid[i]); 1027 1028 if (guest_l1d_flush && !guest_l1d_flush_sw) { 1029 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract( 1030 (vm_offset_t)&msr_load_list[0])); 1031 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, 1032 nitems(msr_load_list)); 1033 vmcs_write(VMCS_EXIT_MSR_STORE, 0); 1034 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); 1035 } 1036 1037 /* exception bitmap */ 1038 if (vcpu_trace_exceptions(vm, i)) 1039 exc_bitmap = 0xffffffff; 1040 else 1041 exc_bitmap = 1 << IDT_MC; 1042 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); 1043 1044 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1; 1045 error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); 1046 1047 if (virtual_interrupt_delivery) { 1048 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 1049 error += vmwrite(VMCS_VIRTUAL_APIC, 1050 vtophys(&vmx->apic_page[i])); 1051 error += vmwrite(VMCS_EOI_EXIT0, 0); 1052 error += vmwrite(VMCS_EOI_EXIT1, 0); 1053 error += vmwrite(VMCS_EOI_EXIT2, 0); 1054 error += vmwrite(VMCS_EOI_EXIT3, 0); 1055 } 1056 if (posted_interrupts) { 1057 error += vmwrite(VMCS_PIR_VECTOR, pirvec); 1058 error += vmwrite(VMCS_PIR_DESC, 1059 vtophys(&vmx->pir_desc[i])); 1060 } 1061 VMCLEAR(vmcs); 1062 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs")); 1063 1064 vmx->cap[i].set = 0; 1065 vmx->cap[i].proc_ctls = procbased_ctls; 1066 vmx->cap[i].proc_ctls2 = 
procbased_ctls2; 1067 1068 vmx->state[i].nextrip = ~0; 1069 vmx->state[i].lastcpu = NOCPU; 1070 vmx->state[i].vpid = vpid[i]; 1071 1072 /* 1073 * Set up the CR0/4 shadows, and init the read shadow 1074 * to the power-on register value from the Intel Sys Arch. 1075 * CR0 - 0x60000010 1076 * CR4 - 0 1077 */ 1078 error = vmx_setup_cr0_shadow(vmcs, 0x60000010); 1079 if (error != 0) 1080 panic("vmx_setup_cr0_shadow %d", error); 1081 1082 error = vmx_setup_cr4_shadow(vmcs, 0); 1083 if (error != 0) 1084 panic("vmx_setup_cr4_shadow %d", error); 1085 1086 vmx->ctx[i].pmap = pmap; 1087 } 1088 1089 return (vmx); 1090} 1091 1092static int 1093vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 1094{ 1095 int handled, func; 1096 1097 func = vmxctx->guest_rax; 1098 1099 handled = x86_emulate_cpuid(vm, vcpu, 1100 (uint32_t*)(&vmxctx->guest_rax), 1101 (uint32_t*)(&vmxctx->guest_rbx), 1102 (uint32_t*)(&vmxctx->guest_rcx), 1103 (uint32_t*)(&vmxctx->guest_rdx)); 1104 return (handled); 1105} 1106 1107static __inline void 1108vmx_run_trace(struct vmx *vmx, int vcpu) 1109{ 1110#ifdef KTR 1111 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip()); 1112#endif 1113} 1114 1115static __inline void 1116vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason, 1117 int handled) 1118{ 1119#ifdef KTR 1120 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx", 1121 handled ? "handled" : "unhandled", 1122 exit_reason_to_str(exit_reason), rip); 1123#endif 1124} 1125 1126static __inline void 1127vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 1128{ 1129#ifdef KTR 1130 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 1131#endif 1132} 1133 1134static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1135static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); 1136 1137/* 1138 * Invalidate guest mappings identified by its vpid from the TLB. 1139 */ 1140static __inline void 1141vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running) 1142{ 1143 struct vmxstate *vmxstate; 1144 struct invvpid_desc invvpid_desc; 1145 1146 vmxstate = &vmx->state[vcpu]; 1147 if (vmxstate->vpid == 0) 1148 return; 1149 1150 if (!running) { 1151 /* 1152 * Set the 'lastcpu' to an invalid host cpu. 1153 * 1154 * This will invalidate TLB entries tagged with the vcpu's 1155 * vpid the next time it runs via vmx_set_pcpu_defaults(). 1156 */ 1157 vmxstate->lastcpu = NOCPU; 1158 return; 1159 } 1160 1161 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " 1162 "critical section", __func__, vcpu)); 1163 1164 /* 1165 * Invalidate all mappings tagged with 'vpid' 1166 * 1167 * We do this because this vcpu was executing on a different host 1168 * cpu when it last ran. We do not track whether it invalidated 1169 * mappings associated with its 'vpid' during that run. So we must 1170 * assume that the mappings associated with 'vpid' on 'curcpu' are 1171 * stale and invalidate them. 1172 * 1173 * Note that we incur this penalty only when the scheduler chooses to 1174 * move the thread associated with this vcpu between host cpus. 1175 * 1176 * Note also that this will invalidate mappings tagged with 'vpid' 1177 * for "all" EP4TAs. 
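 * When the pmap's EPT generation does not match the per-cpu value an invept
 * is already pending, so the invvpid is skipped (see below).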
1178 */ 1179 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1180 invvpid_desc._res1 = 0; 1181 invvpid_desc._res2 = 0; 1182 invvpid_desc.vpid = vmxstate->vpid; 1183 invvpid_desc.linear_addr = 0; 1184 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1185 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1); 1186 } else { 1187 /* 1188 * The invvpid can be skipped if an invept is going to 1189 * be performed before entering the guest. The invept 1190 * will invalidate combined mappings tagged with 1191 * 'vmx->eptp' for all vpids. 1192 */ 1193 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1194 } 1195} 1196 1197static void 1198vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 1199{ 1200 struct vmxstate *vmxstate; 1201 1202 vmxstate = &vmx->state[vcpu]; 1203 if (vmxstate->lastcpu == curcpu) 1204 return; 1205 1206 vmxstate->lastcpu = curcpu; 1207 1208 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 1209 1210 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1211 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1212 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1213 vmx_invvpid(vmx, vcpu, pmap, 1); 1214} 1215 1216/* 1217 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1218 */ 1219CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1220 1221static void __inline 1222vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1223{ 1224 1225 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1226 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1227 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1228 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1229 } 1230} 1231 1232static void __inline 1233vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1234{ 1235 1236 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1237 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls)); 1238 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1239 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1240 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1241} 1242 1243static void __inline 1244vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1245{ 1246 1247 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1248 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1249 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1250 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1251 } 1252} 1253 1254static void __inline 1255vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1256{ 1257 1258 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1259 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls)); 1260 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1261 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1262 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1263} 1264 1265int 1266vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset) 1267{ 1268 int error; 1269 1270 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) { 1271 vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET; 1272 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1273 VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting"); 1274 } 1275 1276 error = vmwrite(VMCS_TSC_OFFSET, offset); 1277 1278 return (error); 1279} 1280 1281#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1282 
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1283#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1284 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1285 1286static void 1287vmx_inject_nmi(struct vmx *vmx, int vcpu) 1288{ 1289 uint32_t gi, info; 1290 1291 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1292 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1293 "interruptibility-state %#x", gi)); 1294 1295 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1296 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1297 "VM-entry interruption information %#x", info)); 1298 1299 /* 1300 * Inject the virtual NMI. The vector must be the NMI IDT entry 1301 * or the VMCS entry check will fail. 1302 */ 1303 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1304 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1305 1306 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1307 1308 /* Clear the request */ 1309 vm_nmi_clear(vmx->vm, vcpu); 1310} 1311 1312static void 1313vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic, 1314 uint64_t guestrip) 1315{ 1316 int vector, need_nmi_exiting, extint_pending; 1317 uint64_t rflags, entryinfo; 1318 uint32_t gi, info; 1319 1320 if (vmx->state[vcpu].nextrip != guestrip) { 1321 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1322 if (gi & HWINTR_BLOCKING) { 1323 VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking " 1324 "cleared due to rip change: %#lx/%#lx", 1325 vmx->state[vcpu].nextrip, guestrip); 1326 gi &= ~HWINTR_BLOCKING; 1327 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1328 } 1329 } 1330 1331 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) { 1332 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry " 1333 "intinfo is not valid: %#lx", __func__, entryinfo)); 1334 1335 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1336 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1337 "pending exception: %#lx/%#x", __func__, entryinfo, info)); 1338 1339 info = entryinfo; 1340 vector = info & 0xff; 1341 if (vector == IDT_BP || vector == IDT_OF) { 1342 /* 1343 * VT-x requires #BP and #OF to be injected as software 1344 * exceptions. 1345 */ 1346 info &= ~VMCS_INTR_T_MASK; 1347 info |= VMCS_INTR_T_SWEXCEPTION; 1348 } 1349 1350 if (info & VMCS_INTR_DEL_ERRCODE) 1351 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32); 1352 1353 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1354 } 1355 1356 if (vm_nmi_pending(vmx->vm, vcpu)) { 1357 /* 1358 * If there are no conditions blocking NMI injection then 1359 * inject it directly here otherwise enable "NMI window 1360 * exiting" to inject it as soon as we can. 1361 * 1362 * We also check for STI_BLOCKING because some implementations 1363 * don't allow NMI injection in this case. If we are running 1364 * on a processor that doesn't have this restriction it will 1365 * immediately exit and the NMI will be injected in the 1366 * "NMI window exiting" handler. 
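 * (need_nmi_exiting below records whether the NMI could be injected
 * directly; if not, NMI-window exiting is turned on instead.)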
1367 */ 1368 need_nmi_exiting = 1; 1369 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1370 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1371 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1372 if ((info & VMCS_INTR_VALID) == 0) { 1373 vmx_inject_nmi(vmx, vcpu); 1374 need_nmi_exiting = 0; 1375 } else { 1376 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI " 1377 "due to VM-entry intr info %#x", info); 1378 } 1379 } else { 1380 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to " 1381 "Guest Interruptibility-state %#x", gi); 1382 } 1383 1384 if (need_nmi_exiting) 1385 vmx_set_nmi_window_exiting(vmx, vcpu); 1386 } 1387 1388 extint_pending = vm_extint_pending(vmx->vm, vcpu); 1389 1390 if (!extint_pending && virtual_interrupt_delivery) { 1391 vmx_inject_pir(vlapic); 1392 return; 1393 } 1394 1395 /* 1396 * If interrupt-window exiting is already in effect then don't bother 1397 * checking for pending interrupts. This is just an optimization and 1398 * not needed for correctness. 1399 */ 1400 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 1401 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to " 1402 "pending int_window_exiting"); 1403 return; 1404 } 1405 1406 if (!extint_pending) { 1407 /* Ask the local apic for a vector to inject */ 1408 if (!vlapic_pending_intr(vlapic, &vector)) 1409 return; 1410 1411 /* 1412 * From the Intel SDM, Volume 3, Section "Maskable 1413 * Hardware Interrupts": 1414 * - maskable interrupt vectors [16,255] can be delivered 1415 * through the local APIC. 1416 */ 1417 KASSERT(vector >= 16 && vector <= 255, 1418 ("invalid vector %d from local APIC", vector)); 1419 } else { 1420 /* Ask the legacy pic for a vector to inject */ 1421 vatpic_pending_intr(vmx->vm, &vector); 1422 1423 /* 1424 * From the Intel SDM, Volume 3, Section "Maskable 1425 * Hardware Interrupts": 1426 * - maskable interrupt vectors [0,255] can be delivered 1427 * through the INTR pin. 1428 */ 1429 KASSERT(vector >= 0 && vector <= 255, 1430 ("invalid vector %d from INTR", vector)); 1431 } 1432 1433 /* Check RFLAGS.IF and the interruptibility state of the guest */ 1434 rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1435 if ((rflags & PSL_I) == 0) { 1436 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1437 "rflags %#lx", vector, rflags); 1438 goto cantinject; 1439 } 1440 1441 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1442 if (gi & HWINTR_BLOCKING) { 1443 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1444 "Guest Interruptibility-state %#x", vector, gi); 1445 goto cantinject; 1446 } 1447 1448 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1449 if (info & VMCS_INTR_VALID) { 1450 /* 1451 * This is expected and could happen for multiple reasons: 1452 * - A vectoring VM-entry was aborted due to astpending 1453 * - A VM-exit happened during event injection. 1454 * - An exception was injected above. 1455 * - An NMI was injected above or after "NMI window exiting" 1456 */ 1457 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1458 "VM-entry intr info %#x", vector, info); 1459 goto cantinject; 1460 } 1461 1462 /* Inject the interrupt */ 1463 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1464 info |= vector; 1465 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1466 1467 if (!extint_pending) { 1468 /* Update the Local APIC ISR */ 1469 vlapic_intr_accepted(vlapic, vector); 1470 } else { 1471 vm_extint_clear(vmx->vm, vcpu); 1472 vatpic_intr_accepted(vmx->vm, vector); 1473 1474 /* 1475 * After we accepted the current ExtINT the PIC may 1476 * have posted another one. 
If that is the case, set 1477 * the Interrupt Window Exiting execution control so 1478 * we can inject that one too. 1479 * 1480 * Also, interrupt window exiting allows us to inject any 1481 * pending APIC vector that was preempted by the ExtINT 1482 * as soon as possible. This applies both for the software 1483 * emulated vlapic and the hardware assisted virtual APIC. 1484 */ 1485 vmx_set_int_window_exiting(vmx, vcpu); 1486 } 1487 1488 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1489 1490 return; 1491 1492cantinject: 1493 /* 1494 * Set the Interrupt Window Exiting execution control so we can inject 1495 * the interrupt as soon as blocking condition goes away. 1496 */ 1497 vmx_set_int_window_exiting(vmx, vcpu); 1498} 1499 1500/* 1501 * If the Virtual NMIs execution control is '1' then the logical processor 1502 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1503 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1504 * virtual-NMI blocking. 1505 * 1506 * This unblocking occurs even if the IRET causes a fault. In this case the 1507 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1508 */ 1509static void 1510vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1511{ 1512 uint32_t gi; 1513 1514 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1515 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1516 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1517 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1518} 1519 1520static void 1521vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1522{ 1523 uint32_t gi; 1524 1525 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1526 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1527 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1528 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1529} 1530 1531static void 1532vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid) 1533{ 1534 uint32_t gi; 1535 1536 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1537 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, 1538 ("NMI blocking is not in effect %#x", gi)); 1539} 1540 1541static int 1542vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1543{ 1544 struct vmxctx *vmxctx; 1545 uint64_t xcrval; 1546 const struct xsave_limits *limits; 1547 1548 vmxctx = &vmx->ctx[vcpu]; 1549 limits = vmm_get_xsave_limits(); 1550 1551 /* 1552 * Note that the processor raises a GP# fault on its own if 1553 * xsetbv is executed for CPL != 0, so we do not have to 1554 * emulate that fault here. 1555 */ 1556 1557 /* Only xcr0 is supported. */ 1558 if (vmxctx->guest_rcx != 0) { 1559 vm_inject_gp(vmx->vm, vcpu); 1560 return (HANDLED); 1561 } 1562 1563 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1564 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1565 vm_inject_ud(vmx->vm, vcpu); 1566 return (HANDLED); 1567 } 1568 1569 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1570 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1571 vm_inject_gp(vmx->vm, vcpu); 1572 return (HANDLED); 1573 } 1574 1575 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1576 vm_inject_gp(vmx->vm, vcpu); 1577 return (HANDLED); 1578 } 1579 1580 /* AVX (YMM_Hi128) requires SSE. */ 1581 if (xcrval & XFEATURE_ENABLED_AVX && 1582 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1583 vm_inject_gp(vmx->vm, vcpu); 1584 return (HANDLED); 1585 } 1586 1587 /* 1588 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1589 * ZMM_Hi256, and Hi16_ZMM. 
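 * The check below therefore requires that either none or all of the AVX512
 * state bits are set, and that AVX is enabled along with them.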
1590 */ 1591 if (xcrval & XFEATURE_AVX512 && 1592 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1593 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1594 vm_inject_gp(vmx->vm, vcpu); 1595 return (HANDLED); 1596 } 1597 1598 /* 1599 * Intel MPX requires both bound register state flags to be 1600 * set. 1601 */ 1602 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1603 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1604 vm_inject_gp(vmx->vm, vcpu); 1605 return (HANDLED); 1606 } 1607 1608 /* 1609 * This runs "inside" vmrun() with the guest's FPU state, so 1610 * modifying xcr0 directly modifies the guest's xcr0, not the 1611 * host's. 1612 */ 1613 load_xcr(0, xcrval); 1614 return (HANDLED); 1615} 1616 1617static uint64_t 1618vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident) 1619{ 1620 const struct vmxctx *vmxctx; 1621 1622 vmxctx = &vmx->ctx[vcpu]; 1623 1624 switch (ident) { 1625 case 0: 1626 return (vmxctx->guest_rax); 1627 case 1: 1628 return (vmxctx->guest_rcx); 1629 case 2: 1630 return (vmxctx->guest_rdx); 1631 case 3: 1632 return (vmxctx->guest_rbx); 1633 case 4: 1634 return (vmcs_read(VMCS_GUEST_RSP)); 1635 case 5: 1636 return (vmxctx->guest_rbp); 1637 case 6: 1638 return (vmxctx->guest_rsi); 1639 case 7: 1640 return (vmxctx->guest_rdi); 1641 case 8: 1642 return (vmxctx->guest_r8); 1643 case 9: 1644 return (vmxctx->guest_r9); 1645 case 10: 1646 return (vmxctx->guest_r10); 1647 case 11: 1648 return (vmxctx->guest_r11); 1649 case 12: 1650 return (vmxctx->guest_r12); 1651 case 13: 1652 return (vmxctx->guest_r13); 1653 case 14: 1654 return (vmxctx->guest_r14); 1655 case 15: 1656 return (vmxctx->guest_r15); 1657 default: 1658 panic("invalid vmx register %d", ident); 1659 } 1660} 1661 1662static void 1663vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval) 1664{ 1665 struct vmxctx *vmxctx; 1666 1667 vmxctx = &vmx->ctx[vcpu]; 1668 1669 switch (ident) { 1670 case 0: 1671 vmxctx->guest_rax = regval; 1672 break; 1673 case 1: 1674 vmxctx->guest_rcx = regval; 1675 break; 1676 case 2: 1677 vmxctx->guest_rdx = regval; 1678 break; 1679 case 3: 1680 vmxctx->guest_rbx = regval; 1681 break; 1682 case 4: 1683 vmcs_write(VMCS_GUEST_RSP, regval); 1684 break; 1685 case 5: 1686 vmxctx->guest_rbp = regval; 1687 break; 1688 case 6: 1689 vmxctx->guest_rsi = regval; 1690 break; 1691 case 7: 1692 vmxctx->guest_rdi = regval; 1693 break; 1694 case 8: 1695 vmxctx->guest_r8 = regval; 1696 break; 1697 case 9: 1698 vmxctx->guest_r9 = regval; 1699 break; 1700 case 10: 1701 vmxctx->guest_r10 = regval; 1702 break; 1703 case 11: 1704 vmxctx->guest_r11 = regval; 1705 break; 1706 case 12: 1707 vmxctx->guest_r12 = regval; 1708 break; 1709 case 13: 1710 vmxctx->guest_r13 = regval; 1711 break; 1712 case 14: 1713 vmxctx->guest_r14 = regval; 1714 break; 1715 case 15: 1716 vmxctx->guest_r15 = regval; 1717 break; 1718 default: 1719 panic("invalid vmx register %d", ident); 1720 } 1721} 1722 1723static int 1724vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1725{ 1726 uint64_t crval, regval; 1727 1728 /* We only handle mov to %cr0 at this time */ 1729 if ((exitqual & 0xf0) != 0x00) 1730 return (UNHANDLED); 1731 1732 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1733 1734 vmcs_write(VMCS_CR0_SHADOW, regval); 1735 1736 crval = regval | cr0_ones_mask; 1737 crval &= ~cr0_zeros_mask; 1738 vmcs_write(VMCS_GUEST_CR0, crval); 1739 1740 if (regval & CR0_PG) { 1741 uint64_t efer, entry_ctls; 1742 1743 /* 1744 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1745 * the "IA-32e mode guest" bit 
in VM-entry control must be 1746 * equal. 1747 */ 1748 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1749 if (efer & EFER_LME) { 1750 efer |= EFER_LMA; 1751 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1752 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1753 entry_ctls |= VM_ENTRY_GUEST_LMA; 1754 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1755 } 1756 } 1757 1758 return (HANDLED); 1759} 1760 1761static int 1762vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1763{ 1764 uint64_t crval, regval; 1765 1766 /* We only handle mov to %cr4 at this time */ 1767 if ((exitqual & 0xf0) != 0x00) 1768 return (UNHANDLED); 1769 1770 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1771 1772 vmcs_write(VMCS_CR4_SHADOW, regval); 1773 1774 crval = regval | cr4_ones_mask; 1775 crval &= ~cr4_zeros_mask; 1776 vmcs_write(VMCS_GUEST_CR4, crval); 1777 1778 return (HANDLED); 1779} 1780 1781static int 1782vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1783{ 1784 struct vlapic *vlapic; 1785 uint64_t cr8; 1786 int regnum; 1787 1788 /* We only handle mov %cr8 to/from a register at this time. */ 1789 if ((exitqual & 0xe0) != 0x00) { 1790 return (UNHANDLED); 1791 } 1792 1793 vlapic = vm_lapic(vmx->vm, vcpu); 1794 regnum = (exitqual >> 8) & 0xf; 1795 if (exitqual & 0x10) { 1796 cr8 = vlapic_get_cr8(vlapic); 1797 vmx_set_guest_reg(vmx, vcpu, regnum, cr8); 1798 } else { 1799 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum); 1800 vlapic_set_cr8(vlapic, cr8); 1801 } 1802 1803 return (HANDLED); 1804} 1805 1806/* 1807 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1808 */ 1809static int 1810vmx_cpl(void) 1811{ 1812 uint32_t ssar; 1813 1814 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1815 return ((ssar >> 5) & 0x3); 1816} 1817 1818static enum vm_cpu_mode 1819vmx_cpu_mode(void) 1820{ 1821 uint32_t csar; 1822 1823 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1824 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1825 if (csar & 0x2000) 1826 return (CPU_MODE_64BIT); /* CS.L = 1 */ 1827 else 1828 return (CPU_MODE_COMPATIBILITY); 1829 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1830 return (CPU_MODE_PROTECTED); 1831 } else { 1832 return (CPU_MODE_REAL); 1833 } 1834} 1835 1836static enum vm_paging_mode 1837vmx_paging_mode(void) 1838{ 1839 1840 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1841 return (PAGING_MODE_FLAT); 1842 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1843 return (PAGING_MODE_32); 1844 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1845 return (PAGING_MODE_64); 1846 else 1847 return (PAGING_MODE_PAE); 1848} 1849 1850static uint64_t 1851inout_str_index(struct vmx *vmx, int vcpuid, int in) 1852{ 1853 uint64_t val; 1854 int error; 1855 enum vm_reg_name reg; 1856 1857 reg = in ? 
VM_REG_GUEST_RDI : VM_REG_GUEST_RSI; 1858 error = vmx_getreg(vmx, vcpuid, reg, &val); 1859 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error)); 1860 return (val); 1861} 1862 1863static uint64_t 1864inout_str_count(struct vmx *vmx, int vcpuid, int rep) 1865{ 1866 uint64_t val; 1867 int error; 1868 1869 if (rep) { 1870 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val); 1871 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error)); 1872 } else { 1873 val = 1; 1874 } 1875 return (val); 1876} 1877 1878static int 1879inout_str_addrsize(uint32_t inst_info) 1880{ 1881 uint32_t size; 1882 1883 size = (inst_info >> 7) & 0x7; 1884 switch (size) { 1885 case 0: 1886 return (2); /* 16 bit */ 1887 case 1: 1888 return (4); /* 32 bit */ 1889 case 2: 1890 return (8); /* 64 bit */ 1891 default: 1892 panic("%s: invalid size encoding %d", __func__, size); 1893 } 1894} 1895 1896static void 1897inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in, 1898 struct vm_inout_str *vis) 1899{ 1900 int error, s; 1901 1902 if (in) { 1903 vis->seg_name = VM_REG_GUEST_ES; 1904 } else { 1905 s = (inst_info >> 15) & 0x7; 1906 vis->seg_name = vm_segment_name(s); 1907 } 1908 1909 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc); 1910 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error)); 1911} 1912 1913static void 1914vmx_paging_info(struct vm_guest_paging *paging) 1915{ 1916 paging->cr3 = vmcs_guest_cr3(); 1917 paging->cpl = vmx_cpl(); 1918 paging->cpu_mode = vmx_cpu_mode(); 1919 paging->paging_mode = vmx_paging_mode(); 1920} 1921 1922static void 1923vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla) 1924{ 1925 struct vm_guest_paging *paging; 1926 uint32_t csar; 1927 1928 paging = &vmexit->u.inst_emul.paging; 1929 1930 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 1931 vmexit->inst_length = 0; 1932 vmexit->u.inst_emul.gpa = gpa; 1933 vmexit->u.inst_emul.gla = gla; 1934 vmx_paging_info(paging); 1935 switch (paging->cpu_mode) { 1936 case CPU_MODE_REAL: 1937 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1938 vmexit->u.inst_emul.cs_d = 0; 1939 break; 1940 case CPU_MODE_PROTECTED: 1941 case CPU_MODE_COMPATIBILITY: 1942 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1943 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1944 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar); 1945 break; 1946 default: 1947 vmexit->u.inst_emul.cs_base = 0; 1948 vmexit->u.inst_emul.cs_d = 0; 1949 break; 1950 } 1951 vie_init(&vmexit->u.inst_emul.vie, NULL, 0); 1952} 1953 1954static int 1955ept_fault_type(uint64_t ept_qual) 1956{ 1957 int fault_type; 1958 1959 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1960 fault_type = VM_PROT_WRITE; 1961 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1962 fault_type = VM_PROT_EXECUTE; 1963 else 1964 fault_type= VM_PROT_READ; 1965 1966 return (fault_type); 1967} 1968 1969static boolean_t 1970ept_emulation_fault(uint64_t ept_qual) 1971{ 1972 int read, write; 1973 1974 /* EPT fault on an instruction fetch doesn't make sense here */ 1975 if (ept_qual & EPT_VIOLATION_INST_FETCH) 1976 return (FALSE); 1977 1978 /* EPT fault must be a read fault or a write fault */ 1979 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 1980 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 1981 if ((read | write) == 0) 1982 return (FALSE); 1983 1984 /* 1985 * The EPT violation must have been caused by accessing a 1986 * guest-physical address that is a translation of a guest-linear 1987 * address. 
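 * In other words, both the GLA-valid and the translation-valid bits must be
 * set in the exit qualification.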
1988 */ 1989 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 1990 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 1991 return (FALSE); 1992 } 1993 1994 return (TRUE); 1995} 1996 1997static __inline int 1998apic_access_virtualization(struct vmx *vmx, int vcpuid) 1999{ 2000 uint32_t proc_ctls2; 2001 2002 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2003 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); 2004} 2005 2006static __inline int 2007x2apic_virtualization(struct vmx *vmx, int vcpuid) 2008{ 2009 uint32_t proc_ctls2; 2010 2011 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2012 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 2013} 2014 2015static int 2016vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 2017 uint64_t qual) 2018{ 2019 int error, handled, offset; 2020 uint32_t *apic_regs, vector; 2021 bool retu; 2022 2023 handled = HANDLED; 2024 offset = APIC_WRITE_OFFSET(qual); 2025 2026 if (!apic_access_virtualization(vmx, vcpuid)) { 2027 /* 2028 * In general there should not be any APIC write VM-exits 2029 * unless APIC-access virtualization is enabled. 2030 * 2031 * However self-IPI virtualization can legitimately trigger 2032 * an APIC-write VM-exit so treat it specially. 2033 */ 2034 if (x2apic_virtualization(vmx, vcpuid) && 2035 offset == APIC_OFFSET_SELF_IPI) { 2036 apic_regs = (uint32_t *)(vlapic->apic_page); 2037 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 2038 vlapic_self_ipi_handler(vlapic, vector); 2039 return (HANDLED); 2040 } else 2041 return (UNHANDLED); 2042 } 2043 2044 switch (offset) { 2045 case APIC_OFFSET_ID: 2046 vlapic_id_write_handler(vlapic); 2047 break; 2048 case APIC_OFFSET_LDR: 2049 vlapic_ldr_write_handler(vlapic); 2050 break; 2051 case APIC_OFFSET_DFR: 2052 vlapic_dfr_write_handler(vlapic); 2053 break; 2054 case APIC_OFFSET_SVR: 2055 vlapic_svr_write_handler(vlapic); 2056 break; 2057 case APIC_OFFSET_ESR: 2058 vlapic_esr_write_handler(vlapic); 2059 break; 2060 case APIC_OFFSET_ICR_LOW: 2061 retu = false; 2062 error = vlapic_icrlo_write_handler(vlapic, &retu); 2063 if (error != 0 || retu) 2064 handled = UNHANDLED; 2065 break; 2066 case APIC_OFFSET_CMCI_LVT: 2067 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 2068 vlapic_lvt_write_handler(vlapic, offset); 2069 break; 2070 case APIC_OFFSET_TIMER_ICR: 2071 vlapic_icrtmr_write_handler(vlapic); 2072 break; 2073 case APIC_OFFSET_TIMER_DCR: 2074 vlapic_dcr_write_handler(vlapic); 2075 break; 2076 default: 2077 handled = UNHANDLED; 2078 break; 2079 } 2080 return (handled); 2081} 2082 2083static bool 2084apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 2085{ 2086 2087 if (apic_access_virtualization(vmx, vcpuid) && 2088 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2089 return (true); 2090 else 2091 return (false); 2092} 2093 2094static int 2095vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2096{ 2097 uint64_t qual; 2098 int access_type, offset, allowed; 2099 2100 if (!apic_access_virtualization(vmx, vcpuid)) 2101 return (UNHANDLED); 2102 2103 qual = vmexit->u.vmx.exit_qualification; 2104 access_type = APIC_ACCESS_TYPE(qual); 2105 offset = APIC_ACCESS_OFFSET(qual); 2106 2107 allowed = 0; 2108 if (access_type == 0) { 2109 /* 2110 * Read data access to the following registers is expected. 
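 * Accesses on this list fall through to vmexit_inst_emul() below so that
 * the faulting instruction can be emulated.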
2111 */ 2112 switch (offset) { 2113 case APIC_OFFSET_APR: 2114 case APIC_OFFSET_PPR: 2115 case APIC_OFFSET_RRR: 2116 case APIC_OFFSET_CMCI_LVT: 2117 case APIC_OFFSET_TIMER_CCR: 2118 allowed = 1; 2119 break; 2120 default: 2121 break; 2122 } 2123 } else if (access_type == 1) { 2124 /* 2125 * Write data access to the following registers is expected. 2126 */ 2127 switch (offset) { 2128 case APIC_OFFSET_VER: 2129 case APIC_OFFSET_APR: 2130 case APIC_OFFSET_PPR: 2131 case APIC_OFFSET_RRR: 2132 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2133 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2134 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2135 case APIC_OFFSET_CMCI_LVT: 2136 case APIC_OFFSET_TIMER_CCR: 2137 allowed = 1; 2138 break; 2139 default: 2140 break; 2141 } 2142 } 2143 2144 if (allowed) { 2145 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset, 2146 VIE_INVALID_GLA); 2147 } 2148 2149 /* 2150 * Regardless of whether the APIC-access is allowed this handler 2151 * always returns UNHANDLED: 2152 * - if the access is allowed then it is handled by emulating the 2153 * instruction that caused the VM-exit (outside the critical section) 2154 * - if the access is not allowed then it will be converted to an 2155 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2156 */ 2157 return (UNHANDLED); 2158} 2159 2160static enum task_switch_reason 2161vmx_task_switch_reason(uint64_t qual) 2162{ 2163 int reason; 2164 2165 reason = (qual >> 30) & 0x3; 2166 switch (reason) { 2167 case 0: 2168 return (TSR_CALL); 2169 case 1: 2170 return (TSR_IRET); 2171 case 2: 2172 return (TSR_JMP); 2173 case 3: 2174 return (TSR_IDT_GATE); 2175 default: 2176 panic("%s: invalid reason %d", __func__, reason); 2177 } 2178} 2179 2180static int 2181emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu) 2182{ 2183 int error; 2184 2185 if (lapic_msr(num)) 2186 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu); 2187 else 2188 error = vmx_wrmsr(vmx, vcpuid, num, val, retu); 2189 2190 return (error); 2191} 2192 2193static int 2194emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu) 2195{ 2196 struct vmxctx *vmxctx; 2197 uint64_t result; 2198 uint32_t eax, edx; 2199 int error; 2200 2201 if (lapic_msr(num)) 2202 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu); 2203 else 2204 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu); 2205 2206 if (error == 0) { 2207 eax = result; 2208 vmxctx = &vmx->ctx[vcpuid]; 2209 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax); 2210 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error)); 2211 2212 edx = result >> 32; 2213 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx); 2214 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error)); 2215 } 2216 2217 return (error); 2218} 2219 2220static int 2221vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2222{ 2223 int error, errcode, errcode_valid, handled, in; 2224 struct vmxctx *vmxctx; 2225 struct vlapic *vlapic; 2226 struct vm_inout_str *vis; 2227 struct vm_task_switch *ts; 2228 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 2229 uint32_t intr_type, intr_vec, reason; 2230 uint64_t exitintinfo, qual, gpa; 2231 bool retu; 2232 2233 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2234 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2235 2236 handled = UNHANDLED; 2237 vmxctx = &vmx->ctx[vcpu]; 2238 2239 qual = vmexit->u.vmx.exit_qualification; 2240 reason = vmexit->u.vmx.exit_reason; 2241 vmexit->exitcode = 
VM_EXITCODE_BOGUS; 2242 2243 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2244 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2245 2246 /* 2247 * VM-entry failures during or after loading guest state. 2248 * 2249 * These VM-exits are uncommon but must be handled specially 2250 * as most VM-exit fields are not populated as usual. 2251 */ 2252 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { 2253 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2254 __asm __volatile("int $18"); 2255 return (1); 2256 } 2257 2258 /* 2259 * VM exits that can be triggered during event delivery need to 2260 * be handled specially by re-injecting the event if the IDT 2261 * vectoring information field's valid bit is set. 2262 * 2263 * See "Information for VM Exits During Event Delivery" in Intel SDM 2264 * for details. 2265 */ 2266 idtvec_info = vmcs_idt_vectoring_info(); 2267 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2268 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2269 exitintinfo = idtvec_info; 2270 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2271 idtvec_err = vmcs_idt_vectoring_err(); 2272 exitintinfo |= (uint64_t)idtvec_err << 32; 2273 } 2274 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2275 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2276 __func__, error)); 2277 2278 /* 2279 * If 'virtual NMIs' are being used and the VM-exit 2280 * happened while injecting an NMI during the previous 2281 * VM-entry, then clear "blocking by NMI" in the 2282 * Guest Interruptibility-State so the NMI can be 2283 * reinjected on the subsequent VM-entry. 2284 * 2285 * However, if the NMI was being delivered through a task 2286 * gate, then the new task must start execution with NMIs 2287 * blocked so don't clear NMI blocking in this case. 2288 */ 2289 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2290 if (intr_type == VMCS_INTR_T_NMI) { 2291 if (reason != EXIT_REASON_TASK_SWITCH) 2292 vmx_clear_nmi_blocking(vmx, vcpu); 2293 else 2294 vmx_assert_nmi_blocking(vmx, vcpu); 2295 } 2296 2297 /* 2298 * Update VM-entry instruction length if the event being 2299 * delivered was a software interrupt or software exception. 2300 */ 2301 if (intr_type == VMCS_INTR_T_SWINTR || 2302 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2303 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2304 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2305 } 2306 } 2307 2308 switch (reason) { 2309 case EXIT_REASON_TASK_SWITCH: 2310 ts = &vmexit->u.task_switch; 2311 ts->tsssel = qual & 0xffff; 2312 ts->reason = vmx_task_switch_reason(qual); 2313 ts->ext = 0; 2314 ts->errcode_valid = 0; 2315 vmx_paging_info(&ts->paging); 2316 /* 2317 * If the task switch was due to a CALL, JMP, IRET, software 2318 * interrupt (INT n) or software exception (INT3, INTO), 2319 * then the saved %rip references the instruction that caused 2320 * the task switch. The instruction length field in the VMCS 2321 * is valid in this case. 2322 * 2323 * In all other cases (e.g., NMI, hardware exception) the 2324 * saved %rip is one that would have been saved in the old TSS 2325 * had the task switch completed normally so the instruction 2326 * length field is not needed in this case and is explicitly 2327 * set to 0. 
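 * The TSR_IDT_GATE case below uses the IDT-vectoring information to
 * distinguish software-initiated switches from those triggered by an
 * external event.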
2328 */ 2329 if (ts->reason == TSR_IDT_GATE) { 2330 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2331 ("invalid idtvec_info %#x for IDT task switch", 2332 idtvec_info)); 2333 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2334 if (intr_type != VMCS_INTR_T_SWINTR && 2335 intr_type != VMCS_INTR_T_SWEXCEPTION && 2336 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2337 /* Task switch triggered by external event */ 2338 ts->ext = 1; 2339 vmexit->inst_length = 0; 2340 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2341 ts->errcode_valid = 1; 2342 ts->errcode = vmcs_idt_vectoring_err(); 2343 } 2344 } 2345 } 2346 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2347 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2348 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2349 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2350 ts->ext ? "external" : "internal", 2351 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2352 break; 2353 case EXIT_REASON_CR_ACCESS: 2354 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2355 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2356 switch (qual & 0xf) { 2357 case 0: 2358 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2359 break; 2360 case 4: 2361 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2362 break; 2363 case 8: 2364 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2365 break; 2366 } 2367 break; 2368 case EXIT_REASON_RDMSR: 2369 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2370 retu = false; 2371 ecx = vmxctx->guest_rcx; 2372 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2373 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); 2374 error = emulate_rdmsr(vmx, vcpu, ecx, &retu); 2375 if (error) { 2376 vmexit->exitcode = VM_EXITCODE_RDMSR; 2377 vmexit->u.msr.code = ecx; 2378 } else if (!retu) { 2379 handled = HANDLED; 2380 } else { 2381 /* Return to userspace with a valid exitcode */ 2382 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2383 ("emulate_rdmsr retu with bogus exitcode")); 2384 } 2385 break; 2386 case EXIT_REASON_WRMSR: 2387 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2388 retu = false; 2389 eax = vmxctx->guest_rax; 2390 ecx = vmxctx->guest_rcx; 2391 edx = vmxctx->guest_rdx; 2392 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2393 ecx, (uint64_t)edx << 32 | eax); 2394 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, 2395 (uint64_t)edx << 32 | eax); 2396 error = emulate_wrmsr(vmx, vcpu, ecx, 2397 (uint64_t)edx << 32 | eax, &retu); 2398 if (error) { 2399 vmexit->exitcode = VM_EXITCODE_WRMSR; 2400 vmexit->u.msr.code = ecx; 2401 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2402 } else if (!retu) { 2403 handled = HANDLED; 2404 } else { 2405 /* Return to userspace with a valid exitcode */ 2406 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2407 ("emulate_wrmsr retu with bogus exitcode")); 2408 } 2409 break; 2410 case EXIT_REASON_HLT: 2411 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2412 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2413 vmexit->exitcode = VM_EXITCODE_HLT; 2414 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2415 if (virtual_interrupt_delivery) 2416 vmexit->u.hlt.intr_status = 2417 vmcs_read(VMCS_GUEST_INTR_STATUS); 2418 else 2419 vmexit->u.hlt.intr_status = 0; 2420 break; 2421 case EXIT_REASON_MTF: 2422 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2423 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2424 vmexit->exitcode = VM_EXITCODE_MTRAP; 2425 vmexit->inst_length = 0; 2426 break; 2427 case EXIT_REASON_PAUSE: 2428 vmm_stat_incr(vmx->vm, 
vcpu, VMEXIT_PAUSE, 1); 2429 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2430 vmexit->exitcode = VM_EXITCODE_PAUSE; 2431 break; 2432 case EXIT_REASON_INTR_WINDOW: 2433 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2434 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2435 vmx_clear_int_window_exiting(vmx, vcpu); 2436 return (1); 2437 case EXIT_REASON_EXT_INTR: 2438 /* 2439 * External interrupts serve only to cause VM exits and allow 2440 * the host interrupt handler to run. 2441 * 2442 * If this external interrupt triggers a virtual interrupt 2443 * to a VM, then that state will be recorded by the 2444 * host interrupt handler in the VM's softc. We will inject 2445 * this virtual interrupt during the subsequent VM enter. 2446 */ 2447 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2448 SDT_PROBE4(vmm, vmx, exit, interrupt, 2449 vmx, vcpu, vmexit, intr_info); 2450 2451 /* 2452 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2453 * This appears to be a bug in VMware Fusion? 2454 */ 2455 if (!(intr_info & VMCS_INTR_VALID)) 2456 return (1); 2457 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2458 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2459 ("VM exit interruption info invalid: %#x", intr_info)); 2460 vmx_trigger_hostintr(intr_info & 0xff); 2461 2462 /* 2463 * This is special. We want to treat this as an 'handled' 2464 * VM-exit but not increment the instruction pointer. 2465 */ 2466 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2467 return (1); 2468 case EXIT_REASON_NMI_WINDOW: 2469 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2470 /* Exit to allow the pending virtual NMI to be injected */ 2471 if (vm_nmi_pending(vmx->vm, vcpu)) 2472 vmx_inject_nmi(vmx, vcpu); 2473 vmx_clear_nmi_window_exiting(vmx, vcpu); 2474 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2475 return (1); 2476 case EXIT_REASON_INOUT: 2477 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2478 vmexit->exitcode = VM_EXITCODE_INOUT; 2479 vmexit->u.inout.bytes = (qual & 0x7) + 1; 2480 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; 2481 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; 2482 vmexit->u.inout.rep = (qual & 0x20) ? 
1 : 0; 2483 vmexit->u.inout.port = (uint16_t)(qual >> 16); 2484 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); 2485 if (vmexit->u.inout.string) { 2486 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 2487 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 2488 vis = &vmexit->u.inout_str; 2489 vmx_paging_info(&vis->paging); 2490 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2491 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); 2492 vis->index = inout_str_index(vmx, vcpu, in); 2493 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep); 2494 vis->addrsize = inout_str_addrsize(inst_info); 2495 inout_str_seginfo(vmx, vcpu, inst_info, in, vis); 2496 } 2497 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2498 break; 2499 case EXIT_REASON_CPUID: 2500 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2501 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2502 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2503 break; 2504 case EXIT_REASON_EXCEPTION: 2505 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2506 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2507 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2508 ("VM exit interruption info invalid: %#x", intr_info)); 2509 2510 intr_vec = intr_info & 0xff; 2511 intr_type = intr_info & VMCS_INTR_T_MASK; 2512 2513 /* 2514 * If Virtual NMIs control is 1 and the VM-exit is due to a 2515 * fault encountered during the execution of IRET then we must 2516 * restore the state of "virtual-NMI blocking" before resuming 2517 * the guest. 2518 * 2519 * See "Resuming Guest Software after Handling an Exception". 2520 * See "Information for VM Exits Due to Vectored Events". 2521 */ 2522 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2523 (intr_vec != IDT_DF) && 2524 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2525 vmx_restore_nmi_blocking(vmx, vcpu); 2526 2527 /* 2528 * The NMI has already been handled in vmx_exit_handle_nmi(). 2529 */ 2530 if (intr_type == VMCS_INTR_T_NMI) 2531 return (1); 2532 2533 /* 2534 * Call the machine check handler by hand. Also don't reflect 2535 * the machine check back into the guest. 2536 */ 2537 if (intr_vec == IDT_MC) { 2538 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2539 __asm __volatile("int $18"); 2540 return (1); 2541 } 2542 2543 if (intr_vec == IDT_PF) { 2544 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual); 2545 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d", 2546 __func__, error)); 2547 } 2548 2549 /* 2550 * Software exceptions exhibit trap-like behavior. This in 2551 * turn requires populating the VM-entry instruction length 2552 * so that the %rip in the trap frame is past the INT3/INTO 2553 * instruction. 
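 * Fault-like exceptions, by contrast, re-execute the faulting instruction,
 * so no instruction length needs to be supplied when they are reflected.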
2554 */ 2555 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2556 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2557 2558 /* Reflect all other exceptions back into the guest */ 2559 errcode_valid = errcode = 0; 2560 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2561 errcode_valid = 1; 2562 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2563 } 2564 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into " 2565 "the guest", intr_vec, errcode); 2566 SDT_PROBE5(vmm, vmx, exit, exception, 2567 vmx, vcpu, vmexit, intr_vec, errcode); 2568 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2569 errcode_valid, errcode, 0); 2570 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2571 __func__, error)); 2572 return (1); 2573 2574 case EXIT_REASON_EPT_FAULT: 2575 /* 2576 * If 'gpa' lies within the address space allocated to 2577 * memory then this must be a nested page fault otherwise 2578 * this must be an instruction that accesses MMIO space. 2579 */ 2580 gpa = vmcs_gpa(); 2581 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2582 apic_access_fault(vmx, vcpu, gpa)) { 2583 vmexit->exitcode = VM_EXITCODE_PAGING; 2584 vmexit->inst_length = 0; 2585 vmexit->u.paging.gpa = gpa; 2586 vmexit->u.paging.fault_type = ept_fault_type(qual); 2587 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2588 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2589 vmx, vcpu, vmexit, gpa, qual); 2590 } else if (ept_emulation_fault(qual)) { 2591 vmexit_inst_emul(vmexit, gpa, vmcs_gla()); 2592 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); 2593 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2594 vmx, vcpu, vmexit, gpa); 2595 } 2596 /* 2597 * If Virtual NMIs control is 1 and the VM-exit is due to an 2598 * EPT fault during the execution of IRET then we must restore 2599 * the state of "virtual-NMI blocking" before resuming. 2600 * 2601 * See description of "NMI unblocking due to IRET" in 2602 * "Exit Qualification for EPT Violations". 2603 */ 2604 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2605 (qual & EXIT_QUAL_NMIUDTI) != 0) 2606 vmx_restore_nmi_blocking(vmx, vcpu); 2607 break; 2608 case EXIT_REASON_VIRTUALIZED_EOI: 2609 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2610 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2611 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2612 vmexit->inst_length = 0; /* trap-like */ 2613 break; 2614 case EXIT_REASON_APIC_ACCESS: 2615 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2616 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2617 break; 2618 case EXIT_REASON_APIC_WRITE: 2619 /* 2620 * APIC-write VM exit is trap-like so the %rip is already 2621 * pointing to the next instruction. 
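 * Clearing inst_length below prevents the common exit handling from
 * advancing %rip a second time.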
2622 */ 2623 vmexit->inst_length = 0; 2624 vlapic = vm_lapic(vmx->vm, vcpu); 2625 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2626 vmx, vcpu, vmexit, vlapic); 2627 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2628 break; 2629 case EXIT_REASON_XSETBV: 2630 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2631 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2632 break; 2633 case EXIT_REASON_MONITOR: 2634 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2635 vmexit->exitcode = VM_EXITCODE_MONITOR; 2636 break; 2637 case EXIT_REASON_MWAIT: 2638 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2639 vmexit->exitcode = VM_EXITCODE_MWAIT; 2640 break; 2641 default: 2642 SDT_PROBE4(vmm, vmx, exit, unknown, 2643 vmx, vcpu, vmexit, reason); 2644 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2645 break; 2646 } 2647 2648 if (handled) { 2649 /* 2650 * It is possible that control is returned to userland 2651 * even though we were able to handle the VM exit in the 2652 * kernel. 2653 * 2654 * In such a case we want to make sure that the userland 2655 * restarts guest execution at the instruction *after* 2656 * the one we just processed. Therefore we update the 2657 * guest rip in the VMCS and in 'vmexit'. 2658 */ 2659 vmexit->rip += vmexit->inst_length; 2660 vmexit->inst_length = 0; 2661 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2662 } else { 2663 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2664 /* 2665 * If this VM exit was not claimed by anybody then 2666 * treat it as a generic VMX exit. 2667 */ 2668 vmexit->exitcode = VM_EXITCODE_VMX; 2669 vmexit->u.vmx.status = VM_SUCCESS; 2670 vmexit->u.vmx.inst_type = 0; 2671 vmexit->u.vmx.inst_error = 0; 2672 } else { 2673 /* 2674 * The exitcode and collateral have been populated. 2675 * The VM exit will be processed further in userland. 2676 */ 2677 } 2678 } 2679 2680 SDT_PROBE4(vmm, vmx, exit, return, 2681 vmx, vcpu, vmexit, handled); 2682 return (handled); 2683} 2684 2685static __inline void 2686vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2687{ 2688 2689 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2690 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2691 vmxctx->inst_fail_status)); 2692 2693 vmexit->inst_length = 0; 2694 vmexit->exitcode = VM_EXITCODE_VMX; 2695 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2696 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2697 vmexit->u.vmx.exit_reason = ~0; 2698 vmexit->u.vmx.exit_qualification = ~0; 2699 2700 switch (rc) { 2701 case VMX_VMRESUME_ERROR: 2702 case VMX_VMLAUNCH_ERROR: 2703 case VMX_INVEPT_ERROR: 2704 vmexit->u.vmx.inst_type = rc; 2705 break; 2706 default: 2707 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2708 } 2709} 2710 2711/* 2712 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2713 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2714 * sufficient to simply vector to the NMI handler via a software interrupt. 2715 * However, this must be done before maskable interrupts are enabled 2716 * otherwise the "iret" issued by an interrupt handler will incorrectly 2717 * clear NMI blocking. 
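 * The KASSERT on PSL_I below verifies that interrupts are still disabled
 * at this point.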
2718 */ 2719static __inline void 2720vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2721{ 2722 uint32_t intr_info; 2723 2724 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2725 2726 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2727 return; 2728 2729 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2730 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2731 ("VM exit interruption info invalid: %#x", intr_info)); 2732 2733 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2734 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2735 "to NMI has invalid vector: %#x", intr_info)); 2736 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2737 __asm __volatile("int $2"); 2738 } 2739} 2740 2741static __inline void 2742vmx_dr_enter_guest(struct vmxctx *vmxctx) 2743{ 2744 register_t rflags; 2745 2746 /* Save host control debug registers. */ 2747 vmxctx->host_dr7 = rdr7(); 2748 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2749 2750 /* 2751 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2752 * exceptions in the host based on the guest DRx values. The 2753 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2754 */ 2755 load_dr7(0); 2756 wrmsr(MSR_DEBUGCTLMSR, 0); 2757 2758 /* 2759 * Disable single stepping the kernel to avoid corrupting the 2760 * guest DR6. A debugger might still be able to corrupt the 2761 * guest DR6 by setting a breakpoint after this point and then 2762 * single stepping. 2763 */ 2764 rflags = read_rflags(); 2765 vmxctx->host_tf = rflags & PSL_T; 2766 write_rflags(rflags & ~PSL_T); 2767 2768 /* Save host debug registers. */ 2769 vmxctx->host_dr0 = rdr0(); 2770 vmxctx->host_dr1 = rdr1(); 2771 vmxctx->host_dr2 = rdr2(); 2772 vmxctx->host_dr3 = rdr3(); 2773 vmxctx->host_dr6 = rdr6(); 2774 2775 /* Restore guest debug registers. */ 2776 load_dr0(vmxctx->guest_dr0); 2777 load_dr1(vmxctx->guest_dr1); 2778 load_dr2(vmxctx->guest_dr2); 2779 load_dr3(vmxctx->guest_dr3); 2780 load_dr6(vmxctx->guest_dr6); 2781} 2782 2783static __inline void 2784vmx_dr_leave_guest(struct vmxctx *vmxctx) 2785{ 2786 2787 /* Save guest debug registers. */ 2788 vmxctx->guest_dr0 = rdr0(); 2789 vmxctx->guest_dr1 = rdr1(); 2790 vmxctx->guest_dr2 = rdr2(); 2791 vmxctx->guest_dr3 = rdr3(); 2792 vmxctx->guest_dr6 = rdr6(); 2793 2794 /* 2795 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2796 * PSL_T last. 
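 * Restoring DR7 only after DR0-DR3 hold host values again avoids arming
 * hardware breakpoints on stale guest addresses.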
2797 */ 2798 load_dr0(vmxctx->host_dr0); 2799 load_dr1(vmxctx->host_dr1); 2800 load_dr2(vmxctx->host_dr2); 2801 load_dr3(vmxctx->host_dr3); 2802 load_dr6(vmxctx->host_dr6); 2803 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2804 load_dr7(vmxctx->host_dr7); 2805 write_rflags(read_rflags() | vmxctx->host_tf); 2806} 2807 2808static int 2809vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap, 2810 struct vm_eventinfo *evinfo) 2811{ 2812 int rc, handled, launched; 2813 struct vmx *vmx; 2814 struct vm *vm; 2815 struct vmxctx *vmxctx; 2816 struct vmcs *vmcs; 2817 struct vm_exit *vmexit; 2818 struct vlapic *vlapic; 2819 uint32_t exit_reason; 2820 struct region_descriptor gdtr, idtr; 2821 uint16_t ldt_sel; 2822 2823 vmx = arg; 2824 vm = vmx->vm; 2825 vmcs = &vmx->vmcs[vcpu]; 2826 vmxctx = &vmx->ctx[vcpu]; 2827 vlapic = vm_lapic(vm, vcpu); 2828 vmexit = vm_exitinfo(vm, vcpu); 2829 launched = 0; 2830 2831 KASSERT(vmxctx->pmap == pmap, 2832 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2833 2834 vmx_msr_guest_enter(vmx, vcpu); 2835 2836 VMPTRLD(vmcs); 2837 2838 /* 2839 * XXX 2840 * We do this every time because we may setup the virtual machine 2841 * from a different process than the one that actually runs it. 2842 * 2843 * If the life of a virtual machine was spent entirely in the context 2844 * of a single process we could do this once in vmx_vminit(). 2845 */ 2846 vmcs_write(VMCS_HOST_CR3, rcr3()); 2847 2848 vmcs_write(VMCS_GUEST_RIP, rip); 2849 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2850 do { 2851 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2852 "%#lx/%#lx", __func__, vmcs_guest_rip(), rip)); 2853 2854 handled = UNHANDLED; 2855 /* 2856 * Interrupts are disabled from this point on until the 2857 * guest starts executing. This is done for the following 2858 * reasons: 2859 * 2860 * If an AST is asserted on this thread after the check below, 2861 * then the IPI_AST notification will not be lost, because it 2862 * will cause a VM exit due to external interrupt as soon as 2863 * the guest state is loaded. 2864 * 2865 * A posted interrupt after 'vmx_inject_interrupts()' will 2866 * not be "lost" because it will be held pending in the host 2867 * APIC because interrupts are disabled. The pending interrupt 2868 * will be recognized as soon as the guest state is loaded. 2869 * 2870 * The same reasoning applies to the IPI generated by 2871 * pmap_invalidate_ept(). 2872 */ 2873 disable_intr(); 2874 vmx_inject_interrupts(vmx, vcpu, vlapic, rip); 2875 2876 /* 2877 * Check for vcpu suspension after injecting events because 2878 * vmx_inject_interrupts() can suspend the vcpu due to a 2879 * triple fault. 2880 */ 2881 if (vcpu_suspended(evinfo)) { 2882 enable_intr(); 2883 vm_exit_suspended(vmx->vm, vcpu, rip); 2884 break; 2885 } 2886 2887 if (vcpu_rendezvous_pending(evinfo)) { 2888 enable_intr(); 2889 vm_exit_rendezvous(vmx->vm, vcpu, rip); 2890 break; 2891 } 2892 2893 if (vcpu_reqidle(evinfo)) { 2894 enable_intr(); 2895 vm_exit_reqidle(vmx->vm, vcpu, rip); 2896 break; 2897 } 2898 2899 if (vcpu_should_yield(vm, vcpu)) { 2900 enable_intr(); 2901 vm_exit_astpending(vmx->vm, vcpu, rip); 2902 vmx_astpending_trace(vmx, vcpu, rip); 2903 handled = HANDLED; 2904 break; 2905 } 2906 2907 /* 2908 * VM exits restore the base address but not the 2909 * limits of GDTR and IDTR. The VMCS only stores the 2910 * base address, so VM exits set the limits to 0xffff. 2911 * Save and restore the full GDTR and IDTR to restore 2912 * the limits. 
2913 * 2914 * The VMCS does not save the LDTR at all, and VM 2915 * exits clear LDTR as if a NULL selector were loaded. 2916 * The userspace hypervisor probably doesn't use a 2917 * LDT, but save and restore it to be safe. 2918 */ 2919 sgdt(&gdtr); 2920 sidt(&idtr); 2921 ldt_sel = sldt(); 2922 2923 vmx_run_trace(vmx, vcpu); 2924 vmx_dr_enter_guest(vmxctx); 2925 rc = vmx_enter_guest(vmxctx, vmx, launched); 2926 vmx_dr_leave_guest(vmxctx); 2927 2928 bare_lgdt(&gdtr); 2929 lidt(&idtr); 2930 lldt(ldt_sel); 2931 2932 /* Collect some information for VM exit processing */ 2933 vmexit->rip = rip = vmcs_guest_rip(); 2934 vmexit->inst_length = vmexit_instruction_length(); 2935 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2936 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2937 2938 /* Update 'nextrip' */ 2939 vmx->state[vcpu].nextrip = rip; 2940 2941 if (rc == VMX_GUEST_VMEXIT) { 2942 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2943 enable_intr(); 2944 handled = vmx_exit_process(vmx, vcpu, vmexit); 2945 } else { 2946 enable_intr(); 2947 vmx_exit_inst_error(vmxctx, rc, vmexit); 2948 } 2949 launched = 1; 2950 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 2951 rip = vmexit->rip; 2952 } while (handled); 2953 2954 /* 2955 * If a VM exit has been handled then the exitcode must be BOGUS 2956 * If a VM exit is not handled then the exitcode must not be BOGUS 2957 */ 2958 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 2959 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 2960 panic("Mismatch between handled (%d) and exitcode (%d)", 2961 handled, vmexit->exitcode); 2962 } 2963 2964 if (!handled) 2965 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 2966 2967 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 2968 vmexit->exitcode); 2969 2970 VMCLEAR(vmcs); 2971 vmx_msr_guest_exit(vmx, vcpu); 2972 2973 return (0); 2974} 2975 2976static void 2977vmx_vmcleanup(void *arg) 2978{ 2979 int i; 2980 struct vmx *vmx = arg; 2981 2982 if (apic_access_virtualization(vmx, 0)) 2983 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2984 2985 for (i = 0; i < VM_MAXCPU; i++) 2986 vpid_free(vmx->state[i].vpid); 2987 2988 free(vmx, M_VMX); 2989 2990 return; 2991} 2992 2993static register_t * 2994vmxctx_regptr(struct vmxctx *vmxctx, int reg) 2995{ 2996 2997 switch (reg) { 2998 case VM_REG_GUEST_RAX: 2999 return (&vmxctx->guest_rax); 3000 case VM_REG_GUEST_RBX: 3001 return (&vmxctx->guest_rbx); 3002 case VM_REG_GUEST_RCX: 3003 return (&vmxctx->guest_rcx); 3004 case VM_REG_GUEST_RDX: 3005 return (&vmxctx->guest_rdx); 3006 case VM_REG_GUEST_RSI: 3007 return (&vmxctx->guest_rsi); 3008 case VM_REG_GUEST_RDI: 3009 return (&vmxctx->guest_rdi); 3010 case VM_REG_GUEST_RBP: 3011 return (&vmxctx->guest_rbp); 3012 case VM_REG_GUEST_R8: 3013 return (&vmxctx->guest_r8); 3014 case VM_REG_GUEST_R9: 3015 return (&vmxctx->guest_r9); 3016 case VM_REG_GUEST_R10: 3017 return (&vmxctx->guest_r10); 3018 case VM_REG_GUEST_R11: 3019 return (&vmxctx->guest_r11); 3020 case VM_REG_GUEST_R12: 3021 return (&vmxctx->guest_r12); 3022 case VM_REG_GUEST_R13: 3023 return (&vmxctx->guest_r13); 3024 case VM_REG_GUEST_R14: 3025 return (&vmxctx->guest_r14); 3026 case VM_REG_GUEST_R15: 3027 return (&vmxctx->guest_r15); 3028 case VM_REG_GUEST_CR2: 3029 return (&vmxctx->guest_cr2); 3030 case VM_REG_GUEST_DR0: 3031 return (&vmxctx->guest_dr0); 3032 case VM_REG_GUEST_DR1: 3033 return (&vmxctx->guest_dr1); 3034 case VM_REG_GUEST_DR2: 3035 return (&vmxctx->guest_dr2); 3036 case VM_REG_GUEST_DR3: 3037 return 
(&vmxctx->guest_dr3); 3038 case VM_REG_GUEST_DR6: 3039 return (&vmxctx->guest_dr6); 3040 default: 3041 break; 3042 } 3043 return (NULL); 3044} 3045 3046static int 3047vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 3048{ 3049 register_t *regp; 3050 3051 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3052 *retval = *regp; 3053 return (0); 3054 } else 3055 return (EINVAL); 3056} 3057 3058static int 3059vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 3060{ 3061 register_t *regp; 3062 3063 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3064 *regp = val; 3065 return (0); 3066 } else 3067 return (EINVAL); 3068} 3069 3070static int 3071vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval) 3072{ 3073 uint64_t gi; 3074 int error; 3075 3076 error = vmcs_getreg(&vmx->vmcs[vcpu], running, 3077 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi); 3078 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3079 return (error); 3080} 3081 3082static int 3083vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val) 3084{ 3085 struct vmcs *vmcs; 3086 uint64_t gi; 3087 int error, ident; 3088 3089 /* 3090 * Forcing the vcpu into an interrupt shadow is not supported. 3091 */ 3092 if (val) { 3093 error = EINVAL; 3094 goto done; 3095 } 3096 3097 vmcs = &vmx->vmcs[vcpu]; 3098 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY); 3099 error = vmcs_getreg(vmcs, running, ident, &gi); 3100 if (error == 0) { 3101 gi &= ~HWINTR_BLOCKING; 3102 error = vmcs_setreg(vmcs, running, ident, gi); 3103 } 3104done: 3105 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val, 3106 error ? "failed" : "succeeded"); 3107 return (error); 3108} 3109 3110static int 3111vmx_shadow_reg(int reg) 3112{ 3113 int shreg; 3114 3115 shreg = -1; 3116 3117 switch (reg) { 3118 case VM_REG_GUEST_CR0: 3119 shreg = VMCS_CR0_SHADOW; 3120 break; 3121 case VM_REG_GUEST_CR4: 3122 shreg = VMCS_CR4_SHADOW; 3123 break; 3124 default: 3125 break; 3126 } 3127 3128 return (shreg); 3129} 3130 3131static int 3132vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3133{ 3134 int running, hostcpu; 3135 struct vmx *vmx = arg; 3136 3137 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3138 if (running && hostcpu != curcpu) 3139 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 3140 3141 if (reg == VM_REG_GUEST_INTR_SHADOW) 3142 return (vmx_get_intr_shadow(vmx, vcpu, running, retval)); 3143 3144 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0) 3145 return (0); 3146 3147 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval)); 3148} 3149 3150static int 3151vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3152{ 3153 int error, hostcpu, running, shadow; 3154 uint64_t ctls; 3155 pmap_t pmap; 3156 struct vmx *vmx = arg; 3157 3158 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3159 if (running && hostcpu != curcpu) 3160 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 3161 3162 if (reg == VM_REG_GUEST_INTR_SHADOW) 3163 return (vmx_modify_intr_shadow(vmx, vcpu, running, val)); 3164 3165 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0) 3166 return (0); 3167 3168 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val); 3169 3170 if (error == 0) { 3171 /* 3172 * If the "load EFER" VM-entry control is 1 then the 3173 * value of EFER.LMA must be identical to "IA-32e mode guest" 3174 * bit in the VM-entry control. 
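 * Keeping the two in sync here avoids failing the VM-entry guest-state
 * checks on the next entry.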
3175 */ 3176 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 3177 (reg == VM_REG_GUEST_EFER)) { 3178 vmcs_getreg(&vmx->vmcs[vcpu], running, 3179 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 3180 if (val & EFER_LMA) 3181 ctls |= VM_ENTRY_GUEST_LMA; 3182 else 3183 ctls &= ~VM_ENTRY_GUEST_LMA; 3184 vmcs_setreg(&vmx->vmcs[vcpu], running, 3185 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 3186 } 3187 3188 shadow = vmx_shadow_reg(reg); 3189 if (shadow > 0) { 3190 /* 3191 * Store the unmodified value in the shadow 3192 */ 3193 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 3194 VMCS_IDENT(shadow), val); 3195 } 3196 3197 if (reg == VM_REG_GUEST_CR3) { 3198 /* 3199 * Invalidate the guest vcpu's TLB mappings to emulate 3200 * the behavior of updating %cr3. 3201 * 3202 * XXX the processor retains global mappings when %cr3 3203 * is updated but vmx_invvpid() does not. 3204 */ 3205 pmap = vmx->ctx[vcpu].pmap; 3206 vmx_invvpid(vmx, vcpu, pmap, running); 3207 } 3208 } 3209 3210 return (error); 3211} 3212 3213static int 3214vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 3215{ 3216 int hostcpu, running; 3217 struct vmx *vmx = arg; 3218 3219 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3220 if (running && hostcpu != curcpu) 3221 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3222 3223 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc)); 3224} 3225 3226static int 3227vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 3228{ 3229 int hostcpu, running; 3230 struct vmx *vmx = arg; 3231 3232 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3233 if (running && hostcpu != curcpu) 3234 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3235 3236 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc)); 3237} 3238 3239static int 3240vmx_getcap(void *arg, int vcpu, int type, int *retval) 3241{ 3242 struct vmx *vmx = arg; 3243 int vcap; 3244 int ret; 3245 3246 ret = ENOENT; 3247 3248 vcap = vmx->cap[vcpu].set; 3249 3250 switch (type) { 3251 case VM_CAP_HALT_EXIT: 3252 if (cap_halt_exit) 3253 ret = 0; 3254 break; 3255 case VM_CAP_PAUSE_EXIT: 3256 if (cap_pause_exit) 3257 ret = 0; 3258 break; 3259 case VM_CAP_MTRAP_EXIT: 3260 if (cap_monitor_trap) 3261 ret = 0; 3262 break; 3263 case VM_CAP_UNRESTRICTED_GUEST: 3264 if (cap_unrestricted_guest) 3265 ret = 0; 3266 break; 3267 case VM_CAP_ENABLE_INVPCID: 3268 if (cap_invpcid) 3269 ret = 0; 3270 break; 3271 default: 3272 break; 3273 } 3274 3275 if (ret == 0) 3276 *retval = (vcap & (1 << type)) ? 
1 : 0; 3277 3278 return (ret); 3279} 3280 3281static int 3282vmx_setcap(void *arg, int vcpu, int type, int val) 3283{ 3284 struct vmx *vmx = arg; 3285 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 3286 uint32_t baseval; 3287 uint32_t *pptr; 3288 int error; 3289 int flag; 3290 int reg; 3291 int retval; 3292 3293 retval = ENOENT; 3294 pptr = NULL; 3295 3296 switch (type) { 3297 case VM_CAP_HALT_EXIT: 3298 if (cap_halt_exit) { 3299 retval = 0; 3300 pptr = &vmx->cap[vcpu].proc_ctls; 3301 baseval = *pptr; 3302 flag = PROCBASED_HLT_EXITING; 3303 reg = VMCS_PRI_PROC_BASED_CTLS; 3304 } 3305 break; 3306 case VM_CAP_MTRAP_EXIT: 3307 if (cap_monitor_trap) { 3308 retval = 0; 3309 pptr = &vmx->cap[vcpu].proc_ctls; 3310 baseval = *pptr; 3311 flag = PROCBASED_MTF; 3312 reg = VMCS_PRI_PROC_BASED_CTLS; 3313 } 3314 break; 3315 case VM_CAP_PAUSE_EXIT: 3316 if (cap_pause_exit) { 3317 retval = 0; 3318 pptr = &vmx->cap[vcpu].proc_ctls; 3319 baseval = *pptr; 3320 flag = PROCBASED_PAUSE_EXITING; 3321 reg = VMCS_PRI_PROC_BASED_CTLS; 3322 } 3323 break; 3324 case VM_CAP_UNRESTRICTED_GUEST: 3325 if (cap_unrestricted_guest) { 3326 retval = 0; 3327 pptr = &vmx->cap[vcpu].proc_ctls2; 3328 baseval = *pptr; 3329 flag = PROCBASED2_UNRESTRICTED_GUEST; 3330 reg = VMCS_SEC_PROC_BASED_CTLS; 3331 } 3332 break; 3333 case VM_CAP_ENABLE_INVPCID: 3334 if (cap_invpcid) { 3335 retval = 0; 3336 pptr = &vmx->cap[vcpu].proc_ctls2; 3337 baseval = *pptr; 3338 flag = PROCBASED2_ENABLE_INVPCID; 3339 reg = VMCS_SEC_PROC_BASED_CTLS; 3340 } 3341 break; 3342 default: 3343 break; 3344 } 3345 3346 if (retval == 0) { 3347 if (val) { 3348 baseval |= flag; 3349 } else { 3350 baseval &= ~flag; 3351 } 3352 VMPTRLD(vmcs); 3353 error = vmwrite(reg, baseval); 3354 VMCLEAR(vmcs); 3355 3356 if (error) { 3357 retval = error; 3358 } else { 3359 /* 3360 * Update optional stored flags, and record 3361 * setting 3362 */ 3363 if (pptr != NULL) { 3364 *pptr = baseval; 3365 } 3366 3367 if (val) { 3368 vmx->cap[vcpu].set |= (1 << type); 3369 } else { 3370 vmx->cap[vcpu].set &= ~(1 << type); 3371 } 3372 } 3373 } 3374 3375 return (retval); 3376} 3377 3378struct vlapic_vtx { 3379 struct vlapic vlapic; 3380 struct pir_desc *pir_desc; 3381 struct vmx *vmx; 3382 u_int pending_prio; 3383}; 3384 3385#define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3386 3387#define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 3388do { \ 3389 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 3390 level ? "level" : "edge", vector); \ 3391 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 3392 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 3393 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 3394 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 3395 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 3396} while (0) 3397 3398/* 3399 * vlapic->ops handlers that utilize the APICv hardware assist described in 3400 * Chapter 29 of the Intel SDM. 3401 */ 3402static int 3403vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 3404{ 3405 struct vlapic_vtx *vlapic_vtx; 3406 struct pir_desc *pir_desc; 3407 uint64_t mask; 3408 int idx, notify = 0; 3409 3410 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3411 pir_desc = vlapic_vtx->pir_desc; 3412 3413 /* 3414 * Keep track of interrupt requests in the PIR descriptor. This is 3415 * because the virtual APIC page pointed to by the VMCS cannot be 3416 * modified if the vcpu is running. 
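 * Bits set here are later transferred into the virtual APIC IRR by
 * vmx_inject_pir() before the next VM entry.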
3417 */ 3418 idx = vector / 64; 3419 mask = 1UL << (vector % 64); 3420 atomic_set_long(&pir_desc->pir[idx], mask); 3421 3422 /* 3423 * A notification is required whenever the 'pending' bit makes a 3424 * transition from 0->1. 3425 * 3426 * Even if the 'pending' bit is already asserted, notification about 3427 * the incoming interrupt may still be necessary. For example, if a 3428 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3429 * the 0->1 'pending' transition with a notification, but the vCPU 3430 * would ignore the interrupt for the time being. The same vCPU would 3431 * need to then be notified if a high-priority interrupt arrived which 3432 * satisfied the PPR. 3433 * 3434 * The priorities of interrupts injected while 'pending' is asserted 3435 * are tracked in a custom bitfield 'pending_prio'. Should the 3436 * to-be-injected interrupt exceed the priorities already present, the 3437 * notification is sent. The priorities recorded in 'pending_prio' are 3438 * cleared whenever the 'pending' bit makes another 0->1 transition. 3439 */ 3440 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3441 notify = 1; 3442 vlapic_vtx->pending_prio = 0; 3443 } else { 3444 const u_int old_prio = vlapic_vtx->pending_prio; 3445 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3446 3447 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3448 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3449 notify = 1; 3450 } 3451 } 3452 3453 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 3454 level, "vmx_set_intr_ready"); 3455 return (notify); 3456} 3457 3458static int 3459vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 3460{ 3461 struct vlapic_vtx *vlapic_vtx; 3462 struct pir_desc *pir_desc; 3463 struct LAPIC *lapic; 3464 uint64_t pending, pirval; 3465 uint32_t ppr, vpr; 3466 int i; 3467 3468 /* 3469 * This function is only expected to be called from the 'HLT' exit 3470 * handler which does not care about the vector that is pending. 3471 */ 3472 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 3473 3474 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3475 pir_desc = vlapic_vtx->pir_desc; 3476 3477 pending = atomic_load_acq_long(&pir_desc->pending); 3478 if (!pending) { 3479 /* 3480 * While a virtual interrupt may have already been 3481 * processed the actual delivery maybe pending the 3482 * interruptibility of the guest. Recognize a pending 3483 * interrupt by reevaluating virtual interrupts 3484 * following Section 29.2.1 in the Intel SDM Volume 3. 3485 */ 3486 struct vm_exit *vmexit; 3487 uint8_t rvi, ppr; 3488 3489 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid); 3490 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT, 3491 ("vmx_pending_intr: exitcode not 'HLT'")); 3492 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; 3493 lapic = vlapic->apic_page; 3494 ppr = lapic->ppr & APIC_TPR_INT; 3495 if (rvi > ppr) { 3496 return (1); 3497 } 3498 3499 return (0); 3500 } 3501 3502 /* 3503 * If there is an interrupt pending then it will be recognized only 3504 * if its priority is greater than the processor priority. 3505 * 3506 * Special case: if the processor priority is zero then any pending 3507 * interrupt will be recognized. 
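 * In that case there is no need to scan the PIR for the highest-priority
 * pending vector.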
3508 */ 3509 lapic = vlapic->apic_page; 3510 ppr = lapic->ppr & APIC_TPR_INT; 3511 if (ppr == 0) 3512 return (1); 3513 3514 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 3515 lapic->ppr); 3516 3517 vpr = 0; 3518 for (i = 3; i >= 0; i--) { 3519 pirval = pir_desc->pir[i]; 3520 if (pirval != 0) { 3521 vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; 3522 break; 3523 } 3524 } 3525 3526 /* 3527 * If the highest-priority pending interrupt falls short of the 3528 * processor priority of this vCPU, ensure that 'pending_prio' does not 3529 * have any stale bits which would preclude a higher-priority interrupt 3530 * from incurring a notification later. 3531 */ 3532 if (vpr <= ppr) { 3533 const u_int prio_bit = VPR_PRIO_BIT(vpr); 3534 const u_int old = vlapic_vtx->pending_prio; 3535 3536 if (old > prio_bit && (old & prio_bit) == 0) { 3537 vlapic_vtx->pending_prio = prio_bit; 3538 } 3539 return (0); 3540 } 3541 return (1); 3542} 3543 3544static void 3545vmx_intr_accepted(struct vlapic *vlapic, int vector) 3546{ 3547 3548 panic("vmx_intr_accepted: not expected to be called"); 3549} 3550 3551static void 3552vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) 3553{ 3554 struct vlapic_vtx *vlapic_vtx; 3555 struct vmx *vmx; 3556 struct vmcs *vmcs; 3557 uint64_t mask, val; 3558 3559 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); 3560 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL), 3561 ("vmx_set_tmr: vcpu cannot be running")); 3562 3563 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3564 vmx = vlapic_vtx->vmx; 3565 vmcs = &vmx->vmcs[vlapic->vcpuid]; 3566 mask = 1UL << (vector % 64); 3567 3568 VMPTRLD(vmcs); 3569 val = vmcs_read(VMCS_EOI_EXIT(vector)); 3570 if (level) 3571 val |= mask; 3572 else 3573 val &= ~mask; 3574 vmcs_write(VMCS_EOI_EXIT(vector), val); 3575 VMCLEAR(vmcs); 3576} 3577 3578static void 3579vmx_enable_x2apic_mode(struct vlapic *vlapic) 3580{ 3581 struct vmx *vmx; 3582 struct vmcs *vmcs; 3583 uint32_t proc_ctls2; 3584 int vcpuid, error; 3585 3586 vcpuid = vlapic->vcpuid; 3587 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3588 vmcs = &vmx->vmcs[vcpuid]; 3589 3590 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3591 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3592 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 3593 3594 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3595 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3596 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3597 3598 VMPTRLD(vmcs); 3599 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3600 VMCLEAR(vmcs); 3601 3602 if (vlapic->vcpuid == 0) { 3603 /* 3604 * The nested page table mappings are shared by all vcpus 3605 * so unmap the APIC access page just once. 3606 */ 3607 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3608 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3609 __func__, error)); 3610 3611 /* 3612 * The MSR bitmap is shared by all vcpus so modify it only 3613 * once in the context of vcpu 0. 3614 */ 3615 error = vmx_allow_x2apic_msrs(vmx); 3616 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3617 __func__, error)); 3618 } 3619} 3620 3621static void 3622vmx_post_intr(struct vlapic *vlapic, int hostcpu) 3623{ 3624 3625 ipi_cpu(hostcpu, pirvec); 3626} 3627 3628/* 3629 * Transfer the pending interrupts in the PIR descriptor to the IRR 3630 * in the virtual APIC page. 
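 * This runs as part of event injection before the vcpu resumes guest
 * execution, so the virtual APIC page can be updated safely.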
3631 */ 3632static void 3633vmx_inject_pir(struct vlapic *vlapic) 3634{ 3635 struct vlapic_vtx *vlapic_vtx; 3636 struct pir_desc *pir_desc; 3637 struct LAPIC *lapic; 3638 uint64_t val, pirval; 3639 int rvi, pirbase = -1; 3640 uint16_t intr_status_old, intr_status_new; 3641 3642 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3643 pir_desc = vlapic_vtx->pir_desc; 3644 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3645 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3646 "no posted interrupt pending"); 3647 return; 3648 } 3649 3650 pirval = 0; 3651 pirbase = -1; 3652 lapic = vlapic->apic_page; 3653 3654 val = atomic_readandclear_long(&pir_desc->pir[0]); 3655 if (val != 0) { 3656 lapic->irr0 |= val; 3657 lapic->irr1 |= val >> 32; 3658 pirbase = 0; 3659 pirval = val; 3660 } 3661 3662 val = atomic_readandclear_long(&pir_desc->pir[1]); 3663 if (val != 0) { 3664 lapic->irr2 |= val; 3665 lapic->irr3 |= val >> 32; 3666 pirbase = 64; 3667 pirval = val; 3668 } 3669 3670 val = atomic_readandclear_long(&pir_desc->pir[2]); 3671 if (val != 0) { 3672 lapic->irr4 |= val; 3673 lapic->irr5 |= val >> 32; 3674 pirbase = 128; 3675 pirval = val; 3676 } 3677 3678 val = atomic_readandclear_long(&pir_desc->pir[3]); 3679 if (val != 0) { 3680 lapic->irr6 |= val; 3681 lapic->irr7 |= val >> 32; 3682 pirbase = 192; 3683 pirval = val; 3684 } 3685 3686 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 3687 3688 /* 3689 * Update RVI so the processor can evaluate pending virtual 3690 * interrupts on VM-entry. 3691 * 3692 * It is possible for pirval to be 0 here, even though the 3693 * pending bit has been set. The scenario is: 3694 * CPU-Y is sending a posted interrupt to CPU-X, which 3695 * is running a guest and processing posted interrupts in h/w. 3696 * CPU-X will eventually exit and the state seen in s/w is 3697 * the pending bit set, but no PIR bits set. 
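 * The interleaving that produces this state is illustrated below.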
3698 * 3699 * CPU-X CPU-Y 3700 * (vm running) (host running) 3701 * rx posted interrupt 3702 * CLEAR pending bit 3703 * SET PIR bit 3704 * READ/CLEAR PIR bits 3705 * SET pending bit 3706 * (vm exit) 3707 * pending bit set, PIR 0 3708 */ 3709 if (pirval != 0) { 3710 rvi = pirbase + flsl(pirval) - 1; 3711 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 3712 intr_status_new = (intr_status_old & 0xFF00) | rvi; 3713 if (intr_status_new > intr_status_old) { 3714 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); 3715 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3716 "guest_intr_status changed from 0x%04x to 0x%04x", 3717 intr_status_old, intr_status_new); 3718 } 3719 } 3720} 3721 3722static struct vlapic * 3723vmx_vlapic_init(void *arg, int vcpuid) 3724{ 3725 struct vmx *vmx; 3726 struct vlapic *vlapic; 3727 struct vlapic_vtx *vlapic_vtx; 3728 3729 vmx = arg; 3730 3731 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); 3732 vlapic->vm = vmx->vm; 3733 vlapic->vcpuid = vcpuid; 3734 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3735 3736 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3737 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3738 vlapic_vtx->vmx = vmx; 3739 3740 if (virtual_interrupt_delivery) { 3741 vlapic->ops.set_intr_ready = vmx_set_intr_ready; 3742 vlapic->ops.pending_intr = vmx_pending_intr; 3743 vlapic->ops.intr_accepted = vmx_intr_accepted; 3744 vlapic->ops.set_tmr = vmx_set_tmr; 3745 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode; 3746 } 3747 3748 if (posted_interrupts) 3749 vlapic->ops.post_intr = vmx_post_intr; 3750 3751 vlapic_init(vlapic); 3752 3753 return (vlapic); 3754} 3755 3756static void 3757vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3758{ 3759 3760 vlapic_cleanup(vlapic); 3761 free(vlapic, M_VLAPIC); 3762} 3763 3764struct vmm_ops vmm_ops_intel = { 3765 vmx_init, 3766 vmx_cleanup, 3767 vmx_restore, 3768 vmx_vminit, 3769 vmx_run, 3770 vmx_vmcleanup, 3771 vmx_getreg, 3772 vmx_setreg, 3773 vmx_getdesc, 3774 vmx_setdesc, 3775 vmx_getcap, 3776 vmx_setcap, 3777 ept_vmspace_alloc, 3778 ept_vmspace_free, 3779 vmx_vlapic_init, 3780 vmx_vlapic_cleanup, 3781}; 3782