vmx.c revision 365777
1/*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * Copyright (c) 2018 Joyent, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: stable/11/sys/amd64/vmm/intel/vmx.c 365777 2020-09-15 21:28:47Z emaste $ 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: stable/11/sys/amd64/vmm/intel/vmx.c 365777 2020-09-15 21:28:47Z emaste $"); 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/smp.h> 36#include <sys/kernel.h> 37#include <sys/malloc.h> 38#include <sys/pcpu.h> 39#include <sys/proc.h> 40#include <sys/sysctl.h> 41 42#include <vm/vm.h> 43#include <vm/pmap.h> 44 45#include <machine/psl.h> 46#include <machine/cpufunc.h> 47#include <machine/md_var.h> 48#include <machine/reg.h> 49#include <machine/segments.h> 50#include <machine/smp.h> 51#include <machine/specialreg.h> 52#include <machine/vmparam.h> 53 54#include <machine/vmm.h> 55#include <machine/vmm_dev.h> 56#include <machine/vmm_instruction_emul.h> 57#include "vmm_lapic.h" 58#include "vmm_host.h" 59#include "vmm_ioport.h" 60#include "vmm_ktr.h" 61#include "vmm_stat.h" 62#include "vatpic.h" 63#include "vlapic.h" 64#include "vlapic_priv.h" 65 66#include "ept.h" 67#include "vmx_cpufunc.h" 68#include "vmx.h" 69#include "vmx_msr.h" 70#include "x86.h" 71#include "vmx_controls.h" 72 73#define PINBASED_CTLS_ONE_SETTING \ 74 (PINBASED_EXTINT_EXITING | \ 75 PINBASED_NMI_EXITING | \ 76 PINBASED_VIRTUAL_NMI) 77#define PINBASED_CTLS_ZERO_SETTING 0 78 79#define PROCBASED_CTLS_WINDOW_SETTING \ 80 (PROCBASED_INT_WINDOW_EXITING | \ 81 PROCBASED_NMI_WINDOW_EXITING) 82 83#define PROCBASED_CTLS_ONE_SETTING \ 84 (PROCBASED_SECONDARY_CONTROLS | \ 85 PROCBASED_MWAIT_EXITING | \ 86 PROCBASED_MONITOR_EXITING | \ 87 PROCBASED_IO_EXITING | \ 88 PROCBASED_MSR_BITMAPS | \ 89 PROCBASED_CTLS_WINDOW_SETTING | \ 90 PROCBASED_CR8_LOAD_EXITING | \ 91 PROCBASED_CR8_STORE_EXITING) 92#define PROCBASED_CTLS_ZERO_SETTING \ 93 (PROCBASED_CR3_LOAD_EXITING | \ 94 PROCBASED_CR3_STORE_EXITING | \ 95 PROCBASED_IO_BITMAPS) 96 97#define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT 98#define PROCBASED_CTLS2_ZERO_SETTING 0 99 100#define VM_EXIT_CTLS_ONE_SETTING \ 101 (VM_EXIT_SAVE_DEBUG_CONTROLS | \ 102 VM_EXIT_HOST_LMA | \ 103 VM_EXIT_SAVE_EFER | \ 104 VM_EXIT_LOAD_EFER | \ 105 
VM_EXIT_ACKNOWLEDGE_INTERRUPT) 106 107#define VM_EXIT_CTLS_ZERO_SETTING 0 108 109#define VM_ENTRY_CTLS_ONE_SETTING \ 110 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 111 VM_ENTRY_LOAD_EFER) 112 113#define VM_ENTRY_CTLS_ZERO_SETTING \ 114 (VM_ENTRY_INTO_SMM | \ 115 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 116 117#define HANDLED 1 118#define UNHANDLED 0 119 120static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 121static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 122 123SYSCTL_DECL(_hw_vmm); 124SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL); 125 126int vmxon_enabled[MAXCPU]; 127static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 128 129static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 130static uint32_t exit_ctls, entry_ctls; 131 132static uint64_t cr0_ones_mask, cr0_zeros_mask; 133SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 134 &cr0_ones_mask, 0, NULL); 135SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 136 &cr0_zeros_mask, 0, NULL); 137 138static uint64_t cr4_ones_mask, cr4_zeros_mask; 139SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 140 &cr4_ones_mask, 0, NULL); 141SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 142 &cr4_zeros_mask, 0, NULL); 143 144static int vmx_initialized; 145SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 146 &vmx_initialized, 0, "Intel VMX initialized"); 147 148/* 149 * Optional capabilities 150 */ 151static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL); 152 153static int cap_halt_exit; 154SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0, 155 "HLT triggers a VM-exit"); 156 157static int cap_pause_exit; 158SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit, 159 0, "PAUSE triggers a VM-exit"); 160 161static int cap_unrestricted_guest; 162SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD, 163 &cap_unrestricted_guest, 0, "Unrestricted guests"); 164 165static int cap_monitor_trap; 166SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD, 167 &cap_monitor_trap, 0, "Monitor trap flag"); 168 169static int cap_invpcid; 170SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid, 171 0, "Guests are allowed to use INVPCID"); 172 173static int tpr_shadowing; 174SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing, CTLFLAG_RD, 175 &tpr_shadowing, 0, "TPR shadowing support"); 176 177static int virtual_interrupt_delivery; 178SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD, 179 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support"); 180 181static int posted_interrupts; 182SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD, 183 &posted_interrupts, 0, "APICv posted interrupt support"); 184 185static int pirvec = -1; 186SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 187 &pirvec, 0, "APICv posted interrupt vector"); 188 189static struct unrhdr *vpid_unr; 190static u_int vpid_alloc_failed; 191SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 192 &vpid_alloc_failed, 0, NULL); 193 194static int guest_l1d_flush; 195SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD, 196 &guest_l1d_flush, 0, NULL); 197static int guest_l1d_flush_sw; 198SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD, 199 &guest_l1d_flush_sw, 0, NULL); 200 201static struct msr_entry msr_load_list[1] __aligned(16); 202 203/* 204 * The definitions of SDT probes for VMX. 
205 */ 206 207SDT_PROBE_DEFINE3(vmm, vmx, exit, entry, 208 "struct vmx *", "int", "struct vm_exit *"); 209 210SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch, 211 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *"); 212 213SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess, 214 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 215 216SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr, 217 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 218 219SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr, 220 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t"); 221 222SDT_PROBE_DEFINE3(vmm, vmx, exit, halt, 223 "struct vmx *", "int", "struct vm_exit *"); 224 225SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap, 226 "struct vmx *", "int", "struct vm_exit *"); 227 228SDT_PROBE_DEFINE3(vmm, vmx, exit, pause, 229 "struct vmx *", "int", "struct vm_exit *"); 230 231SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow, 232 "struct vmx *", "int", "struct vm_exit *"); 233 234SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt, 235 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 236 237SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow, 238 "struct vmx *", "int", "struct vm_exit *"); 239 240SDT_PROBE_DEFINE3(vmm, vmx, exit, inout, 241 "struct vmx *", "int", "struct vm_exit *"); 242 243SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid, 244 "struct vmx *", "int", "struct vm_exit *"); 245 246SDT_PROBE_DEFINE5(vmm, vmx, exit, exception, 247 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int"); 248 249SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault, 250 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t"); 251 252SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault, 253 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 254 255SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi, 256 "struct vmx *", "int", "struct vm_exit *"); 257 258SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess, 259 "struct vmx *", "int", "struct vm_exit *"); 260 261SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite, 262 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *"); 263 264SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, 265 "struct vmx *", "int", "struct vm_exit *"); 266 267SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, 268 "struct vmx *", "int", "struct vm_exit *"); 269 270SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, 271 "struct vmx *", "int", "struct vm_exit *"); 272 273SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn, 274 "struct vmx *", "int", "struct vm_exit *"); 275 276SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, 277 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 278 279SDT_PROBE_DEFINE4(vmm, vmx, exit, return, 280 "struct vmx *", "int", "struct vm_exit *", "int"); 281 282/* 283 * Use the last page below 4GB as the APIC access address. This address is 284 * occupied by the boot firmware so it is guaranteed that it will not conflict 285 * with a page in system memory. 
286 */ 287#define APIC_ACCESS_ADDRESS 0xFFFFF000 288 289static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc); 290static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval); 291static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val); 292static void vmx_inject_pir(struct vlapic *vlapic); 293 294#ifdef KTR 295static const char * 296exit_reason_to_str(int reason) 297{ 298 static char reasonbuf[32]; 299 300 switch (reason) { 301 case EXIT_REASON_EXCEPTION: 302 return "exception"; 303 case EXIT_REASON_EXT_INTR: 304 return "extint"; 305 case EXIT_REASON_TRIPLE_FAULT: 306 return "triplefault"; 307 case EXIT_REASON_INIT: 308 return "init"; 309 case EXIT_REASON_SIPI: 310 return "sipi"; 311 case EXIT_REASON_IO_SMI: 312 return "iosmi"; 313 case EXIT_REASON_SMI: 314 return "smi"; 315 case EXIT_REASON_INTR_WINDOW: 316 return "intrwindow"; 317 case EXIT_REASON_NMI_WINDOW: 318 return "nmiwindow"; 319 case EXIT_REASON_TASK_SWITCH: 320 return "taskswitch"; 321 case EXIT_REASON_CPUID: 322 return "cpuid"; 323 case EXIT_REASON_GETSEC: 324 return "getsec"; 325 case EXIT_REASON_HLT: 326 return "hlt"; 327 case EXIT_REASON_INVD: 328 return "invd"; 329 case EXIT_REASON_INVLPG: 330 return "invlpg"; 331 case EXIT_REASON_RDPMC: 332 return "rdpmc"; 333 case EXIT_REASON_RDTSC: 334 return "rdtsc"; 335 case EXIT_REASON_RSM: 336 return "rsm"; 337 case EXIT_REASON_VMCALL: 338 return "vmcall"; 339 case EXIT_REASON_VMCLEAR: 340 return "vmclear"; 341 case EXIT_REASON_VMLAUNCH: 342 return "vmlaunch"; 343 case EXIT_REASON_VMPTRLD: 344 return "vmptrld"; 345 case EXIT_REASON_VMPTRST: 346 return "vmptrst"; 347 case EXIT_REASON_VMREAD: 348 return "vmread"; 349 case EXIT_REASON_VMRESUME: 350 return "vmresume"; 351 case EXIT_REASON_VMWRITE: 352 return "vmwrite"; 353 case EXIT_REASON_VMXOFF: 354 return "vmxoff"; 355 case EXIT_REASON_VMXON: 356 return "vmxon"; 357 case EXIT_REASON_CR_ACCESS: 358 return "craccess"; 359 case EXIT_REASON_DR_ACCESS: 360 return "draccess"; 361 case EXIT_REASON_INOUT: 362 return "inout"; 363 case EXIT_REASON_RDMSR: 364 return "rdmsr"; 365 case EXIT_REASON_WRMSR: 366 return "wrmsr"; 367 case EXIT_REASON_INVAL_VMCS: 368 return "invalvmcs"; 369 case EXIT_REASON_INVAL_MSR: 370 return "invalmsr"; 371 case EXIT_REASON_MWAIT: 372 return "mwait"; 373 case EXIT_REASON_MTF: 374 return "mtf"; 375 case EXIT_REASON_MONITOR: 376 return "monitor"; 377 case EXIT_REASON_PAUSE: 378 return "pause"; 379 case EXIT_REASON_MCE_DURING_ENTRY: 380 return "mce-during-entry"; 381 case EXIT_REASON_TPR: 382 return "tpr"; 383 case EXIT_REASON_APIC_ACCESS: 384 return "apic-access"; 385 case EXIT_REASON_GDTR_IDTR: 386 return "gdtridtr"; 387 case EXIT_REASON_LDTR_TR: 388 return "ldtrtr"; 389 case EXIT_REASON_EPT_FAULT: 390 return "eptfault"; 391 case EXIT_REASON_EPT_MISCONFIG: 392 return "eptmisconfig"; 393 case EXIT_REASON_INVEPT: 394 return "invept"; 395 case EXIT_REASON_RDTSCP: 396 return "rdtscp"; 397 case EXIT_REASON_VMX_PREEMPT: 398 return "vmxpreempt"; 399 case EXIT_REASON_INVVPID: 400 return "invvpid"; 401 case EXIT_REASON_WBINVD: 402 return "wbinvd"; 403 case EXIT_REASON_XSETBV: 404 return "xsetbv"; 405 case EXIT_REASON_APIC_WRITE: 406 return "apic-write"; 407 default: 408 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 409 return (reasonbuf); 410 } 411} 412#endif /* KTR */ 413 414static int 415vmx_allow_x2apic_msrs(struct vmx *vmx) 416{ 417 int i, error; 418 419 error = 0; 420 421 /* 422 * Allow readonly access to the following x2APIC MSRs from the guest. 
	 */
	error += guest_msr_ro(vmx, MSR_APIC_ID);
	error += guest_msr_ro(vmx, MSR_APIC_VERSION);
	error += guest_msr_ro(vmx, MSR_APIC_LDR);
	error += guest_msr_ro(vmx, MSR_APIC_SVR);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);

	for (i = 0; i < 8; i++)
		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);

	error += guest_msr_ro(vmx, MSR_APIC_ESR);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
	error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
	error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
	error += guest_msr_ro(vmx, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	error += guest_msr_rw(vmx, MSR_APIC_TPR);
	error += guest_msr_rw(vmx, MSR_APIC_EOI);
	error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);

	return (error);
}

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
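	 *
	 * As an illustrative sketch (VM_MAXCPU == 16 is only an assumption
	 * here; the real value depends on the build): the new_unrhdr() call
	 * below then manages VPIDs [17, 0xffff], vpid_alloc() normally hands
	 * each vcpu one of those, and it only falls back to the shared
	 * [1, VM_MAXCPU] range when alloc_unr() runs out.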
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures. This prevents potential retention of
		 * cached information in the TLB between distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec >= 0)
		lapic_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	if (nmi_flush_l1d_sw == 1)
		nmi_flush_l1d_sw = 0;

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;
	uint64_t feature_control;

	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		wrmsr(MSR_IA32_FEATURE_CONTROL,
		    feature_control | IA32_FEATURE_CONTROL_VMX_EN |
		    IA32_FEATURE_CONTROL_LOCK);
	}

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}

static int
vmx_init(int ipinum)
{
	int error;
	uint64_t basic, fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
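	 *
	 * (If the MSR is not locked at all, the check below still passes;
	 * in that case vmx_enable(), run on every CPU via smp_rendezvous()
	 * at the end of vmx_init(), programs and locks the VMX enable bit
	 * itself.)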
645 */ 646 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 647 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 && 648 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 649 printf("vmx_init: VMX operation disabled by BIOS\n"); 650 return (ENXIO); 651 } 652 653 /* 654 * Verify capabilities MSR_VMX_BASIC: 655 * - bit 54 indicates support for INS/OUTS decoding 656 */ 657 basic = rdmsr(MSR_VMX_BASIC); 658 if ((basic & (1UL << 54)) == 0) { 659 printf("vmx_init: processor does not support desired basic " 660 "capabilities\n"); 661 return (EINVAL); 662 } 663 664 /* Check support for primary processor-based VM-execution controls */ 665 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 666 MSR_VMX_TRUE_PROCBASED_CTLS, 667 PROCBASED_CTLS_ONE_SETTING, 668 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 669 if (error) { 670 printf("vmx_init: processor does not support desired primary " 671 "processor-based controls\n"); 672 return (error); 673 } 674 675 /* Clear the processor-based ctl bits that are set on demand */ 676 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 677 678 /* Check support for secondary processor-based VM-execution controls */ 679 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 680 MSR_VMX_PROCBASED_CTLS2, 681 PROCBASED_CTLS2_ONE_SETTING, 682 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 683 if (error) { 684 printf("vmx_init: processor does not support desired secondary " 685 "processor-based controls\n"); 686 return (error); 687 } 688 689 /* Check support for VPID */ 690 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 691 PROCBASED2_ENABLE_VPID, 0, &tmp); 692 if (error == 0) 693 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 694 695 /* Check support for pin-based VM-execution controls */ 696 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 697 MSR_VMX_TRUE_PINBASED_CTLS, 698 PINBASED_CTLS_ONE_SETTING, 699 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 700 if (error) { 701 printf("vmx_init: processor does not support desired " 702 "pin-based controls\n"); 703 return (error); 704 } 705 706 /* Check support for VM-exit controls */ 707 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 708 VM_EXIT_CTLS_ONE_SETTING, 709 VM_EXIT_CTLS_ZERO_SETTING, 710 &exit_ctls); 711 if (error) { 712 printf("vmx_init: processor does not support desired " 713 "exit controls\n"); 714 return (error); 715 } 716 717 /* Check support for VM-entry controls */ 718 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 719 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, 720 &entry_ctls); 721 if (error) { 722 printf("vmx_init: processor does not support desired " 723 "entry controls\n"); 724 return (error); 725 } 726 727 /* 728 * Check support for optional features by testing them 729 * as individual bits 730 */ 731 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 732 MSR_VMX_TRUE_PROCBASED_CTLS, 733 PROCBASED_HLT_EXITING, 0, 734 &tmp) == 0); 735 736 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 737 MSR_VMX_PROCBASED_CTLS, 738 PROCBASED_MTF, 0, 739 &tmp) == 0); 740 741 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 742 MSR_VMX_TRUE_PROCBASED_CTLS, 743 PROCBASED_PAUSE_EXITING, 0, 744 &tmp) == 0); 745 746 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 747 MSR_VMX_PROCBASED_CTLS2, 748 PROCBASED2_UNRESTRICTED_GUEST, 0, 749 &tmp) == 0); 750 751 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 752 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 753 &tmp) == 0); 754 755 /* 756 * Check support 
for TPR shadow. 757 */ 758 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 759 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, 760 &tmp); 761 if (error == 0) { 762 tpr_shadowing = 1; 763 TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing", 764 &tpr_shadowing); 765 } 766 767 if (tpr_shadowing) { 768 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 769 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING; 770 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING; 771 } 772 773 /* 774 * Check support for virtual interrupt delivery. 775 */ 776 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 777 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 778 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 779 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 780 781 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 782 procbased2_vid_bits, 0, &tmp); 783 if (error == 0 && tpr_shadowing) { 784 virtual_interrupt_delivery = 1; 785 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", 786 &virtual_interrupt_delivery); 787 } 788 789 if (virtual_interrupt_delivery) { 790 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 791 procbased_ctls2 |= procbased2_vid_bits; 792 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; 793 794 /* 795 * Check for Posted Interrupts only if Virtual Interrupt 796 * Delivery is enabled. 797 */ 798 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 799 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, 800 &tmp); 801 if (error == 0) { 802 pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) : 803 &IDTVEC(justreturn)); 804 if (pirvec < 0) { 805 if (bootverbose) { 806 printf("vmx_init: unable to allocate " 807 "posted interrupt vector\n"); 808 } 809 } else { 810 posted_interrupts = 1; 811 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir", 812 &posted_interrupts); 813 } 814 } 815 } 816 817 if (posted_interrupts) 818 pinbased_ctls |= PINBASED_POSTED_INTERRUPT; 819 820 /* Initialize EPT */ 821 error = ept_init(ipinum); 822 if (error) { 823 printf("vmx_init: ept initialization failed (%d)\n", error); 824 return (error); 825 } 826 827 guest_l1d_flush = (cpu_ia32_arch_caps & 828 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0; 829 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush); 830 831 /* 832 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when 833 * available. Otherwise fall back to the software flush 834 * method which loads enough data from the kernel text to 835 * flush existing L1D content, both on VMX entry and on NMI 836 * return. 837 */ 838 if (guest_l1d_flush) { 839 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) { 840 guest_l1d_flush_sw = 1; 841 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw", 842 &guest_l1d_flush_sw); 843 } 844 if (guest_l1d_flush_sw) { 845 if (nmi_flush_l1d_sw <= 1) 846 nmi_flush_l1d_sw = 1; 847 } else { 848 msr_load_list[0].index = MSR_IA32_FLUSH_CMD; 849 msr_load_list[0].val = IA32_FLUSH_CMD_L1D; 850 } 851 } 852 853 /* 854 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 855 */ 856 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 857 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 858 cr0_ones_mask = fixed0 & fixed1; 859 cr0_zeros_mask = ~fixed0 & ~fixed1; 860 861 /* 862 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation 863 * if unrestricted guest execution is allowed. 864 */ 865 if (cap_unrestricted_guest) 866 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 867 868 /* 869 * Do not allow the guest to set CR0_NW or CR0_CD. 
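	 *
	 * A minimal sketch of how these masks are then consumed (this is
	 * what vmx_fix_cr0()/vmx_fix_cr4() above do):
	 *
	 *	effective_cr0 = (guest_cr0 | cr0_ones_mask) & ~cr0_zeros_mask;
	 *
	 * i.e. bits the CPU requires to be 1 in VMX operation are forced on,
	 * and bits that must (or are forced to) be 0 are masked off before
	 * the value is loaded into the VMCS.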
870 */ 871 cr0_zeros_mask |= (CR0_NW | CR0_CD); 872 873 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 874 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 875 cr4_ones_mask = fixed0 & fixed1; 876 cr4_zeros_mask = ~fixed0 & ~fixed1; 877 878 vpid_init(); 879 880 vmx_msr_init(); 881 882 /* enable VMX operation */ 883 smp_rendezvous(NULL, vmx_enable, NULL, NULL); 884 885 vmx_initialized = 1; 886 887 return (0); 888} 889 890static void 891vmx_trigger_hostintr(int vector) 892{ 893 uintptr_t func; 894 struct gate_descriptor *gd; 895 896 gd = &idt[vector]; 897 898 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 899 "invalid vector %d", vector)); 900 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 901 vector)); 902 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 903 "has invalid type %d", vector, gd->gd_type)); 904 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 905 "has invalid dpl %d", vector, gd->gd_dpl)); 906 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 907 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 908 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 909 "IST %d", vector, gd->gd_ist)); 910 911 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 912 vmx_call_isr(func); 913} 914 915static int 916vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) 917{ 918 int error, mask_ident, shadow_ident; 919 uint64_t mask_value; 920 921 if (which != 0 && which != 4) 922 panic("vmx_setup_cr_shadow: unknown cr%d", which); 923 924 if (which == 0) { 925 mask_ident = VMCS_CR0_MASK; 926 mask_value = cr0_ones_mask | cr0_zeros_mask; 927 shadow_ident = VMCS_CR0_SHADOW; 928 } else { 929 mask_ident = VMCS_CR4_MASK; 930 mask_value = cr4_ones_mask | cr4_zeros_mask; 931 shadow_ident = VMCS_CR4_SHADOW; 932 } 933 934 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); 935 if (error) 936 return (error); 937 938 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); 939 if (error) 940 return (error); 941 942 return (0); 943} 944#define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) 945#define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) 946 947static void * 948vmx_vminit(struct vm *vm, pmap_t pmap) 949{ 950 uint16_t vpid[VM_MAXCPU]; 951 int i, error; 952 struct vmx *vmx; 953 struct vmcs *vmcs; 954 uint32_t exc_bitmap; 955 uint16_t maxcpus; 956 957 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 958 if ((uintptr_t)vmx & PAGE_MASK) { 959 panic("malloc of struct vmx not aligned on %d byte boundary", 960 PAGE_SIZE); 961 } 962 vmx->vm = vm; 963 964 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4)); 965 966 /* 967 * Clean up EPTP-tagged guest physical and combined mappings 968 * 969 * VMX transitions are not required to invalidate any guest physical 970 * mappings. So, it may be possible for stale guest physical mappings 971 * to be present in the processor TLBs. 972 * 973 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 974 */ 975 ept_invalidate_mappings(vmx->eptp); 976 977 msr_bitmap_initialize(vmx->msr_bitmap); 978 979 /* 980 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 981 * The guest FSBASE and GSBASE are saved and restored during 982 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are 983 * always restored from the vmcs host state area on vm-exit. 
984 * 985 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in 986 * how they are saved/restored so can be directly accessed by the 987 * guest. 988 * 989 * MSR_EFER is saved and restored in the guest VMCS area on a 990 * VM exit and entry respectively. It is also restored from the 991 * host VMCS area on a VM exit. 992 * 993 * The TSC MSR is exposed read-only. Writes are disallowed as 994 * that will impact the host TSC. If the guest does a write 995 * the "use TSC offsetting" execution control is enabled and the 996 * difference between the host TSC and the guest TSC is written 997 * into the TSC offset in the VMCS. 998 */ 999 if (guest_msr_rw(vmx, MSR_GSBASE) || 1000 guest_msr_rw(vmx, MSR_FSBASE) || 1001 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 1002 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 1003 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 1004 guest_msr_rw(vmx, MSR_EFER) || 1005 guest_msr_ro(vmx, MSR_TSC)) 1006 panic("vmx_vminit: error setting guest msr access"); 1007 1008 vpid_alloc(vpid, VM_MAXCPU); 1009 1010 if (virtual_interrupt_delivery) { 1011 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 1012 APIC_ACCESS_ADDRESS); 1013 /* XXX this should really return an error to the caller */ 1014 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 1015 } 1016 1017 maxcpus = vm_get_maxcpus(vm); 1018 for (i = 0; i < maxcpus; i++) { 1019 vmcs = &vmx->vmcs[i]; 1020 vmcs->identifier = vmx_revision(); 1021 error = vmclear(vmcs); 1022 if (error != 0) { 1023 panic("vmx_vminit: vmclear error %d on vcpu %d\n", 1024 error, i); 1025 } 1026 1027 vmx_msr_guest_init(vmx, i); 1028 1029 error = vmcs_init(vmcs); 1030 KASSERT(error == 0, ("vmcs_init error %d", error)); 1031 1032 VMPTRLD(vmcs); 1033 error = 0; 1034 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]); 1035 error += vmwrite(VMCS_EPTP, vmx->eptp); 1036 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls); 1037 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls); 1038 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2); 1039 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); 1040 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); 1041 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); 1042 error += vmwrite(VMCS_VPID, vpid[i]); 1043 1044 if (guest_l1d_flush && !guest_l1d_flush_sw) { 1045 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract( 1046 (vm_offset_t)&msr_load_list[0])); 1047 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, 1048 nitems(msr_load_list)); 1049 vmcs_write(VMCS_EXIT_MSR_STORE, 0); 1050 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); 1051 } 1052 1053 /* exception bitmap */ 1054 if (vcpu_trace_exceptions(vm, i)) 1055 exc_bitmap = 0xffffffff; 1056 else 1057 exc_bitmap = 1 << IDT_MC; 1058 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); 1059 1060 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1; 1061 error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); 1062 1063 if (tpr_shadowing) { 1064 error += vmwrite(VMCS_VIRTUAL_APIC, 1065 vtophys(&vmx->apic_page[i])); 1066 } 1067 1068 if (virtual_interrupt_delivery) { 1069 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 1070 error += vmwrite(VMCS_EOI_EXIT0, 0); 1071 error += vmwrite(VMCS_EOI_EXIT1, 0); 1072 error += vmwrite(VMCS_EOI_EXIT2, 0); 1073 error += vmwrite(VMCS_EOI_EXIT3, 0); 1074 } 1075 if (posted_interrupts) { 1076 error += vmwrite(VMCS_PIR_VECTOR, pirvec); 1077 error += vmwrite(VMCS_PIR_DESC, 1078 vtophys(&vmx->pir_desc[i])); 1079 } 1080 VMCLEAR(vmcs); 1081 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs")); 1082 1083 vmx->cap[i].set = 0; 
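		/*
		 * Cache the global control words per vcpu; later code such as
		 * vmx_set_int_window_exiting() and vmx_set_tsc_offset() toggles
		 * individual bits in vmx->cap[i].proc_ctls for one vcpu without
		 * disturbing the others.
		 */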
1084 vmx->cap[i].proc_ctls = procbased_ctls; 1085 vmx->cap[i].proc_ctls2 = procbased_ctls2; 1086 1087 vmx->state[i].nextrip = ~0; 1088 vmx->state[i].lastcpu = NOCPU; 1089 vmx->state[i].vpid = vpid[i]; 1090 1091 /* 1092 * Set up the CR0/4 shadows, and init the read shadow 1093 * to the power-on register value from the Intel Sys Arch. 1094 * CR0 - 0x60000010 1095 * CR4 - 0 1096 */ 1097 error = vmx_setup_cr0_shadow(vmcs, 0x60000010); 1098 if (error != 0) 1099 panic("vmx_setup_cr0_shadow %d", error); 1100 1101 error = vmx_setup_cr4_shadow(vmcs, 0); 1102 if (error != 0) 1103 panic("vmx_setup_cr4_shadow %d", error); 1104 1105 vmx->ctx[i].pmap = pmap; 1106 } 1107 1108 return (vmx); 1109} 1110 1111static int 1112vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 1113{ 1114 int handled, func; 1115 1116 func = vmxctx->guest_rax; 1117 1118 handled = x86_emulate_cpuid(vm, vcpu, 1119 (uint32_t*)(&vmxctx->guest_rax), 1120 (uint32_t*)(&vmxctx->guest_rbx), 1121 (uint32_t*)(&vmxctx->guest_rcx), 1122 (uint32_t*)(&vmxctx->guest_rdx)); 1123 return (handled); 1124} 1125 1126static __inline void 1127vmx_run_trace(struct vmx *vmx, int vcpu) 1128{ 1129#ifdef KTR 1130 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip()); 1131#endif 1132} 1133 1134static __inline void 1135vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason, 1136 int handled) 1137{ 1138#ifdef KTR 1139 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx", 1140 handled ? "handled" : "unhandled", 1141 exit_reason_to_str(exit_reason), rip); 1142#endif 1143} 1144 1145static __inline void 1146vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 1147{ 1148#ifdef KTR 1149 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 1150#endif 1151} 1152 1153static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1154static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); 1155 1156/* 1157 * Invalidate guest mappings identified by its vpid from the TLB. 1158 */ 1159static __inline void 1160vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running) 1161{ 1162 struct vmxstate *vmxstate; 1163 struct invvpid_desc invvpid_desc; 1164 1165 vmxstate = &vmx->state[vcpu]; 1166 if (vmxstate->vpid == 0) 1167 return; 1168 1169 if (!running) { 1170 /* 1171 * Set the 'lastcpu' to an invalid host cpu. 1172 * 1173 * This will invalidate TLB entries tagged with the vcpu's 1174 * vpid the next time it runs via vmx_set_pcpu_defaults(). 1175 */ 1176 vmxstate->lastcpu = NOCPU; 1177 return; 1178 } 1179 1180 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " 1181 "critical section", __func__, vcpu)); 1182 1183 /* 1184 * Invalidate all mappings tagged with 'vpid' 1185 * 1186 * We do this because this vcpu was executing on a different host 1187 * cpu when it last ran. We do not track whether it invalidated 1188 * mappings associated with its 'vpid' during that run. So we must 1189 * assume that the mappings associated with 'vpid' on 'curcpu' are 1190 * stale and invalidate them. 1191 * 1192 * Note that we incur this penalty only when the scheduler chooses to 1193 * move the thread associated with this vcpu between host cpus. 1194 * 1195 * Note also that this will invalidate mappings tagged with 'vpid' 1196 * for "all" EP4TAs. 
1197 */ 1198 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1199 invvpid_desc._res1 = 0; 1200 invvpid_desc._res2 = 0; 1201 invvpid_desc.vpid = vmxstate->vpid; 1202 invvpid_desc.linear_addr = 0; 1203 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1204 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1); 1205 } else { 1206 /* 1207 * The invvpid can be skipped if an invept is going to 1208 * be performed before entering the guest. The invept 1209 * will invalidate combined mappings tagged with 1210 * 'vmx->eptp' for all vpids. 1211 */ 1212 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1213 } 1214} 1215 1216static void 1217vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 1218{ 1219 struct vmxstate *vmxstate; 1220 1221 vmxstate = &vmx->state[vcpu]; 1222 if (vmxstate->lastcpu == curcpu) 1223 return; 1224 1225 vmxstate->lastcpu = curcpu; 1226 1227 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 1228 1229 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1230 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1231 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1232 vmx_invvpid(vmx, vcpu, pmap, 1); 1233} 1234 1235/* 1236 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1237 */ 1238CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1239 1240static void __inline 1241vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1242{ 1243 1244 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1245 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1246 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1247 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1248 } 1249} 1250 1251static void __inline 1252vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1253{ 1254 1255 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1256 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls)); 1257 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1258 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1259 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1260} 1261 1262static void __inline 1263vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1264{ 1265 1266 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1267 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1268 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1269 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1270 } 1271} 1272 1273static void __inline 1274vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1275{ 1276 1277 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1278 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls)); 1279 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1280 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1281 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1282} 1283 1284int 1285vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset) 1286{ 1287 int error; 1288 1289 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) { 1290 vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET; 1291 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1292 VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting"); 1293 } 1294 1295 error = vmwrite(VMCS_TSC_OFFSET, offset); 1296 1297 return (error); 1298} 1299 1300#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1301 
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1302#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1303 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1304 1305static void 1306vmx_inject_nmi(struct vmx *vmx, int vcpu) 1307{ 1308 uint32_t gi, info; 1309 1310 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1311 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1312 "interruptibility-state %#x", gi)); 1313 1314 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1315 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1316 "VM-entry interruption information %#x", info)); 1317 1318 /* 1319 * Inject the virtual NMI. The vector must be the NMI IDT entry 1320 * or the VMCS entry check will fail. 1321 */ 1322 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1323 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1324 1325 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1326 1327 /* Clear the request */ 1328 vm_nmi_clear(vmx->vm, vcpu); 1329} 1330 1331static void 1332vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic, 1333 uint64_t guestrip) 1334{ 1335 int vector, need_nmi_exiting, extint_pending; 1336 uint64_t rflags, entryinfo; 1337 uint32_t gi, info; 1338 1339 if (vmx->state[vcpu].nextrip != guestrip) { 1340 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1341 if (gi & HWINTR_BLOCKING) { 1342 VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking " 1343 "cleared due to rip change: %#lx/%#lx", 1344 vmx->state[vcpu].nextrip, guestrip); 1345 gi &= ~HWINTR_BLOCKING; 1346 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1347 } 1348 } 1349 1350 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) { 1351 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry " 1352 "intinfo is not valid: %#lx", __func__, entryinfo)); 1353 1354 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1355 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1356 "pending exception: %#lx/%#x", __func__, entryinfo, info)); 1357 1358 info = entryinfo; 1359 vector = info & 0xff; 1360 if (vector == IDT_BP || vector == IDT_OF) { 1361 /* 1362 * VT-x requires #BP and #OF to be injected as software 1363 * exceptions. 1364 */ 1365 info &= ~VMCS_INTR_T_MASK; 1366 info |= VMCS_INTR_T_SWEXCEPTION; 1367 } 1368 1369 if (info & VMCS_INTR_DEL_ERRCODE) 1370 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32); 1371 1372 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1373 } 1374 1375 if (vm_nmi_pending(vmx->vm, vcpu)) { 1376 /* 1377 * If there are no conditions blocking NMI injection then 1378 * inject it directly here otherwise enable "NMI window 1379 * exiting" to inject it as soon as we can. 1380 * 1381 * We also check for STI_BLOCKING because some implementations 1382 * don't allow NMI injection in this case. If we are running 1383 * on a processor that doesn't have this restriction it will 1384 * immediately exit and the NMI will be injected in the 1385 * "NMI window exiting" handler. 
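		 *
		 * In short: the NMI is injected directly below only when
		 * neither NMI/MOV-SS nor STI blocking is in effect and no
		 * other event is already queued in the VM-entry
		 * interruption-information field; otherwise an NMI-window
		 * VM-exit is requested and the injection is retried from
		 * that exit handler.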
1386 */ 1387 need_nmi_exiting = 1; 1388 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1389 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1390 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1391 if ((info & VMCS_INTR_VALID) == 0) { 1392 vmx_inject_nmi(vmx, vcpu); 1393 need_nmi_exiting = 0; 1394 } else { 1395 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI " 1396 "due to VM-entry intr info %#x", info); 1397 } 1398 } else { 1399 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to " 1400 "Guest Interruptibility-state %#x", gi); 1401 } 1402 1403 if (need_nmi_exiting) 1404 vmx_set_nmi_window_exiting(vmx, vcpu); 1405 } 1406 1407 extint_pending = vm_extint_pending(vmx->vm, vcpu); 1408 1409 if (!extint_pending && virtual_interrupt_delivery) { 1410 vmx_inject_pir(vlapic); 1411 return; 1412 } 1413 1414 /* 1415 * If interrupt-window exiting is already in effect then don't bother 1416 * checking for pending interrupts. This is just an optimization and 1417 * not needed for correctness. 1418 */ 1419 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 1420 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to " 1421 "pending int_window_exiting"); 1422 return; 1423 } 1424 1425 if (!extint_pending) { 1426 /* Ask the local apic for a vector to inject */ 1427 if (!vlapic_pending_intr(vlapic, &vector)) 1428 return; 1429 1430 /* 1431 * From the Intel SDM, Volume 3, Section "Maskable 1432 * Hardware Interrupts": 1433 * - maskable interrupt vectors [16,255] can be delivered 1434 * through the local APIC. 1435 */ 1436 KASSERT(vector >= 16 && vector <= 255, 1437 ("invalid vector %d from local APIC", vector)); 1438 } else { 1439 /* Ask the legacy pic for a vector to inject */ 1440 vatpic_pending_intr(vmx->vm, &vector); 1441 1442 /* 1443 * From the Intel SDM, Volume 3, Section "Maskable 1444 * Hardware Interrupts": 1445 * - maskable interrupt vectors [0,255] can be delivered 1446 * through the INTR pin. 1447 */ 1448 KASSERT(vector >= 0 && vector <= 255, 1449 ("invalid vector %d from INTR", vector)); 1450 } 1451 1452 /* Check RFLAGS.IF and the interruptibility state of the guest */ 1453 rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1454 if ((rflags & PSL_I) == 0) { 1455 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1456 "rflags %#lx", vector, rflags); 1457 goto cantinject; 1458 } 1459 1460 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1461 if (gi & HWINTR_BLOCKING) { 1462 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1463 "Guest Interruptibility-state %#x", vector, gi); 1464 goto cantinject; 1465 } 1466 1467 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1468 if (info & VMCS_INTR_VALID) { 1469 /* 1470 * This is expected and could happen for multiple reasons: 1471 * - A vectoring VM-entry was aborted due to astpending 1472 * - A VM-exit happened during event injection. 1473 * - An exception was injected above. 1474 * - An NMI was injected above or after "NMI window exiting" 1475 */ 1476 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1477 "VM-entry intr info %#x", vector, info); 1478 goto cantinject; 1479 } 1480 1481 /* Inject the interrupt */ 1482 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1483 info |= vector; 1484 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1485 1486 if (!extint_pending) { 1487 /* Update the Local APIC ISR */ 1488 vlapic_intr_accepted(vlapic, vector); 1489 } else { 1490 vm_extint_clear(vmx->vm, vcpu); 1491 vatpic_intr_accepted(vmx->vm, vector); 1492 1493 /* 1494 * After we accepted the current ExtINT the PIC may 1495 * have posted another one. 
If that is the case, set 1496 * the Interrupt Window Exiting execution control so 1497 * we can inject that one too. 1498 * 1499 * Also, interrupt window exiting allows us to inject any 1500 * pending APIC vector that was preempted by the ExtINT 1501 * as soon as possible. This applies both for the software 1502 * emulated vlapic and the hardware assisted virtual APIC. 1503 */ 1504 vmx_set_int_window_exiting(vmx, vcpu); 1505 } 1506 1507 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1508 1509 return; 1510 1511cantinject: 1512 /* 1513 * Set the Interrupt Window Exiting execution control so we can inject 1514 * the interrupt as soon as blocking condition goes away. 1515 */ 1516 vmx_set_int_window_exiting(vmx, vcpu); 1517} 1518 1519/* 1520 * If the Virtual NMIs execution control is '1' then the logical processor 1521 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1522 * the VMCS. An IRET instruction in VMX non-root operation will remove any 1523 * virtual-NMI blocking. 1524 * 1525 * This unblocking occurs even if the IRET causes a fault. In this case the 1526 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1527 */ 1528static void 1529vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1530{ 1531 uint32_t gi; 1532 1533 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1534 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1535 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1536 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1537} 1538 1539static void 1540vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1541{ 1542 uint32_t gi; 1543 1544 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1545 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1546 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1547 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1548} 1549 1550static void 1551vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid) 1552{ 1553 uint32_t gi; 1554 1555 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1556 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, 1557 ("NMI blocking is not in effect %#x", gi)); 1558} 1559 1560static int 1561vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1562{ 1563 struct vmxctx *vmxctx; 1564 uint64_t xcrval; 1565 const struct xsave_limits *limits; 1566 1567 vmxctx = &vmx->ctx[vcpu]; 1568 limits = vmm_get_xsave_limits(); 1569 1570 /* 1571 * Note that the processor raises a GP# fault on its own if 1572 * xsetbv is executed for CPL != 0, so we do not have to 1573 * emulate that fault here. 1574 */ 1575 1576 /* Only xcr0 is supported. */ 1577 if (vmxctx->guest_rcx != 0) { 1578 vm_inject_gp(vmx->vm, vcpu); 1579 return (HANDLED); 1580 } 1581 1582 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1583 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1584 vm_inject_ud(vmx->vm, vcpu); 1585 return (HANDLED); 1586 } 1587 1588 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1589 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1590 vm_inject_gp(vmx->vm, vcpu); 1591 return (HANDLED); 1592 } 1593 1594 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1595 vm_inject_gp(vmx->vm, vcpu); 1596 return (HANDLED); 1597 } 1598 1599 /* AVX (YMM_Hi128) requires SSE. */ 1600 if (xcrval & XFEATURE_ENABLED_AVX && 1601 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1602 vm_inject_gp(vmx->vm, vcpu); 1603 return (HANDLED); 1604 } 1605 1606 /* 1607 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1608 * ZMM_Hi256, and Hi16_ZMM. 
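	 *
	 * Put differently: if any of the three AVX512 state bits is set in
	 * the requested xcr0 then all three, plus the AVX bit, must be set,
	 * otherwise the check below injects a #GP.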
1609 */ 1610 if (xcrval & XFEATURE_AVX512 && 1611 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1612 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1613 vm_inject_gp(vmx->vm, vcpu); 1614 return (HANDLED); 1615 } 1616 1617 /* 1618 * Intel MPX requires both bound register state flags to be 1619 * set. 1620 */ 1621 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1622 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1623 vm_inject_gp(vmx->vm, vcpu); 1624 return (HANDLED); 1625 } 1626 1627 /* 1628 * This runs "inside" vmrun() with the guest's FPU state, so 1629 * modifying xcr0 directly modifies the guest's xcr0, not the 1630 * host's. 1631 */ 1632 load_xcr(0, xcrval); 1633 return (HANDLED); 1634} 1635 1636static uint64_t 1637vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident) 1638{ 1639 const struct vmxctx *vmxctx; 1640 1641 vmxctx = &vmx->ctx[vcpu]; 1642 1643 switch (ident) { 1644 case 0: 1645 return (vmxctx->guest_rax); 1646 case 1: 1647 return (vmxctx->guest_rcx); 1648 case 2: 1649 return (vmxctx->guest_rdx); 1650 case 3: 1651 return (vmxctx->guest_rbx); 1652 case 4: 1653 return (vmcs_read(VMCS_GUEST_RSP)); 1654 case 5: 1655 return (vmxctx->guest_rbp); 1656 case 6: 1657 return (vmxctx->guest_rsi); 1658 case 7: 1659 return (vmxctx->guest_rdi); 1660 case 8: 1661 return (vmxctx->guest_r8); 1662 case 9: 1663 return (vmxctx->guest_r9); 1664 case 10: 1665 return (vmxctx->guest_r10); 1666 case 11: 1667 return (vmxctx->guest_r11); 1668 case 12: 1669 return (vmxctx->guest_r12); 1670 case 13: 1671 return (vmxctx->guest_r13); 1672 case 14: 1673 return (vmxctx->guest_r14); 1674 case 15: 1675 return (vmxctx->guest_r15); 1676 default: 1677 panic("invalid vmx register %d", ident); 1678 } 1679} 1680 1681static void 1682vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval) 1683{ 1684 struct vmxctx *vmxctx; 1685 1686 vmxctx = &vmx->ctx[vcpu]; 1687 1688 switch (ident) { 1689 case 0: 1690 vmxctx->guest_rax = regval; 1691 break; 1692 case 1: 1693 vmxctx->guest_rcx = regval; 1694 break; 1695 case 2: 1696 vmxctx->guest_rdx = regval; 1697 break; 1698 case 3: 1699 vmxctx->guest_rbx = regval; 1700 break; 1701 case 4: 1702 vmcs_write(VMCS_GUEST_RSP, regval); 1703 break; 1704 case 5: 1705 vmxctx->guest_rbp = regval; 1706 break; 1707 case 6: 1708 vmxctx->guest_rsi = regval; 1709 break; 1710 case 7: 1711 vmxctx->guest_rdi = regval; 1712 break; 1713 case 8: 1714 vmxctx->guest_r8 = regval; 1715 break; 1716 case 9: 1717 vmxctx->guest_r9 = regval; 1718 break; 1719 case 10: 1720 vmxctx->guest_r10 = regval; 1721 break; 1722 case 11: 1723 vmxctx->guest_r11 = regval; 1724 break; 1725 case 12: 1726 vmxctx->guest_r12 = regval; 1727 break; 1728 case 13: 1729 vmxctx->guest_r13 = regval; 1730 break; 1731 case 14: 1732 vmxctx->guest_r14 = regval; 1733 break; 1734 case 15: 1735 vmxctx->guest_r15 = regval; 1736 break; 1737 default: 1738 panic("invalid vmx register %d", ident); 1739 } 1740} 1741 1742static int 1743vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1744{ 1745 uint64_t crval, regval; 1746 1747 /* We only handle mov to %cr0 at this time */ 1748 if ((exitqual & 0xf0) != 0x00) 1749 return (UNHANDLED); 1750 1751 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1752 1753 vmcs_write(VMCS_CR0_SHADOW, regval); 1754 1755 crval = regval | cr0_ones_mask; 1756 crval &= ~cr0_zeros_mask; 1757 vmcs_write(VMCS_GUEST_CR0, crval); 1758 1759 if (regval & CR0_PG) { 1760 uint64_t efer, entry_ctls; 1761 1762 /* 1763 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1764 * the "IA-32e mode guest" bit 
in VM-entry control must be 1765 * equal. 1766 */ 1767 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1768 if (efer & EFER_LME) { 1769 efer |= EFER_LMA; 1770 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1771 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1772 entry_ctls |= VM_ENTRY_GUEST_LMA; 1773 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1774 } 1775 } 1776 1777 return (HANDLED); 1778} 1779 1780static int 1781vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1782{ 1783 uint64_t crval, regval; 1784 1785 /* We only handle mov to %cr4 at this time */ 1786 if ((exitqual & 0xf0) != 0x00) 1787 return (UNHANDLED); 1788 1789 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf); 1790 1791 vmcs_write(VMCS_CR4_SHADOW, regval); 1792 1793 crval = regval | cr4_ones_mask; 1794 crval &= ~cr4_zeros_mask; 1795 vmcs_write(VMCS_GUEST_CR4, crval); 1796 1797 return (HANDLED); 1798} 1799 1800static int 1801vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual) 1802{ 1803 struct vlapic *vlapic; 1804 uint64_t cr8; 1805 int regnum; 1806 1807 /* We only handle mov %cr8 to/from a register at this time. */ 1808 if ((exitqual & 0xe0) != 0x00) { 1809 return (UNHANDLED); 1810 } 1811 1812 vlapic = vm_lapic(vmx->vm, vcpu); 1813 regnum = (exitqual >> 8) & 0xf; 1814 if (exitqual & 0x10) { 1815 cr8 = vlapic_get_cr8(vlapic); 1816 vmx_set_guest_reg(vmx, vcpu, regnum, cr8); 1817 } else { 1818 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum); 1819 vlapic_set_cr8(vlapic, cr8); 1820 } 1821 1822 return (HANDLED); 1823} 1824 1825/* 1826 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1827 */ 1828static int 1829vmx_cpl(void) 1830{ 1831 uint32_t ssar; 1832 1833 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1834 return ((ssar >> 5) & 0x3); 1835} 1836 1837static enum vm_cpu_mode 1838vmx_cpu_mode(void) 1839{ 1840 uint32_t csar; 1841 1842 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1843 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1844 if (csar & 0x2000) 1845 return (CPU_MODE_64BIT); /* CS.L = 1 */ 1846 else 1847 return (CPU_MODE_COMPATIBILITY); 1848 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1849 return (CPU_MODE_PROTECTED); 1850 } else { 1851 return (CPU_MODE_REAL); 1852 } 1853} 1854 1855static enum vm_paging_mode 1856vmx_paging_mode(void) 1857{ 1858 1859 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1860 return (PAGING_MODE_FLAT); 1861 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE)) 1862 return (PAGING_MODE_32); 1863 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) 1864 return (PAGING_MODE_64); 1865 else 1866 return (PAGING_MODE_PAE); 1867} 1868 1869static uint64_t 1870inout_str_index(struct vmx *vmx, int vcpuid, int in) 1871{ 1872 uint64_t val; 1873 int error; 1874 enum vm_reg_name reg; 1875 1876 reg = in ? 
VM_REG_GUEST_RDI : VM_REG_GUEST_RSI; 1877 error = vmx_getreg(vmx, vcpuid, reg, &val); 1878 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error)); 1879 return (val); 1880} 1881 1882static uint64_t 1883inout_str_count(struct vmx *vmx, int vcpuid, int rep) 1884{ 1885 uint64_t val; 1886 int error; 1887 1888 if (rep) { 1889 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val); 1890 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error)); 1891 } else { 1892 val = 1; 1893 } 1894 return (val); 1895} 1896 1897static int 1898inout_str_addrsize(uint32_t inst_info) 1899{ 1900 uint32_t size; 1901 1902 size = (inst_info >> 7) & 0x7; 1903 switch (size) { 1904 case 0: 1905 return (2); /* 16 bit */ 1906 case 1: 1907 return (4); /* 32 bit */ 1908 case 2: 1909 return (8); /* 64 bit */ 1910 default: 1911 panic("%s: invalid size encoding %d", __func__, size); 1912 } 1913} 1914 1915static void 1916inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in, 1917 struct vm_inout_str *vis) 1918{ 1919 int error, s; 1920 1921 if (in) { 1922 vis->seg_name = VM_REG_GUEST_ES; 1923 } else { 1924 s = (inst_info >> 15) & 0x7; 1925 vis->seg_name = vm_segment_name(s); 1926 } 1927 1928 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc); 1929 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error)); 1930} 1931 1932static void 1933vmx_paging_info(struct vm_guest_paging *paging) 1934{ 1935 paging->cr3 = vmcs_guest_cr3(); 1936 paging->cpl = vmx_cpl(); 1937 paging->cpu_mode = vmx_cpu_mode(); 1938 paging->paging_mode = vmx_paging_mode(); 1939} 1940 1941static void 1942vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla) 1943{ 1944 struct vm_guest_paging *paging; 1945 uint32_t csar; 1946 1947 paging = &vmexit->u.inst_emul.paging; 1948 1949 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 1950 vmexit->inst_length = 0; 1951 vmexit->u.inst_emul.gpa = gpa; 1952 vmexit->u.inst_emul.gla = gla; 1953 vmx_paging_info(paging); 1954 switch (paging->cpu_mode) { 1955 case CPU_MODE_REAL: 1956 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1957 vmexit->u.inst_emul.cs_d = 0; 1958 break; 1959 case CPU_MODE_PROTECTED: 1960 case CPU_MODE_COMPATIBILITY: 1961 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 1962 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1963 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar); 1964 break; 1965 default: 1966 vmexit->u.inst_emul.cs_base = 0; 1967 vmexit->u.inst_emul.cs_d = 0; 1968 break; 1969 } 1970 vie_init(&vmexit->u.inst_emul.vie, NULL, 0); 1971} 1972 1973static int 1974ept_fault_type(uint64_t ept_qual) 1975{ 1976 int fault_type; 1977 1978 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 1979 fault_type = VM_PROT_WRITE; 1980 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 1981 fault_type = VM_PROT_EXECUTE; 1982 else 1983 fault_type= VM_PROT_READ; 1984 1985 return (fault_type); 1986} 1987 1988static bool 1989ept_emulation_fault(uint64_t ept_qual) 1990{ 1991 int read, write; 1992 1993 /* EPT fault on an instruction fetch doesn't make sense here */ 1994 if (ept_qual & EPT_VIOLATION_INST_FETCH) 1995 return (false); 1996 1997 /* EPT fault must be a read fault or a write fault */ 1998 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 1999 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 2000 if ((read | write) == 0) 2001 return (false); 2002 2003 /* 2004 * The EPT violation must have been caused by accessing a 2005 * guest-physical address that is a translation of a guest-linear 2006 * address. 
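	 *
	 * Faults that are not associated with a guest-linear address at all
	 * (e.g. those taken during event delivery) or that hit the guest's
	 * own paging-structure entries during a page-table walk are rejected
	 * here and are not candidates for MMIO emulation.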
2007 */ 2008 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 2009 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 2010 return (false); 2011 } 2012 2013 return (true); 2014} 2015 2016static __inline int 2017apic_access_virtualization(struct vmx *vmx, int vcpuid) 2018{ 2019 uint32_t proc_ctls2; 2020 2021 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2022 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); 2023} 2024 2025static __inline int 2026x2apic_virtualization(struct vmx *vmx, int vcpuid) 2027{ 2028 uint32_t proc_ctls2; 2029 2030 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 2031 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 2032} 2033 2034static int 2035vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic, 2036 uint64_t qual) 2037{ 2038 int error, handled, offset; 2039 uint32_t *apic_regs, vector; 2040 bool retu; 2041 2042 handled = HANDLED; 2043 offset = APIC_WRITE_OFFSET(qual); 2044 2045 if (!apic_access_virtualization(vmx, vcpuid)) { 2046 /* 2047 * In general there should not be any APIC write VM-exits 2048 * unless APIC-access virtualization is enabled. 2049 * 2050 * However self-IPI virtualization can legitimately trigger 2051 * an APIC-write VM-exit so treat it specially. 2052 */ 2053 if (x2apic_virtualization(vmx, vcpuid) && 2054 offset == APIC_OFFSET_SELF_IPI) { 2055 apic_regs = (uint32_t *)(vlapic->apic_page); 2056 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 2057 vlapic_self_ipi_handler(vlapic, vector); 2058 return (HANDLED); 2059 } else 2060 return (UNHANDLED); 2061 } 2062 2063 switch (offset) { 2064 case APIC_OFFSET_ID: 2065 vlapic_id_write_handler(vlapic); 2066 break; 2067 case APIC_OFFSET_LDR: 2068 vlapic_ldr_write_handler(vlapic); 2069 break; 2070 case APIC_OFFSET_DFR: 2071 vlapic_dfr_write_handler(vlapic); 2072 break; 2073 case APIC_OFFSET_SVR: 2074 vlapic_svr_write_handler(vlapic); 2075 break; 2076 case APIC_OFFSET_ESR: 2077 vlapic_esr_write_handler(vlapic); 2078 break; 2079 case APIC_OFFSET_ICR_LOW: 2080 retu = false; 2081 error = vlapic_icrlo_write_handler(vlapic, &retu); 2082 if (error != 0 || retu) 2083 handled = UNHANDLED; 2084 break; 2085 case APIC_OFFSET_CMCI_LVT: 2086 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 2087 vlapic_lvt_write_handler(vlapic, offset); 2088 break; 2089 case APIC_OFFSET_TIMER_ICR: 2090 vlapic_icrtmr_write_handler(vlapic); 2091 break; 2092 case APIC_OFFSET_TIMER_DCR: 2093 vlapic_dcr_write_handler(vlapic); 2094 break; 2095 default: 2096 handled = UNHANDLED; 2097 break; 2098 } 2099 return (handled); 2100} 2101 2102static bool 2103apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 2104{ 2105 2106 if (apic_access_virtualization(vmx, vcpuid) && 2107 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2108 return (true); 2109 else 2110 return (false); 2111} 2112 2113static int 2114vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2115{ 2116 uint64_t qual; 2117 int access_type, offset, allowed; 2118 2119 if (!apic_access_virtualization(vmx, vcpuid)) 2120 return (UNHANDLED); 2121 2122 qual = vmexit->u.vmx.exit_qualification; 2123 access_type = APIC_ACCESS_TYPE(qual); 2124 offset = APIC_ACCESS_OFFSET(qual); 2125 2126 allowed = 0; 2127 if (access_type == 0) { 2128 /* 2129 * Read data access to the following registers is expected. 
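 * Any such read is converted into an instruction-emulation exit
 * at the corresponding offset in the APIC page; accesses to any
 * other offset are left unhandled and bounce out to userland.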
2130 */ 2131 switch (offset) { 2132 case APIC_OFFSET_APR: 2133 case APIC_OFFSET_PPR: 2134 case APIC_OFFSET_RRR: 2135 case APIC_OFFSET_CMCI_LVT: 2136 case APIC_OFFSET_TIMER_CCR: 2137 allowed = 1; 2138 break; 2139 default: 2140 break; 2141 } 2142 } else if (access_type == 1) { 2143 /* 2144 * Write data access to the following registers is expected. 2145 */ 2146 switch (offset) { 2147 case APIC_OFFSET_VER: 2148 case APIC_OFFSET_APR: 2149 case APIC_OFFSET_PPR: 2150 case APIC_OFFSET_RRR: 2151 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2152 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2153 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2154 case APIC_OFFSET_CMCI_LVT: 2155 case APIC_OFFSET_TIMER_CCR: 2156 allowed = 1; 2157 break; 2158 default: 2159 break; 2160 } 2161 } 2162 2163 if (allowed) { 2164 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset, 2165 VIE_INVALID_GLA); 2166 } 2167 2168 /* 2169 * Regardless of whether the APIC-access is allowed this handler 2170 * always returns UNHANDLED: 2171 * - if the access is allowed then it is handled by emulating the 2172 * instruction that caused the VM-exit (outside the critical section) 2173 * - if the access is not allowed then it will be converted to an 2174 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2175 */ 2176 return (UNHANDLED); 2177} 2178 2179static enum task_switch_reason 2180vmx_task_switch_reason(uint64_t qual) 2181{ 2182 int reason; 2183 2184 reason = (qual >> 30) & 0x3; 2185 switch (reason) { 2186 case 0: 2187 return (TSR_CALL); 2188 case 1: 2189 return (TSR_IRET); 2190 case 2: 2191 return (TSR_JMP); 2192 case 3: 2193 return (TSR_IDT_GATE); 2194 default: 2195 panic("%s: invalid reason %d", __func__, reason); 2196 } 2197} 2198 2199static int 2200emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu) 2201{ 2202 int error; 2203 2204 if (lapic_msr(num)) 2205 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu); 2206 else 2207 error = vmx_wrmsr(vmx, vcpuid, num, val, retu); 2208 2209 return (error); 2210} 2211 2212static int 2213emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu) 2214{ 2215 struct vmxctx *vmxctx; 2216 uint64_t result; 2217 uint32_t eax, edx; 2218 int error; 2219 2220 if (lapic_msr(num)) 2221 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu); 2222 else 2223 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu); 2224 2225 if (error == 0) { 2226 eax = result; 2227 vmxctx = &vmx->ctx[vcpuid]; 2228 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax); 2229 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error)); 2230 2231 edx = result >> 32; 2232 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx); 2233 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error)); 2234 } 2235 2236 return (error); 2237} 2238 2239static int 2240vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2241{ 2242 int error, errcode, errcode_valid, handled, in; 2243 struct vmxctx *vmxctx; 2244 struct vlapic *vlapic; 2245 struct vm_inout_str *vis; 2246 struct vm_task_switch *ts; 2247 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 2248 uint32_t intr_type, intr_vec, reason; 2249 uint64_t exitintinfo, qual, gpa; 2250 bool retu; 2251 2252 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2253 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2254 2255 handled = UNHANDLED; 2256 vmxctx = &vmx->ctx[vcpu]; 2257 2258 qual = vmexit->u.vmx.exit_qualification; 2259 reason = vmexit->u.vmx.exit_reason; 2260 vmexit->exitcode = 
VM_EXITCODE_BOGUS; 2261 2262 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2263 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2264 2265 /* 2266 * VM-entry failures during or after loading guest state. 2267 * 2268 * These VM-exits are uncommon but must be handled specially 2269 * as most VM-exit fields are not populated as usual. 2270 */ 2271 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { 2272 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry"); 2273 __asm __volatile("int $18"); 2274 return (1); 2275 } 2276 2277 /* 2278 * VM exits that can be triggered during event delivery need to 2279 * be handled specially by re-injecting the event if the IDT 2280 * vectoring information field's valid bit is set. 2281 * 2282 * See "Information for VM Exits During Event Delivery" in Intel SDM 2283 * for details. 2284 */ 2285 idtvec_info = vmcs_idt_vectoring_info(); 2286 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2287 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2288 exitintinfo = idtvec_info; 2289 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2290 idtvec_err = vmcs_idt_vectoring_err(); 2291 exitintinfo |= (uint64_t)idtvec_err << 32; 2292 } 2293 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo); 2294 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2295 __func__, error)); 2296 2297 /* 2298 * If 'virtual NMIs' are being used and the VM-exit 2299 * happened while injecting an NMI during the previous 2300 * VM-entry, then clear "blocking by NMI" in the 2301 * Guest Interruptibility-State so the NMI can be 2302 * reinjected on the subsequent VM-entry. 2303 * 2304 * However, if the NMI was being delivered through a task 2305 * gate, then the new task must start execution with NMIs 2306 * blocked so don't clear NMI blocking in this case. 2307 */ 2308 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2309 if (intr_type == VMCS_INTR_T_NMI) { 2310 if (reason != EXIT_REASON_TASK_SWITCH) 2311 vmx_clear_nmi_blocking(vmx, vcpu); 2312 else 2313 vmx_assert_nmi_blocking(vmx, vcpu); 2314 } 2315 2316 /* 2317 * Update VM-entry instruction length if the event being 2318 * delivered was a software interrupt or software exception. 2319 */ 2320 if (intr_type == VMCS_INTR_T_SWINTR || 2321 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2322 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2323 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2324 } 2325 } 2326 2327 switch (reason) { 2328 case EXIT_REASON_TASK_SWITCH: 2329 ts = &vmexit->u.task_switch; 2330 ts->tsssel = qual & 0xffff; 2331 ts->reason = vmx_task_switch_reason(qual); 2332 ts->ext = 0; 2333 ts->errcode_valid = 0; 2334 vmx_paging_info(&ts->paging); 2335 /* 2336 * If the task switch was due to a CALL, JMP, IRET, software 2337 * interrupt (INT n) or software exception (INT3, INTO), 2338 * then the saved %rip references the instruction that caused 2339 * the task switch. The instruction length field in the VMCS 2340 * is valid in this case. 2341 * 2342 * In all other cases (e.g., NMI, hardware exception) the 2343 * saved %rip is one that would have been saved in the old TSS 2344 * had the task switch completed normally so the instruction 2345 * length field is not needed in this case and is explicitly 2346 * set to 0. 
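 * An IDT-gate task switch that was triggered by an external
 * event (hardware interrupt, NMI or hardware exception) falls
 * into the latter category; it is flagged below via 'ts->ext'
 * and its instruction length is cleared.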
2347 */ 2348 if (ts->reason == TSR_IDT_GATE) { 2349 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2350 ("invalid idtvec_info %#x for IDT task switch", 2351 idtvec_info)); 2352 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2353 if (intr_type != VMCS_INTR_T_SWINTR && 2354 intr_type != VMCS_INTR_T_SWEXCEPTION && 2355 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2356 /* Task switch triggered by external event */ 2357 ts->ext = 1; 2358 vmexit->inst_length = 0; 2359 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2360 ts->errcode_valid = 1; 2361 ts->errcode = vmcs_idt_vectoring_err(); 2362 } 2363 } 2364 } 2365 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2366 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2367 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, " 2368 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2369 ts->ext ? "external" : "internal", 2370 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2371 break; 2372 case EXIT_REASON_CR_ACCESS: 2373 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2374 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2375 switch (qual & 0xf) { 2376 case 0: 2377 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2378 break; 2379 case 4: 2380 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2381 break; 2382 case 8: 2383 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2384 break; 2385 } 2386 break; 2387 case EXIT_REASON_RDMSR: 2388 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1); 2389 retu = false; 2390 ecx = vmxctx->guest_rcx; 2391 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx); 2392 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx); 2393 error = emulate_rdmsr(vmx, vcpu, ecx, &retu); 2394 if (error) { 2395 vmexit->exitcode = VM_EXITCODE_RDMSR; 2396 vmexit->u.msr.code = ecx; 2397 } else if (!retu) { 2398 handled = HANDLED; 2399 } else { 2400 /* Return to userspace with a valid exitcode */ 2401 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2402 ("emulate_rdmsr retu with bogus exitcode")); 2403 } 2404 break; 2405 case EXIT_REASON_WRMSR: 2406 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1); 2407 retu = false; 2408 eax = vmxctx->guest_rax; 2409 ecx = vmxctx->guest_rcx; 2410 edx = vmxctx->guest_rdx; 2411 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx", 2412 ecx, (uint64_t)edx << 32 | eax); 2413 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx, 2414 (uint64_t)edx << 32 | eax); 2415 error = emulate_wrmsr(vmx, vcpu, ecx, 2416 (uint64_t)edx << 32 | eax, &retu); 2417 if (error) { 2418 vmexit->exitcode = VM_EXITCODE_WRMSR; 2419 vmexit->u.msr.code = ecx; 2420 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2421 } else if (!retu) { 2422 handled = HANDLED; 2423 } else { 2424 /* Return to userspace with a valid exitcode */ 2425 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2426 ("emulate_wrmsr retu with bogus exitcode")); 2427 } 2428 break; 2429 case EXIT_REASON_HLT: 2430 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2431 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2432 vmexit->exitcode = VM_EXITCODE_HLT; 2433 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2434 if (virtual_interrupt_delivery) 2435 vmexit->u.hlt.intr_status = 2436 vmcs_read(VMCS_GUEST_INTR_STATUS); 2437 else 2438 vmexit->u.hlt.intr_status = 0; 2439 break; 2440 case EXIT_REASON_MTF: 2441 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2442 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2443 vmexit->exitcode = VM_EXITCODE_MTRAP; 2444 vmexit->inst_length = 0; 2445 break; 2446 case EXIT_REASON_PAUSE: 2447 vmm_stat_incr(vmx->vm, 
vcpu, VMEXIT_PAUSE, 1); 2448 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2449 vmexit->exitcode = VM_EXITCODE_PAUSE; 2450 break; 2451 case EXIT_REASON_INTR_WINDOW: 2452 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2453 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2454 vmx_clear_int_window_exiting(vmx, vcpu); 2455 return (1); 2456 case EXIT_REASON_EXT_INTR: 2457 /* 2458 * External interrupts serve only to cause VM exits and allow 2459 * the host interrupt handler to run. 2460 * 2461 * If this external interrupt triggers a virtual interrupt 2462 * to a VM, then that state will be recorded by the 2463 * host interrupt handler in the VM's softc. We will inject 2464 * this virtual interrupt during the subsequent VM enter. 2465 */ 2466 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2467 SDT_PROBE4(vmm, vmx, exit, interrupt, 2468 vmx, vcpu, vmexit, intr_info); 2469 2470 /* 2471 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2472 * This appears to be a bug in VMware Fusion? 2473 */ 2474 if (!(intr_info & VMCS_INTR_VALID)) 2475 return (1); 2476 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2477 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2478 ("VM exit interruption info invalid: %#x", intr_info)); 2479 vmx_trigger_hostintr(intr_info & 0xff); 2480 2481 /* 2482 * This is special. We want to treat this as an 'handled' 2483 * VM-exit but not increment the instruction pointer. 2484 */ 2485 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2486 return (1); 2487 case EXIT_REASON_NMI_WINDOW: 2488 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2489 /* Exit to allow the pending virtual NMI to be injected */ 2490 if (vm_nmi_pending(vmx->vm, vcpu)) 2491 vmx_inject_nmi(vmx, vcpu); 2492 vmx_clear_nmi_window_exiting(vmx, vcpu); 2493 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2494 return (1); 2495 case EXIT_REASON_INOUT: 2496 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2497 vmexit->exitcode = VM_EXITCODE_INOUT; 2498 vmexit->u.inout.bytes = (qual & 0x7) + 1; 2499 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; 2500 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; 2501 vmexit->u.inout.rep = (qual & 0x20) ? 
1 : 0; 2502 vmexit->u.inout.port = (uint16_t)(qual >> 16); 2503 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); 2504 if (vmexit->u.inout.string) { 2505 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 2506 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 2507 vis = &vmexit->u.inout_str; 2508 vmx_paging_info(&vis->paging); 2509 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2510 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); 2511 vis->index = inout_str_index(vmx, vcpu, in); 2512 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep); 2513 vis->addrsize = inout_str_addrsize(inst_info); 2514 inout_str_seginfo(vmx, vcpu, inst_info, in, vis); 2515 } 2516 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2517 break; 2518 case EXIT_REASON_CPUID: 2519 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2520 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2521 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx); 2522 break; 2523 case EXIT_REASON_EXCEPTION: 2524 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2525 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2526 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2527 ("VM exit interruption info invalid: %#x", intr_info)); 2528 2529 intr_vec = intr_info & 0xff; 2530 intr_type = intr_info & VMCS_INTR_T_MASK; 2531 2532 /* 2533 * If Virtual NMIs control is 1 and the VM-exit is due to a 2534 * fault encountered during the execution of IRET then we must 2535 * restore the state of "virtual-NMI blocking" before resuming 2536 * the guest. 2537 * 2538 * See "Resuming Guest Software after Handling an Exception". 2539 * See "Information for VM Exits Due to Vectored Events". 2540 */ 2541 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2542 (intr_vec != IDT_DF) && 2543 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2544 vmx_restore_nmi_blocking(vmx, vcpu); 2545 2546 /* 2547 * The NMI has already been handled in vmx_exit_handle_nmi(). 2548 */ 2549 if (intr_type == VMCS_INTR_T_NMI) 2550 return (1); 2551 2552 /* 2553 * Call the machine check handler by hand. Also don't reflect 2554 * the machine check back into the guest. 2555 */ 2556 if (intr_vec == IDT_MC) { 2557 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler"); 2558 __asm __volatile("int $18"); 2559 return (1); 2560 } 2561 2562 if (intr_vec == IDT_PF) { 2563 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual); 2564 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d", 2565 __func__, error)); 2566 } 2567 2568 /* 2569 * Software exceptions exhibit trap-like behavior. This in 2570 * turn requires populating the VM-entry instruction length 2571 * so that the %rip in the trap frame is past the INT3/INTO 2572 * instruction. 
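 * For example, a #BP raised by the one-byte INT3 instruction
 * must push a return %rip one byte past the INT3 onto the
 * guest stack.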
2573 */ 2574 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2575 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2576 2577 /* Reflect all other exceptions back into the guest */ 2578 errcode_valid = errcode = 0; 2579 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2580 errcode_valid = 1; 2581 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2582 } 2583 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into " 2584 "the guest", intr_vec, errcode); 2585 SDT_PROBE5(vmm, vmx, exit, exception, 2586 vmx, vcpu, vmexit, intr_vec, errcode); 2587 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2588 errcode_valid, errcode, 0); 2589 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2590 __func__, error)); 2591 return (1); 2592 2593 case EXIT_REASON_EPT_FAULT: 2594 /* 2595 * If 'gpa' lies within the address space allocated to 2596 * memory then this must be a nested page fault otherwise 2597 * this must be an instruction that accesses MMIO space. 2598 */ 2599 gpa = vmcs_gpa(); 2600 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2601 apic_access_fault(vmx, vcpu, gpa)) { 2602 vmexit->exitcode = VM_EXITCODE_PAGING; 2603 vmexit->inst_length = 0; 2604 vmexit->u.paging.gpa = gpa; 2605 vmexit->u.paging.fault_type = ept_fault_type(qual); 2606 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2607 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2608 vmx, vcpu, vmexit, gpa, qual); 2609 } else if (ept_emulation_fault(qual)) { 2610 vmexit_inst_emul(vmexit, gpa, vmcs_gla()); 2611 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1); 2612 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2613 vmx, vcpu, vmexit, gpa); 2614 } 2615 /* 2616 * If Virtual NMIs control is 1 and the VM-exit is due to an 2617 * EPT fault during the execution of IRET then we must restore 2618 * the state of "virtual-NMI blocking" before resuming. 2619 * 2620 * See description of "NMI unblocking due to IRET" in 2621 * "Exit Qualification for EPT Violations". 2622 */ 2623 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2624 (qual & EXIT_QUAL_NMIUDTI) != 0) 2625 vmx_restore_nmi_blocking(vmx, vcpu); 2626 break; 2627 case EXIT_REASON_VIRTUALIZED_EOI: 2628 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2629 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2630 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2631 vmexit->inst_length = 0; /* trap-like */ 2632 break; 2633 case EXIT_REASON_APIC_ACCESS: 2634 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2635 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2636 break; 2637 case EXIT_REASON_APIC_WRITE: 2638 /* 2639 * APIC-write VM exit is trap-like so the %rip is already 2640 * pointing to the next instruction. 
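 * Clearing the instruction length below keeps the common
 * "handled" path at the end of this function from advancing
 * %rip a second time.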
2641 */ 2642 vmexit->inst_length = 0; 2643 vlapic = vm_lapic(vmx->vm, vcpu); 2644 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2645 vmx, vcpu, vmexit, vlapic); 2646 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2647 break; 2648 case EXIT_REASON_XSETBV: 2649 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2650 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2651 break; 2652 case EXIT_REASON_MONITOR: 2653 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2654 vmexit->exitcode = VM_EXITCODE_MONITOR; 2655 break; 2656 case EXIT_REASON_MWAIT: 2657 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2658 vmexit->exitcode = VM_EXITCODE_MWAIT; 2659 break; 2660 case EXIT_REASON_TPR: 2661 vlapic = vm_lapic(vmx->vm, vcpu); 2662 vlapic_sync_tpr(vlapic); 2663 vmexit->inst_length = 0; 2664 handled = HANDLED; 2665 break; 2666 case EXIT_REASON_VMCALL: 2667 case EXIT_REASON_VMCLEAR: 2668 case EXIT_REASON_VMLAUNCH: 2669 case EXIT_REASON_VMPTRLD: 2670 case EXIT_REASON_VMPTRST: 2671 case EXIT_REASON_VMREAD: 2672 case EXIT_REASON_VMRESUME: 2673 case EXIT_REASON_VMWRITE: 2674 case EXIT_REASON_VMXOFF: 2675 case EXIT_REASON_VMXON: 2676 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2677 vmexit->exitcode = VM_EXITCODE_VMINSN; 2678 break; 2679 default: 2680 SDT_PROBE4(vmm, vmx, exit, unknown, 2681 vmx, vcpu, vmexit, reason); 2682 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2683 break; 2684 } 2685 2686 if (handled) { 2687 /* 2688 * It is possible that control is returned to userland 2689 * even though we were able to handle the VM exit in the 2690 * kernel. 2691 * 2692 * In such a case we want to make sure that the userland 2693 * restarts guest execution at the instruction *after* 2694 * the one we just processed. Therefore we update the 2695 * guest rip in the VMCS and in 'vmexit'. 2696 */ 2697 vmexit->rip += vmexit->inst_length; 2698 vmexit->inst_length = 0; 2699 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2700 } else { 2701 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2702 /* 2703 * If this VM exit was not claimed by anybody then 2704 * treat it as a generic VMX exit. 2705 */ 2706 vmexit->exitcode = VM_EXITCODE_VMX; 2707 vmexit->u.vmx.status = VM_SUCCESS; 2708 vmexit->u.vmx.inst_type = 0; 2709 vmexit->u.vmx.inst_error = 0; 2710 } else { 2711 /* 2712 * The exitcode and collateral have been populated. 2713 * The VM exit will be processed further in userland. 2714 */ 2715 } 2716 } 2717 2718 SDT_PROBE4(vmm, vmx, exit, return, 2719 vmx, vcpu, vmexit, handled); 2720 return (handled); 2721} 2722 2723static __inline void 2724vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2725{ 2726 2727 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2728 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2729 vmxctx->inst_fail_status)); 2730 2731 vmexit->inst_length = 0; 2732 vmexit->exitcode = VM_EXITCODE_VMX; 2733 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2734 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2735 vmexit->u.vmx.exit_reason = ~0; 2736 vmexit->u.vmx.exit_qualification = ~0; 2737 2738 switch (rc) { 2739 case VMX_VMRESUME_ERROR: 2740 case VMX_VMLAUNCH_ERROR: 2741 case VMX_INVEPT_ERROR: 2742 vmexit->u.vmx.inst_type = rc; 2743 break; 2744 default: 2745 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2746 } 2747} 2748 2749/* 2750 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2751 * non-root operation causes a VM-exit. 
NMI blocking is in effect so it is 2752 * sufficient to simply vector to the NMI handler via a software interrupt. 2753 * However, this must be done before maskable interrupts are enabled 2754 * otherwise the "iret" issued by an interrupt handler will incorrectly 2755 * clear NMI blocking. 2756 */ 2757static __inline void 2758vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2759{ 2760 uint32_t intr_info; 2761 2762 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2763 2764 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2765 return; 2766 2767 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2768 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2769 ("VM exit interruption info invalid: %#x", intr_info)); 2770 2771 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2772 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2773 "to NMI has invalid vector: %#x", intr_info)); 2774 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler"); 2775 __asm __volatile("int $2"); 2776 } 2777} 2778 2779static __inline void 2780vmx_dr_enter_guest(struct vmxctx *vmxctx) 2781{ 2782 register_t rflags; 2783 2784 /* Save host control debug registers. */ 2785 vmxctx->host_dr7 = rdr7(); 2786 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2787 2788 /* 2789 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2790 * exceptions in the host based on the guest DRx values. The 2791 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2792 */ 2793 load_dr7(0); 2794 wrmsr(MSR_DEBUGCTLMSR, 0); 2795 2796 /* 2797 * Disable single stepping the kernel to avoid corrupting the 2798 * guest DR6. A debugger might still be able to corrupt the 2799 * guest DR6 by setting a breakpoint after this point and then 2800 * single stepping. 2801 */ 2802 rflags = read_rflags(); 2803 vmxctx->host_tf = rflags & PSL_T; 2804 write_rflags(rflags & ~PSL_T); 2805 2806 /* Save host debug registers. */ 2807 vmxctx->host_dr0 = rdr0(); 2808 vmxctx->host_dr1 = rdr1(); 2809 vmxctx->host_dr2 = rdr2(); 2810 vmxctx->host_dr3 = rdr3(); 2811 vmxctx->host_dr6 = rdr6(); 2812 2813 /* Restore guest debug registers. */ 2814 load_dr0(vmxctx->guest_dr0); 2815 load_dr1(vmxctx->guest_dr1); 2816 load_dr2(vmxctx->guest_dr2); 2817 load_dr3(vmxctx->guest_dr3); 2818 load_dr6(vmxctx->guest_dr6); 2819} 2820 2821static __inline void 2822vmx_dr_leave_guest(struct vmxctx *vmxctx) 2823{ 2824 2825 /* Save guest debug registers. */ 2826 vmxctx->guest_dr0 = rdr0(); 2827 vmxctx->guest_dr1 = rdr1(); 2828 vmxctx->guest_dr2 = rdr2(); 2829 vmxctx->guest_dr3 = rdr3(); 2830 vmxctx->guest_dr6 = rdr6(); 2831 2832 /* 2833 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2834 * PSL_T last. 
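 * They are restored last because they are what re-enable debug
 * breakpoints and single stepping; the remaining debug registers
 * must already hold their host values by that point.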
2835 */ 2836 load_dr0(vmxctx->host_dr0); 2837 load_dr1(vmxctx->host_dr1); 2838 load_dr2(vmxctx->host_dr2); 2839 load_dr3(vmxctx->host_dr3); 2840 load_dr6(vmxctx->host_dr6); 2841 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2842 load_dr7(vmxctx->host_dr7); 2843 write_rflags(read_rflags() | vmxctx->host_tf); 2844} 2845 2846static int 2847vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap, 2848 struct vm_eventinfo *evinfo) 2849{ 2850 int rc, handled, launched; 2851 struct vmx *vmx; 2852 struct vm *vm; 2853 struct vmxctx *vmxctx; 2854 struct vmcs *vmcs; 2855 struct vm_exit *vmexit; 2856 struct vlapic *vlapic; 2857 uint32_t exit_reason; 2858 struct region_descriptor gdtr, idtr; 2859 uint16_t ldt_sel; 2860 2861 vmx = arg; 2862 vm = vmx->vm; 2863 vmcs = &vmx->vmcs[vcpu]; 2864 vmxctx = &vmx->ctx[vcpu]; 2865 vlapic = vm_lapic(vm, vcpu); 2866 vmexit = vm_exitinfo(vm, vcpu); 2867 launched = 0; 2868 2869 KASSERT(vmxctx->pmap == pmap, 2870 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 2871 2872 vmx_msr_guest_enter(vmx, vcpu); 2873 2874 VMPTRLD(vmcs); 2875 2876 /* 2877 * XXX 2878 * We do this every time because we may setup the virtual machine 2879 * from a different process than the one that actually runs it. 2880 * 2881 * If the life of a virtual machine was spent entirely in the context 2882 * of a single process we could do this once in vmx_vminit(). 2883 */ 2884 vmcs_write(VMCS_HOST_CR3, rcr3()); 2885 2886 vmcs_write(VMCS_GUEST_RIP, rip); 2887 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 2888 do { 2889 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 2890 "%#lx/%#lx", __func__, vmcs_guest_rip(), rip)); 2891 2892 handled = UNHANDLED; 2893 /* 2894 * Interrupts are disabled from this point on until the 2895 * guest starts executing. This is done for the following 2896 * reasons: 2897 * 2898 * If an AST is asserted on this thread after the check below, 2899 * then the IPI_AST notification will not be lost, because it 2900 * will cause a VM exit due to external interrupt as soon as 2901 * the guest state is loaded. 2902 * 2903 * A posted interrupt after 'vmx_inject_interrupts()' will 2904 * not be "lost" because it will be held pending in the host 2905 * APIC because interrupts are disabled. The pending interrupt 2906 * will be recognized as soon as the guest state is loaded. 2907 * 2908 * The same reasoning applies to the IPI generated by 2909 * pmap_invalidate_ept(). 2910 */ 2911 disable_intr(); 2912 vmx_inject_interrupts(vmx, vcpu, vlapic, rip); 2913 2914 /* 2915 * Check for vcpu suspension after injecting events because 2916 * vmx_inject_interrupts() can suspend the vcpu due to a 2917 * triple fault. 2918 */ 2919 if (vcpu_suspended(evinfo)) { 2920 enable_intr(); 2921 vm_exit_suspended(vmx->vm, vcpu, rip); 2922 break; 2923 } 2924 2925 if (vcpu_rendezvous_pending(evinfo)) { 2926 enable_intr(); 2927 vm_exit_rendezvous(vmx->vm, vcpu, rip); 2928 break; 2929 } 2930 2931 if (vcpu_reqidle(evinfo)) { 2932 enable_intr(); 2933 vm_exit_reqidle(vmx->vm, vcpu, rip); 2934 break; 2935 } 2936 2937 if (vcpu_should_yield(vm, vcpu)) { 2938 enable_intr(); 2939 vm_exit_astpending(vmx->vm, vcpu, rip); 2940 vmx_astpending_trace(vmx, vcpu, rip); 2941 handled = HANDLED; 2942 break; 2943 } 2944 2945 /* 2946 * If TPR Shadowing is enabled, the TPR Threshold 2947 * must be updated right before entering the guest. 
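 * The threshold mirrors the current virtual CR8 value, which the
 * guest may have changed since the previous entry.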
2948 */ 2949 if (tpr_shadowing && !virtual_interrupt_delivery) { 2950 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { 2951 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 2952 } 2953 } 2954 2955 /* 2956 * VM exits restore the base address but not the 2957 * limits of GDTR and IDTR. The VMCS only stores the 2958 * base address, so VM exits set the limits to 0xffff. 2959 * Save and restore the full GDTR and IDTR to restore 2960 * the limits. 2961 * 2962 * The VMCS does not save the LDTR at all, and VM 2963 * exits clear LDTR as if a NULL selector were loaded. 2964 * The userspace hypervisor probably doesn't use a 2965 * LDT, but save and restore it to be safe. 2966 */ 2967 sgdt(&gdtr); 2968 sidt(&idtr); 2969 ldt_sel = sldt(); 2970 2971 vmx_run_trace(vmx, vcpu); 2972 vmx_dr_enter_guest(vmxctx); 2973 rc = vmx_enter_guest(vmxctx, vmx, launched); 2974 vmx_dr_leave_guest(vmxctx); 2975 2976 bare_lgdt(&gdtr); 2977 lidt(&idtr); 2978 lldt(ldt_sel); 2979 2980 /* Collect some information for VM exit processing */ 2981 vmexit->rip = rip = vmcs_guest_rip(); 2982 vmexit->inst_length = vmexit_instruction_length(); 2983 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 2984 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 2985 2986 /* Update 'nextrip' */ 2987 vmx->state[vcpu].nextrip = rip; 2988 2989 if (rc == VMX_GUEST_VMEXIT) { 2990 vmx_exit_handle_nmi(vmx, vcpu, vmexit); 2991 enable_intr(); 2992 handled = vmx_exit_process(vmx, vcpu, vmexit); 2993 } else { 2994 enable_intr(); 2995 vmx_exit_inst_error(vmxctx, rc, vmexit); 2996 } 2997 launched = 1; 2998 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled); 2999 rip = vmexit->rip; 3000 } while (handled); 3001 3002 /* 3003 * If a VM exit has been handled then the exitcode must be BOGUS 3004 * If a VM exit is not handled then the exitcode must not be BOGUS 3005 */ 3006 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 3007 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 3008 panic("Mismatch between handled (%d) and exitcode (%d)", 3009 handled, vmexit->exitcode); 3010 } 3011 3012 if (!handled) 3013 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1); 3014 3015 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d", 3016 vmexit->exitcode); 3017 3018 VMCLEAR(vmcs); 3019 vmx_msr_guest_exit(vmx, vcpu); 3020 3021 return (0); 3022} 3023 3024static void 3025vmx_vmcleanup(void *arg) 3026{ 3027 int i; 3028 struct vmx *vmx = arg; 3029 uint16_t maxcpus; 3030 3031 if (apic_access_virtualization(vmx, 0)) 3032 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3033 3034 maxcpus = vm_get_maxcpus(vmx->vm); 3035 for (i = 0; i < maxcpus; i++) 3036 vpid_free(vmx->state[i].vpid); 3037 3038 free(vmx, M_VMX); 3039 3040 return; 3041} 3042 3043static register_t * 3044vmxctx_regptr(struct vmxctx *vmxctx, int reg) 3045{ 3046 3047 switch (reg) { 3048 case VM_REG_GUEST_RAX: 3049 return (&vmxctx->guest_rax); 3050 case VM_REG_GUEST_RBX: 3051 return (&vmxctx->guest_rbx); 3052 case VM_REG_GUEST_RCX: 3053 return (&vmxctx->guest_rcx); 3054 case VM_REG_GUEST_RDX: 3055 return (&vmxctx->guest_rdx); 3056 case VM_REG_GUEST_RSI: 3057 return (&vmxctx->guest_rsi); 3058 case VM_REG_GUEST_RDI: 3059 return (&vmxctx->guest_rdi); 3060 case VM_REG_GUEST_RBP: 3061 return (&vmxctx->guest_rbp); 3062 case VM_REG_GUEST_R8: 3063 return (&vmxctx->guest_r8); 3064 case VM_REG_GUEST_R9: 3065 return (&vmxctx->guest_r9); 3066 case VM_REG_GUEST_R10: 3067 return (&vmxctx->guest_r10); 3068 case VM_REG_GUEST_R11: 3069 return (&vmxctx->guest_r11); 3070 case 
VM_REG_GUEST_R12: 3071 return (&vmxctx->guest_r12); 3072 case VM_REG_GUEST_R13: 3073 return (&vmxctx->guest_r13); 3074 case VM_REG_GUEST_R14: 3075 return (&vmxctx->guest_r14); 3076 case VM_REG_GUEST_R15: 3077 return (&vmxctx->guest_r15); 3078 case VM_REG_GUEST_CR2: 3079 return (&vmxctx->guest_cr2); 3080 case VM_REG_GUEST_DR0: 3081 return (&vmxctx->guest_dr0); 3082 case VM_REG_GUEST_DR1: 3083 return (&vmxctx->guest_dr1); 3084 case VM_REG_GUEST_DR2: 3085 return (&vmxctx->guest_dr2); 3086 case VM_REG_GUEST_DR3: 3087 return (&vmxctx->guest_dr3); 3088 case VM_REG_GUEST_DR6: 3089 return (&vmxctx->guest_dr6); 3090 default: 3091 break; 3092 } 3093 return (NULL); 3094} 3095 3096static int 3097vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 3098{ 3099 register_t *regp; 3100 3101 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3102 *retval = *regp; 3103 return (0); 3104 } else 3105 return (EINVAL); 3106} 3107 3108static int 3109vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 3110{ 3111 register_t *regp; 3112 3113 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3114 *regp = val; 3115 return (0); 3116 } else 3117 return (EINVAL); 3118} 3119 3120static int 3121vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval) 3122{ 3123 uint64_t gi; 3124 int error; 3125 3126 error = vmcs_getreg(&vmx->vmcs[vcpu], running, 3127 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi); 3128 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3129 return (error); 3130} 3131 3132static int 3133vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val) 3134{ 3135 struct vmcs *vmcs; 3136 uint64_t gi; 3137 int error, ident; 3138 3139 /* 3140 * Forcing the vcpu into an interrupt shadow is not supported. 3141 */ 3142 if (val) { 3143 error = EINVAL; 3144 goto done; 3145 } 3146 3147 vmcs = &vmx->vmcs[vcpu]; 3148 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY); 3149 error = vmcs_getreg(vmcs, running, ident, &gi); 3150 if (error == 0) { 3151 gi &= ~HWINTR_BLOCKING; 3152 error = vmcs_setreg(vmcs, running, ident, gi); 3153 } 3154done: 3155 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val, 3156 error ? 
"failed" : "succeeded"); 3157 return (error); 3158} 3159 3160static int 3161vmx_shadow_reg(int reg) 3162{ 3163 int shreg; 3164 3165 shreg = -1; 3166 3167 switch (reg) { 3168 case VM_REG_GUEST_CR0: 3169 shreg = VMCS_CR0_SHADOW; 3170 break; 3171 case VM_REG_GUEST_CR4: 3172 shreg = VMCS_CR4_SHADOW; 3173 break; 3174 default: 3175 break; 3176 } 3177 3178 return (shreg); 3179} 3180 3181static int 3182vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3183{ 3184 int running, hostcpu; 3185 struct vmx *vmx = arg; 3186 3187 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3188 if (running && hostcpu != curcpu) 3189 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu); 3190 3191 if (reg == VM_REG_GUEST_INTR_SHADOW) 3192 return (vmx_get_intr_shadow(vmx, vcpu, running, retval)); 3193 3194 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0) 3195 return (0); 3196 3197 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval)); 3198} 3199 3200static int 3201vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3202{ 3203 int error, hostcpu, running, shadow; 3204 uint64_t ctls; 3205 pmap_t pmap; 3206 struct vmx *vmx = arg; 3207 3208 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3209 if (running && hostcpu != curcpu) 3210 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu); 3211 3212 if (reg == VM_REG_GUEST_INTR_SHADOW) 3213 return (vmx_modify_intr_shadow(vmx, vcpu, running, val)); 3214 3215 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0) 3216 return (0); 3217 3218 /* Do not permit user write access to VMCS fields by offset. */ 3219 if (reg < 0) 3220 return (EINVAL); 3221 3222 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val); 3223 3224 if (error == 0) { 3225 /* 3226 * If the "load EFER" VM-entry control is 1 then the 3227 * value of EFER.LMA must be identical to "IA-32e mode guest" 3228 * bit in the VM-entry control. 3229 */ 3230 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 3231 (reg == VM_REG_GUEST_EFER)) { 3232 vmcs_getreg(&vmx->vmcs[vcpu], running, 3233 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 3234 if (val & EFER_LMA) 3235 ctls |= VM_ENTRY_GUEST_LMA; 3236 else 3237 ctls &= ~VM_ENTRY_GUEST_LMA; 3238 vmcs_setreg(&vmx->vmcs[vcpu], running, 3239 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 3240 } 3241 3242 shadow = vmx_shadow_reg(reg); 3243 if (shadow > 0) { 3244 /* 3245 * Store the unmodified value in the shadow 3246 */ 3247 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 3248 VMCS_IDENT(shadow), val); 3249 } 3250 3251 if (reg == VM_REG_GUEST_CR3) { 3252 /* 3253 * Invalidate the guest vcpu's TLB mappings to emulate 3254 * the behavior of updating %cr3. 3255 * 3256 * XXX the processor retains global mappings when %cr3 3257 * is updated but vmx_invvpid() does not. 
3258 */ 3259 pmap = vmx->ctx[vcpu].pmap; 3260 vmx_invvpid(vmx, vcpu, pmap, running); 3261 } 3262 } 3263 3264 return (error); 3265} 3266 3267static int 3268vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 3269{ 3270 int hostcpu, running; 3271 struct vmx *vmx = arg; 3272 3273 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3274 if (running && hostcpu != curcpu) 3275 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3276 3277 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc)); 3278} 3279 3280static int 3281vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 3282{ 3283 int hostcpu, running; 3284 struct vmx *vmx = arg; 3285 3286 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu); 3287 if (running && hostcpu != curcpu) 3288 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu); 3289 3290 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc)); 3291} 3292 3293static int 3294vmx_getcap(void *arg, int vcpu, int type, int *retval) 3295{ 3296 struct vmx *vmx = arg; 3297 int vcap; 3298 int ret; 3299 3300 ret = ENOENT; 3301 3302 vcap = vmx->cap[vcpu].set; 3303 3304 switch (type) { 3305 case VM_CAP_HALT_EXIT: 3306 if (cap_halt_exit) 3307 ret = 0; 3308 break; 3309 case VM_CAP_PAUSE_EXIT: 3310 if (cap_pause_exit) 3311 ret = 0; 3312 break; 3313 case VM_CAP_MTRAP_EXIT: 3314 if (cap_monitor_trap) 3315 ret = 0; 3316 break; 3317 case VM_CAP_UNRESTRICTED_GUEST: 3318 if (cap_unrestricted_guest) 3319 ret = 0; 3320 break; 3321 case VM_CAP_ENABLE_INVPCID: 3322 if (cap_invpcid) 3323 ret = 0; 3324 break; 3325 default: 3326 break; 3327 } 3328 3329 if (ret == 0) 3330 *retval = (vcap & (1 << type)) ? 1 : 0; 3331 3332 return (ret); 3333} 3334 3335static int 3336vmx_setcap(void *arg, int vcpu, int type, int val) 3337{ 3338 struct vmx *vmx = arg; 3339 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 3340 uint32_t baseval; 3341 uint32_t *pptr; 3342 int error; 3343 int flag; 3344 int reg; 3345 int retval; 3346 3347 retval = ENOENT; 3348 pptr = NULL; 3349 3350 switch (type) { 3351 case VM_CAP_HALT_EXIT: 3352 if (cap_halt_exit) { 3353 retval = 0; 3354 pptr = &vmx->cap[vcpu].proc_ctls; 3355 baseval = *pptr; 3356 flag = PROCBASED_HLT_EXITING; 3357 reg = VMCS_PRI_PROC_BASED_CTLS; 3358 } 3359 break; 3360 case VM_CAP_MTRAP_EXIT: 3361 if (cap_monitor_trap) { 3362 retval = 0; 3363 pptr = &vmx->cap[vcpu].proc_ctls; 3364 baseval = *pptr; 3365 flag = PROCBASED_MTF; 3366 reg = VMCS_PRI_PROC_BASED_CTLS; 3367 } 3368 break; 3369 case VM_CAP_PAUSE_EXIT: 3370 if (cap_pause_exit) { 3371 retval = 0; 3372 pptr = &vmx->cap[vcpu].proc_ctls; 3373 baseval = *pptr; 3374 flag = PROCBASED_PAUSE_EXITING; 3375 reg = VMCS_PRI_PROC_BASED_CTLS; 3376 } 3377 break; 3378 case VM_CAP_UNRESTRICTED_GUEST: 3379 if (cap_unrestricted_guest) { 3380 retval = 0; 3381 pptr = &vmx->cap[vcpu].proc_ctls2; 3382 baseval = *pptr; 3383 flag = PROCBASED2_UNRESTRICTED_GUEST; 3384 reg = VMCS_SEC_PROC_BASED_CTLS; 3385 } 3386 break; 3387 case VM_CAP_ENABLE_INVPCID: 3388 if (cap_invpcid) { 3389 retval = 0; 3390 pptr = &vmx->cap[vcpu].proc_ctls2; 3391 baseval = *pptr; 3392 flag = PROCBASED2_ENABLE_INVPCID; 3393 reg = VMCS_SEC_PROC_BASED_CTLS; 3394 } 3395 break; 3396 default: 3397 break; 3398 } 3399 3400 if (retval == 0) { 3401 if (val) { 3402 baseval |= flag; 3403 } else { 3404 baseval &= ~flag; 3405 } 3406 VMPTRLD(vmcs); 3407 error = vmwrite(reg, baseval); 3408 VMCLEAR(vmcs); 3409 3410 if (error) { 3411 retval = error; 3412 } else { 3413 /* 3414 * Update optional stored flags, and record 3415 * setting 3416 */ 3417 if (pptr != 
NULL) { 3418 *pptr = baseval; 3419 } 3420 3421 if (val) { 3422 vmx->cap[vcpu].set |= (1 << type); 3423 } else { 3424 vmx->cap[vcpu].set &= ~(1 << type); 3425 } 3426 } 3427 } 3428 3429 return (retval); 3430} 3431 3432struct vlapic_vtx { 3433 struct vlapic vlapic; 3434 struct pir_desc *pir_desc; 3435 struct vmx *vmx; 3436 u_int pending_prio; 3437}; 3438 3439#define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3440 3441#define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 3442do { \ 3443 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 3444 level ? "level" : "edge", vector); \ 3445 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 3446 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 3447 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 3448 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 3449 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 3450} while (0) 3451 3452/* 3453 * vlapic->ops handlers that utilize the APICv hardware assist described in 3454 * Chapter 29 of the Intel SDM. 3455 */ 3456static int 3457vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 3458{ 3459 struct vlapic_vtx *vlapic_vtx; 3460 struct pir_desc *pir_desc; 3461 uint64_t mask; 3462 int idx, notify = 0; 3463 3464 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3465 pir_desc = vlapic_vtx->pir_desc; 3466 3467 /* 3468 * Keep track of interrupt requests in the PIR descriptor. This is 3469 * because the virtual APIC page pointed to by the VMCS cannot be 3470 * modified if the vcpu is running. 3471 */ 3472 idx = vector / 64; 3473 mask = 1UL << (vector % 64); 3474 atomic_set_long(&pir_desc->pir[idx], mask); 3475 3476 /* 3477 * A notification is required whenever the 'pending' bit makes a 3478 * transition from 0->1. 3479 * 3480 * Even if the 'pending' bit is already asserted, notification about 3481 * the incoming interrupt may still be necessary. For example, if a 3482 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3483 * the 0->1 'pending' transition with a notification, but the vCPU 3484 * would ignore the interrupt for the time being. The same vCPU would 3485 * need to then be notified if a high-priority interrupt arrived which 3486 * satisfied the PPR. 3487 * 3488 * The priorities of interrupts injected while 'pending' is asserted 3489 * are tracked in a custom bitfield 'pending_prio'. Should the 3490 * to-be-injected interrupt exceed the priorities already present, the 3491 * notification is sent. The priorities recorded in 'pending_prio' are 3492 * cleared whenever the 'pending' bit makes another 0->1 transition. 
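 * The tracking is done at the granularity of 16-vector priority
 * classes (see VPR_PRIO_BIT above) rather than individual
 * vectors.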
3493 */ 3494 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3495 notify = 1; 3496 vlapic_vtx->pending_prio = 0; 3497 } else { 3498 const u_int old_prio = vlapic_vtx->pending_prio; 3499 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3500 3501 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3502 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3503 notify = 1; 3504 } 3505 } 3506 3507 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 3508 level, "vmx_set_intr_ready"); 3509 return (notify); 3510} 3511 3512static int 3513vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 3514{ 3515 struct vlapic_vtx *vlapic_vtx; 3516 struct pir_desc *pir_desc; 3517 struct LAPIC *lapic; 3518 uint64_t pending, pirval; 3519 uint32_t ppr, vpr; 3520 int i; 3521 3522 /* 3523 * This function is only expected to be called from the 'HLT' exit 3524 * handler which does not care about the vector that is pending. 3525 */ 3526 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 3527 3528 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3529 pir_desc = vlapic_vtx->pir_desc; 3530 3531 pending = atomic_load_acq_long(&pir_desc->pending); 3532 if (!pending) { 3533 /* 3534 * While a virtual interrupt may have already been 3535 * processed the actual delivery maybe pending the 3536 * interruptibility of the guest. Recognize a pending 3537 * interrupt by reevaluating virtual interrupts 3538 * following Section 29.2.1 in the Intel SDM Volume 3. 3539 */ 3540 struct vm_exit *vmexit; 3541 uint8_t rvi, ppr; 3542 3543 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid); 3544 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT, 3545 ("vmx_pending_intr: exitcode not 'HLT'")); 3546 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; 3547 lapic = vlapic->apic_page; 3548 ppr = lapic->ppr & APIC_TPR_INT; 3549 if (rvi > ppr) { 3550 return (1); 3551 } 3552 3553 return (0); 3554 } 3555 3556 /* 3557 * If there is an interrupt pending then it will be recognized only 3558 * if its priority is greater than the processor priority. 3559 * 3560 * Special case: if the processor priority is zero then any pending 3561 * interrupt will be recognized. 3562 */ 3563 lapic = vlapic->apic_page; 3564 ppr = lapic->ppr & APIC_TPR_INT; 3565 if (ppr == 0) 3566 return (1); 3567 3568 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d", 3569 lapic->ppr); 3570 3571 vpr = 0; 3572 for (i = 3; i >= 0; i--) { 3573 pirval = pir_desc->pir[i]; 3574 if (pirval != 0) { 3575 vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; 3576 break; 3577 } 3578 } 3579 3580 /* 3581 * If the highest-priority pending interrupt falls short of the 3582 * processor priority of this vCPU, ensure that 'pending_prio' does not 3583 * have any stale bits which would preclude a higher-priority interrupt 3584 * from incurring a notification later. 
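 * This is done by collapsing 'pending_prio' to just the class of
 * the highest-priority interrupt currently pending.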
3585 */ 3586 if (vpr <= ppr) { 3587 const u_int prio_bit = VPR_PRIO_BIT(vpr); 3588 const u_int old = vlapic_vtx->pending_prio; 3589 3590 if (old > prio_bit && (old & prio_bit) == 0) { 3591 vlapic_vtx->pending_prio = prio_bit; 3592 } 3593 return (0); 3594 } 3595 return (1); 3596} 3597 3598static void 3599vmx_intr_accepted(struct vlapic *vlapic, int vector) 3600{ 3601 3602 panic("vmx_intr_accepted: not expected to be called"); 3603} 3604 3605static void 3606vmx_set_tmr(struct vlapic *vlapic, int vector, bool level) 3607{ 3608 struct vlapic_vtx *vlapic_vtx; 3609 struct vmx *vmx; 3610 struct vmcs *vmcs; 3611 uint64_t mask, val; 3612 3613 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector)); 3614 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL), 3615 ("vmx_set_tmr: vcpu cannot be running")); 3616 3617 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3618 vmx = vlapic_vtx->vmx; 3619 vmcs = &vmx->vmcs[vlapic->vcpuid]; 3620 mask = 1UL << (vector % 64); 3621 3622 VMPTRLD(vmcs); 3623 val = vmcs_read(VMCS_EOI_EXIT(vector)); 3624 if (level) 3625 val |= mask; 3626 else 3627 val &= ~mask; 3628 vmcs_write(VMCS_EOI_EXIT(vector), val); 3629 VMCLEAR(vmcs); 3630} 3631 3632static void 3633vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3634{ 3635 struct vmx *vmx; 3636 struct vmcs *vmcs; 3637 uint32_t proc_ctls; 3638 int vcpuid; 3639 3640 vcpuid = vlapic->vcpuid; 3641 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3642 vmcs = &vmx->vmcs[vcpuid]; 3643 3644 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3645 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3646 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3647 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3648 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3649 3650 VMPTRLD(vmcs); 3651 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3652 VMCLEAR(vmcs); 3653} 3654 3655static void 3656vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3657{ 3658 struct vmx *vmx; 3659 struct vmcs *vmcs; 3660 uint32_t proc_ctls2; 3661 int vcpuid, error; 3662 3663 vcpuid = vlapic->vcpuid; 3664 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3665 vmcs = &vmx->vmcs[vcpuid]; 3666 3667 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3668 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3669 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2)); 3670 3671 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3672 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3673 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3674 3675 VMPTRLD(vmcs); 3676 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3677 VMCLEAR(vmcs); 3678 3679 if (vlapic->vcpuid == 0) { 3680 /* 3681 * The nested page table mappings are shared by all vcpus 3682 * so unmap the APIC access page just once. 3683 */ 3684 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3685 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d", 3686 __func__, error)); 3687 3688 /* 3689 * The MSR bitmap is shared by all vcpus so modify it only 3690 * once in the context of vcpu 0. 3691 */ 3692 error = vmx_allow_x2apic_msrs(vmx); 3693 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d", 3694 __func__, error)); 3695 } 3696} 3697 3698static void 3699vmx_post_intr(struct vlapic *vlapic, int hostcpu) 3700{ 3701 3702 ipi_cpu(hostcpu, pirvec); 3703} 3704 3705/* 3706 * Transfer the pending interrupts in the PIR descriptor to the IRR 3707 * in the virtual APIC page. 
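 * This is expected to run on the vcpu's own thread shortly
 * before VM-entry, while the vcpu is not executing guest code,
 * so the virtual-APIC page can be modified directly.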
3708 */ 3709static void 3710vmx_inject_pir(struct vlapic *vlapic) 3711{ 3712 struct vlapic_vtx *vlapic_vtx; 3713 struct pir_desc *pir_desc; 3714 struct LAPIC *lapic; 3715 uint64_t val, pirval; 3716 int rvi, pirbase = -1; 3717 uint16_t intr_status_old, intr_status_new; 3718 3719 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3720 pir_desc = vlapic_vtx->pir_desc; 3721 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3722 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3723 "no posted interrupt pending"); 3724 return; 3725 } 3726 3727 pirval = 0; 3728 pirbase = -1; 3729 lapic = vlapic->apic_page; 3730 3731 val = atomic_readandclear_long(&pir_desc->pir[0]); 3732 if (val != 0) { 3733 lapic->irr0 |= val; 3734 lapic->irr1 |= val >> 32; 3735 pirbase = 0; 3736 pirval = val; 3737 } 3738 3739 val = atomic_readandclear_long(&pir_desc->pir[1]); 3740 if (val != 0) { 3741 lapic->irr2 |= val; 3742 lapic->irr3 |= val >> 32; 3743 pirbase = 64; 3744 pirval = val; 3745 } 3746 3747 val = atomic_readandclear_long(&pir_desc->pir[2]); 3748 if (val != 0) { 3749 lapic->irr4 |= val; 3750 lapic->irr5 |= val >> 32; 3751 pirbase = 128; 3752 pirval = val; 3753 } 3754 3755 val = atomic_readandclear_long(&pir_desc->pir[3]); 3756 if (val != 0) { 3757 lapic->irr6 |= val; 3758 lapic->irr7 |= val >> 32; 3759 pirbase = 192; 3760 pirval = val; 3761 } 3762 3763 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir"); 3764 3765 /* 3766 * Update RVI so the processor can evaluate pending virtual 3767 * interrupts on VM-entry. 3768 * 3769 * It is possible for pirval to be 0 here, even though the 3770 * pending bit has been set. The scenario is: 3771 * CPU-Y is sending a posted interrupt to CPU-X, which 3772 * is running a guest and processing posted interrupts in h/w. 3773 * CPU-X will eventually exit and the state seen in s/w is 3774 * the pending bit set, but no PIR bits set. 
3775 * 3776 * CPU-X CPU-Y 3777 * (vm running) (host running) 3778 * rx posted interrupt 3779 * CLEAR pending bit 3780 * SET PIR bit 3781 * READ/CLEAR PIR bits 3782 * SET pending bit 3783 * (vm exit) 3784 * pending bit set, PIR 0 3785 */ 3786 if (pirval != 0) { 3787 rvi = pirbase + flsl(pirval) - 1; 3788 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS); 3789 intr_status_new = (intr_status_old & 0xFF00) | rvi; 3790 if (intr_status_new > intr_status_old) { 3791 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new); 3792 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: " 3793 "guest_intr_status changed from 0x%04x to 0x%04x", 3794 intr_status_old, intr_status_new); 3795 } 3796 } 3797} 3798 3799static struct vlapic * 3800vmx_vlapic_init(void *arg, int vcpuid) 3801{ 3802 struct vmx *vmx; 3803 struct vlapic *vlapic; 3804 struct vlapic_vtx *vlapic_vtx; 3805 3806 vmx = arg; 3807 3808 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO); 3809 vlapic->vm = vmx->vm; 3810 vlapic->vcpuid = vcpuid; 3811 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3812 3813 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3814 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3815 vlapic_vtx->vmx = vmx; 3816 3817 if (tpr_shadowing) { 3818 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3819 } 3820 3821 if (virtual_interrupt_delivery) { 3822 vlapic->ops.set_intr_ready = vmx_set_intr_ready; 3823 vlapic->ops.pending_intr = vmx_pending_intr; 3824 vlapic->ops.intr_accepted = vmx_intr_accepted; 3825 vlapic->ops.set_tmr = vmx_set_tmr; 3826 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3827 } 3828 3829 if (posted_interrupts) 3830 vlapic->ops.post_intr = vmx_post_intr; 3831 3832 vlapic_init(vlapic); 3833 3834 return (vlapic); 3835} 3836 3837static void 3838vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3839{ 3840 3841 vlapic_cleanup(vlapic); 3842 free(vlapic, M_VLAPIC); 3843} 3844 3845struct vmm_ops vmm_ops_intel = { 3846 vmx_init, 3847 vmx_cleanup, 3848 vmx_restore, 3849 vmx_vminit, 3850 vmx_run, 3851 vmx_vmcleanup, 3852 vmx_getreg, 3853 vmx_setreg, 3854 vmx_getdesc, 3855 vmx_setdesc, 3856 vmx_getcap, 3857 vmx_setcap, 3858 ept_vmspace_alloc, 3859 ept_vmspace_free, 3860 vmx_vlapic_init, 3861 vmx_vlapic_cleanup, 3862}; 3863