vmx.c revision 261001
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 261001 2014-01-22 04:03:11Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 261001 2014-01-22 04:03:11Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include "vmm_host.h"
#include "vmm_ipi.h"
#include "vmm_msr.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmx_msr.h"
#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "x86.h"
#include "vmx_controls.h"
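
/*
 * For each class of VMX controls, the ONE_SETTING macro names the bits that
 * must be 1 and the ZERO_SETTING macro the bits that must be 0 for bhyve to
 * operate.  vmx_set_ctlreg() validates these against the VMX capability
 * MSRs at module load time.
 */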
#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING |		\
	 PINBASED_NMI_EXITING |			\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING |		\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS |		\
	 PROCBASED_IO_EXITING |			\
	 PROCBASED_MSR_BITMAPS |		\
	 PROCBASED_CTLS_WINDOW_SETTING)
#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING |		\
	 PROCBASED_CR3_STORE_EXITING |		\
	 PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING_NO_PAT		\
	(VM_EXIT_HOST_LMA |			\
	 VM_EXIT_SAVE_EFER |			\
	 VM_EXIT_LOAD_EFER)

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_EXIT_ACKNOWLEDGE_INTERRUPT |	\
	 VM_EXIT_SAVE_PAT |			\
	 VM_EXIT_LOAD_PAT)
#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_ENTRY_LOAD_PAT)
#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |		\
	 VM_ENTRY_INTO_SMM |			\
	 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	guest_msr_rw(vmx, msr) \
	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
    &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
    &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
    &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
    &cr4_zeros_mask, 0, NULL);

static int vmx_no_patmsr;

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
    &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Optional capabilities
 */
static int cap_halt_exit;
static int cap_pause_exit;
static int cap_unrestricted_guest;
static int cap_monitor_trap;
static int cap_invpcid;

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static void vmx_inject_pir(struct vlapic *vlapic);
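
/*
 * Translate a VM-exit reason into a short string for KTR traces.  The
 * fallback case formats into a static buffer, which is fine for debug
 * tracing but not reentrant.
 */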
#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE:
		return "mce";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */
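
/*
 * Coerce a CR0/CR4 value to satisfy the VMX fixed-bit constraints captured
 * in vmx_init(): force the must-be-1 bits on and the must-be-0 bits off.
 */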
u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

static void
msr_save_area_init(struct msr_entry *g_area, int *g_count)
{
	int cnt;

	static struct msr_entry guest_msrs[] = {
		{ MSR_KGSBASE, 0, 0 },
	};

	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
	if (cnt > GUEST_MSR_MAX_ENTRIES)
		panic("guest msr save area overrun");
	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
	*g_count = cnt;
}
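
/*
 * Executed on each CPU via smp_rendezvous() from vmx_cleanup(): flush any
 * EPT- and VPID-tagged TLB entries, leave VMX operation and clear CR4.VMXE.
 */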
static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures, so invalidate them explicitly to
		 * prevent retention of cached information in the TLB
		 * between distinct VMX episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec != 0)
		vmm_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}

static int
vmx_init(int ipinum)
{
	int error, use_tpr_shadow;
	uint64_t fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		    "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		/* Try again without the PAT MSR bits */
		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
		    MSR_VMX_TRUE_EXIT_CTLS,
		    VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
		    VM_EXIT_CTLS_ZERO_SETTING,
		    &exit_ctls);
		if (error) {
			printf("vmx_init: processor does not support desired "
			    "exit controls\n");
			return (error);
		} else {
			if (bootverbose)
				printf("vmm: PAT MSR access not supported\n");
			guest_msr_valid(MSR_PAT);
			vmx_no_patmsr = 1;
		}
	}

	/* Check support for VM-entry controls */
	if (!vmx_no_patmsr) {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
		    MSR_VMX_TRUE_ENTRY_CTLS,
		    VM_ENTRY_CTLS_ONE_SETTING,
		    VM_ENTRY_CTLS_ZERO_SETTING,
		    &entry_ctls);
	} else {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
		    MSR_VMX_TRUE_ENTRY_CTLS,
		    VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
		    VM_ENTRY_CTLS_ZERO_SETTING,
		    &entry_ctls);
	}

	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_UNRESTRICTED_GUEST, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check support for virtual interrupt delivery.
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp) == 0);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && use_tpr_shadow) {
		virtual_interrupt_delivery = 1;
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = vmm_ipi_alloc();
			if (pirvec == 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
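
/*
 * Allocate and initialize the per-VM state: the page-aligned 'struct vmx'
 * softc holds the per-vcpu VMCS, MSR bitmap, APIC and posted-interrupt
 * pages.  Each vcpu's VMCS is loaded once here to program the execution
 * controls, EPTP, VPID and MSR save areas.
 */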
static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, guest_msr_count;
	struct vmx *vmx;
	struct vmcs *vmcs;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * Guest KGSBASE is saved and restored in the guest MSR save area.
	 * Host KGSBASE is restored before returning to userland from the pcb.
	 * There will be a window of time when we are executing in the host
	 * kernel context with a value of KGSBASE from the guest. This is ok
	 * because the value of KGSBASE is inconsequential in kernel context.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_KGSBASE) ||
	    guest_msr_rw(vmx, MSR_EFER))
		panic("vmx_vminit: error setting guest msr access");

	/*
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit. However, if running on a system with no
	 * MSR_PAT save/restore support, leave access disabled so accesses
	 * will be trapped.
	 */
	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
		panic("vmx_vminit: error setting guest pat msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			    error, i);
		}

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);
		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
		vmx->cap[i].proc_ctls2 = procbased_ctls2;

		vmx->state[i].lastcpu = -1;
		vmx->state[i].vpid = vpid[i];

		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);

		error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]),
		    guest_msr_count);
		if (error != 0)
			panic("vmcs_set_msr_save error %d", error);

		/*
		 * Set up the CR0/4 shadows, and init the read shadow
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(vmcs, 0);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);

		vmx->ctx[i].pmap = pmap;
		vmx->ctx[i].eptp = vmx->eptp;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
	    (uint32_t*)(&vmxctx->guest_rax),
	    (uint32_t*)(&vmxctx->guest_rbx),
	    (uint32_t*)(&vmxctx->guest_rcx),
	    (uint32_t*)(&vmxctx->guest_rdx));
	return (handled);
}
"handled" : "unhandled", 946 exit_reason_to_str(exit_reason), rip); 947#endif 948} 949 950static __inline void 951vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 952{ 953#ifdef KTR 954 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 955#endif 956} 957 958static void 959vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu) 960{ 961 int lastcpu; 962 struct vmxstate *vmxstate; 963 struct invvpid_desc invvpid_desc = { 0 }; 964 965 vmxstate = &vmx->state[vcpu]; 966 lastcpu = vmxstate->lastcpu; 967 vmxstate->lastcpu = curcpu; 968 969 if (lastcpu == curcpu) 970 return; 971 972 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 973 974 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 975 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 976 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 977 978 /* 979 * If we are using VPIDs then invalidate all mappings tagged with 'vpid' 980 * 981 * We do this because this vcpu was executing on a different host 982 * cpu when it last ran. We do not track whether it invalidated 983 * mappings associated with its 'vpid' during that run. So we must 984 * assume that the mappings associated with 'vpid' on 'curcpu' are 985 * stale and invalidate them. 986 * 987 * Note that we incur this penalty only when the scheduler chooses to 988 * move the thread associated with this vcpu between host cpus. 989 * 990 * Note also that this will invalidate mappings tagged with 'vpid' 991 * for "all" EP4TAs. 992 */ 993 if (vmxstate->vpid != 0) { 994 invvpid_desc.vpid = vmxstate->vpid; 995 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 996 } 997} 998 999/* 1000 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1001 */ 1002CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1003 1004static void __inline 1005vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1006{ 1007 1008 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1009 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1010 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1011 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1012 } 1013} 1014 1015static void __inline 1016vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1017{ 1018 1019 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1020 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls)); 1021 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1022 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1023 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1024} 1025 1026static void __inline 1027vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1028{ 1029 1030 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1031 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1032 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1033 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1034 } 1035} 1036 1037static void __inline 1038vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1039{ 1040 1041 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1042 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls)); 1043 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1044 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1045 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1046} 1047 1048#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1049 
#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |	\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |	\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	uint32_t gi, info;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
	    "interruptibility-state %#x", gi));

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
	    "VM-entry interruption information %#x", info));

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector, need_nmi_exiting;
	uint64_t rflags;
	uint32_t gi, info;

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * We also check for STI_BLOCKING because some implementations
		 * don't allow NMI injection in this case. If we are running
		 * on a processor that doesn't have this restriction it will
		 * immediately exit and the NMI will be injected in the
		 * "NMI window exiting" handler.
		 */
		need_nmi_exiting = 1;
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			info = vmcs_read(VMCS_ENTRY_INTR_INFO);
			if ((info & VMCS_INTR_VALID) == 0) {
				vmx_inject_nmi(vmx, vcpu);
				need_nmi_exiting = 0;
			} else {
				VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
				    "due to VM-entry intr info %#x", info);
			}
		} else {
			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
			    "Guest Interruptibility-state %#x", gi);
		}

		if (need_nmi_exiting)
			vmx_set_nmi_window_exiting(vmx, vcpu);
	}

	if (virtual_interrupt_delivery) {
		vmx_inject_pir(vlapic);
		return;
	}

	/*
	 * If there is already an interrupt pending then just return. This
	 * could happen for multiple reasons:
	 * - A vectoring VM-entry was aborted due to astpending or rendezvous.
	 * - A VM-exit happened during event injection.
	 * - An NMI was injected above or after "NMI window exiting" VM-exit.
	 */
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if (info & VMCS_INTR_VALID)
		return;

	/* Ask the local apic for a vector to inject */
	if (!vlapic_pending_intr(vlapic, &vector))
		return;

	KASSERT(vector >= 32 && vector <= 255, ("invalid vector %d", vector));

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0)
		goto cantinject;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (gi & HWINTR_BLOCKING)
		goto cantinject;

	/* Inject the interrupt */
	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as the blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS. An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}
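
/*
 * Emulate a guest 'mov to cr0/cr4'.  The source register is decoded from
 * bits 11:8 of the exit qualification.  The unmodified value is stored in
 * the CR read shadow while the value actually loaded into the CR has the
 * fixed bits applied.
 */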
static int
vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	int cr, vmcs_guest_cr, vmcs_shadow_cr;
	uint64_t crval, regval, ones_mask, zeros_mask;
	const struct vmxctx *vmxctx;

	/* We only handle mov to %cr0 or %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	cr = exitqual & 0xf;
	if (cr != 0 && cr != 4)
		return (UNHANDLED);

	regval = 0; /* silence gcc */
	vmxctx = &vmx->ctx[vcpu];

	/*
	 * We must use vmcs_write() directly here because vmcs_setreg() will
	 * call vmclear(vmcs) as a side-effect which we certainly don't want.
	 */
	switch ((exitqual >> 8) & 0xf) {
	case 0:
		regval = vmxctx->guest_rax;
		break;
	case 1:
		regval = vmxctx->guest_rcx;
		break;
	case 2:
		regval = vmxctx->guest_rdx;
		break;
	case 3:
		regval = vmxctx->guest_rbx;
		break;
	case 4:
		regval = vmcs_read(VMCS_GUEST_RSP);
		break;
	case 5:
		regval = vmxctx->guest_rbp;
		break;
	case 6:
		regval = vmxctx->guest_rsi;
		break;
	case 7:
		regval = vmxctx->guest_rdi;
		break;
	case 8:
		regval = vmxctx->guest_r8;
		break;
	case 9:
		regval = vmxctx->guest_r9;
		break;
	case 10:
		regval = vmxctx->guest_r10;
		break;
	case 11:
		regval = vmxctx->guest_r11;
		break;
	case 12:
		regval = vmxctx->guest_r12;
		break;
	case 13:
		regval = vmxctx->guest_r13;
		break;
	case 14:
		regval = vmxctx->guest_r14;
		break;
	case 15:
		regval = vmxctx->guest_r15;
		break;
	}

	if (cr == 0) {
		ones_mask = cr0_ones_mask;
		zeros_mask = cr0_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR0;
		vmcs_shadow_cr = VMCS_CR0_SHADOW;
	} else {
		ones_mask = cr4_ones_mask;
		zeros_mask = cr4_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR4;
		vmcs_shadow_cr = VMCS_CR4_SHADOW;
	}
	vmcs_write(vmcs_shadow_cr, regval);

	crval = regval | ones_mask;
	crval &= ~zeros_mask;
	vmcs_write(vmcs_guest_cr, crval);

	if (cr == 0 && regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}
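
/*
 * An EPT violation is a candidate for instruction emulation only if it was
 * a data read or write through a guest-linear address; instruction fetches
 * and faults without a valid linear translation are handled elsewhere.
 */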
static boolean_t
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (FALSE);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (FALSE);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (FALSE);
	}

	return (TRUE);
}

static int
vmx_handle_apic_write(struct vlapic *vlapic, uint64_t qual)
{
	int error, handled, offset;
	bool retu;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	handled = 1;
	offset = APIC_WRITE_OFFSET(qual);
	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		retu = false;
		error = vlapic_icrlo_write_handler(vlapic, &retu);
		if (error != 0 || retu)
			handled = 0;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		handled = 0;
		break;
	}
	return (handled);
}

static bool
apic_access_fault(uint64_t gpa)
{

	if (virtual_interrupt_delivery &&
	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
		return (true);
	else
		return (false);
}

static int
vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint64_t qual;
	int access_type, offset, allowed;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	qual = vmexit->u.vmx.exit_qualification;
	access_type = APIC_ACCESS_TYPE(qual);
	offset = APIC_ACCESS_OFFSET(qual);

	allowed = 0;
	if (access_type == 0) {
		/*
		 * Read data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	} else if (access_type == 1) {
		/*
		 * Write data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_VER:
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	}

	if (allowed) {
		vmexit->exitcode = VM_EXITCODE_INST_EMUL;
		vmexit->u.inst_emul.gpa = DEFAULT_APIC_BASE + offset;
		vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
		vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
	}

	/*
	 * Regardless of whether the APIC-access is allowed this handler
	 * always returns UNHANDLED:
	 * - if the access is allowed then it is handled by emulating the
	 *   instruction that caused the VM-exit (outside the critical section)
	 * - if the access is not allowed then it will be converted to an
	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
	 */
	return (UNHANDLED);
}
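
/*
 * Decode a hardware VM-exit into the 'vmexit' structure.  Returns HANDLED
 * (1) if the exit was serviced entirely in the kernel and guest execution
 * can continue, or UNHANDLED (0) if the exit must be completed in userland.
 */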
static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	int error, handled;
	struct vmxctx *vmxctx;
	struct vlapic *vlapic;
	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, reason;
	uint64_t qual, gpa;
	bool retu;

	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);

	handled = 0;
	vmxctx = &vmx->ctx[vcpu];

	qual = vmexit->u.vmx.exit_qualification;
	reason = vmexit->u.vmx.exit_reason;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * VM exits that could be triggered during event injection on the
	 * previous VM entry need to be handled specially by re-injecting
	 * the event.
	 *
	 * See "Information for VM Exits During Event Delivery" in Intel SDM
	 * for details.
	 */
	switch (reason) {
	case EXIT_REASON_EPT_FAULT:
	case EXIT_REASON_EPT_MISCONFIG:
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_TASK_SWITCH:
	case EXIT_REASON_EXCEPTION:
		idtvec_info = vmcs_idt_vectoring_info();
		if (idtvec_info & VMCS_IDT_VEC_VALID) {
			idtvec_info &= ~(1 << 12); /* clear undefined bit */
			vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
			if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
				idtvec_err = vmcs_idt_vectoring_err();
				vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
				    idtvec_err);
			}
			/*
			 * If 'virtual NMIs' are being used and the VM-exit
			 * happened while injecting an NMI during the previous
			 * VM-entry, then clear "blocking by NMI" in the Guest
			 * Interruptibility-state.
			 */
			if ((idtvec_info & VMCS_INTR_T_MASK) ==
			    VMCS_INTR_T_NMI) {
				vmx_clear_nmi_blocking(vmx, vcpu);
			}
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
		}
		break;
	default:
		idtvec_info = 0;
		break;
	}

	switch (reason) {
	case EXIT_REASON_CR_ACCESS:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
		break;
	case EXIT_REASON_RDMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
		retu = false;
		ecx = vmxctx->guest_rcx;
		error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
		} else if (!retu) {
			handled = 1;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_rdmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_WRMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
		retu = false;
		eax = vmxctx->guest_rax;
		ecx = vmxctx->guest_rcx;
		edx = vmxctx->guest_rdx;
		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
		    (uint64_t)edx << 32 | eax, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
		} else if (!retu) {
			handled = 1;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_wrmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_HLT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
		break;
	case EXIT_REASON_MTF:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		break;
	case EXIT_REASON_PAUSE:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		break;
	case EXIT_REASON_INTR_WINDOW:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
		vmx_clear_int_window_exiting(vmx, vcpu);
		return (1);
	case EXIT_REASON_EXT_INTR:
		/*
		 * External interrupts serve only to cause VM exits and allow
		 * the host interrupt handler to run.
		 *
		 * If this external interrupt triggers a virtual interrupt
		 * to a VM, then that state will be recorded by the
		 * host interrupt handler in the VM's softc. We will inject
		 * this virtual interrupt during the subsequent VM enter.
		 */
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
		    ("VM exit interruption info invalid: %#x", intr_info));
		vmx_trigger_hostintr(intr_info & 0xff);

		/*
		 * This is special. We want to treat this as a 'handled'
		 * VM-exit but not increment the instruction pointer.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
		return (1);
	case EXIT_REASON_NMI_WINDOW:
		/* Exit to allow the pending virtual NMI to be injected */
		if (vm_nmi_pending(vmx->vm, vcpu))
			vmx_inject_nmi(vmx, vcpu);
		vmx_clear_nmi_window_exiting(vmx, vcpu);
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
		return (1);
	case EXIT_REASON_INOUT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
		vmexit->exitcode = VM_EXITCODE_INOUT;
		vmexit->u.inout.bytes = (qual & 0x7) + 1;
		vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
		vmexit->u.inout.port = (uint16_t)(qual >> 16);
		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
		break;
	case EXIT_REASON_CPUID:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
		break;
	case EXIT_REASON_EXCEPTION:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
		    ("VM exit interruption info invalid: %#x", intr_info));

		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to a
		 * fault encountered during the execution of IRET then we must
		 * restore the state of "virtual-NMI blocking" before resuming
		 * the guest.
		 *
		 * See "Resuming Guest Software after Handling an Exception".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (intr_info & 0xff) != IDT_DF &&
		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);

		/*
		 * If the NMI-exiting VM execution control is set to '1'
		 * then an NMI in non-root operation causes a VM-exit.
		 * NMI blocking is in effect for this logical processor so
		 * it is sufficient to simply vector to the NMI handler via
		 * a software interrupt.
		 */
		if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
			KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
			    "to NMI has invalid vector: %#x", intr_info));
			VCPU_CTR0(vmx->vm, vcpu, "Vectoring to NMI handler");
			__asm __volatile("int $2");
			return (1);
		}
		break;
	case EXIT_REASON_EPT_FAULT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
		/*
		 * If 'gpa' lies within the address space allocated to
		 * memory then this must be a nested page fault otherwise
		 * this must be an instruction that accesses MMIO space.
		 */
		gpa = vmcs_gpa();
		if (vm_mem_allocated(vmx->vm, gpa) || apic_access_fault(gpa)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = gpa;
			vmexit->u.paging.fault_type = ept_fault_type(qual);
		} else if (ept_emulation_fault(qual)) {
			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
			vmexit->u.inst_emul.gpa = gpa;
			vmexit->u.inst_emul.gla = vmcs_gla();
			vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
		}
		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to an
		 * EPT fault during the execution of IRET then we must restore
		 * the state of "virtual-NMI blocking" before resuming.
		 *
		 * See description of "NMI unblocking due to IRET" in
		 * "Exit Qualification for EPT Violations".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (qual & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);
		break;
	case EXIT_REASON_APIC_ACCESS:
		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_APIC_WRITE:
		/*
		 * APIC-write VM exit is trap-like so the %rip is already
		 * pointing to the next instruction.
		 */
		vmexit->inst_length = 0;
		vlapic = vm_lapic(vmx->vm, vcpu);
		handled = vmx_handle_apic_write(vlapic, qual);
		break;
	default:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	if (handled) {
		/*
		 * It is possible that control is returned to userland
		 * even though we were able to handle the VM exit in the
		 * kernel.
		 *
		 * In such a case we want to make sure that the userland
		 * restarts guest execution at the instruction *after*
		 * the one we just processed. Therefore we update the
		 * guest rip in the VMCS and in 'vmexit'.
		 */
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic VMX exit.
			 */
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.status = VM_SUCCESS;
			vmexit->u.vmx.inst_type = 0;
			vmexit->u.vmx.inst_error = 0;
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static __inline int
vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmx_astpending_trace(vmx, vcpu, vmexit->rip);
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);

	return (HANDLED);
}

static __inline int
vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1);

	return (UNHANDLED);
}

static __inline int
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
{

	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
	    vmxctx->inst_fail_status));

	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = vmxctx->inst_fail_status;
	vmexit->u.vmx.inst_error = vmcs_instruction_error();
	vmexit->u.vmx.exit_reason = ~0;
	vmexit->u.vmx.exit_qualification = ~0;

	switch (rc) {
	case VMX_VMRESUME_ERROR:
	case VMX_VMLAUNCH_ERROR:
	case VMX_INVEPT_ERROR:
		vmexit->u.vmx.inst_type = rc;
		break;
	default:
		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
	}

	return (UNHANDLED);
}
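
/*
 * Run a vcpu until it exits to userland.  Interrupts are disabled from just
 * before event injection until the VM-entry completes so that a pending
 * AST, rendezvous request or posted interrupt forces an immediate VM-exit
 * instead of being lost.
 */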
static int
vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
    void *rendezvous_cookie)
{
	int rc, handled, launched;
	struct vmx *vmx;
	struct vm *vm;
	struct vmxctx *vmxctx;
	struct vmcs *vmcs;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint64_t rip;
	uint32_t exit_reason;

	vmx = arg;
	vm = vmx->vm;
	vmcs = &vmx->vmcs[vcpu];
	vmxctx = &vmx->ctx[vcpu];
	vlapic = vm_lapic(vm, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	launched = 0;

	KASSERT(vmxctx->pmap == pmap,
	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
	KASSERT(vmxctx->eptp == vmx->eptp,
	    ("eptp %#lx different than ctx eptp %#lx", vmx->eptp,
	    vmxctx->eptp));

	VMPTRLD(vmcs);

	/*
	 * XXX
	 * We do this every time because we may setup the virtual machine
	 * from a different process than the one that actually runs it.
	 *
	 * If the life of a virtual machine was spent entirely in the context
	 * of a single process we could do this once in vmx_vminit().
	 */
	vmcs_write(VMCS_HOST_CR3, rcr3());

	vmcs_write(VMCS_GUEST_RIP, startrip);
	vmx_set_pcpu_defaults(vmx, vcpu);
	do {
		/*
		 * Interrupts are disabled from this point on until the
		 * guest starts executing. This is done for the following
		 * reasons:
		 *
		 * If an AST is asserted on this thread after the check below,
		 * then the IPI_AST notification will not be lost, because it
		 * will cause a VM exit due to external interrupt as soon as
		 * the guest state is loaded.
		 *
		 * A posted interrupt after 'vmx_inject_interrupts()' will
		 * not be "lost" because it will be held pending in the host
		 * APIC because interrupts are disabled. The pending interrupt
		 * will be recognized as soon as the guest state is loaded.
		 *
		 * The same reasoning applies to the IPI generated by
		 * pmap_invalidate_ept().
		 */
		disable_intr();
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_intr();
			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
			break;
		}

		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
			enable_intr();
			handled = vmx_exit_rendezvous(vmx, vcpu, vmexit);
			break;
		}

		vmx_inject_interrupts(vmx, vcpu, vlapic);
		vmx_run_trace(vmx, vcpu);
		rc = vmx_enter_guest(vmxctx, launched);

		enable_intr();

		/* Collect some information for VM exit processing */
		vmexit->rip = rip = vmcs_guest_rip();
		vmexit->inst_length = vmexit_instruction_length();
		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();

		if (rc == VMX_GUEST_VMEXIT) {
			launched = 1;
			handled = vmx_exit_process(vmx, vcpu, vmexit);
		} else {
			handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
		}

		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
	} while (handled);

	/*
	 * If a VM exit has been handled then the exitcode must be BOGUS
	 * If a VM exit is not handled then the exitcode must not be BOGUS
	 */
	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
		panic("Mismatch between handled (%d) and exitcode (%d)",
		    handled, vmexit->exitcode);
	}

	if (!handled)
		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);

	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
	    vmexit->exitcode);

	VMCLEAR(vmcs);
	return (0);
}
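
/*
 * Undo vmx_vminit(): unmap the APIC-access page, release the per-vcpu
 * VPIDs and free the softc.
 */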

static void
vmx_vmcleanup(void *arg)
{
	int i, error;
	struct vmx *vmx = arg;

	if (virtual_interrupt_delivery)
		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);

	for (i = 0; i < VM_MAXCPU; i++)
		vpid_free(vmx->state[i].vpid);

	/*
	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
	 */
	error = vmclear(&vmx->vmcs[0]);
	if (error != 0)
		panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);

	free(vmx, M_VMX);

	return;
}

static register_t *
vmxctx_regptr(struct vmxctx *vmxctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RAX:
		return (&vmxctx->guest_rax);
	case VM_REG_GUEST_RBX:
		return (&vmxctx->guest_rbx);
	case VM_REG_GUEST_RCX:
		return (&vmxctx->guest_rcx);
	case VM_REG_GUEST_RDX:
		return (&vmxctx->guest_rdx);
	case VM_REG_GUEST_RSI:
		return (&vmxctx->guest_rsi);
	case VM_REG_GUEST_RDI:
		return (&vmxctx->guest_rdi);
	case VM_REG_GUEST_RBP:
		return (&vmxctx->guest_rbp);
	case VM_REG_GUEST_R8:
		return (&vmxctx->guest_r8);
	case VM_REG_GUEST_R9:
		return (&vmxctx->guest_r9);
	case VM_REG_GUEST_R10:
		return (&vmxctx->guest_r10);
	case VM_REG_GUEST_R11:
		return (&vmxctx->guest_r11);
	case VM_REG_GUEST_R12:
		return (&vmxctx->guest_r12);
	case VM_REG_GUEST_R13:
		return (&vmxctx->guest_r13);
	case VM_REG_GUEST_R14:
		return (&vmxctx->guest_r14);
	case VM_REG_GUEST_R15:
		return (&vmxctx->guest_r15);
	default:
		break;
	}
	return (NULL);
}

static int
vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*retval = *regp;
		return (0);
	} else
		return (EINVAL);
}

static int
vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*regp = val;
		return (0);
	} else
		return (EINVAL);
}

static int
vmx_shadow_reg(int reg)
{
	int shreg;

	shreg = -1;

	switch (reg) {
	case VM_REG_GUEST_CR0:
		shreg = VMCS_CR0_SHADOW;
		break;
	case VM_REG_GUEST_CR4:
		shreg = VMCS_CR4_SHADOW;
		break;
	default:
		break;
	}

	return (shreg);
}

static int
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
	int running, hostcpu;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
		return (0);

	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
}
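/*
 * Note that vmx_getreg() resolves a register in two steps: the guest's
 * general purpose registers are kept in the software-managed 'vmxctx' save
 * area and can be read directly, while everything else (e.g. RIP, RSP,
 * control and segment state) lives in the VMCS and must go through
 * vmcs_getreg(), i.e. VMREAD. vmx_setreg() below mirrors this split.
 */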
static int
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
	int error, hostcpu, running, shadow;
	uint64_t ctls;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
		return (0);

	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);

	if (error == 0) {
		/*
		 * If the "load EFER" VM-entry control is 1 then the
		 * value of EFER.LMA must be identical to the "IA-32e mode
		 * guest" bit in the VM-entry controls.
		 */
		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
		    (reg == VM_REG_GUEST_EFER)) {
			vmcs_getreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
			if (val & EFER_LMA)
				ctls |= VM_ENTRY_GUEST_LMA;
			else
				ctls &= ~VM_ENTRY_GUEST_LMA;
			vmcs_setreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
		}

		shadow = vmx_shadow_reg(reg);
		if (shadow > 0) {
			/*
			 * Store the unmodified value in the shadow register.
			 */
			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(shadow), val);
		}
	}

	return (error);
}

static int
vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
    int code_valid)
{
	int error;
	uint64_t info;
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];

	static uint32_t type_map[VM_EVENT_MAX] = {
		0x1,		/* VM_EVENT_NONE */
		0x0,		/* VM_HW_INTR */
		0x2,		/* VM_NMI */
		0x3,		/* VM_HW_EXCEPTION */
		0x4,		/* VM_SW_INTR */
		0x5,		/* VM_PRIV_SW_EXCEPTION */
		0x6,		/* VM_SW_EXCEPTION */
	};

	/*
	 * If there is already an event pending to be delivered to the
	 * vcpu then just return.
	 */
	error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
	if (error)
		return (error);

	if (info & VMCS_INTR_VALID)
		return (EAGAIN);

	info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
	info |= VMCS_INTR_VALID;
	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
	if (error != 0)
		return (error);

	if (code_valid) {
		error = vmcs_setreg(vmcs, 0,
		    VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
		    code);
	}
	return (error);
}
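/*
 * The interruption-information field constructed in vmx_inject() follows
 * the layout given in the Intel SDM ("VM-Entry Controls for Event
 * Injection"): bits 7:0 hold the vector, bits 10:8 the event type (hence
 * 'type_map[type] << 8'), bit 11 requests error code delivery, and bit 31
 * (VMCS_INTR_VALID) marks the field valid.
 */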
static int
vmx_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct vmx *vmx = arg;
	int vcap;
	int ret;

	ret = ENOENT;

	vcap = vmx->cap[vcpu].set;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit)
			ret = 0;
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit)
			ret = 0;
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap)
			ret = 0;
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest)
			ret = 0;
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid)
			ret = 0;
		break;
	default:
		break;
	}

	if (ret == 0)
		*retval = (vcap & (1 << type)) ? 1 : 0;

	return (ret);
}

static int
vmx_setcap(void *arg, int vcpu, int type, int val)
{
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];
	uint32_t baseval;
	uint32_t *pptr;
	int error;
	int flag;
	int reg;
	int retval;

	retval = ENOENT;
	pptr = NULL;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_HLT_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_MTF;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_PAUSE_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_UNRESTRICTED_GUEST;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_ENABLE_INVPCID;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	default:
		break;
	}

	if (retval == 0) {
		if (val) {
			baseval |= flag;
		} else {
			baseval &= ~flag;
		}
		VMPTRLD(vmcs);
		error = vmwrite(reg, baseval);
		VMCLEAR(vmcs);

		if (error) {
			retval = error;
		} else {
			/*
			 * Update optional stored flags, and record
			 * setting
			 */
			if (pptr != NULL) {
				*pptr = baseval;
			}

			if (val) {
				vmx->cap[vcpu].set |= (1 << type);
			} else {
				vmx->cap[vcpu].set &= ~(1 << type);
			}
		}
	}

	return (retval);
}

struct vlapic_vtx {
	struct vlapic	vlapic;
	struct pir_desc	*pir_desc;
};

#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
do {									\
	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
	    level ? "level" : "edge", vector);				\
	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
} while (0)
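/*
 * 'pir_desc' points at a posted interrupt descriptor: pir[0..3] form a
 * 256-bit vector with one bit per interrupt vector, and 'pending' serves
 * as the notification flag. The first updater to transition 'pending'
 * from 0 to 1 (see vmx_set_intr_ready() below) is responsible for
 * notifying the target vcpu.
 */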
/*
 * vlapic->ops handlers that utilize the APICv hardware assist described in
 * Chapter 29 of the Intel SDM.
 */
static int
vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	uint64_t mask;
	int idx, notify;

	/*
	 * XXX need to deal with level triggered interrupts
	 */
	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	/*
	 * Keep track of interrupt requests in the PIR descriptor. This is
	 * because the virtual APIC page pointed to by the VMCS cannot be
	 * modified if the vcpu is running.
	 */
	idx = vector / 64;
	mask = 1UL << (vector % 64);
	atomic_set_long(&pir_desc->pir[idx], mask);
	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);

	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
	    level, "vmx_set_intr_ready");
	return (notify);
}

static int
vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t pending, pirval;
	uint32_t ppr, vpr;
	int i;

	/*
	 * This function is only expected to be called from the 'HLT' exit
	 * handler which does not care about the vector that is pending.
	 */
	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	pending = atomic_load_acq_long(&pir_desc->pending);
	if (!pending)
		return (0);	/* common case */

	/*
	 * If there is an interrupt pending then it will be recognized only
	 * if its priority is greater than the processor priority.
	 *
	 * Special case: if the processor priority is zero then any pending
	 * interrupt will be recognized.
	 */
	lapic = vlapic->apic_page;
	ppr = lapic->ppr & 0xf0;
	if (ppr == 0)
		return (1);

	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
	    lapic->ppr);

	for (i = 3; i >= 0; i--) {
		pirval = pir_desc->pir[i];
		if (pirval != 0) {
			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
			return (vpr > ppr);
		}
	}
	return (0);
}

static void
vmx_intr_accepted(struct vlapic *vlapic, int vector)
{

	panic("vmx_intr_accepted: not expected to be called");
}

static void
vmx_post_intr(struct vlapic *vlapic, int hostcpu)
{

	ipi_cpu(hostcpu, pirvec);
}
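/*
 * vmx_post_intr() sends the posted interrupt notification vector to the
 * host cpu that the vcpu is running on. If that cpu is in VMX non-root
 * operation the processor delivers the posted interrupts to the guest
 * without a VM exit; otherwise the IPI is handled by the host and the
 * pending bits are picked up at the next VM entry via vmx_inject_pir().
 */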
/*
 * Transfer the pending interrupts in the PIR descriptor to the IRR
 * in the virtual APIC page.
 */
static void
vmx_inject_pir(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t val, pirval;
	int rvi, pirbase;
	uint16_t intr_status_old, intr_status_new;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;
	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
		    "no posted interrupt pending");
		return;
	}

	pirval = 0;
	pirbase = -1;	/* set together with 'pirval' in the blocks below */
	lapic = vlapic->apic_page;

	val = atomic_readandclear_long(&pir_desc->pir[0]);
	if (val != 0) {
		lapic->irr0 |= val;
		lapic->irr1 |= val >> 32;
		pirbase = 0;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[1]);
	if (val != 0) {
		lapic->irr2 |= val;
		lapic->irr3 |= val >> 32;
		pirbase = 64;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[2]);
	if (val != 0) {
		lapic->irr4 |= val;
		lapic->irr5 |= val >> 32;
		pirbase = 128;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[3]);
	if (val != 0) {
		lapic->irr6 |= val;
		lapic->irr7 |= val >> 32;
		pirbase = 192;
		pirval = val;
	}
	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");

	/*
	 * Update RVI so the processor can evaluate pending virtual
	 * interrupts on VM-entry.
	 */
	if (pirval != 0) {
		rvi = pirbase + flsl(pirval) - 1;
		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		intr_status_new = (intr_status_old & 0xFF00) | rvi;
		if (intr_status_new > intr_status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
			    "guest_intr_status changed from 0x%04x to 0x%04x",
			    intr_status_old, intr_status_new);
		}
	}
}

static struct vlapic *
vmx_vlapic_init(void *arg, int vcpuid)
{
	struct vmx *vmx;
	struct vlapic *vlapic;
	struct vlapic_vtx *vlapic_vtx;

	vmx = arg;

	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vmx->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];

	if (virtual_interrupt_delivery) {
		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
		vlapic->ops.pending_intr = vmx_pending_intr;
		vlapic->ops.intr_accepted = vmx_intr_accepted;
	}

	if (posted_interrupts)
		vlapic->ops.post_intr = vmx_post_intr;

	vlapic_init(vlapic);

	return (vlapic);
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_VLAPIC);
}

struct vmm_ops vmm_ops_intel = {
	vmx_init,
	vmx_cleanup,
	vmx_restore,
	vmx_vminit,
	vmx_run,
	vmx_vmcleanup,
	vmx_getreg,
	vmx_setreg,
	vmx_getdesc,
	vmx_setdesc,
	vmx_inject,
	vmx_getcap,
	vmx_setcap,
	ept_vmspace_alloc,
	ept_vmspace_free,
	vmx_vlapic_init,
	vmx_vlapic_cleanup,
};
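/*
 * The initializers above are positional and must stay in the same order as
 * the members of 'struct vmm_ops' (declared in <machine/vmm.h>); the
 * generic vmm layer dispatches into the Intel backend through this table.
 */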