vmx.c revision 260836
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.c 260836 2014-01-18 02:20:10Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/intel/vmx.c 260836 2014-01-18 02:20:10Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include "vmm_host.h"
#include "vmm_ipi.h"
#include "vmm_msr.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmx_msr.h"
#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING |		\
	 PINBASED_NMI_EXITING |			\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING |		\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS |		\
	 PROCBASED_IO_EXITING |			\
	 PROCBASED_MSR_BITMAPS |		\
	 PROCBASED_CTLS_WINDOW_SETTING)
#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING |		\
	 PROCBASED_CR3_STORE_EXITING |		\
	 PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING_NO_PAT		\
	(VM_EXIT_HOST_LMA |			\
	 VM_EXIT_SAVE_EFER |			\
	 VM_EXIT_LOAD_EFER)

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_EXIT_ACKNOWLEDGE_INTERRUPT |	\
	 VM_EXIT_SAVE_PAT |			\
	 VM_EXIT_LOAD_PAT)
#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_ENTRY_LOAD_PAT)
#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |		\
	 VM_ENTRY_INTO_SMM |			\
	 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	guest_msr_rw(vmx, msr) \
	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
    &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
    &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
    &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
    &cr4_zeros_mask, 0, NULL);

static int vmx_no_patmsr;

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
    &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Virtual NMI blocking conditions.
 *
 * Some processor implementations also require NMI to be blocked if
 * the STI_BLOCKING bit is set. It is possible to detect this at runtime
 * based on the (exit_reason,exit_qual) tuple being set to
 * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
 *
 * We take the easy way out and also include STI_BLOCKING as one of the
 * gating items for vNMI injection.
 */
static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
				    VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
				    VMCS_INTERRUPTIBILITY_STI_BLOCKING;

/*
 * Optional capabilities
 */
static int cap_halt_exit;
static int cap_pause_exit;
static int cap_unrestricted_guest;
static int cap_monitor_trap;
static int cap_invpcid;

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
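 * (Editorial note, not in the original: 0xFFFFF000 is the base of the last
 * 4KB page below the 4GB boundary; the firmware reset vector at 0xFFFFFFF0
 * lies in this page, so PC platforms do not map RAM over it.)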
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static void vmx_inject_pir(struct vlapic *vlapic);

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE:
		return "mce";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
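	 * (VPID 0 is reserved for the case where the "enable VPID" control
	 * is off, and [1,VM_MAXCPU] form the overflow namespace handed out
	 * by vpid_alloc(); see vpid_init() below.)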
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

static void
msr_save_area_init(struct msr_entry *g_area, int *g_count)
{
	int cnt;

	static struct msr_entry guest_msrs[] = {
		{ MSR_KGSBASE, 0, 0 },
	};

	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
	if (cnt > GUEST_MSR_MAX_ENTRIES)
		panic("guest msr save area overrun");
	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
	*g_count = cnt;
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures. This prevents potential retention of
		 * cached information in the TLB between distinct VMX episodes.
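		 * (Editorial note: the all-contexts INVVPID and INVEPT below
		 * flush mappings for every VPID and every EP4TA on this cpu,
		 * so no per-VM descriptors are needed here.)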
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec != 0)
		vmm_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}

static int
vmx_init(int ipinum)
{
	int error, use_tpr_shadow;
	uint64_t fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		    "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		/* Try again without the PAT MSR bits */
		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
		    MSR_VMX_TRUE_EXIT_CTLS,
		    VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
		    VM_EXIT_CTLS_ZERO_SETTING,
		    &exit_ctls);
		if (error) {
			printf("vmx_init: processor does not support desired "
			    "exit controls\n");
			return (error);
		} else {
			if (bootverbose)
				printf("vmm: PAT MSR access not supported\n");
			guest_msr_valid(MSR_PAT);
			vmx_no_patmsr = 1;
		}
	}

	/* Check support for VM-entry controls */
	if (!vmx_no_patmsr) {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
		    MSR_VMX_TRUE_ENTRY_CTLS,
		    VM_ENTRY_CTLS_ONE_SETTING,
		    VM_ENTRY_CTLS_ZERO_SETTING,
		    &entry_ctls);
	} else {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
		    MSR_VMX_TRUE_ENTRY_CTLS,
		    VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
		    VM_ENTRY_CTLS_ZERO_SETTING,
		    &entry_ctls);
	}

	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_HLT_EXITING, 0,
	    &tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_UNRESTRICTED_GUEST, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check support for virtual interrupt delivery.
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp) == 0);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && use_tpr_shadow) {
		virtual_interrupt_delivery = 1;
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
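		 * (Posted interrupts also need a host notification vector;
		 * vmm_ipi_alloc() below reserves one so a running vcpu can
		 * be notified of a pending posted interrupt.)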
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = vmm_ipi_alloc();
			if (pirvec == 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, guest_msr_count;
	struct vmx *vmx;
	struct vmcs *vmcs;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so they can be directly accessed by
	 * the guest.
	 *
	 * Guest KGSBASE is saved and restored in the guest MSR save area.
	 * Host KGSBASE is restored before returning to userland from the pcb.
	 * There will be a window of time when we are executing in the host
	 * kernel context with a value of KGSBASE from the guest. This is ok
	 * because the value of KGSBASE is inconsequential in kernel context.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_KGSBASE) ||
	    guest_msr_rw(vmx, MSR_EFER))
		panic("vmx_vminit: error setting guest msr access");

	/*
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit. However, if running on a system with no
	 * MSR_PAT save/restore support, leave access disabled so accesses
	 * will be trapped.
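	 * (vmx_no_patmsr is set in vmx_init() when the VM-exit/VM-entry
	 * controls do not accept the PAT save/load bits.)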
	 */
	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
		panic("vmx_vminit: error setting guest pat msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			    error, i);
		}

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);
		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
		vmx->cap[i].proc_ctls2 = procbased_ctls2;

		vmx->state[i].lastcpu = -1;
		vmx->state[i].vpid = vpid[i];

		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);

		error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]),
		    guest_msr_count);
		if (error != 0)
			panic("vmcs_set_msr_save error %d", error);

		/*
		 * Set up the CR0/4 shadows, and init the read shadow
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(vmcs, 0);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);

		vmx->ctx[i].pmap = pmap;
		vmx->ctx[i].eptp = vmx->eptp;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
	    (uint32_t *)(&vmxctx->guest_rax),
	    (uint32_t *)(&vmxctx->guest_rbx),
	    (uint32_t *)(&vmxctx->guest_rcx),
	    (uint32_t *)(&vmxctx->guest_rdx));
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
    int handled)
{
#ifdef KTR
	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
	    handled ? "handled" : "unhandled",
"handled" : "unhandled", 961 exit_reason_to_str(exit_reason), rip); 962#endif 963} 964 965static __inline void 966vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 967{ 968#ifdef KTR 969 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 970#endif 971} 972 973static void 974vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu) 975{ 976 int lastcpu; 977 struct vmxstate *vmxstate; 978 struct invvpid_desc invvpid_desc = { 0 }; 979 980 vmxstate = &vmx->state[vcpu]; 981 lastcpu = vmxstate->lastcpu; 982 vmxstate->lastcpu = curcpu; 983 984 if (lastcpu == curcpu) 985 return; 986 987 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 988 989 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 990 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 991 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 992 993 /* 994 * If we are using VPIDs then invalidate all mappings tagged with 'vpid' 995 * 996 * We do this because this vcpu was executing on a different host 997 * cpu when it last ran. We do not track whether it invalidated 998 * mappings associated with its 'vpid' during that run. So we must 999 * assume that the mappings associated with 'vpid' on 'curcpu' are 1000 * stale and invalidate them. 1001 * 1002 * Note that we incur this penalty only when the scheduler chooses to 1003 * move the thread associated with this vcpu between host cpus. 1004 * 1005 * Note also that this will invalidate mappings tagged with 'vpid' 1006 * for "all" EP4TAs. 1007 */ 1008 if (vmxstate->vpid != 0) { 1009 invvpid_desc.vpid = vmxstate->vpid; 1010 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1011 } 1012} 1013 1014/* 1015 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 1016 */ 1017CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1018 1019static void __inline 1020vmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1021{ 1022 1023 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1024 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1025} 1026 1027static void __inline 1028vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1029{ 1030 1031 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1032 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1033} 1034 1035static void __inline 1036vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1037{ 1038 1039 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1040 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1041} 1042 1043static void __inline 1044vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1045{ 1046 1047 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1048 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1049} 1050 1051static int 1052vmx_inject_nmi(struct vmx *vmx, int vcpu) 1053{ 1054 uint64_t info, interruptibility; 1055 1056 /* Bail out if no NMI requested */ 1057 if (!vm_nmi_pending(vmx->vm, vcpu)) 1058 return (0); 1059 1060 interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1061 if (interruptibility & nmi_blocking_bits) 1062 goto nmiblocked; 1063 1064 /* 1065 * Inject the virtual NMI. The vector must be the NMI IDT entry 1066 * or the VMCS entry check will fail. 
	 */
	info = VMCS_INTR_T_NMI | VMCS_INTR_VALID;
	info |= IDT_NMI;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
	return (1);

nmiblocked:
	/*
	 * Set the NMI Window Exiting execution control so we can inject
	 * the virtual NMI as soon as the blocking condition goes away.
	 */
	vmx_set_nmi_window_exiting(vmx, vcpu);

	VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	return (1);
}

static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;
	uint64_t info, rflags, interruptibility;

	const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
	    VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;

	/*
	 * If there is already an interrupt pending then just return.
	 *
	 * This could happen if an interrupt was injected on a prior
	 * VM entry but the actual entry into guest mode was aborted
	 * because of a pending AST.
	 */
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if (info & VMCS_INTR_VALID)
		return;

	/*
	 * NMI injection has priority so deal with those first
	 */
	if (vmx_inject_nmi(vmx, vcpu))
		return;

	if (virtual_interrupt_delivery) {
		vmx_inject_pir(vlapic);
		return;
	}

	/* Ask the local apic for a vector to inject */
	if (!vlapic_pending_intr(vlapic, &vector))
		return;

	if (vector < 32 || vector > 255)
		panic("vmx_inject_interrupts: invalid vector %d\n", vector);

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0)
		goto cantinject;

	interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (interruptibility & HWINTR_BLOCKED)
		goto cantinject;

	/* Inject the interrupt */
	info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as the blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);

	VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS. An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault. In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
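 *
 * (vmx_restore_nmi_blocking() below re-sets the NMI-blocking bit in the
 * guest interruptibility-state for exactly this case, and
 * vmx_clear_nmi_blocking() removes it when an NMI is to be re-injected.)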
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static int
vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	int cr, vmcs_guest_cr, vmcs_shadow_cr;
	uint64_t crval, regval, ones_mask, zeros_mask;
	const struct vmxctx *vmxctx;

	/* We only handle mov to %cr0 or %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	cr = exitqual & 0xf;
	if (cr != 0 && cr != 4)
		return (UNHANDLED);

	regval = 0; /* silence gcc */
	vmxctx = &vmx->ctx[vcpu];

	/*
	 * We must use vmcs_write() directly here because vmcs_setreg() will
	 * call vmclear(vmcs) as a side-effect which we certainly don't want.
	 */
	switch ((exitqual >> 8) & 0xf) {
	case 0:
		regval = vmxctx->guest_rax;
		break;
	case 1:
		regval = vmxctx->guest_rcx;
		break;
	case 2:
		regval = vmxctx->guest_rdx;
		break;
	case 3:
		regval = vmxctx->guest_rbx;
		break;
	case 4:
		regval = vmcs_read(VMCS_GUEST_RSP);
		break;
	case 5:
		regval = vmxctx->guest_rbp;
		break;
	case 6:
		regval = vmxctx->guest_rsi;
		break;
	case 7:
		regval = vmxctx->guest_rdi;
		break;
	case 8:
		regval = vmxctx->guest_r8;
		break;
	case 9:
		regval = vmxctx->guest_r9;
		break;
	case 10:
		regval = vmxctx->guest_r10;
		break;
	case 11:
		regval = vmxctx->guest_r11;
		break;
	case 12:
		regval = vmxctx->guest_r12;
		break;
	case 13:
		regval = vmxctx->guest_r13;
		break;
	case 14:
		regval = vmxctx->guest_r14;
		break;
	case 15:
		regval = vmxctx->guest_r15;
		break;
	}

	if (cr == 0) {
		ones_mask = cr0_ones_mask;
		zeros_mask = cr0_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR0;
		vmcs_shadow_cr = VMCS_CR0_SHADOW;
	} else {
		ones_mask = cr4_ones_mask;
		zeros_mask = cr4_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR4;
		vmcs_shadow_cr = VMCS_CR4_SHADOW;
	}
	vmcs_write(vmcs_shadow_cr, regval);

	crval = regval | ones_mask;
	crval &= ~zeros_mask;
	vmcs_write(vmcs_guest_cr, crval);

	if (cr == 0 && regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
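		 * (VM entry fails its consistency checks otherwise, so the
		 * code below keeps EFER.LMA and VM_ENTRY_GUEST_LMA in sync
		 * when the guest enables paging with EFER.LME set.)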
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}

static boolean_t
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (FALSE);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (FALSE);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (FALSE);
	}

	return (TRUE);
}

static int
vmx_handle_apic_write(struct vlapic *vlapic, uint64_t qual)
{
	int error, handled, offset;
	bool retu;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	handled = 1;
	offset = APIC_WRITE_OFFSET(qual);
	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		retu = false;
		error = vlapic_icrlo_write_handler(vlapic, &retu);
		if (error != 0 || retu)
			handled = 0;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		handled = 0;
		break;
	}
	return (handled);
}

static bool
apic_access_fault(uint64_t gpa)
{

	if (virtual_interrupt_delivery &&
	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
		return (true);
	else
		return (false);
}

static int
vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint64_t qual;
	int access_type, offset, allowed;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	qual = vmexit->u.vmx.exit_qualification;
	access_type = APIC_ACCESS_TYPE(qual);
	offset = APIC_ACCESS_OFFSET(qual);

	allowed = 0;
	if (access_type == 0) {
		/*
		 * Read data access to the following registers is expected.
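		 * (Allowed accesses are forwarded as VM_EXITCODE_INST_EMUL
		 * so the faulting instruction can be emulated against the
		 * virtual APIC page; see the handling of 'allowed' below.)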
		 */
		switch (offset) {
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	} else if (access_type == 1) {
		/*
		 * Write data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_VER:
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	}

	if (allowed) {
		vmexit->exitcode = VM_EXITCODE_INST_EMUL;
		vmexit->u.inst_emul.gpa = DEFAULT_APIC_BASE + offset;
		vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
		vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
	}

	/*
	 * Regardless of whether the APIC-access is allowed this handler
	 * always returns UNHANDLED:
	 * - if the access is allowed then it is handled by emulating the
	 *   instruction that caused the VM-exit (outside the critical section)
	 * - if the access is not allowed then it will be converted to an
	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
	 */
	return (UNHANDLED);
}

static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	int error, handled;
	struct vmxctx *vmxctx;
	struct vlapic *vlapic;
	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, reason;
	uint64_t qual, gpa;
	bool retu;

	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);

	handled = 0;
	vmxctx = &vmx->ctx[vcpu];

	qual = vmexit->u.vmx.exit_qualification;
	reason = vmexit->u.vmx.exit_reason;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * VM exits that could be triggered during event injection on the
	 * previous VM entry need to be handled specially by re-injecting
	 * the event.
	 *
	 * See "Information for VM Exits During Event Delivery" in Intel SDM
	 * for details.
	 */
	switch (reason) {
	case EXIT_REASON_EPT_FAULT:
	case EXIT_REASON_EPT_MISCONFIG:
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_TASK_SWITCH:
	case EXIT_REASON_EXCEPTION:
		idtvec_info = vmcs_idt_vectoring_info();
		if (idtvec_info & VMCS_IDT_VEC_VALID) {
			idtvec_info &= ~(1 << 12); /* clear undefined bit */
			vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
			if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
				idtvec_err = vmcs_idt_vectoring_err();
				vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
				    idtvec_err);
			}
			/*
			 * If 'virtual NMIs' are being used and the VM-exit
			 * happened while injecting an NMI during the previous
			 * VM-entry, then clear "blocking by NMI" in the Guest
			 * Interruptibility-state.
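			 * (The NMI was never actually delivered; undoing its
			 * blocking side effect lets it be re-injected on the
			 * next VM entry.)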
			 */
			if ((idtvec_info & VMCS_INTR_T_MASK) ==
			    VMCS_INTR_T_NMI) {
				vmx_clear_nmi_blocking(vmx, vcpu);
			}
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
		}
		break;
	default:
		idtvec_info = 0;
		break;
	}

	switch (reason) {
	case EXIT_REASON_CR_ACCESS:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
		break;
	case EXIT_REASON_RDMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
		retu = false;
		ecx = vmxctx->guest_rcx;
		error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
		} else if (!retu) {
			handled = 1;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_rdmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_WRMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
		retu = false;
		eax = vmxctx->guest_rax;
		ecx = vmxctx->guest_rcx;
		edx = vmxctx->guest_rdx;
		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
		    (uint64_t)edx << 32 | eax, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
		} else if (!retu) {
			handled = 1;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_wrmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_HLT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
		break;
	case EXIT_REASON_MTF:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		break;
	case EXIT_REASON_PAUSE:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		break;
	case EXIT_REASON_INTR_WINDOW:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
		vmx_clear_int_window_exiting(vmx, vcpu);
		VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
		return (1);
	case EXIT_REASON_EXT_INTR:
		/*
		 * External interrupts serve only to cause VM exits and allow
		 * the host interrupt handler to run.
		 *
		 * If this external interrupt triggers a virtual interrupt
		 * to a VM, then that state will be recorded by the
		 * host interrupt handler in the VM's softc. We will inject
		 * this virtual interrupt during the subsequent VM enter.
		 */
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
		    ("VM exit interruption info invalid: %#x", intr_info));
		vmx_trigger_hostintr(intr_info & 0xff);

		/*
		 * This is special. We want to treat this as a 'handled'
		 * VM-exit but not increment the instruction pointer.
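		 * (Returning here skips the %rip adjustment that handled
		 * exits receive at the bottom of this function.)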
		 */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
		return (1);
	case EXIT_REASON_NMI_WINDOW:
		/* Exit to allow the pending virtual NMI to be injected */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
		vmx_clear_nmi_window_exiting(vmx, vcpu);
		VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
		return (1);
	case EXIT_REASON_INOUT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
		vmexit->exitcode = VM_EXITCODE_INOUT;
		vmexit->u.inout.bytes = (qual & 0x7) + 1;
		vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
		vmexit->u.inout.port = (uint16_t)(qual >> 16);
		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
		break;
	case EXIT_REASON_CPUID:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
		break;
	case EXIT_REASON_EXCEPTION:
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
		    ("VM exit interruption info invalid: %#x", intr_info));
		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to a
		 * fault encountered during the execution of IRET then we must
		 * restore the state of "virtual-NMI blocking" before resuming
		 * the guest.
		 *
		 * See "Resuming Guest Software after Handling an Exception".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (intr_info & 0xff) != IDT_DF &&
		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);
		break;
	case EXIT_REASON_EPT_FAULT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
		/*
		 * If 'gpa' lies within the address space allocated to
		 * memory then this must be a nested page fault otherwise
		 * this must be an instruction that accesses MMIO space.
		 */
		gpa = vmcs_gpa();
		if (vm_mem_allocated(vmx->vm, gpa) || apic_access_fault(gpa)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = gpa;
			vmexit->u.paging.fault_type = ept_fault_type(qual);
		} else if (ept_emulation_fault(qual)) {
			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
			vmexit->u.inst_emul.gpa = gpa;
			vmexit->u.inst_emul.gla = vmcs_gla();
			vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
		}
		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to an
		 * EPT fault during the execution of IRET then we must restore
		 * the state of "virtual-NMI blocking" before resuming.
		 *
		 * See description of "NMI unblocking due to IRET" in
		 * "Exit Qualification for EPT Violations".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (qual & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);
		break;
	case EXIT_REASON_APIC_ACCESS:
		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_APIC_WRITE:
		/*
		 * APIC-write VM exit is trap-like so the %rip is already
		 * pointing to the next instruction.
		 */
		vmexit->inst_length = 0;
		vlapic = vm_lapic(vmx->vm, vcpu);
		handled = vmx_handle_apic_write(vlapic, qual);
		break;
	default:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	if (handled) {
		/*
		 * It is possible that control is returned to userland
		 * even though we were able to handle the VM exit in the
		 * kernel.
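		 * (This happens, for example, when a pending AST causes
		 * vmx_run() to break out of the run loop before the guest
		 * is re-entered.)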
		 *
		 * In such a case we want to make sure that the userland
		 * restarts guest execution at the instruction *after*
		 * the one we just processed. Therefore we update the
		 * guest rip in the VMCS and in 'vmexit'.
		 */
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic VMX exit.
			 */
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.status = VM_SUCCESS;
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static __inline int
vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmx_astpending_trace(vmx, vcpu, vmexit->rip);
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);

	return (HANDLED);
}

static __inline int
vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1);

	return (UNHANDLED);
}

static __inline int
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
{

	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
	    vmxctx->inst_fail_status));

	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = vmxctx->inst_fail_status;
	vmexit->u.vmx.inst_error = vmcs_instruction_error();
	vmexit->u.vmx.exit_reason = ~0;
	vmexit->u.vmx.exit_qualification = ~0;

	switch (rc) {
	case VMX_VMRESUME_ERROR:
	case VMX_VMLAUNCH_ERROR:
	case VMX_INVEPT_ERROR:
		vmexit->u.vmx.inst_type = rc;
		break;
	default:
		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
	}

	return (UNHANDLED);
}

static int
vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
    void *rendezvous_cookie)
{
	int rc, handled, launched;
	struct vmx *vmx;
	struct vm *vm;
	struct vmxctx *vmxctx;
	struct vmcs *vmcs;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint64_t rip;
	uint32_t exit_reason;

	vmx = arg;
	vm = vmx->vm;
	vmcs = &vmx->vmcs[vcpu];
	vmxctx = &vmx->ctx[vcpu];
	vlapic = vm_lapic(vm, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	launched = 0;

	KASSERT(vmxctx->pmap == pmap,
	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
	KASSERT(vmxctx->eptp == vmx->eptp,
	    ("eptp %#lx different than ctx eptp %#lx", vmx->eptp,
	    vmxctx->eptp));

	VMPTRLD(vmcs);

	/*
	 * XXX
	 * We do this every time because we may setup the virtual machine
	 * from a different process than the one that actually runs it.
	 *
	 * If the life of a virtual machine was spent entirely in the context
	 * of a single process we could do this once in vmx_vminit().
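	 * (VMCS_HOST_CR3 must match the page tables of whichever process
	 * performs the VM entry, hence the refresh on every vmx_run().)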
	 */
	vmcs_write(VMCS_HOST_CR3, rcr3());

	vmcs_write(VMCS_GUEST_RIP, startrip);
	vmx_set_pcpu_defaults(vmx, vcpu);
	do {
		/*
		 * Interrupts are disabled from this point on until the
		 * guest starts executing. This is done for the following
		 * reasons:
		 *
		 * If an AST is asserted on this thread after the check below,
		 * then the IPI_AST notification will not be lost, because it
		 * will cause a VM exit due to external interrupt as soon as
		 * the guest state is loaded.
		 *
		 * A posted interrupt after 'vmx_inject_interrupts()' will
		 * not be "lost" because it will be held pending in the host
		 * APIC because interrupts are disabled. The pending interrupt
		 * will be recognized as soon as the guest state is loaded.
		 *
		 * The same reasoning applies to the IPI generated by
		 * pmap_invalidate_ept().
		 */
		disable_intr();
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_intr();
			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
			break;
		}

		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
			enable_intr();
			handled = vmx_exit_rendezvous(vmx, vcpu, vmexit);
			break;
		}

		vmx_inject_interrupts(vmx, vcpu, vlapic);
		vmx_run_trace(vmx, vcpu);
		rc = vmx_enter_guest(vmxctx, launched);

		enable_intr();

		/* Collect some information for VM exit processing */
		vmexit->rip = rip = vmcs_guest_rip();
		vmexit->inst_length = vmexit_instruction_length();
		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();

		if (rc == VMX_GUEST_VMEXIT) {
			launched = 1;
			handled = vmx_exit_process(vmx, vcpu, vmexit);
		} else {
			handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
		}

		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
	} while (handled);

	/*
	 * If a VM exit has been handled then the exitcode must be BOGUS.
	 * If a VM exit is not handled then the exitcode must not be BOGUS.
	 */
	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
		panic("Mismatch between handled (%d) and exitcode (%d)",
		    handled, vmexit->exitcode);
	}

	if (!handled)
		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);

	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
	    vmexit->exitcode);

	VMCLEAR(vmcs);
	return (0);
}

static void
vmx_vmcleanup(void *arg)
{
	int i, error;
	struct vmx *vmx = arg;

	if (virtual_interrupt_delivery)
		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);

	for (i = 0; i < VM_MAXCPU; i++)
		vpid_free(vmx->state[i].vpid);

	/*
	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
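	 * (Only vmcs[0] is cleared below; the remaining per-vcpu VMCSs may
	 * still be active on other host cpus.)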
static void
vmx_vmcleanup(void *arg)
{
	int i, error;
	struct vmx *vmx = arg;

	if (virtual_interrupt_delivery)
		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);

	for (i = 0; i < VM_MAXCPU; i++)
		vpid_free(vmx->state[i].vpid);

	/*
	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
	 */
	error = vmclear(&vmx->vmcs[0]);
	if (error != 0)
		panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);

	free(vmx, M_VMX);

	return;
}

static register_t *
vmxctx_regptr(struct vmxctx *vmxctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RAX:
		return (&vmxctx->guest_rax);
	case VM_REG_GUEST_RBX:
		return (&vmxctx->guest_rbx);
	case VM_REG_GUEST_RCX:
		return (&vmxctx->guest_rcx);
	case VM_REG_GUEST_RDX:
		return (&vmxctx->guest_rdx);
	case VM_REG_GUEST_RSI:
		return (&vmxctx->guest_rsi);
	case VM_REG_GUEST_RDI:
		return (&vmxctx->guest_rdi);
	case VM_REG_GUEST_RBP:
		return (&vmxctx->guest_rbp);
	case VM_REG_GUEST_R8:
		return (&vmxctx->guest_r8);
	case VM_REG_GUEST_R9:
		return (&vmxctx->guest_r9);
	case VM_REG_GUEST_R10:
		return (&vmxctx->guest_r10);
	case VM_REG_GUEST_R11:
		return (&vmxctx->guest_r11);
	case VM_REG_GUEST_R12:
		return (&vmxctx->guest_r12);
	case VM_REG_GUEST_R13:
		return (&vmxctx->guest_r13);
	case VM_REG_GUEST_R14:
		return (&vmxctx->guest_r14);
	case VM_REG_GUEST_R15:
		return (&vmxctx->guest_r15);
	default:
		break;
	}
	return (NULL);
}

static int
vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*retval = *regp;
		return (0);
	} else
		return (EINVAL);
}

static int
vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*regp = val;
		return (0);
	} else
		return (EINVAL);
}

static int
vmx_shadow_reg(int reg)
{
	int shreg;

	shreg = -1;

	switch (reg) {
	case VM_REG_GUEST_CR0:
		shreg = VMCS_CR0_SHADOW;
		break;
	case VM_REG_GUEST_CR4:
		shreg = VMCS_CR4_SHADOW;
		break;
	default:
		break;
	}

	return (shreg);
}

static int
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
	int running, hostcpu;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
		return (0);

	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
}
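/*
 * Editor's note: illustrative sketch, not part of the driver.  It mirrors
 * the two-level register lookup above: registers saved by software live in
 * the context structure and are resolved through a pointer-returning switch
 * (vmxctx_regptr()); anything else falls back to the VMCS.  The 'toy_*'
 * names and the two-register "register file" are hypothetical.
 */
#if 0
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

enum { TOY_REG_RAX, TOY_REG_RBX };
struct toy_ctx { uint64_t rax, rbx; };

static uint64_t *
toy_regptr(struct toy_ctx *ctx, int reg)
{

	switch (reg) {
	case TOY_REG_RAX:
		return (&ctx->rax);
	case TOY_REG_RBX:
		return (&ctx->rbx);
	default:
		return (NULL);		/* not kept in the context */
	}
}

static int
toy_getreg(struct toy_ctx *ctx, int reg, uint64_t *val)
{
	uint64_t *regp;

	if ((regp = toy_regptr(ctx, reg)) == NULL)
		return (EINVAL);	/* caller falls back to the VMCS */
	*val = *regp;
	return (0);
}
#endif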
static int
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
	int error, hostcpu, running, shadow;
	uint64_t ctls;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
		return (0);

	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);

	if (error == 0) {
		/*
		 * If the "load EFER" VM-entry control is 1 then the value
		 * of EFER.LMA must be identical to the "IA-32e mode guest"
		 * bit in the VM-entry controls.
		 */
		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
		    (reg == VM_REG_GUEST_EFER)) {
			vmcs_getreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
			if (val & EFER_LMA)
				ctls |= VM_ENTRY_GUEST_LMA;
			else
				ctls &= ~VM_ENTRY_GUEST_LMA;
			vmcs_setreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
		}

		shadow = vmx_shadow_reg(reg);
		if (shadow > 0) {
			/*
			 * Store the unmodified value in the shadow register.
			 */
			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(shadow), val);
		}
	}

	return (error);
}

static int
vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
    int code_valid)
{
	int error;
	uint64_t info;
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];

	static uint32_t type_map[VM_EVENT_MAX] = {
		0x1,		/* VM_EVENT_NONE */
		0x0,		/* VM_HW_INTR */
		0x2,		/* VM_NMI */
		0x3,		/* VM_HW_EXCEPTION */
		0x4,		/* VM_SW_INTR */
		0x5,		/* VM_PRIV_SW_EXCEPTION */
		0x6,		/* VM_SW_EXCEPTION */
	};

	/*
	 * If there is already an exception pending to be delivered to the
	 * vcpu then just return.
	 */
	error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
	if (error)
		return (error);

	if (info & VMCS_INTR_VALID)
		return (EAGAIN);

	info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
	info |= VMCS_INTR_VALID;
	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
	if (error != 0)
		return (error);

	if (code_valid) {
		error = vmcs_setreg(vmcs, 0,
		    VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
		    code);
	}
	return (error);
}
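/*
 * Editor's note: illustrative sketch, not part of the driver.  It spells
 * out the bit layout assembled by vmx_inject() above for the VM-entry
 * interruption-information field: vector in bits 7:0, event type in bits
 * 10:8, "deliver error code" in bit 11 and "valid" in bit 31 (Intel SDM,
 * Vol. 3).  The 'TOY_*' macros are hypothetical names for those fields.
 */
#if 0
#include <stdint.h>

#define	TOY_INTR_VECTOR(v)	((uint32_t)(v) & 0xff)		/* bits 7:0 */
#define	TOY_INTR_TYPE(t)	(((uint32_t)(t) & 0x7) << 8)	/* bits 10:8 */
#define	TOY_INTR_ERR_VALID	(1U << 11)
#define	TOY_INTR_VALID		(1U << 31)

static uint32_t
toy_entry_intr_info(int vector, int type, int code_valid)
{
	uint32_t info;

	info = TOY_INTR_VECTOR(vector) | TOY_INTR_TYPE(type);
	if (code_valid)
		info |= TOY_INTR_ERR_VALID;
	return (info | TOY_INTR_VALID);
}
#endif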
static int
vmx_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct vmx *vmx = arg;
	int vcap;
	int ret;

	ret = ENOENT;

	vcap = vmx->cap[vcpu].set;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit)
			ret = 0;
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit)
			ret = 0;
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap)
			ret = 0;
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest)
			ret = 0;
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid)
			ret = 0;
		break;
	default:
		break;
	}

	if (ret == 0)
		*retval = (vcap & (1 << type)) ? 1 : 0;

	return (ret);
}

static int
vmx_setcap(void *arg, int vcpu, int type, int val)
{
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];
	uint32_t baseval;
	uint32_t *pptr;
	int error;
	int flag;
	int reg;
	int retval;

	retval = ENOENT;
	pptr = NULL;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_HLT_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_MTF;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_PAUSE_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_UNRESTRICTED_GUEST;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_ENABLE_INVPCID;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	default:
		break;
	}

	if (retval == 0) {
		if (val) {
			baseval |= flag;
		} else {
			baseval &= ~flag;
		}
		VMPTRLD(vmcs);
		error = vmwrite(reg, baseval);
		VMCLEAR(vmcs);

		if (error) {
			retval = error;
		} else {
			/*
			 * Update the stored control word, if any, and
			 * record the capability setting.
			 */
			if (pptr != NULL) {
				*pptr = baseval;
			}

			if (val) {
				vmx->cap[vcpu].set |= (1 << type);
			} else {
				vmx->cap[vcpu].set &= ~(1 << type);
			}
		}
	}

	return (retval);
}

struct vlapic_vtx {
	struct vlapic	vlapic;
	struct pir_desc	*pir_desc;
};

#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
do {									\
	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
	    level ? "level" : "edge", vector);				\
	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
} while (0)
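/*
 * Editor's note: illustrative sketch, not part of the driver.  It condenses
 * the read-modify-write done by vmx_setcap() above: a feature bit is set or
 * cleared in a cached copy of the VMCS control word, and the per-vcpu 'set'
 * bitmask records, one bit per capability type, whether the capability is
 * enabled.  The 'toy_*' names are hypothetical.
 */
#if 0
#include <stdint.h>

struct toy_caps {
	uint32_t proc_ctls;	/* cached execution-control word */
	uint32_t set;		/* one bit per enabled capability type */
};

static void
toy_setcap(struct toy_caps *caps, int type, uint32_t flag, int val)
{

	if (val)
		caps->proc_ctls |= flag;	/* enable the exiting */
	else
		caps->proc_ctls &= ~flag;	/* disable the exiting */

	if (val)
		caps->set |= (1U << type);
	else
		caps->set &= ~(1U << type);
}
#endif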
/*
 * vlapic->ops handlers that utilize the APICv hardware assist described in
 * Chapter 29 of the Intel SDM.
 */
static int
vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	uint64_t mask;
	int idx, notify;

	/*
	 * XXX need to deal with level triggered interrupts
	 */
	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	/*
	 * Keep track of interrupt requests in the PIR descriptor. This is
	 * because the virtual APIC page pointed to by the VMCS cannot be
	 * modified if the vcpu is running.
	 */
	idx = vector / 64;
	mask = 1UL << (vector % 64);
	atomic_set_long(&pir_desc->pir[idx], mask);
	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);

	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
	    level, "vmx_set_intr_ready");
	return (notify);
}

static int
vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t pending, pirval;
	uint32_t ppr, vpr;
	int i;

	/*
	 * This function is only expected to be called from the 'HLT' exit
	 * handler which does not care about the vector that is pending.
	 */
	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	pending = atomic_load_acq_long(&pir_desc->pending);
	if (!pending)
		return (0);	/* common case */

	/*
	 * If there is an interrupt pending then it will be recognized only
	 * if its priority is greater than the processor priority.
	 *
	 * Special case: if the processor priority is zero then any pending
	 * interrupt will be recognized.
	 */
	lapic = vlapic->apic_page;
	ppr = lapic->ppr & 0xf0;
	if (ppr == 0)
		return (1);

	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
	    lapic->ppr);

	for (i = 3; i >= 0; i--) {
		pirval = pir_desc->pir[i];
		if (pirval != 0) {
			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
			return (vpr > ppr);
		}
	}
	return (0);
}

static void
vmx_intr_accepted(struct vlapic *vlapic, int vector)
{

	panic("vmx_intr_accepted: not expected to be called");
}

static void
vmx_post_intr(struct vlapic *vlapic, int hostcpu)
{

	ipi_cpu(hostcpu, pirvec);
}
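/*
 * Editor's note: illustrative sketch, not part of the driver.  It shows the
 * arithmetic used by vmx_set_intr_ready() and vmx_pending_intr() above: 256
 * vectors are tracked in four 64-bit PIR words, a vector maps to word
 * 'vector / 64' and bit 'vector % 64', and the highest pending vector is
 * found with a find-last-set scan.  The 4-bit priority class compared
 * against the PPR is the vector's upper nibble (vector & 0xf0).  'toy_*'
 * is hypothetical; toy_flsll() stands in for the kernel's flsl().
 */
#if 0
#include <stdint.h>

static int
toy_flsll(uint64_t x)
{
	int bit;

	for (bit = 64; bit > 0; bit--) {
		if (x & (1ULL << (bit - 1)))
			return (bit);
	}
	return (0);
}

static void
toy_pir_set(uint64_t pir[4], int vector)
{

	pir[vector / 64] |= 1ULL << (vector % 64);
}

static int
toy_pir_highest(const uint64_t pir[4])
{
	int i;

	for (i = 3; i >= 0; i--) {
		if (pir[i] != 0)
			return (i * 64 + toy_flsll(pir[i]) - 1);
	}
	return (-1);			/* nothing pending */
}
#endif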
/*
 * Transfer the pending interrupts in the PIR descriptor to the IRR
 * in the virtual APIC page.
 */
static void
vmx_inject_pir(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t val, pirval;
	int rvi, pirbase;
	uint16_t intr_status_old, intr_status_new;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;
	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
		    "no posted interrupt pending");
		return;
	}

	pirval = 0;
	lapic = vlapic->apic_page;

	val = atomic_readandclear_long(&pir_desc->pir[0]);
	if (val != 0) {
		lapic->irr0 |= val;
		lapic->irr1 |= val >> 32;
		pirbase = 0;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[1]);
	if (val != 0) {
		lapic->irr2 |= val;
		lapic->irr3 |= val >> 32;
		pirbase = 64;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[2]);
	if (val != 0) {
		lapic->irr4 |= val;
		lapic->irr5 |= val >> 32;
		pirbase = 128;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[3]);
	if (val != 0) {
		lapic->irr6 |= val;
		lapic->irr7 |= val >> 32;
		pirbase = 192;
		pirval = val;
	}
	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");

	/*
	 * Update RVI so the processor can evaluate pending virtual
	 * interrupts on VM-entry.
	 */
	if (pirval != 0) {
		rvi = pirbase + flsl(pirval) - 1;
		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		intr_status_new = (intr_status_old & 0xFF00) | rvi;
		if (intr_status_new > intr_status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
			    "guest_intr_status changed from 0x%04x to 0x%04x",
			    intr_status_old, intr_status_new);
		}
	}
}

static struct vlapic *
vmx_vlapic_init(void *arg, int vcpuid)
{
	struct vmx *vmx;
	struct vlapic *vlapic;
	struct vlapic_vtx *vlapic_vtx;

	vmx = arg;

	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vmx->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];

	if (virtual_interrupt_delivery) {
		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
		vlapic->ops.pending_intr = vmx_pending_intr;
		vlapic->ops.intr_accepted = vmx_intr_accepted;
	}

	if (posted_interrupts)
		vlapic->ops.post_intr = vmx_post_intr;

	vlapic_init(vlapic);

	return (vlapic);
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_VLAPIC);
}

struct vmm_ops vmm_ops_intel = {
	vmx_init,
	vmx_cleanup,
	vmx_restore,
	vmx_vminit,
	vmx_run,
	vmx_vmcleanup,
	vmx_getreg,
	vmx_setreg,
	vmx_getdesc,
	vmx_setdesc,
	vmx_inject,
	vmx_getcap,
	vmx_setcap,
	ept_vmspace_alloc,
	ept_vmspace_free,
	vmx_vlapic_init,
	vmx_vlapic_cleanup,
};
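/*
 * Editor's note: illustrative sketch, not part of the driver.  It isolates
 * the RVI update at the end of vmx_inject_pir() above: the guest interrupt
 * status is a 16-bit field with RVI in the low byte and SVI in the high
 * byte, and this path only ever raises the requesting virtual interrupt
 * (RVI), never lowers it.  'toy_*' is hypothetical.
 */
#if 0
#include <stdint.h>

static uint16_t
toy_raise_rvi(uint16_t intr_status, int rvi)
{
	uint16_t updated;

	updated = (intr_status & 0xFF00) | ((uint16_t)rvi & 0x00FF);
	/* Write back only if this raises the RVI; SVI is preserved. */
	return (updated > intr_status ? updated : intr_status);
}
#endif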