/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 266339 2014-05-17 19:11:08Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 266339 2014-05-17 19:11:08Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include "vmm_host.h"
#include "vmm_ipi.h"
#include "vmm_msr.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmx_msr.h"
#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "x86.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING	|	\
	 PINBASED_NMI_EXITING		|	\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING	|	\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS	|	\
	 PROCBASED_IO_EXITING		|	\
	 PROCBASED_MSR_BITMAPS		|	\
	 PROCBASED_CTLS_WINDOW_SETTING)
#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING	|	\
	 PROCBASED_CR3_STORE_EXITING	|	\
	 PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING_NO_PAT		\
	(VM_EXIT_HOST_LMA		|	\
	 VM_EXIT_SAVE_EFER		|	\
	 VM_EXIT_LOAD_EFER)

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_EXIT_ACKNOWLEDGE_INTERRUPT	|	\
	 VM_EXIT_SAVE_PAT		|	\
	 VM_EXIT_LOAD_PAT)
#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_ENTRY_LOAD_PAT)
#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS	|	\
	 VM_ENTRY_INTO_SMM		|	\
	 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
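
/*
 * Editor's note (added illustration, not part of the original source):
 * each control group above is negotiated against a VMX capability MSR
 * whose low 32 bits report the controls that must be 1 and whose high
 * 32 bits report the controls that are allowed to be 1.  A simplified
 * sketch of the check vmx_set_ctlreg() is assumed to perform for a
 * (ones, zeros) pair; the real routine also consults the "true"
 * capability MSRs where available:
 *
 *	uint64_t cap = rdmsr(ctl_msr);
 *	uint32_t must_be_one = (uint32_t)cap;
 *	uint32_t may_be_one = (uint32_t)(cap >> 32);
 *
 *	if ((ones & may_be_one) != ones)
 *		return (EINVAL);	// a desired 1-setting is unsupported
 *	if ((zeros & must_be_one) != 0)
 *		return (EINVAL);	// a desired 0-setting is forced to 1
 */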

#define	guest_msr_rw(vmx, msr) \
	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
    &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
    &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
    &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
    &cr4_zeros_mask, 0, NULL);

static int vmx_no_patmsr;

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
    &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Virtual NMI blocking conditions.
 *
 * Some processor implementations also require NMI to be blocked if
 * the STI_BLOCKING bit is set. It is possible to detect this at runtime
 * based on the (exit_reason,exit_qual) tuple being set to
 * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
 *
 * We take the easy way out and also include STI_BLOCKING as one of the
 * gating items for vNMI injection.
 */
static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
				    VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
				    VMCS_INTERRUPTIBILITY_STI_BLOCKING;

/*
 * Optional capabilities
 */
static int cap_halt_exit;
static int cap_pause_exit;
static int cap_unrestricted_guest;
static int cap_monitor_trap;
static int cap_invpcid;

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static void vmx_inject_pir(struct vlapic *vlapic);

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE:
		return "mce";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}
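
/*
 * Editor's illustration (typical values, not from the original source):
 * on many processors MSR_VMX_CR0_FIXED0 reads 0x80000021 (PG, NE and PE
 * must be 1 in VMX operation) and MSR_VMX_CR0_FIXED1 reads 0xffffffff
 * (no bit is forced to 0).  vmx_init() below then computes
 *
 *	cr0_ones_mask  = fixed0 & fixed1   = 0x80000021
 *	cr0_zeros_mask = ~fixed0 & ~fixed1 = 0
 *
 * so vmx_fix_cr0(0) would return 0x80000021.
 */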

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not guaranteed to be unique across VMs, but
		 * this does not affect correctness because the combined
		 * mappings are also tagged with the EP4TA, which is unique
		 * for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}
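
/*
 * Editor's illustration (hypothetical numbers, not in the original
 * source): with VM_MAXCPU == 16 the unit allocator hands out VPIDs from
 * [17, 0xffff].  Should it run dry, vpid_alloc() falls back to the
 * overflow namespace [1, num]; those VPIDs may be shared by several VMs,
 * which is safe because combined mappings are additionally tagged with
 * the per-VM EP4TA, at the cost of broader invvpid invalidations.
 */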

static void
msr_save_area_init(struct msr_entry *g_area, int *g_count)
{
	int cnt;

	static struct msr_entry guest_msrs[] = {
		{ MSR_KGSBASE, 0, 0 },
	};

	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
	if (cnt > GUEST_MSR_MAX_ENTRIES)
		panic("guest msr save area overrun");
	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
	*g_count = cnt;
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures, so invalidate all EPT and VPID tagged
		 * mappings explicitly. This prevents potential retention of
		 * cached information in the TLB between distinct VMX
		 * episodes.
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec != 0)
		vmm_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}
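
/*
 * Editor's note: vmx_enable() above follows the VMXON protocol from the
 * Intel SDM: set CR4.VMXE, store the VMCS revision identifier in the
 * first four bytes of this cpu's page-aligned vmxon_region, and hand the
 * region to vmxon().  A sketch of the assumed region layout:
 *
 *	bytes 0..3 : vmx_revision()  (VMCS revision identifier)
 *	bytes 4..  : reserved for the processor
 */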
desired " 570 "exit controls\n"); 571 return (error); 572 } else { 573 if (bootverbose) 574 printf("vmm: PAT MSR access not supported\n"); 575 guest_msr_valid(MSR_PAT); 576 vmx_no_patmsr = 1; 577 } 578 } 579 580 /* Check support for VM-entry controls */ 581 if (!vmx_no_patmsr) { 582 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 583 MSR_VMX_TRUE_ENTRY_CTLS, 584 VM_ENTRY_CTLS_ONE_SETTING, 585 VM_ENTRY_CTLS_ZERO_SETTING, 586 &entry_ctls); 587 } else { 588 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, 589 MSR_VMX_TRUE_ENTRY_CTLS, 590 VM_ENTRY_CTLS_ONE_SETTING_NO_PAT, 591 VM_ENTRY_CTLS_ZERO_SETTING, 592 &entry_ctls); 593 } 594 595 if (error) { 596 printf("vmx_init: processor does not support desired " 597 "entry controls\n"); 598 return (error); 599 } 600 601 /* 602 * Check support for optional features by testing them 603 * as individual bits 604 */ 605 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 606 MSR_VMX_TRUE_PROCBASED_CTLS, 607 PROCBASED_HLT_EXITING, 0, 608 &tmp) == 0); 609 610 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 611 MSR_VMX_PROCBASED_CTLS, 612 PROCBASED_MTF, 0, 613 &tmp) == 0); 614 615 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 616 MSR_VMX_TRUE_PROCBASED_CTLS, 617 PROCBASED_PAUSE_EXITING, 0, 618 &tmp) == 0); 619 620 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 621 MSR_VMX_PROCBASED_CTLS2, 622 PROCBASED2_UNRESTRICTED_GUEST, 0, 623 &tmp) == 0); 624 625 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 626 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 627 &tmp) == 0); 628 629 /* 630 * Check support for virtual interrupt delivery. 631 */ 632 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 633 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 634 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 635 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 636 637 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 638 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, 639 &tmp) == 0); 640 641 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 642 procbased2_vid_bits, 0, &tmp); 643 if (error == 0 && use_tpr_shadow) { 644 virtual_interrupt_delivery = 1; 645 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", 646 &virtual_interrupt_delivery); 647 } 648 649 if (virtual_interrupt_delivery) { 650 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 651 procbased_ctls2 |= procbased2_vid_bits; 652 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; 653 654 /* 655 * Check for Posted Interrupts only if Virtual Interrupt 656 * Delivery is enabled. 

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = vmm_ipi_alloc();
			if (pirvec == 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
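
/*
 * Editor's illustration (not part of the original source): with the
 * mask/shadow pair programmed by vmx_setup_cr_shadow() above, a guest
 * read of a masked %cr0/%cr4 bit returns the shadow value rather than
 * the real bit, and a guest write that tries to change a masked bit to
 * a value different from the shadow causes a VM exit.  For example,
 * with cr0_ones_mask = 0x80000021 and the power-on shadow 0x60000010, a
 * guest read of %cr0 reports PE and PG clear even though the hardware
 * bits are forced to 1 while the guest runs.
 */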

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, guest_msr_count;
	struct vmx *vmx;
	struct vmcs *vmcs;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so they can be directly accessed by
	 * the guest.
	 *
	 * Guest KGSBASE is saved and restored in the guest MSR save area.
	 * Host KGSBASE is restored before returning to userland from the pcb.
	 * There will be a window of time when we are executing in the host
	 * kernel context with a value of KGSBASE from the guest. This is ok
	 * because the value of KGSBASE is inconsequential in kernel context.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_KGSBASE) ||
	    guest_msr_rw(vmx, MSR_EFER))
		panic("vmx_vminit: error setting guest msr access");
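
	/*
	 * Editor's note: guest_msr_rw() clears both the read and the write
	 * intercept bit for the given MSR in the 4KB MSR bitmap referenced
	 * by the VMCS, so guest RDMSR/WRMSR of these registers no longer
	 * cause VM exits.  Sketch of the intended effect (simplified):
	 *
	 *	guest_msr_rw(vmx, MSR_FSBASE);
	 *	 => guest reads/writes of FS.base run without an exit.
	 */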

	/*
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit. However, if running on a system with no
	 * MSR_PAT save/restore support, leave access disabled so accesses
	 * will be trapped.
	 */
	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
		panic("vmx_vminit: error setting guest pat msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			    error, i);
		}

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
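		/*
		 * Editor's note: vmwrite() returns 0 on success and a
		 * non-zero value on failure, so accumulating the return
		 * values below lets a single KASSERT catch a failure in
		 * any of the writes without testing each one separately.
		 */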
"handled" : "unhandled", 960 exit_reason_to_str(exit_reason), rip); 961#endif 962} 963 964static __inline void 965vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip) 966{ 967#ifdef KTR 968 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip); 969#endif 970} 971 972static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 973 974static void 975vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap) 976{ 977 struct vmxstate *vmxstate; 978 struct invvpid_desc invvpid_desc; 979 980 vmxstate = &vmx->state[vcpu]; 981 if (vmxstate->lastcpu == curcpu) 982 return; 983 984 vmxstate->lastcpu = curcpu; 985 986 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); 987 988 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 989 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 990 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 991 992 /* 993 * If we are using VPIDs then invalidate all mappings tagged with 'vpid' 994 * 995 * We do this because this vcpu was executing on a different host 996 * cpu when it last ran. We do not track whether it invalidated 997 * mappings associated with its 'vpid' during that run. So we must 998 * assume that the mappings associated with 'vpid' on 'curcpu' are 999 * stale and invalidate them. 1000 * 1001 * Note that we incur this penalty only when the scheduler chooses to 1002 * move the thread associated with this vcpu between host cpus. 1003 * 1004 * Note also that this will invalidate mappings tagged with 'vpid' 1005 * for "all" EP4TAs. 1006 */ 1007 if (vmxstate->vpid != 0) { 1008 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) { 1009 invvpid_desc._res1 = 0; 1010 invvpid_desc._res2 = 0; 1011 invvpid_desc.vpid = vmxstate->vpid; 1012 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1013 } else { 1014 /* 1015 * The invvpid can be skipped if an invept is going to 1016 * be performed before entering the guest. The invept 1017 * will invalidate combined mappings tagged with 1018 * 'vmx->eptp' for all vpids. 1019 */ 1020 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1); 1021 } 1022 } 1023} 1024 1025/* 1026 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{

	vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static void __inline
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{

	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{

	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static int
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	uint64_t info, interruptibility;

	/* Bail out if no NMI requested */
	if (!vm_nmi_pending(vmx->vm, vcpu))
		return (0);

	interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (interruptibility & nmi_blocking_bits)
		goto nmiblocked;

	/*
	 * Inject the virtual NMI. The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	info = VMCS_INTR_INFO_NMI | VMCS_INTR_INFO_VALID;
	info |= IDT_NMI;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
	return (1);

nmiblocked:
	/*
	 * Set the NMI Window Exiting execution control so we can inject
	 * the virtual NMI as soon as the blocking condition goes away.
	 */
	vmx_set_nmi_window_exiting(vmx, vcpu);

	VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
	return (1);
}
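
/*
 * Editor's illustration (not part of the original source): the gating in
 * vmx_inject_interrupts() below mirrors real hardware.  A guest running
 *
 *	sti
 *	hlt
 *
 * is uninterruptible for the instruction boundary after STI, so a vector
 * pending at that point cannot be injected; instead the Interrupt Window
 * Exiting control is set and the injection happens on the VM exit taken
 * as soon as the blocking window closes.
 */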

static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;
	uint64_t info, rflags, interruptibility;

	const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
				   VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;

	/*
	 * If there is already an interrupt pending then just return.
	 *
	 * This could happen if an interrupt was injected on a prior
	 * VM entry but the actual entry into guest mode was aborted
	 * because of a pending AST.
	 */
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if (info & VMCS_INTR_INFO_VALID)
		return;

	/*
	 * NMI injection has priority so deal with those first
	 */
	if (vmx_inject_nmi(vmx, vcpu))
		return;

	if (virtual_interrupt_delivery) {
		vmx_inject_pir(vlapic);
		return;
	}

	/* Ask the local apic for a vector to inject */
	if (!vlapic_pending_intr(vlapic, &vector))
		return;

	if (vector < 32 || vector > 255)
		panic("vmx_inject_interrupts: invalid vector %d\n", vector);

	/* Check RFLAGS.IF and the interruptibility state of the guest */
	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
	if ((rflags & PSL_I) == 0)
		goto cantinject;

	interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	if (interruptibility & HWINTR_BLOCKED)
		goto cantinject;

	/* Inject the interrupt */
	info = VMCS_INTR_INFO_HW_INTR | VMCS_INTR_INFO_VALID;
	info |= vector;
	vmcs_write(VMCS_ENTRY_INTR_INFO, info);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

	return;

cantinject:
	/*
	 * Set the Interrupt Window Exiting execution control so we can inject
	 * the interrupt as soon as the blocking condition goes away.
	 */
	vmx_set_int_window_exiting(vmx, vcpu);

	VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
}

static int
vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	int cr, vmcs_guest_cr, vmcs_shadow_cr;
	uint64_t crval, regval, ones_mask, zeros_mask;
	const struct vmxctx *vmxctx;

	/* We only handle mov to %cr0 or %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	cr = exitqual & 0xf;
	if (cr != 0 && cr != 4)
		return (UNHANDLED);

	vmxctx = &vmx->ctx[vcpu];

	/*
	 * We must use vmcs_write() directly here because vmcs_setreg() will
	 * call vmclear(vmcs) as a side-effect which we certainly don't want.
	 */
	switch ((exitqual >> 8) & 0xf) {
	case 0:
		regval = vmxctx->guest_rax;
		break;
	case 1:
		regval = vmxctx->guest_rcx;
		break;
	case 2:
		regval = vmxctx->guest_rdx;
		break;
	case 3:
		regval = vmxctx->guest_rbx;
		break;
	case 4:
		regval = vmcs_read(VMCS_GUEST_RSP);
		break;
	case 5:
		regval = vmxctx->guest_rbp;
		break;
	case 6:
		regval = vmxctx->guest_rsi;
		break;
	case 7:
		regval = vmxctx->guest_rdi;
		break;
	case 8:
		regval = vmxctx->guest_r8;
		break;
	case 9:
		regval = vmxctx->guest_r9;
		break;
	case 10:
		regval = vmxctx->guest_r10;
		break;
	case 11:
		regval = vmxctx->guest_r11;
		break;
	case 12:
		regval = vmxctx->guest_r12;
		break;
	case 13:
		regval = vmxctx->guest_r13;
		break;
	case 14:
		regval = vmxctx->guest_r14;
		break;
	case 15:
		regval = vmxctx->guest_r15;
		break;
	}

	if (cr == 0) {
		ones_mask = cr0_ones_mask;
		zeros_mask = cr0_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR0;
		vmcs_shadow_cr = VMCS_CR0_SHADOW;
	} else {
		ones_mask = cr4_ones_mask;
		zeros_mask = cr4_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR4;
		vmcs_shadow_cr = VMCS_CR4_SHADOW;
	}
	vmcs_write(vmcs_shadow_cr, regval);

	crval = regval | ones_mask;
	crval &= ~zeros_mask;
	vmcs_write(vmcs_guest_cr, crval);

	if (cr == 0 && regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
		 * the "IA-32e mode guest" bit in VM-entry control must be
		 * equal.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}
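
/*
 * Editor's illustration (example value, not in the original source): in
 * the EPT violation exit qualification, bit 0 is a data read, bit 1 a
 * data write, bit 2 an instruction fetch, bit 7 "guest-linear address
 * valid" and bit 8 "translation valid".  A qual of 0x182 therefore means
 * a data write (bit 1) to a guest-physical address that was the
 * translation of a valid guest-linear address (bits 7 and 8), i.e. a
 * candidate for MMIO emulation in ept_emulation_fault() below.
 */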

static boolean_t
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (FALSE);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (FALSE);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (FALSE);
	}

	return (TRUE);
}

static int
vmx_handle_apic_write(struct vlapic *vlapic, uint64_t qual)
{
	int error, handled, offset;
	bool retu;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	handled = 1;
	offset = APIC_WRITE_OFFSET(qual);
	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		retu = false;
		error = vlapic_icrlo_write_handler(vlapic, &retu);
		if (error != 0 || retu)
			handled = 0;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		handled = 0;
		break;
	}
	return (handled);
}

static bool
apic_access_fault(uint64_t gpa)
{

	if (virtual_interrupt_delivery &&
	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
		return (true);
	else
		return (false);
}

static int
vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint64_t qual;
	int access_type, offset, allowed;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	qual = vmexit->u.vmx.exit_qualification;
	access_type = APIC_ACCESS_TYPE(qual);
	offset = APIC_ACCESS_OFFSET(qual);

	allowed = 0;
	if (access_type == 0) {
		/*
		 * Read data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	} else if (access_type == 1) {
		/*
		 * Write data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_VER:
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	}

	if (allowed) {
		vmexit->exitcode = VM_EXITCODE_INST_EMUL;
		vmexit->u.inst_emul.gpa = DEFAULT_APIC_BASE + offset;
		vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
		vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
	}

	/*
	 * Regardless of whether the APIC-access is allowed this handler
	 * always returns UNHANDLED:
	 * - if the access is allowed then it is handled by emulating the
	 *   instruction that caused the VM-exit (outside the critical section)
	 * - if the access is not allowed then it will be converted to an
	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
	 */
	return (UNHANDLED);
}

static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	int error, handled;
	struct vmxctx *vmxctx;
	struct vlapic *vlapic;
	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, reason;
	uint64_t qual, gpa;
	bool retu;

	handled = 0;
	vmxctx = &vmx->ctx[vcpu];

	qual = vmexit->u.vmx.exit_qualification;
	reason = vmexit->u.vmx.exit_reason;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * VM exits that could be triggered during event injection on the
	 * previous VM entry need to be handled specially by re-injecting
	 * the event.
	 *
	 * See "Information for VM Exits During Event Delivery" in Intel SDM
	 * for details.
	 */
	switch (reason) {
	case EXIT_REASON_EPT_FAULT:
	case EXIT_REASON_EPT_MISCONFIG:
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_TASK_SWITCH:
	case EXIT_REASON_EXCEPTION:
		idtvec_info = vmcs_idt_vectoring_info();
		if (idtvec_info & VMCS_IDT_VEC_VALID) {
			idtvec_info &= ~(1 << 12); /* clear undefined bit */
			vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
			if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
				idtvec_err = vmcs_idt_vectoring_err();
				vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
				    idtvec_err);
			}
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
		}
	default:
		break;
	}

	switch (reason) {
	case EXIT_REASON_CR_ACCESS:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
		break;
	case EXIT_REASON_RDMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
		retu = false;
		ecx = vmxctx->guest_rcx;
		error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
		} else if (!retu) {
			handled = 1;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_rdmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_WRMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
		retu = false;
		eax = vmxctx->guest_rax;
		ecx = vmxctx->guest_rcx;
		edx = vmxctx->guest_rdx;
		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
		    (uint64_t)edx << 32 | eax, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
		} else if (!retu) {
			handled = 1;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_wrmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_HLT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
		break;
	case EXIT_REASON_MTF:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		break;
	case EXIT_REASON_PAUSE:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		break;
	case EXIT_REASON_INTR_WINDOW:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
		vmx_clear_int_window_exiting(vmx, vcpu);
		VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
		return (1);
	case EXIT_REASON_EXT_INTR:
		/*
		 * External interrupts serve only to cause VM exits and allow
		 * the host interrupt handler to run.
		 *
		 * If this external interrupt triggers a virtual interrupt
		 * to a VM, then that state will be recorded by the
		 * host interrupt handler in the VM's softc. We will inject
		 * this virtual interrupt during the subsequent VM enter.
		 */
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_INFO_VALID) != 0 &&
		    VMCS_INTR_INFO_TYPE(intr_info) == 0,
		    ("VM exit interruption info invalid: %#x", intr_info));
		vmx_trigger_hostintr(intr_info & 0xff);

		/*
		 * This is special. We want to treat this as a 'handled'
		 * VM-exit but not increment the instruction pointer.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
		return (1);
	case EXIT_REASON_NMI_WINDOW:
		/* Exit to allow the pending virtual NMI to be injected */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
		vmx_clear_nmi_window_exiting(vmx, vcpu);
		VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
		return (1);
	case EXIT_REASON_INOUT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
		vmexit->exitcode = VM_EXITCODE_INOUT;
		vmexit->u.inout.bytes = (qual & 0x7) + 1;
		vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
		vmexit->u.inout.port = (uint16_t)(qual >> 16);
		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
		break;
	case EXIT_REASON_CPUID:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
		break;
	case EXIT_REASON_EPT_FAULT:
		/*
		 * If 'gpa' lies within the address space allocated to
		 * memory then this must be a nested page fault otherwise
		 * this must be an instruction that accesses MMIO space.
		 */
		gpa = vmcs_gpa();
		if (vm_mem_allocated(vmx->vm, gpa) || apic_access_fault(gpa)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = gpa;
			vmexit->u.paging.fault_type = ept_fault_type(qual);
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
		} else if (ept_emulation_fault(qual)) {
			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
			vmexit->u.inst_emul.gpa = gpa;
			vmexit->u.inst_emul.gla = vmcs_gla();
			vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
		}
		break;
	case EXIT_REASON_VIRTUALIZED_EOI:
		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
		vmexit->inst_length = 0;	/* trap-like */
		break;
	case EXIT_REASON_APIC_ACCESS:
		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_APIC_WRITE:
		/*
		 * APIC-write VM exit is trap-like so the %rip is already
		 * pointing to the next instruction.
		 */
		vmexit->inst_length = 0;
		vlapic = vm_lapic(vmx->vm, vcpu);
		handled = vmx_handle_apic_write(vlapic, qual);
		break;
	default:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}
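
	/*
	 * Editor's note: EXIT_REASON_VIRTUALIZED_EOI and
	 * EXIT_REASON_APIC_WRITE above are trap-like exits: the saved
	 * guest %rip already points past the instruction, so inst_length
	 * is forced to 0 to keep the %rip adjustment below from skipping
	 * an extra instruction.
	 */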
	if (handled) {
		/*
		 * It is possible that control is returned to userland
		 * even though we were able to handle the VM exit in the
		 * kernel.
		 *
		 * In such a case we want to make sure that the userland
		 * restarts guest execution at the instruction *after*
		 * the one we just processed. Therefore we update the
		 * guest rip in the VMCS and in 'vmexit'.
		 */
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic VMX exit.
			 */
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.status = VM_SUCCESS;
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static __inline int
vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmx_astpending_trace(vmx, vcpu, vmexit->rip);
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);

	return (HANDLED);
}

static __inline int
vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1);

	return (UNHANDLED);
}

static __inline int
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
{

	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
	    vmxctx->inst_fail_status));

	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = vmxctx->inst_fail_status;
	vmexit->u.vmx.inst_error = vmcs_instruction_error();
	vmexit->u.vmx.exit_reason = ~0;
	vmexit->u.vmx.exit_qualification = ~0;

	switch (rc) {
	case VMX_VMRESUME_ERROR:
	case VMX_VMLAUNCH_ERROR:
	case VMX_INVEPT_ERROR:
		vmexit->u.vmx.inst_type = rc;
		break;
	default:
		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
	}

	return (UNHANDLED);
}
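
/*
 * Editor's note: vmx_run() below loops inside the kernel for as long as
 * VM exits can be disposed of here (handled != 0); only an exit that
 * needs userland attention (I/O, HLT, an unknown reason, ...) breaks the
 * loop and returns, with 'vmexit' describing why.
 */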
static int
vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
    void *rendezvous_cookie)
{
	int rc, handled, launched;
	struct vmx *vmx;
	struct vm *vm;
	struct vmxctx *vmxctx;
	struct vmcs *vmcs;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint64_t rip;
	uint32_t exit_reason;

	vmx = arg;
	vm = vmx->vm;
	vmcs = &vmx->vmcs[vcpu];
	vmxctx = &vmx->ctx[vcpu];
	vlapic = vm_lapic(vm, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	launched = 0;

	KASSERT(vmxctx->pmap == pmap,
	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));

	VMPTRLD(vmcs);

	/*
	 * XXX
	 * We do this every time because we may setup the virtual machine
	 * from a different process than the one that actually runs it.
	 *
	 * If the life of a virtual machine was spent entirely in the context
	 * of a single process we could do this once in vmx_vminit().
	 */
	vmcs_write(VMCS_HOST_CR3, rcr3());

	vmcs_write(VMCS_GUEST_RIP, startrip);
	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
	do {
		/*
		 * Interrupts are disabled from this point on until the
		 * guest starts executing. This is done for the following
		 * reasons:
		 *
		 * If an AST is asserted on this thread after the check below,
		 * then the IPI_AST notification will not be lost, because it
		 * will cause a VM exit due to external interrupt as soon as
		 * the guest state is loaded.
		 *
		 * A posted interrupt after 'vmx_inject_interrupts()' will
		 * not be "lost" because it will be held pending in the host
		 * APIC because interrupts are disabled. The pending interrupt
		 * will be recognized as soon as the guest state is loaded.
		 *
		 * The same reasoning applies to the IPI generated by
		 * pmap_invalidate_ept().
		 */
		disable_intr();
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_intr();
			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
			break;
		}

		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
			enable_intr();
			handled = vmx_exit_rendezvous(vmx, vcpu, vmexit);
			break;
		}

		vmx_inject_interrupts(vmx, vcpu, vlapic);
		vmx_run_trace(vmx, vcpu);
		rc = vmx_enter_guest(vmxctx, vmx, launched);

		enable_intr();

		/* Collect some information for VM exit processing */
		vmexit->rip = rip = vmcs_guest_rip();
		vmexit->inst_length = vmexit_instruction_length();
		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();

		if (rc == VMX_GUEST_VMEXIT) {
			launched = 1;
			handled = vmx_exit_process(vmx, vcpu, vmexit);
		} else {
			handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
		}

		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
	} while (handled);

	/*
	 * If a VM exit has been handled then the exitcode must be BOGUS.
	 * If a VM exit is not handled then the exitcode must not be BOGUS.
	 */
	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
		panic("Mismatch between handled (%d) and exitcode (%d)",
		    handled, vmexit->exitcode);
	}

	if (!handled)
		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);

	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
	    vmexit->exitcode);

	VMCLEAR(vmcs);
	return (0);
}

static void
vmx_vmcleanup(void *arg)
{
	int i, error;
	struct vmx *vmx = arg;

	if (virtual_interrupt_delivery)
		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);

	for (i = 0; i < VM_MAXCPU; i++)
		vpid_free(vmx->state[i].vpid);

	/*
	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
	 */
	error = vmclear(&vmx->vmcs[0]);
	if (error != 0)
		panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);

	free(vmx, M_VMX);

	return;
}

static register_t *
vmxctx_regptr(struct vmxctx *vmxctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RAX:
		return (&vmxctx->guest_rax);
	case VM_REG_GUEST_RBX:
		return (&vmxctx->guest_rbx);
	case VM_REG_GUEST_RCX:
		return (&vmxctx->guest_rcx);
	case VM_REG_GUEST_RDX:
		return (&vmxctx->guest_rdx);
	case VM_REG_GUEST_RSI:
		return (&vmxctx->guest_rsi);
	case VM_REG_GUEST_RDI:
		return (&vmxctx->guest_rdi);
	case VM_REG_GUEST_RBP:
		return (&vmxctx->guest_rbp);
	case VM_REG_GUEST_R8:
		return (&vmxctx->guest_r8);
	case VM_REG_GUEST_R9:
		return (&vmxctx->guest_r9);
	case VM_REG_GUEST_R10:
		return (&vmxctx->guest_r10);
	case VM_REG_GUEST_R11:
		return (&vmxctx->guest_r11);
	case VM_REG_GUEST_R12:
		return (&vmxctx->guest_r12);
	case VM_REG_GUEST_R13:
		return (&vmxctx->guest_r13);
	case VM_REG_GUEST_R14:
		return (&vmxctx->guest_r14);
	case VM_REG_GUEST_R15:
		return (&vmxctx->guest_r15);
	default:
		break;
	}
	return (NULL);
}

static int
vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*retval = *regp;
		return (0);
	} else
		return (EINVAL);
}

static int
vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*regp = val;
		return (0);
	} else
		return (EINVAL);
}

static int
vmx_shadow_reg(int reg)
{
	int shreg;

	shreg = -1;

	switch (reg) {
	case VM_REG_GUEST_CR0:
		shreg = VMCS_CR0_SHADOW;
		break;
	case VM_REG_GUEST_CR4:
		shreg = VMCS_CR4_SHADOW;
		break;
	default:
		break;
	}

	return (shreg);
}

static int
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
	int running, hostcpu;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
		return (0);

	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
}

static int
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
	int error, hostcpu, running, shadow;
	uint64_t ctls;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
		return (0);

	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);

	if (error == 0) {
		/*
		 * If the "load EFER" VM-entry control is 1 then the
		 * value of EFER.LMA must be identical to the "IA-32e mode
		 * guest" bit in the VM-entry control.
1987 */ 1988 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 1989 (reg == VM_REG_GUEST_EFER)) { 1990 vmcs_getreg(&vmx->vmcs[vcpu], running, 1991 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 1992 if (val & EFER_LMA) 1993 ctls |= VM_ENTRY_GUEST_LMA; 1994 else 1995 ctls &= ~VM_ENTRY_GUEST_LMA; 1996 vmcs_setreg(&vmx->vmcs[vcpu], running, 1997 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 1998 } 1999 2000 shadow = vmx_shadow_reg(reg); 2001 if (shadow > 0) { 2002 /* 2003 * Store the unmodified value in the shadow 2004 */ 2005 error = vmcs_setreg(&vmx->vmcs[vcpu], running, 2006 VMCS_IDENT(shadow), val); 2007 } 2008 } 2009 2010 return (error); 2011} 2012 2013static int 2014vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2015{ 2016 struct vmx *vmx = arg; 2017 2018 return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc)); 2019} 2020 2021static int 2022vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc) 2023{ 2024 struct vmx *vmx = arg; 2025 2026 return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc)); 2027} 2028 2029static int 2030vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code, 2031 int code_valid) 2032{ 2033 int error; 2034 uint64_t info; 2035 struct vmx *vmx = arg; 2036 struct vmcs *vmcs = &vmx->vmcs[vcpu]; 2037 2038 static uint32_t type_map[VM_EVENT_MAX] = { 2039 0x1, /* VM_EVENT_NONE */ 2040 0x0, /* VM_HW_INTR */ 2041 0x2, /* VM_NMI */ 2042 0x3, /* VM_HW_EXCEPTION */ 2043 0x4, /* VM_SW_INTR */ 2044 0x5, /* VM_PRIV_SW_EXCEPTION */ 2045 0x6, /* VM_SW_EXCEPTION */ 2046 }; 2047 2048 /* 2049 * If there is already an exception pending to be delivered to the 2050 * vcpu then just return. 2051 */ 2052 error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info); 2053 if (error) 2054 return (error); 2055 2056 if (info & VMCS_INTR_INFO_VALID) 2057 return (EAGAIN); 2058 2059 info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0); 2060 info |= VMCS_INTR_INFO_VALID; 2061 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info); 2062 if (error != 0) 2063 return (error); 2064 2065 if (code_valid) { 2066 error = vmcs_setreg(vmcs, 0, 2067 VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR), 2068 code); 2069 } 2070 return (error); 2071} 2072 2073static int 2074vmx_getcap(void *arg, int vcpu, int type, int *retval) 2075{ 2076 struct vmx *vmx = arg; 2077 int vcap; 2078 int ret; 2079 2080 ret = ENOENT; 2081 2082 vcap = vmx->cap[vcpu].set; 2083 2084 switch (type) { 2085 case VM_CAP_HALT_EXIT: 2086 if (cap_halt_exit) 2087 ret = 0; 2088 break; 2089 case VM_CAP_PAUSE_EXIT: 2090 if (cap_pause_exit) 2091 ret = 0; 2092 break; 2093 case VM_CAP_MTRAP_EXIT: 2094 if (cap_monitor_trap) 2095 ret = 0; 2096 break; 2097 case VM_CAP_UNRESTRICTED_GUEST: 2098 if (cap_unrestricted_guest) 2099 ret = 0; 2100 break; 2101 case VM_CAP_ENABLE_INVPCID: 2102 if (cap_invpcid) 2103 ret = 0; 2104 break; 2105 default: 2106 break; 2107 } 2108 2109 if (ret == 0) 2110 *retval = (vcap & (1 << type)) ? 

	return (ret);
}

static int
vmx_setcap(void *arg, int vcpu, int type, int val)
{
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];
	uint32_t baseval;
	uint32_t *pptr;
	int error;
	int flag;
	int reg;
	int retval;

	retval = ENOENT;
	pptr = NULL;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_HLT_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_MTF;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_PAUSE_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_UNRESTRICTED_GUEST;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_ENABLE_INVPCID;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	default:
		break;
	}

	if (retval == 0) {
		if (val) {
			baseval |= flag;
		} else {
			baseval &= ~flag;
		}
		VMPTRLD(vmcs);
		error = vmwrite(reg, baseval);
		VMCLEAR(vmcs);

		if (error) {
			retval = error;
		} else {
			/*
			 * Update the shadowed copy of the control word and
			 * record the new capability setting.
			 */
			if (pptr != NULL) {
				*pptr = baseval;
			}

			if (val) {
				vmx->cap[vcpu].set |= (1 << type);
			} else {
				vmx->cap[vcpu].set &= ~(1 << type);
			}
		}
	}

	return (retval);
}

struct vlapic_vtx {
	struct vlapic	vlapic;
	struct pir_desc	*pir_desc;
	struct vmx	*vmx;
};

#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
do {									\
	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
	    level ? "level" : "edge", vector);				\
	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
} while (0)

/*
 * vlapic->ops handlers that utilize the APICv hardware assist described in
 * Chapter 29 of the Intel SDM.
 */
static int
vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	uint64_t mask;
	int idx, notify;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	/*
	 * Keep track of interrupt requests in the PIR descriptor. This is
	 * because the virtual APIC page pointed to by the VMCS cannot be
	 * modified if the vcpu is running.
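	 *
	 * For example, vector 97 (0x61) maps to idx = 97 / 64 = 1 and
	 * mask = 1UL << (97 % 64), i.e. bit 33 of pir_desc->pir[1]. The
	 * 'pending' word is a separate hint that tells the injection path
	 * whether any bit in pir[] may be set at all.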
	 */
	idx = vector / 64;
	mask = 1UL << (vector % 64);
	atomic_set_long(&pir_desc->pir[idx], mask);
	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);

	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
	    level, "vmx_set_intr_ready");
	return (notify);
}

static int
vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t pending, pirval;
	uint32_t ppr, vpr;
	int i;

	/*
	 * This function is only expected to be called from the 'HLT' exit
	 * handler which does not care about the vector that is pending.
	 */
	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	pending = atomic_load_acq_long(&pir_desc->pending);
	if (!pending)
		return (0);	/* common case */

	/*
	 * If there is an interrupt pending then it will be recognized only
	 * if its priority is greater than the processor priority.
	 *
	 * Special case: if the processor priority is zero then any pending
	 * interrupt will be recognized.
	 */
	lapic = vlapic->apic_page;
	ppr = lapic->ppr & 0xf0;
	if (ppr == 0)
		return (1);

	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
	    lapic->ppr);

	/*
	 * Scan the PIR from the highest 64-bit word down and compare the
	 * priority class of the highest pending vector against the PPR.
	 */
	for (i = 3; i >= 0; i--) {
		pirval = pir_desc->pir[i];
		if (pirval != 0) {
			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
			return (vpr > ppr);
		}
	}
	return (0);
}

static void
vmx_intr_accepted(struct vlapic *vlapic, int vector)
{

	panic("vmx_intr_accepted: not expected to be called");
}

static void
vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
{
	struct vlapic_vtx *vlapic_vtx;
	struct vmx *vmx;
	struct vmcs *vmcs;
	uint64_t mask, val;

	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
	    ("vmx_set_tmr: vcpu cannot be running"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vmx = vlapic_vtx->vmx;
	vmcs = &vmx->vmcs[vlapic->vcpuid];
	mask = 1UL << (vector % 64);

	VMPTRLD(vmcs);
	val = vmcs_read(VMCS_EOI_EXIT(vector));
	if (level)
		val |= mask;
	else
		val &= ~mask;
	vmcs_write(VMCS_EOI_EXIT(vector), val);
	VMCLEAR(vmcs);
}

static void
vmx_post_intr(struct vlapic *vlapic, int hostcpu)
{

	ipi_cpu(hostcpu, pirvec);
}

/*
 * Transfer the pending interrupts in the PIR descriptor to the IRR
 * in the virtual APIC page.
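 *
 * Each 64-bit word pir[i] covers vectors [i * 64, i * 64 + 63] and maps
 * to two 32-bit IRR words in the LAPIC page. If any bits were pending,
 * RVI is raised to the highest pending vector so the processor evaluates
 * it on the next VM entry. For example, a pending vector 209 (0xd1) sets
 * bit 17 of pir[3] and yields rvi = 192 + flsl(1UL << 17) - 1 = 209.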
 */
static void
vmx_inject_pir(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t val, pirval;
	int rvi, pirbase;
	uint16_t intr_status_old, intr_status_new;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;
	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
		    "no posted interrupt pending");
		return;
	}

	pirval = 0;
	lapic = vlapic->apic_page;

	val = atomic_readandclear_long(&pir_desc->pir[0]);
	if (val != 0) {
		lapic->irr0 |= val;
		lapic->irr1 |= val >> 32;
		pirbase = 0;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[1]);
	if (val != 0) {
		lapic->irr2 |= val;
		lapic->irr3 |= val >> 32;
		pirbase = 64;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[2]);
	if (val != 0) {
		lapic->irr4 |= val;
		lapic->irr5 |= val >> 32;
		pirbase = 128;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[3]);
	if (val != 0) {
		lapic->irr6 |= val;
		lapic->irr7 |= val >> 32;
		pirbase = 192;
		pirval = val;
	}
	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");

	/*
	 * Update RVI so the processor can evaluate pending virtual
	 * interrupts on VM-entry.
	 */
	if (pirval != 0) {
		rvi = pirbase + flsl(pirval) - 1;
		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		intr_status_new = (intr_status_old & 0xFF00) | rvi;
		if (intr_status_new > intr_status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
			    "guest_intr_status changed from 0x%04x to 0x%04x",
			    intr_status_old, intr_status_new);
		}
	}
}

static struct vlapic *
vmx_vlapic_init(void *arg, int vcpuid)
{
	struct vmx *vmx;
	struct vlapic *vlapic;
	struct vlapic_vtx *vlapic_vtx;

	vmx = arg;

	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vmx->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
	vlapic_vtx->vmx = vmx;

	if (virtual_interrupt_delivery) {
		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
		vlapic->ops.pending_intr = vmx_pending_intr;
		vlapic->ops.intr_accepted = vmx_intr_accepted;
		vlapic->ops.set_tmr = vmx_set_tmr;
	}

	if (posted_interrupts)
		vlapic->ops.post_intr = vmx_post_intr;

	vlapic_init(vlapic);

	return (vlapic);
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_VLAPIC);
}

struct vmm_ops vmm_ops_intel = {
	vmx_init,
	vmx_cleanup,
	vmx_restore,
	vmx_vminit,
	vmx_run,
	vmx_vmcleanup,
	vmx_getreg,
	vmx_setreg,
	vmx_getdesc,
	vmx_setdesc,
	vmx_inject,
	vmx_getcap,
	vmx_setcap,
	ept_vmspace_alloc,
	ept_vmspace_free,
	vmx_vlapic_init,
	vmx_vlapic_cleanup,
};
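
/*
 * Note that 'vmm_ops_intel' relies on positional initialization: the
 * function pointers above must appear in exactly the declaration order
 * of the fields in 'struct vmm_ops'. A C99 designated-initializer form
 * would make the mapping explicit and robust against reordering; a
 * minimal sketch, assuming hypothetical field names rather than the
 * ones actually declared for 'struct vmm_ops':
 *
 *	struct vmm_ops vmm_ops_intel = {
 *		.init      = vmx_init,		// hypothetical field name
 *		.vmrun     = vmx_run,		// hypothetical field name
 *		.vmcleanup = vmx_vmcleanup,	// hypothetical field name
 *		...
 *	};
 */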