/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 267427 2014-06-12 19:58:12Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 267427 2014-06-12 19:58:12Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include "vmm_host.h"
#include "vmm_ipi.h"
#include "vmm_msr.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmx_msr.h"
#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
#include "x86.h"
#include "vmx_controls.h"
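
/*
 * Each "one setting"/"zero setting" pair below names the control bits that
 * must be 1 and the bits that must be 0 for the way bhyve programs the VMCS.
 * vmx_init() validates each pair against the corresponding VMX capability
 * MSR via vmx_set_ctlreg().
 */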

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING |		\
	 PINBASED_NMI_EXITING |			\
	 PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING |		\
	 PROCBASED_NMI_WINDOW_EXITING)

#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS |		\
	 PROCBASED_IO_EXITING |			\
	 PROCBASED_MSR_BITMAPS |		\
	 PROCBASED_CTLS_WINDOW_SETTING)
#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING |		\
	 PROCBASED_CR3_STORE_EXITING |		\
	 PROCBASED_IO_BITMAPS)

#define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING_NO_PAT		\
	(VM_EXIT_HOST_LMA |			\
	 VM_EXIT_SAVE_EFER |			\
	 VM_EXIT_LOAD_EFER)

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_EXIT_ACKNOWLEDGE_INTERRUPT |	\
	 VM_EXIT_SAVE_PAT |			\
	 VM_EXIT_LOAD_PAT)
#define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT |	\
	 VM_ENTRY_LOAD_PAT)
#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS |		\
	 VM_ENTRY_INTO_SMM |			\
	 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define	guest_msr_rw(vmx, msr) \
	msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)

#define	HANDLED		1
#define	UNHANDLED	0

static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
	     &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
	     &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
	     &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
	     &cr4_zeros_mask, 0, NULL);

static int vmx_no_patmsr;

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
	   &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Optional capabilities
 */
static int cap_halt_exit;
static int cap_pause_exit;
static int cap_unrestricted_guest;
static int cap_monitor_trap;
static int cap_invpcid;

static int virtual_interrupt_delivery;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
    &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");

static int posted_interrupts;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD,
    &posted_interrupts, 0, "APICv posted interrupt support");

static int pirvec;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
    &pirvec, 0, "APICv posted interrupt vector");

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
	    &vpid_alloc_failed, 0, NULL);

/*
 * Use the last page below 4GB as the APIC access address. This address is
 * occupied by the boot firmware so it is guaranteed that it will not conflict
 * with a page in system memory.
 */
#define	APIC_ACCESS_ADDRESS	0xFFFFF000

static void vmx_inject_pir(struct vlapic *vlapic);

#ifdef KTR
static const char *
exit_reason_to_str(int reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case EXIT_REASON_EXCEPTION:
		return "exception";
	case EXIT_REASON_EXT_INTR:
		return "extint";
	case EXIT_REASON_TRIPLE_FAULT:
		return "triplefault";
	case EXIT_REASON_INIT:
		return "init";
	case EXIT_REASON_SIPI:
		return "sipi";
	case EXIT_REASON_IO_SMI:
		return "iosmi";
	case EXIT_REASON_SMI:
		return "smi";
	case EXIT_REASON_INTR_WINDOW:
		return "intrwindow";
	case EXIT_REASON_NMI_WINDOW:
		return "nmiwindow";
	case EXIT_REASON_TASK_SWITCH:
		return "taskswitch";
	case EXIT_REASON_CPUID:
		return "cpuid";
	case EXIT_REASON_GETSEC:
		return "getsec";
	case EXIT_REASON_HLT:
		return "hlt";
	case EXIT_REASON_INVD:
		return "invd";
	case EXIT_REASON_INVLPG:
		return "invlpg";
	case EXIT_REASON_RDPMC:
		return "rdpmc";
	case EXIT_REASON_RDTSC:
		return "rdtsc";
	case EXIT_REASON_RSM:
		return "rsm";
	case EXIT_REASON_VMCALL:
		return "vmcall";
	case EXIT_REASON_VMCLEAR:
		return "vmclear";
	case EXIT_REASON_VMLAUNCH:
		return "vmlaunch";
	case EXIT_REASON_VMPTRLD:
		return "vmptrld";
	case EXIT_REASON_VMPTRST:
		return "vmptrst";
	case EXIT_REASON_VMREAD:
		return "vmread";
	case EXIT_REASON_VMRESUME:
		return "vmresume";
	case EXIT_REASON_VMWRITE:
		return "vmwrite";
	case EXIT_REASON_VMXOFF:
		return "vmxoff";
	case EXIT_REASON_VMXON:
		return "vmxon";
	case EXIT_REASON_CR_ACCESS:
		return "craccess";
	case EXIT_REASON_DR_ACCESS:
		return "draccess";
	case EXIT_REASON_INOUT:
		return "inout";
	case EXIT_REASON_RDMSR:
		return "rdmsr";
	case EXIT_REASON_WRMSR:
		return "wrmsr";
	case EXIT_REASON_INVAL_VMCS:
		return "invalvmcs";
	case EXIT_REASON_INVAL_MSR:
		return "invalmsr";
	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE:
		return "mce";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";
	case EXIT_REASON_EPT_FAULT:
		return "eptfault";
	case EXIT_REASON_EPT_MISCONFIG:
		return "eptmisconfig";
	case EXIT_REASON_INVEPT:
		return "invept";
	case EXIT_REASON_RDTSCP:
		return "rdtscp";
	case EXIT_REASON_VMX_PREEMPT:
		return "vmxpreempt";
	case EXIT_REASON_INVVPID:
		return "invvpid";
	case EXIT_REASON_WBINVD:
		return "wbinvd";
	case EXIT_REASON_XSETBV:
		return "xsetbv";
	case EXIT_REASON_APIC_WRITE:
		return "apic-write";
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

u_long
vmx_fix_cr0(u_long cr0)
{

	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

u_long
vmx_fix_cr4(u_long cr4)
{

	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static void
vpid_init(void)
{
	/*
	 * VPID 0 is required when the "enable VPID" execution control is
	 * disabled.
	 *
	 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
	 * unit number allocator does not have sufficient unique VPIDs to
	 * satisfy the allocation.
	 *
	 * The remaining VPIDs are managed by the unit number allocator.
	 */
	vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
}

static void
msr_save_area_init(struct msr_entry *g_area, int *g_count)
{
	int cnt;

	static struct msr_entry guest_msrs[] = {
		{ MSR_KGSBASE, 0, 0 },
	};

	cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
	if (cnt > GUEST_MSR_MAX_ENTRIES)
		panic("guest msr save area overrun");
	bcopy(guest_msrs, g_area, sizeof(guest_msrs));
	*g_count = cnt;
}

static void
vmx_disable(void *arg __unused)
{
	struct invvpid_desc invvpid_desc = { 0 };
	struct invept_desc invept_desc = { 0 };

	if (vmxon_enabled[curcpu]) {
		/*
		 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
		 *
		 * VMXON or VMXOFF are not required to invalidate any TLB
		 * caching structures. This prevents potential retention of
		 * cached information in the TLB between distinct VMX episodes.
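		 *
		 * Hence the explicit invvpid/invept below to flush those
		 * structures before executing VMXOFF.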
		 */
		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
		vmxoff();
	}
	load_cr4(rcr4() & ~CR4_VMXE);
}

static int
vmx_cleanup(void)
{

	if (pirvec != 0)
		vmm_ipi_free(pirvec);

	if (vpid_unr != NULL) {
		delete_unrhdr(vpid_unr);
		vpid_unr = NULL;
	}

	smp_rendezvous(NULL, vmx_disable, NULL, NULL);

	return (0);
}

static void
vmx_enable(void *arg __unused)
{
	int error;

	load_cr4(rcr4() | CR4_VMXE);

	*(uint32_t *)vmxon_region[curcpu] = vmx_revision();
	error = vmxon(vmxon_region[curcpu]);
	if (error == 0)
		vmxon_enabled[curcpu] = 1;
}

static void
vmx_restore(void)
{

	if (vmxon_enabled[curcpu])
		vmxon(vmxon_region[curcpu]);
}

static int
vmx_init(int ipinum)
{
	int error, use_tpr_shadow;
	uint64_t fixed0, fixed1, feature_control;
	uint32_t tmp, procbased2_vid_bits;

	/* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
	if (!(cpu_feature2 & CPUID2_VMX)) {
		printf("vmx_init: processor does not support VMX operation\n");
		return (ENXIO);
	}

	/*
	 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
	 * are set (bits 0 and 2 respectively).
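	 *
	 * If either bit is clear then VMX has been disabled by the BIOS
	 * and initialization fails below with ENXIO.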
	 */
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
	    (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
		printf("vmx_init: VMX operation disabled by BIOS\n");
		return (ENXIO);
	}

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
			       MSR_VMX_TRUE_PROCBASED_CTLS,
			       PROCBASED_CTLS_ONE_SETTING,
			       PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		       "processor-based controls\n");
		return (error);
	}

	/* Clear the processor-based ctl bits that are set on demand */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
			       MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED_CTLS2_ONE_SETTING,
			       PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		       "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
			       PROCBASED2_ENABLE_VPID, 0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			       MSR_VMX_TRUE_PINBASED_CTLS,
			       PINBASED_CTLS_ONE_SETTING,
			       PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		       "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
			       VM_EXIT_CTLS_ONE_SETTING,
			       VM_EXIT_CTLS_ZERO_SETTING,
			       &exit_ctls);
	if (error) {
		/* Try again without the PAT MSR bits */
		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
				       MSR_VMX_TRUE_EXIT_CTLS,
				       VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
				       VM_EXIT_CTLS_ZERO_SETTING,
				       &exit_ctls);
		if (error) {
			printf("vmx_init: processor does not support desired "
			       "exit controls\n");
			return (error);
		} else {
			if (bootverbose)
				printf("vmm: PAT MSR access not supported\n");
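			/*
			 * Guest PAT accesses will trap; let the MSR
			 * emulation code know it should handle them.
			 */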
			guest_msr_valid(MSR_PAT);
			vmx_no_patmsr = 1;
		}
	}

	/* Check support for VM-entry controls */
	if (!vmx_no_patmsr) {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
				       MSR_VMX_TRUE_ENTRY_CTLS,
				       VM_ENTRY_CTLS_ONE_SETTING,
				       VM_ENTRY_CTLS_ZERO_SETTING,
				       &entry_ctls);
	} else {
		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
				       MSR_VMX_TRUE_ENTRY_CTLS,
				       VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
				       VM_ENTRY_CTLS_ZERO_SETTING,
				       &entry_ctls);
	}

	if (error) {
		printf("vmx_init: processor does not support desired "
		       "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_TRUE_PROCBASED_CTLS,
					PROCBASED_HLT_EXITING, 0,
					&tmp) == 0);

	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_PROCBASED_CTLS,
					PROCBASED_MTF, 0,
					&tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
					MSR_VMX_TRUE_PROCBASED_CTLS,
					PROCBASED_PAUSE_EXITING, 0,
					&tmp) == 0);

	cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
					MSR_VMX_PROCBASED_CTLS2,
					PROCBASED2_UNRESTRICTED_GUEST, 0,
					&tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check support for virtual interrupt delivery.
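	 *
	 * This requires the TPR shadow in addition to the four APIC
	 * virtualization controls probed below.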
	 */
	procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
	    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
	    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
	    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

	use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
	    &tmp) == 0);

	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
	    procbased2_vid_bits, 0, &tmp);
	if (error == 0 && use_tpr_shadow) {
		virtual_interrupt_delivery = 1;
		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
		    &virtual_interrupt_delivery);
	}

	if (virtual_interrupt_delivery) {
		procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
		procbased_ctls2 |= procbased2_vid_bits;
		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;

		/*
		 * Check for Posted Interrupts only if Virtual Interrupt
		 * Delivery is enabled.
		 */
		error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
		    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
		    &tmp);
		if (error == 0) {
			pirvec = vmm_ipi_alloc();
			if (pirvec == 0) {
				if (bootverbose) {
					printf("vmx_init: unable to allocate "
					    "posted interrupt vector\n");
				}
			} else {
				posted_interrupts = 1;
				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
				    &posted_interrupts);
			}
		}
	}

	if (posted_interrupts)
		pinbased_ctls |= PINBASED_POSTED_INTERRUPT;

	/* Initialize EPT */
	error = ept_init(ipinum);
	if (error) {
		printf("vmx_init: ept initialization failed (%d)\n", error);
		return (error);
	}

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
	 * if unrestricted guest execution is allowed.
	 */
	if (cap_unrestricted_guest)
		cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
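	 *
	 * The guest therefore always executes with caching enabled;
	 * running with the cache disabled is not supported.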
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vpid_init();

	/* enable VMX operation */
	smp_rendezvous(NULL, vmx_enable, NULL, NULL);

	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	uintptr_t func;
	struct gate_descriptor *gd;

	gd = &idt[vector];

	KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
	    "invalid vector %d", vector));
	KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
	    vector));
	KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
	    "has invalid type %d", vector, gd->gd_type));
	KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
	    "has invalid dpl %d", vector, gd->gd_dpl));
	KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
	    "for vector %d has invalid selector %d", vector, gd->gd_selector));
	KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
	    "IST %d", vector, gd->gd_ist));

	func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
	vmx_call_isr(func);
}

static int
vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
{
	int error, mask_ident, shadow_ident;
	uint64_t mask_value;

	if (which != 0 && which != 4)
		panic("vmx_setup_cr_shadow: unknown cr%d", which);

	if (which == 0) {
		mask_ident = VMCS_CR0_MASK;
		mask_value = cr0_ones_mask | cr0_zeros_mask;
		shadow_ident = VMCS_CR0_SHADOW;
	} else {
		mask_ident = VMCS_CR4_MASK;
		mask_value = cr4_ones_mask | cr4_zeros_mask;
		shadow_ident = VMCS_CR4_SHADOW;
	}

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
	if (error)
		return (error);

	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
	if (error)
		return (error);

	return (0);
}
#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))

static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, guest_msr_count;
	struct vmx *vmx;
	struct vmcs *vmcs;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		      PAGE_SIZE);
	}
	vmx->vm = vm;

	vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

	/*
	 * Clean up EPTP-tagged guest physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings. So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	ept_invalidate_mappings(vmx->eptp);

	msr_bitmap_initialize(vmx->msr_bitmap);

	/*
	 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
	 * The guest FSBASE and GSBASE are saved and restored during
	 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
	 * always restored from the vmcs host state area on vm-exit.
	 *
	 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
	 * how they are saved/restored so can be directly accessed by the
	 * guest.
	 *
	 * Guest KGSBASE is saved and restored in the guest MSR save area.
	 * Host KGSBASE is restored before returning to userland from the pcb.
	 * There will be a window of time when we are executing in the host
	 * kernel context with a value of KGSBASE from the guest. This is ok
	 * because the value of KGSBASE is inconsequential in kernel context.
	 *
	 * MSR_EFER is saved and restored in the guest VMCS area on a
	 * VM exit and entry respectively. It is also restored from the
	 * host VMCS area on a VM exit.
	 */
	if (guest_msr_rw(vmx, MSR_GSBASE) ||
	    guest_msr_rw(vmx, MSR_FSBASE) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
	    guest_msr_rw(vmx, MSR_KGSBASE) ||
	    guest_msr_rw(vmx, MSR_EFER))
		panic("vmx_vminit: error setting guest msr access");

	/*
	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
	 * and entry respectively. It is also restored from the host VMCS
	 * area on a VM exit. However, if running on a system with no
	 * MSR_PAT save/restore support, leave access disabled so accesses
	 * will be trapped.
	 */
	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
		panic("vmx_vminit: error setting guest pat msr access");

	vpid_alloc(vpid, VM_MAXCPU);

	if (virtual_interrupt_delivery) {
		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    APIC_ACCESS_ADDRESS);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}

	for (i = 0; i < VM_MAXCPU; i++) {
		vmcs = &vmx->vmcs[i];
		vmcs->identifier = vmx_revision();
		error = vmclear(vmcs);
		if (error != 0) {
			panic("vmx_vminit: vmclear error %d on vcpu %d\n",
			      error, i);
		}

		error = vmcs_init(vmcs);
		KASSERT(error == 0, ("vmcs_init error %d", error));

		VMPTRLD(vmcs);
		error = 0;
		error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);
		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);
		}
		if (posted_interrupts) {
			error += vmwrite(VMCS_PIR_VECTOR, pirvec);
			error += vmwrite(VMCS_PIR_DESC,
			    vtophys(&vmx->pir_desc[i]));
		}
		VMCLEAR(vmcs);
		KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));

		vmx->cap[i].set = 0;
		vmx->cap[i].proc_ctls = procbased_ctls;
		vmx->cap[i].proc_ctls2 = procbased_ctls2;

		vmx->state[i].lastcpu = -1;
		vmx->state[i].vpid = vpid[i];

		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);

		error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]),
		    guest_msr_count);
		if (error != 0)
			panic("vmcs_set_msr_save error %d", error);

		/*
		 * Set up the CR0/4 shadows, and init the read shadow
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
		if (error != 0)
			panic("vmx_setup_cr0_shadow %d", error);

		error = vmx_setup_cr4_shadow(vmcs, 0);
		if (error != 0)
			panic("vmx_setup_cr4_shadow %d", error);

		vmx->ctx[i].pmap = pmap;
	}

	return (vmx);
}

static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
	int handled, func;

	func = vmxctx->guest_rax;

	handled = x86_emulate_cpuid(vm, vcpu,
				    (uint32_t*)(&vmxctx->guest_rax),
				    (uint32_t*)(&vmxctx->guest_rbx),
				    (uint32_t*)(&vmxctx->guest_rcx),
				    (uint32_t*)(&vmxctx->guest_rdx));
	return (handled);
}

static __inline void
vmx_run_trace(struct vmx *vmx, int vcpu)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
#endif
}

static __inline void
vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
	       int handled)
{
#ifdef KTR
	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
	    handled ? "handled" : "unhandled",
	    exit_reason_to_str(exit_reason), rip);
#endif
}

static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
#endif
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
{
	struct vmxstate *vmxstate;
	struct invvpid_desc invvpid_desc;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());

	/*
	 * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
	 *
	 * We do this because this vcpu was executing on a different host
	 * cpu when it last ran. We do not track whether it invalidated
	 * mappings associated with its 'vpid' during that run. So we must
	 * assume that the mappings associated with 'vpid' on 'curcpu' are
	 * stale and invalidate them.
	 *
	 * Note that we incur this penalty only when the scheduler chooses to
	 * move the thread associated with this vcpu between host cpus.
	 *
	 * Note also that this will invalidate mappings tagged with 'vpid'
	 * for "all" EP4TAs.
	 */
	if (vmxstate->vpid != 0) {
		if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
			invvpid_desc._res1 = 0;
			invvpid_desc._res2 = 0;
			invvpid_desc.vpid = vmxstate->vpid;
			invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		} else {
			/*
			 * The invvpid can be skipped if an invept is going to
			 * be performed before entering the guest. The invept
			 * will invalidate combined mappings tagged with
			 * 'vmx->eptp' for all vpids.
			 */
			vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
		}
	}
}

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1013219892Snwhitehorn */ 1014219892SnwhitehornCTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1015218799Snwhitehorn 1016218799Snwhitehornstatic void __inline 1017218799Snwhitehornvmx_set_int_window_exiting(struct vmx *vmx, int vcpu) 1018218799Snwhitehorn{ 1019218799Snwhitehorn 1020218799Snwhitehorn if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1021218799Snwhitehorn vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1022218799Snwhitehorn vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1023218799Snwhitehorn VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting"); 1024218799Snwhitehorn } 1025218799Snwhitehorn} 1026218799Snwhitehorn 1027218799Snwhitehornstatic void __inline 1028218799Snwhitehornvmx_clear_int_window_exiting(struct vmx *vmx, int vcpu) 1029218799Snwhitehorn{ 1030218799Snwhitehorn 1031218799Snwhitehorn KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1032218799Snwhitehorn ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls)); 1033218799Snwhitehorn vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1034218799Snwhitehorn vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1035218799Snwhitehorn VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting"); 1036218799Snwhitehorn} 1037329870Srpokala 1038329870Srpokalastatic void __inline 1039329870Srpokalavmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu) 1040329870Srpokala{ 1041329870Srpokala 1042329870Srpokala if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1043329870Srpokala vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1044329870Srpokala vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1045329870Srpokala VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting"); 1046329870Srpokala } 1047329870Srpokala} 1048218799Snwhitehorn 1049218799Snwhitehornstatic void __inline 1050218799Snwhitehornvmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu) 1051218799Snwhitehorn{ 1052226160Snwhitehorn 1053226160Snwhitehorn KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1054226160Snwhitehorn ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls)); 1055226160Snwhitehorn vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1056226160Snwhitehorn vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls); 1057226160Snwhitehorn VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting"); 1058226160Snwhitehorn} 1059226160Snwhitehorn 1060226160Snwhitehorn#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1061226160Snwhitehorn VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1062226160Snwhitehorn#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1063226160Snwhitehorn VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1064226160Snwhitehorn 1065218799Snwhitehornstatic void 1066218799Snwhitehornvmx_inject_nmi(struct vmx *vmx, int vcpu) 1067218799Snwhitehorn{ 1068218799Snwhitehorn uint32_t gi, info; 1069218799Snwhitehorn 1070218799Snwhitehorn gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1071218799Snwhitehorn KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1072218799Snwhitehorn "interruptibility-state %#x", gi)); 1073218799Snwhitehorn 1074218799Snwhitehorn info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1075218799Snwhitehorn KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1076218799Snwhitehorn "VM-entry interruption information %#x", info)); 1077218799Snwhitehorn 1078273831Snwhitehorn /* 1079273831Snwhitehorn * 
Inject the virtual NMI. The vector must be the NMI IDT entry 1080273831Snwhitehorn * or the VMCS entry check will fail. 1081273831Snwhitehorn */ 1082273831Snwhitehorn info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1083273831Snwhitehorn vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1084273831Snwhitehorn 1085273831Snwhitehorn VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI"); 1086273831Snwhitehorn 1087273831Snwhitehorn /* Clear the request */ 1088273831Snwhitehorn vm_nmi_clear(vmx->vm, vcpu); 1089273831Snwhitehorn} 1090273831Snwhitehorn 1091273831Snwhitehornstatic void 1092218799Snwhitehornvmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic) 1093218799Snwhitehorn{ 1094218799Snwhitehorn struct vm_exception exc; 1095218799Snwhitehorn int vector, need_nmi_exiting; 1096226160Snwhitehorn uint64_t rflags; 1097226160Snwhitehorn uint32_t gi, info; 1098226160Snwhitehorn 1099226160Snwhitehorn if (vm_exception_pending(vmx->vm, vcpu, &exc)) { 1100226160Snwhitehorn KASSERT(exc.vector >= 0 && exc.vector < 32, 1101226160Snwhitehorn ("%s: invalid exception vector %d", __func__, exc.vector)); 1102226160Snwhitehorn 1103226160Snwhitehorn info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1104226160Snwhitehorn KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1105226160Snwhitehorn "pending exception %d: %#x", __func__, exc.vector, info)); 1106271636Semaste 1107271636Semaste info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID; 1108226160Snwhitehorn if (exc.error_code_valid) { 1109226160Snwhitehorn info |= VMCS_INTR_DEL_ERRCODE; 1110226160Snwhitehorn vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code); 1111226160Snwhitehorn } 1112255817Snwhitehorn vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1113255817Snwhitehorn } 1114226160Snwhitehorn 1115218799Snwhitehorn if (vm_nmi_pending(vmx->vm, vcpu)) { 1116218799Snwhitehorn /* 1117218799Snwhitehorn * If there are no conditions blocking NMI injection then 1118218799Snwhitehorn * inject it directly here otherwise enable "NMI window 1119218799Snwhitehorn * exiting" to inject it as soon as we can. 1120218799Snwhitehorn * 1121218799Snwhitehorn * We also check for STI_BLOCKING because some implementations 1122218799Snwhitehorn * don't allow NMI injection in this case. If we are running 1123218799Snwhitehorn * on a processor that doesn't have this restriction it will 1124218799Snwhitehorn * immediately exit and the NMI will be injected in the 1125218799Snwhitehorn * "NMI window exiting" handler. 
1126218799Snwhitehorn */ 1127218799Snwhitehorn need_nmi_exiting = 1; 1128218799Snwhitehorn gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1129271636Semaste if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1130218799Snwhitehorn info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1131218799Snwhitehorn if ((info & VMCS_INTR_VALID) == 0) { 1132218799Snwhitehorn vmx_inject_nmi(vmx, vcpu); 1133218799Snwhitehorn need_nmi_exiting = 0; 1134218799Snwhitehorn } else { 1135218799Snwhitehorn VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI " 1136218799Snwhitehorn "due to VM-entry intr info %#x", info); 1137218799Snwhitehorn } 1138218799Snwhitehorn } else { 1139218799Snwhitehorn VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to " 1140218799Snwhitehorn "Guest Interruptibility-state %#x", gi); 1141218799Snwhitehorn } 1142218799Snwhitehorn 1143218799Snwhitehorn if (need_nmi_exiting) 1144218799Snwhitehorn vmx_set_nmi_window_exiting(vmx, vcpu); 1145218799Snwhitehorn } 1146218799Snwhitehorn 1147218799Snwhitehorn if (virtual_interrupt_delivery) { 1148218799Snwhitehorn vmx_inject_pir(vlapic); 1149218799Snwhitehorn return; 1150218799Snwhitehorn } 1151218799Snwhitehorn 1152218799Snwhitehorn /* 1153218799Snwhitehorn * If interrupt-window exiting is already in effect then don't bother 1154218799Snwhitehorn * checking for pending interrupts. This is just an optimization and 1155218799Snwhitehorn * not needed for correctness. 1156218799Snwhitehorn */ 1157218799Snwhitehorn if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 1158218799Snwhitehorn VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to " 1159218799Snwhitehorn "pending int_window_exiting"); 1160218799Snwhitehorn return; 1161218799Snwhitehorn } 1162218799Snwhitehorn 1163218799Snwhitehorn /* Ask the local apic for a vector to inject */ 1164218799Snwhitehorn if (!vlapic_pending_intr(vlapic, &vector)) 1165218799Snwhitehorn return; 1166218799Snwhitehorn 1167218799Snwhitehorn KASSERT(vector >= 32 && vector <= 255, ("invalid vector %d", vector)); 1168218799Snwhitehorn 1169218799Snwhitehorn /* Check RFLAGS.IF and the interruptibility state of the guest */ 1170218799Snwhitehorn rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1171218799Snwhitehorn if ((rflags & PSL_I) == 0) { 1172226161Snwhitehorn VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1173226161Snwhitehorn "rflags %#lx", vector, rflags); 1174218799Snwhitehorn goto cantinject; 1175226161Snwhitehorn } 1176226161Snwhitehorn 1177226161Snwhitehorn gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1178226161Snwhitehorn if (gi & HWINTR_BLOCKING) { 1179226161Snwhitehorn VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1180226161Snwhitehorn "Guest Interruptibility-state %#x", vector, gi); 1181226161Snwhitehorn goto cantinject; 1182226161Snwhitehorn } 1183226161Snwhitehorn 1184226161Snwhitehorn info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1185226161Snwhitehorn if (info & VMCS_INTR_VALID) { 1186226161Snwhitehorn /* 1187226161Snwhitehorn * This is expected and could happen for multiple reasons: 1188226161Snwhitehorn * - A vectoring VM-entry was aborted due to astpending 1189271636Semaste * - A VM-exit happened during event injection. 1190226161Snwhitehorn * - An exception was injected above. 
1191218799Snwhitehorn * - An NMI was injected above or after "NMI window exiting" 1192226161Snwhitehorn */ 1193218799Snwhitehorn VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to " 1194226161Snwhitehorn "VM-entry intr info %#x", vector, info); 1195219892Snwhitehorn goto cantinject; 1196218799Snwhitehorn } 1197218799Snwhitehorn 1198218799Snwhitehorn /* Inject the interrupt */ 1199218799Snwhitehorn info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1200218799Snwhitehorn info |= vector; 1201218799Snwhitehorn vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1202226161Snwhitehorn 1203218799Snwhitehorn /* Update the Local APIC ISR */ 1204218799Snwhitehorn vlapic_intr_accepted(vlapic, vector); 1205218799Snwhitehorn 1206218799Snwhitehorn VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector); 1207218799Snwhitehorn 1208218799Snwhitehorn return; 1209218799Snwhitehorn 1210218799Snwhitehorncantinject: 1211218799Snwhitehorn /* 1212218799Snwhitehorn * Set the Interrupt Window Exiting execution control so we can inject 1213218799Snwhitehorn * the interrupt as soon as blocking condition goes away. 1214226083Snwhitehorn */ 1215218799Snwhitehorn vmx_set_int_window_exiting(vmx, vcpu); 1216218799Snwhitehorn} 1217218799Snwhitehorn 1218218799Snwhitehorn/* 1219218799Snwhitehorn * If the Virtual NMIs execution control is '1' then the logical processor 1220218799Snwhitehorn * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1221218799Snwhitehorn * the VMCS. An IRET instruction in VMX non-root operation will remove any 1222218799Snwhitehorn * virtual-NMI blocking. 1223218799Snwhitehorn * 1224218799Snwhitehorn * This unblocking occurs even if the IRET causes a fault. In this case the 1225218799Snwhitehorn * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1226218799Snwhitehorn */ 1227226083Snwhitehornstatic void 1228218799Snwhitehornvmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid) 1229226083Snwhitehorn{ 1230226083Snwhitehorn uint32_t gi; 1231226083Snwhitehorn 1232218799Snwhitehorn VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking"); 1233226083Snwhitehorn gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1234226083Snwhitehorn gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1235226083Snwhitehorn vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1236226083Snwhitehorn} 1237226083Snwhitehorn 1238226083Snwhitehornstatic void 1239218799Snwhitehornvmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid) 1240226083Snwhitehorn{ 1241218799Snwhitehorn uint32_t gi; 1242218799Snwhitehorn 1243218799Snwhitehorn VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking"); 1244218799Snwhitehorn gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1245218799Snwhitehorn gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1246218799Snwhitehorn vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1247218799Snwhitehorn} 1248218799Snwhitehorn 1249218799Snwhitehornstatic int 1250218799Snwhitehornvmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 1251218799Snwhitehorn{ 1252218799Snwhitehorn struct vmxctx *vmxctx; 1253218799Snwhitehorn uint64_t xcrval; 1254218799Snwhitehorn const struct xsave_limits *limits; 1255218799Snwhitehorn 1256218799Snwhitehorn vmxctx = &vmx->ctx[vcpu]; 1257218799Snwhitehorn limits = vmm_get_xsave_limits(); 1258218799Snwhitehorn 1259218799Snwhitehorn /* 1260218799Snwhitehorn * Note that the processor raises a GP# fault on its own if 1261218799Snwhitehorn * xsetbv is executed for CPL != 0, so we do not have to 1262218799Snwhitehorn * emulate that fault here. 
static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

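/*
 * For illustration: xsetbv takes the XCR index in %ecx and the new value
 * in %edx:%eax, which is why guest_rcx is checked against 0 (XCR0) and
 * xcrval is assembled from guest_rdx/guest_rax above.  A guest executing
 * xsetbv with %ecx = 0, %edx = 0 and %eax = 0x7 is asking for
 * x87 | SSE | AVX state to be enabled.
 */
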
static int
vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	int cr, vmcs_guest_cr, vmcs_shadow_cr;
	uint64_t crval, regval, ones_mask, zeros_mask;
	const struct vmxctx *vmxctx;

	/* We only handle mov to %cr0 or %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	cr = exitqual & 0xf;
	if (cr != 0 && cr != 4)
		return (UNHANDLED);

	vmxctx = &vmx->ctx[vcpu];

	/*
	 * We must use vmcs_write() directly here because vmcs_setreg() will
	 * call vmclear(vmcs) as a side-effect which we certainly don't want.
	 */
	switch ((exitqual >> 8) & 0xf) {
	case 0:
		regval = vmxctx->guest_rax;
		break;
	case 1:
		regval = vmxctx->guest_rcx;
		break;
	case 2:
		regval = vmxctx->guest_rdx;
		break;
	case 3:
		regval = vmxctx->guest_rbx;
		break;
	case 4:
		regval = vmcs_read(VMCS_GUEST_RSP);
		break;
	case 5:
		regval = vmxctx->guest_rbp;
		break;
	case 6:
		regval = vmxctx->guest_rsi;
		break;
	case 7:
		regval = vmxctx->guest_rdi;
		break;
	case 8:
		regval = vmxctx->guest_r8;
		break;
	case 9:
		regval = vmxctx->guest_r9;
		break;
	case 10:
		regval = vmxctx->guest_r10;
		break;
	case 11:
		regval = vmxctx->guest_r11;
		break;
	case 12:
		regval = vmxctx->guest_r12;
		break;
	case 13:
		regval = vmxctx->guest_r13;
		break;
	case 14:
		regval = vmxctx->guest_r14;
		break;
	case 15:
		regval = vmxctx->guest_r15;
		break;
	}

	if (cr == 0) {
		ones_mask = cr0_ones_mask;
		zeros_mask = cr0_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR0;
		vmcs_shadow_cr = VMCS_CR0_SHADOW;
	} else {
		ones_mask = cr4_ones_mask;
		zeros_mask = cr4_zeros_mask;
		vmcs_guest_cr = VMCS_GUEST_CR4;
		vmcs_shadow_cr = VMCS_CR4_SHADOW;
	}
	vmcs_write(vmcs_shadow_cr, regval);

	crval = regval | ones_mask;
	crval &= ~zeros_mask;
	vmcs_write(vmcs_guest_cr, crval);

	if (cr == 0 && regval & CR0_PG) {
		uint64_t efer, entry_ctls;

		/*
		 * If CR0.PG is being set and EFER.LME is set then EFER.LMA
		 * and the "IA-32e mode guest" VM-entry control must be set
		 * as well.
		 */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
			entry_ctls |= VM_ENTRY_GUEST_LMA;
			vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		}
	}

	return (HANDLED);
}

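/*
 * Worked example: the exit qualification for a control-register access
 * (SDM, "Exit Qualification for Control-Register Accesses") encodes the
 * CR number in bits 3:0, the access type in bits 5:4 (0 = MOV to CR)
 * and the source GPR in bits 11:8.  A guest "mov %rbx, %cr4" thus
 * produces exitqual 0x304: cr = 4, access type 0, register operand 3
 * (%rbx), so the value written is taken from vmxctx->guest_rbx.
 */
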
static enum vie_cpu_mode
vmx_cpu_mode(void)
{

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA)
		return (CPU_MODE_64BIT);
	else
		return (CPU_MODE_COMPATIBILITY);
}

static enum vie_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = VM_PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = VM_PROT_EXECUTE;
	else
		fault_type = VM_PROT_READ;

	return (fault_type);
}

static boolean_t
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (FALSE);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (FALSE);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (FALSE);
	}

	return (TRUE);
}

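/*
 * For illustration: in the EPT-violation exit qualification bit 0 is set
 * for a data read, bit 1 for a data write and bit 2 for an instruction
 * fetch; bit 7 says the guest-linear address field is valid and bit 8
 * says the fault occurred during a linear-address translation.  A
 * qualification of 0x182 (write | GLA valid | translation valid) is
 * therefore a candidate for MMIO instruction emulation above.
 */
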
static int
vmx_handle_apic_write(struct vlapic *vlapic, uint64_t qual)
{
	int error, handled, offset;
	bool retu;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	handled = HANDLED;
	offset = APIC_WRITE_OFFSET(qual);
	switch (offset) {
	case APIC_OFFSET_ID:
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_LDR:
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		retu = false;
		error = vlapic_icrlo_write_handler(vlapic, &retu);
		if (error != 0 || retu)
			handled = UNHANDLED;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		vlapic_icrtmr_write_handler(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		vlapic_dcr_write_handler(vlapic);
		break;
	default:
		handled = UNHANDLED;
		break;
	}
	return (handled);
}

static bool
apic_access_fault(uint64_t gpa)
{

	if (virtual_interrupt_delivery &&
	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
		return (true);
	else
		return (false);
}

static int
vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint64_t qual;
	int access_type, offset, allowed;

	if (!virtual_interrupt_delivery)
		return (UNHANDLED);

	qual = vmexit->u.vmx.exit_qualification;
	access_type = APIC_ACCESS_TYPE(qual);
	offset = APIC_ACCESS_OFFSET(qual);

	allowed = 0;
	if (access_type == 0) {
		/*
		 * Read data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	} else if (access_type == 1) {
		/*
		 * Write data access to the following registers is expected.
		 */
		switch (offset) {
		case APIC_OFFSET_VER:
		case APIC_OFFSET_APR:
		case APIC_OFFSET_PPR:
		case APIC_OFFSET_RRR:
		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		case APIC_OFFSET_CMCI_LVT:
		case APIC_OFFSET_TIMER_CCR:
			allowed = 1;
			break;
		default:
			break;
		}
	}

	if (allowed) {
		vmexit->exitcode = VM_EXITCODE_INST_EMUL;
		vmexit->u.inst_emul.gpa = DEFAULT_APIC_BASE + offset;
		vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
		vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
		vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode();
		vmexit->u.inst_emul.paging_mode = vmx_paging_mode();
	}

	/*
	 * Regardless of whether the APIC-access is allowed this handler
	 * always returns UNHANDLED:
	 * - if the access is allowed then it is handled by emulating the
	 *   instruction that caused the VM-exit (outside the critical section)
	 * - if the access is not allowed then it will be converted to an
	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
	 */
	return (UNHANDLED);
}

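/*
 * For illustration: APIC-access VM exits report the page offset of the
 * access in the low 12 bits of the exit qualification and the access
 * type in bits 15:12 (0 = linear read, 1 = linear write).  With the
 * virtual-APIC page mapped at DEFAULT_APIC_BASE (0xfee00000), an allowed
 * read of the PPR register at offset 0xa0 is converted above into an
 * instruction-emulation exit for guest-physical address 0xfee000a0.
 */
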
static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	int error, handled;
	struct vmxctx *vmxctx;
	struct vlapic *vlapic;
	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, reason;
	uint64_t qual, gpa;
	bool retu;

	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);

	handled = UNHANDLED;
	vmxctx = &vmx->ctx[vcpu];

	qual = vmexit->u.vmx.exit_qualification;
	reason = vmexit->u.vmx.exit_reason;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * VM exits that could be triggered during event injection on the
	 * previous VM entry need to be handled specially by re-injecting
	 * the event.
	 *
	 * See "Information for VM Exits During Event Delivery" in Intel SDM
	 * for details.
	 */
	switch (reason) {
	case EXIT_REASON_EPT_FAULT:
	case EXIT_REASON_EPT_MISCONFIG:
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_TASK_SWITCH:
	case EXIT_REASON_EXCEPTION:
		idtvec_info = vmcs_idt_vectoring_info();
		if (idtvec_info & VMCS_IDT_VEC_VALID) {
			idtvec_info &= ~(1 << 12); /* clear undefined bit */
			vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
			if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
				idtvec_err = vmcs_idt_vectoring_err();
				vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
				    idtvec_err);
			}
			/*
			 * If 'virtual NMIs' are being used and the VM-exit
			 * happened while injecting an NMI during the previous
			 * VM-entry, then clear "blocking by NMI" in the Guest
			 * Interruptibility-state.
			 */
			if ((idtvec_info & VMCS_INTR_T_MASK) ==
			    VMCS_INTR_T_NMI) {
				vmx_clear_nmi_blocking(vmx, vcpu);
			}
			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
		}
		break;
	default:
		idtvec_info = 0;
		break;
	}

	switch (reason) {
	case EXIT_REASON_CR_ACCESS:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
		break;
	case EXIT_REASON_RDMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
		retu = false;
		ecx = vmxctx->guest_rcx;
		error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
		} else if (!retu) {
			handled = HANDLED;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_rdmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_WRMSR:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
		retu = false;
		eax = vmxctx->guest_rax;
		ecx = vmxctx->guest_rcx;
		edx = vmxctx->guest_rdx;
		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
		    (uint64_t)edx << 32 | eax, &retu);
		if (error) {
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
		} else if (!retu) {
			handled = HANDLED;
		} else {
			/* Return to userspace with a valid exitcode */
			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
			    ("emulate_wrmsr retu with bogus exitcode"));
		}
		break;
	case EXIT_REASON_HLT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
		break;
	case EXIT_REASON_MTF:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		break;
	case EXIT_REASON_PAUSE:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		break;
	case EXIT_REASON_INTR_WINDOW:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
		vmx_clear_int_window_exiting(vmx, vcpu);
		return (1);
	case EXIT_REASON_EXT_INTR:
		/*
		 * External interrupts serve only to cause VM exits and allow
		 * the host interrupt handler to run.
		 *
		 * If this external interrupt triggers a virtual interrupt
		 * to a VM, then that state will be recorded by the
		 * host interrupt handler in the VM's softc. We will inject
		 * this virtual interrupt during the subsequent VM enter.
		 */
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
		    ("VM exit interruption info invalid: %#x", intr_info));
		vmx_trigger_hostintr(intr_info & 0xff);

		/*
		 * This is special. We want to treat this as a 'handled'
		 * VM-exit but not increment the instruction pointer.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
		return (1);
	case EXIT_REASON_NMI_WINDOW:
		/* Exit to allow the pending virtual NMI to be injected */
		if (vm_nmi_pending(vmx->vm, vcpu))
			vmx_inject_nmi(vmx, vcpu);
		vmx_clear_nmi_window_exiting(vmx, vcpu);
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
		return (1);
	case EXIT_REASON_INOUT:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
		vmexit->exitcode = VM_EXITCODE_INOUT;
		vmexit->u.inout.bytes = (qual & 0x7) + 1;
		vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
		vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
		vmexit->u.inout.port = (uint16_t)(qual >> 16);
		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
		break;
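	/*
	 * For illustration: the I/O exit qualification decoded above packs
	 * the access size minus one into bits 2:0, the direction into bit 3
	 * (1 = in), the string and REP flags into bits 4 and 5, and the
	 * port number into bits 31:16.  A one-byte IN from port 0x71
	 * (port in %dx) yields a qualification of 0x00710008.
	 */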
	case EXIT_REASON_CPUID:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
		break;
	case EXIT_REASON_EXCEPTION:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
		    ("VM exit interruption info invalid: %#x", intr_info));

		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to a
		 * fault encountered during the execution of IRET then we must
		 * restore the state of "virtual-NMI blocking" before resuming
		 * the guest.
		 *
		 * See "Resuming Guest Software after Handling an Exception".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (intr_info & 0xff) != IDT_DF &&
		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);

		/*
		 * The NMI has already been handled in vmx_exit_handle_nmi().
		 */
		if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI)
			return (1);
		break;
	case EXIT_REASON_EPT_FAULT:
		/*
		 * If 'gpa' lies within the address space allocated to
		 * memory then this must be a nested page fault otherwise
		 * this must be an instruction that accesses MMIO space.
		 */
		gpa = vmcs_gpa();
		if (vm_mem_allocated(vmx->vm, gpa) || apic_access_fault(gpa)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = gpa;
			vmexit->u.paging.fault_type = ept_fault_type(qual);
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
		} else if (ept_emulation_fault(qual)) {
			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
			vmexit->u.inst_emul.gpa = gpa;
			vmexit->u.inst_emul.gla = vmcs_gla();
			vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
			vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode();
			vmexit->u.inst_emul.paging_mode = vmx_paging_mode();
			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
		}
		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to an
		 * EPT fault during the execution of IRET then we must restore
		 * the state of "virtual-NMI blocking" before resuming.
		 *
		 * See description of "NMI unblocking due to IRET" in
		 * "Exit Qualification for EPT Violations".
		 */
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (qual & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);
		break;
	case EXIT_REASON_VIRTUALIZED_EOI:
		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
		vmexit->inst_length = 0;	/* trap-like */
		break;
	case EXIT_REASON_APIC_ACCESS:
		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
		break;
	case EXIT_REASON_APIC_WRITE:
		/*
		 * APIC-write VM exit is trap-like so the %rip is already
		 * pointing to the next instruction.
		 */
		vmexit->inst_length = 0;
		vlapic = vm_lapic(vmx->vm, vcpu);
		handled = vmx_handle_apic_write(vlapic, qual);
		break;
	case EXIT_REASON_XSETBV:
		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
		break;
	default:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	if (handled) {
		/*
		 * It is possible that control is returned to userland
		 * even though we were able to handle the VM exit in the
		 * kernel.
		 *
		 * In such a case we want to make sure that the userland
		 * restarts guest execution at the instruction *after*
		 * the one we just processed. Therefore we update the
		 * guest rip in the VMCS and in 'vmexit'.
		 */
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic VMX exit.
			 */
			vmexit->exitcode = VM_EXITCODE_VMX;
			vmexit->u.vmx.status = VM_SUCCESS;
			vmexit->u.vmx.inst_type = 0;
			vmexit->u.vmx.inst_error = 0;
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

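/*
 * For illustration of the contract above: returning HANDLED (1) leaves
 * vmexit->exitcode at VM_EXITCODE_BOGUS and tells the caller, vmx_run(),
 * to re-enter the guest; returning UNHANDLED (0) requires a real
 * exitcode so the exit can be completed in userland.  vmx_run() panics
 * if this invariant is violated.
 */
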
static __inline int
vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmx_astpending_trace(vmx, vcpu, vmexit->rip);
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);

	return (HANDLED);
}

static __inline int
vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{

	vmexit->rip = vmcs_guest_rip();
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1);

	return (UNHANDLED);
}

static __inline int
vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
{

	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
	    vmxctx->inst_fail_status));

	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = vmxctx->inst_fail_status;
	vmexit->u.vmx.inst_error = vmcs_instruction_error();
	vmexit->u.vmx.exit_reason = ~0;
	vmexit->u.vmx.exit_qualification = ~0;

	switch (rc) {
	case VMX_VMRESUME_ERROR:
	case VMX_VMLAUNCH_ERROR:
	case VMX_INVEPT_ERROR:
		vmexit->u.vmx.inst_type = rc;
		break;
	default:
		panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
	}

	return (UNHANDLED);
}

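/*
 * For illustration: u.vmx.inst_error above is read from the 32-bit
 * VM-instruction error field of the VMCS, which is valid when VMLAUNCH
 * or VMRESUME fails with "VMfailValid" (for example, error number 7,
 * "VM entry with invalid control field(s)", in the SDM's VM-instruction
 * error number table).
 */
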
/*
 * If the NMI-exiting VM execution control is set to '1' then an NMI in
 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
 * sufficient to simply vector to the NMI handler via a software interrupt.
 * However, this must be done before maskable interrupts are enabled
 * otherwise the "iret" issued by an interrupt handler will incorrectly
 * clear NMI blocking.
 */
static __inline void
vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
{
	uint32_t intr_info;

	KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));

	if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
		return;

	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
	KASSERT((intr_info & VMCS_INTR_VALID) != 0,
	    ("VM exit interruption info invalid: %#x", intr_info));

	if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
		KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
		    "to NMI has invalid vector: %#x", intr_info));
		VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
		__asm __volatile("int $2");
	}
}

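/*
 * For illustration: "int $2" dispatches through the host IDT entry for
 * vector 2 (NMI, hence the IDT_NMI assertion above), so the host's NMI
 * handler runs exactly as if the NMI had been delivered directly.  The
 * handler's "iret" is also what re-arms NMI delivery, which is why this
 * must happen while maskable interrupts are still disabled, as the
 * comment above the function explains.
 */
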
static int
vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
    void *rendezvous_cookie)
{
	int rc, handled, launched;
	struct vmx *vmx;
	struct vm *vm;
	struct vmxctx *vmxctx;
	struct vmcs *vmcs;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint64_t rip;
	uint32_t exit_reason;

	vmx = arg;
	vm = vmx->vm;
	vmcs = &vmx->vmcs[vcpu];
	vmxctx = &vmx->ctx[vcpu];
	vlapic = vm_lapic(vm, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	launched = 0;

	KASSERT(vmxctx->pmap == pmap,
	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));

	VMPTRLD(vmcs);

	/*
	 * XXX
	 * We do this every time because we may setup the virtual machine
	 * from a different process than the one that actually runs it.
	 *
	 * If the life of a virtual machine was spent entirely in the context
	 * of a single process we could do this once in vmx_vminit().
	 */
	vmcs_write(VMCS_HOST_CR3, rcr3());

	vmcs_write(VMCS_GUEST_RIP, startrip);
	vmx_set_pcpu_defaults(vmx, vcpu, pmap);
	do {
		/*
		 * Interrupts are disabled from this point on until the
		 * guest starts executing. This is done for the following
		 * reasons:
		 *
		 * If an AST is asserted on this thread after the check below,
		 * then the IPI_AST notification will not be lost, because it
		 * will cause a VM exit due to external interrupt as soon as
		 * the guest state is loaded.
		 *
		 * A posted interrupt after 'vmx_inject_interrupts()' will
		 * not be "lost" because it will be held pending in the host
		 * APIC because interrupts are disabled. The pending interrupt
		 * will be recognized as soon as the guest state is loaded.
		 *
		 * The same reasoning applies to the IPI generated by
		 * pmap_invalidate_ept().
		 */
		disable_intr();
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_intr();
			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
			break;
		}

		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
			enable_intr();
			handled = vmx_exit_rendezvous(vmx, vcpu, vmexit);
			break;
		}

		vmx_inject_interrupts(vmx, vcpu, vlapic);
		vmx_run_trace(vmx, vcpu);
		rc = vmx_enter_guest(vmxctx, vmx, launched);

		/* Collect some information for VM exit processing */
		vmexit->rip = rip = vmcs_guest_rip();
		vmexit->inst_length = vmexit_instruction_length();
		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();

		if (rc == VMX_GUEST_VMEXIT) {
			vmx_exit_handle_nmi(vmx, vcpu, vmexit);
			enable_intr();
			handled = vmx_exit_process(vmx, vcpu, vmexit);
		} else {
			enable_intr();
			handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
		}
		launched = 1;
		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
	} while (handled);

	/*
	 * If a VM exit has been handled then the exitcode must be BOGUS
	 * If a VM exit is not handled then the exitcode must not be BOGUS
	 */
	if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
	    (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
		panic("Mismatch between handled (%d) and exitcode (%d)",
		    handled, vmexit->exitcode);
	}

	if (!handled)
		vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);

	VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
	    vmexit->exitcode);

	VMCLEAR(vmcs);
	return (0);
}

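/*
 * For illustration: the VMPTRLD/VMCLEAR bracket above makes this vcpu's
 * VMCS current on entry and flushes it back to memory on return.
 * Because VMCLEAR puts the VMCS back in the "clear" state, the next call
 * must start over with 'launched = 0' so that vmx_enter_guest() uses
 * VMLAUNCH for the first entry and VMRESUME thereafter.
 */
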
static void
vmx_vmcleanup(void *arg)
{
	int i, error;
	struct vmx *vmx = arg;

	if (virtual_interrupt_delivery)
		vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);

	for (i = 0; i < VM_MAXCPU; i++)
		vpid_free(vmx->state[i].vpid);

	/*
	 * XXXSMP we also need to clear the VMCS active on the other vcpus.
	 */
	error = vmclear(&vmx->vmcs[0]);
	if (error != 0)
		panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);

	free(vmx, M_VMX);

	return;
}

static register_t *
vmxctx_regptr(struct vmxctx *vmxctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RAX:
		return (&vmxctx->guest_rax);
	case VM_REG_GUEST_RBX:
		return (&vmxctx->guest_rbx);
	case VM_REG_GUEST_RCX:
		return (&vmxctx->guest_rcx);
	case VM_REG_GUEST_RDX:
		return (&vmxctx->guest_rdx);
	case VM_REG_GUEST_RSI:
		return (&vmxctx->guest_rsi);
	case VM_REG_GUEST_RDI:
		return (&vmxctx->guest_rdi);
	case VM_REG_GUEST_RBP:
		return (&vmxctx->guest_rbp);
	case VM_REG_GUEST_R8:
		return (&vmxctx->guest_r8);
	case VM_REG_GUEST_R9:
		return (&vmxctx->guest_r9);
	case VM_REG_GUEST_R10:
		return (&vmxctx->guest_r10);
	case VM_REG_GUEST_R11:
		return (&vmxctx->guest_r11);
	case VM_REG_GUEST_R12:
		return (&vmxctx->guest_r12);
	case VM_REG_GUEST_R13:
		return (&vmxctx->guest_r13);
	case VM_REG_GUEST_R14:
		return (&vmxctx->guest_r14);
	case VM_REG_GUEST_R15:
		return (&vmxctx->guest_r15);
	default:
		break;
	}
	return (NULL);
}

static int
vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*retval = *regp;
		return (0);
	} else
		return (EINVAL);
}

static int
vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
{
	register_t *regp;

	if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
		*regp = val;
		return (0);
	} else
		return (EINVAL);
}

static int
vmx_shadow_reg(int reg)
{
	int shreg;

	shreg = -1;

	switch (reg) {
	case VM_REG_GUEST_CR0:
		shreg = VMCS_CR0_SHADOW;
		break;
	case VM_REG_GUEST_CR4:
		shreg = VMCS_CR4_SHADOW;
		break;
	default:
		break;
	}

	return (shreg);
}

static int
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
	int running, hostcpu;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
		return (0);

	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
}

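/*
 * For illustration: general-purpose registers live in the software
 * 'vmxctx' saved and restored by vmx_enter_guest(), so vmxctx_getreg()
 * satisfies a VM_REG_GUEST_RBX lookup directly.  Registers kept in the
 * VMCS itself (e.g. VM_REG_GUEST_RSP or VM_REG_GUEST_RIP) miss in
 * vmxctx_regptr() and fall through to vmcs_getreg() above.
 */
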
static int
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
	int error, hostcpu, running, shadow;
	uint64_t ctls;
	struct vmx *vmx = arg;

	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
	if (running && hostcpu != curcpu)
		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);

	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
		return (0);

	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);

	if (error == 0) {
		/*
		 * If the "load EFER" VM-entry control is 1 then the
		 * value of EFER.LMA must be identical to the "IA-32e mode
		 * guest" bit in the VM-entry control.
		 */
		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
		    (reg == VM_REG_GUEST_EFER)) {
			vmcs_getreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
			if (val & EFER_LMA)
				ctls |= VM_ENTRY_GUEST_LMA;
			else
				ctls &= ~VM_ENTRY_GUEST_LMA;
			vmcs_setreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
		}

		shadow = vmx_shadow_reg(reg);
		if (shadow > 0) {
			/*
			 * Store the unmodified value in the shadow
			 */
			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
			    VMCS_IDENT(shadow), val);
		}
	}

	return (error);
}

static int
vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmx *vmx = arg;

	return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
}

static int
vmx_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct vmx *vmx = arg;
	int vcap;
	int ret;

	ret = ENOENT;

	vcap = vmx->cap[vcpu].set;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit)
			ret = 0;
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit)
			ret = 0;
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap)
			ret = 0;
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest)
			ret = 0;
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid)
			ret = 0;
		break;
	default:
		break;
	}

	if (ret == 0)
		*retval = (vcap & (1 << type)) ? 1 : 0;

	return (ret);
}

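/*
 * For illustration: each VM_CAP_* maps to a single VMCS execution
 * control bit, e.g. VM_CAP_HALT_EXIT to PROCBASED_HLT_EXITING in the
 * primary processor-based controls, and the per-vcpu 'set' word caches
 * the current value as a (1 << type) bitmask; vmx_setcap() below
 * performs the actual VMCS update.
 */
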
static int
vmx_setcap(void *arg, int vcpu, int type, int val)
{
	struct vmx *vmx = arg;
	struct vmcs *vmcs = &vmx->vmcs[vcpu];
	uint32_t baseval;
	uint32_t *pptr;
	int error;
	int flag;
	int reg;
	int retval;

	retval = ENOENT;
	pptr = NULL;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (cap_halt_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_HLT_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_MTRAP_EXIT:
		if (cap_monitor_trap) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_MTF;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_PAUSE_EXIT:
		if (cap_pause_exit) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls;
			baseval = *pptr;
			flag = PROCBASED_PAUSE_EXITING;
			reg = VMCS_PRI_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		if (cap_unrestricted_guest) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_UNRESTRICTED_GUEST;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	case VM_CAP_ENABLE_INVPCID:
		if (cap_invpcid) {
			retval = 0;
			pptr = &vmx->cap[vcpu].proc_ctls2;
			baseval = *pptr;
			flag = PROCBASED2_ENABLE_INVPCID;
			reg = VMCS_SEC_PROC_BASED_CTLS;
		}
		break;
	default:
		break;
	}

	if (retval == 0) {
		if (val) {
			baseval |= flag;
		} else {
			baseval &= ~flag;
		}
		VMPTRLD(vmcs);
		error = vmwrite(reg, baseval);
		VMCLEAR(vmcs);

		if (error) {
			retval = error;
		} else {
			/*
			 * Update optional stored flags, and record
			 * setting
			 */
			if (pptr != NULL) {
				*pptr = baseval;
			}

			if (val) {
				vmx->cap[vcpu].set |= (1 << type);
			} else {
				vmx->cap[vcpu].set &= ~(1 << type);
			}
		}
	}

	return (retval);
}

struct vlapic_vtx {
	struct vlapic	vlapic;
	struct pir_desc	*pir_desc;
	struct vmx	*vmx;
};

#define	VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)	\
do {									\
	VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",	\
	    level ? "level" : "edge", vector);				\
	VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);	\
	VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);	\
	VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
} while (0)

/*
 * vlapic->ops handlers that utilize the APICv hardware assist described in
 * Chapter 29 of the Intel SDM.
 */
static int
vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	uint64_t mask;
	int idx, notify;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	/*
	 * Keep track of interrupt requests in the PIR descriptor. This is
	 * because the virtual APIC page pointed to by the VMCS cannot be
	 * modified if the vcpu is running.
	 */
	idx = vector / 64;
	mask = 1UL << (vector % 64);
	atomic_set_long(&pir_desc->pir[idx], mask);
	notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);

	VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
	    level, "vmx_set_intr_ready");
	return (notify);
}

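/*
 * For illustration: the PIR is a 256-bit bitmap stored as four 64-bit
 * words, one bit per vector.  Recording vector 0x80 sets bit 0 of
 * pir[2] (idx = 0x80 / 64 = 2, mask = 1UL << 0), and 'notify' is true
 * only for the request that transitions 'pending' from 0 to 1, so a
 * notification is sent at most once per batch of posted interrupts.
 */
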
static int
vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t pending, pirval;
	uint32_t ppr, vpr;
	int i;

	/*
	 * This function is only expected to be called from the 'HLT' exit
	 * handler which does not care about the vector that is pending.
	 */
	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	pending = atomic_load_acq_long(&pir_desc->pending);
	if (!pending)
		return (0);	/* common case */

	/*
	 * If there is an interrupt pending then it will be recognized only
	 * if its priority is greater than the processor priority.
	 *
	 * Special case: if the processor priority is zero then any pending
	 * interrupt will be recognized.
	 */
	lapic = vlapic->apic_page;
	ppr = lapic->ppr & 0xf0;
	if (ppr == 0)
		return (1);

	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
	    lapic->ppr);

	for (i = 3; i >= 0; i--) {
		pirval = pir_desc->pir[i];
		if (pirval != 0) {
			vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
			return (vpr > ppr);
		}
	}
	return (0);
}

static void
vmx_intr_accepted(struct vlapic *vlapic, int vector)
{

	panic("vmx_intr_accepted: not expected to be called");
}

static void
vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
{
	struct vlapic_vtx *vlapic_vtx;
	struct vmx *vmx;
	struct vmcs *vmcs;
	uint64_t mask, val;

	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
	    ("vmx_set_tmr: vcpu cannot be running"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vmx = vlapic_vtx->vmx;
	vmcs = &vmx->vmcs[vlapic->vcpuid];
	mask = 1UL << (vector % 64);

	VMPTRLD(vmcs);
	val = vmcs_read(VMCS_EOI_EXIT(vector));
	if (level)
		val |= mask;
	else
		val &= ~mask;
	vmcs_write(VMCS_EOI_EXIT(vector), val);
	VMCLEAR(vmcs);
}

static void
vmx_post_intr(struct vlapic *vlapic, int hostcpu)
{

	ipi_cpu(hostcpu, pirvec);
}

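/*
 * For illustration: 'pirvec' is the posted-interrupt notification vector
 * programmed into the VMCS.  If the target vcpu is in VMX non-root
 * operation when the IPI arrives, the processor itself ORs the PIR into
 * the virtual-APIC IRR and re-evaluates pending virtual interrupts
 * without causing a VM exit; otherwise the IPI just kicks the host cpu
 * and the bits are transferred by vmx_inject_pir() below on the next
 * VM entry.
 */
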
/*
 * Transfer the pending interrupts in the PIR descriptor to the IRR
 * in the virtual APIC page.
 */
static void
vmx_inject_pir(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t val, pirval;
	int rvi, pirbase;
	uint16_t intr_status_old, intr_status_new;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;
	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
		    "no posted interrupt pending");
		return;
	}

	pirval = 0;
	lapic = vlapic->apic_page;

	val = atomic_readandclear_long(&pir_desc->pir[0]);
	if (val != 0) {
		lapic->irr0 |= val;
		lapic->irr1 |= val >> 32;
		pirbase = 0;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[1]);
	if (val != 0) {
		lapic->irr2 |= val;
		lapic->irr3 |= val >> 32;
		pirbase = 64;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[2]);
	if (val != 0) {
		lapic->irr4 |= val;
		lapic->irr5 |= val >> 32;
		pirbase = 128;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[3]);
	if (val != 0) {
		lapic->irr6 |= val;
		lapic->irr7 |= val >> 32;
		pirbase = 192;
		pirval = val;
	}
	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");

	/*
	 * Update RVI so the processor can evaluate pending virtual
	 * interrupts on VM-entry.
	 */
	if (pirval != 0) {
		rvi = pirbase + flsl(pirval) - 1;
		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		intr_status_new = (intr_status_old & 0xFF00) | rvi;
		if (intr_status_new > intr_status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
			    "guest_intr_status changed from 0x%04x to 0x%04x",
			    intr_status_old, intr_status_new);
		}
	}
}

static struct vlapic *
vmx_vlapic_init(void *arg, int vcpuid)
{
	struct vmx *vmx;
	struct vlapic *vlapic;
	struct vlapic_vtx *vlapic_vtx;

	vmx = arg;

	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vmx->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
	vlapic_vtx->vmx = vmx;

	if (virtual_interrupt_delivery) {
		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
		vlapic->ops.pending_intr = vmx_pending_intr;
		vlapic->ops.intr_accepted = vmx_intr_accepted;
		vlapic->ops.set_tmr = vmx_set_tmr;
	}

	if (posted_interrupts)
		vlapic->ops.post_intr = vmx_post_intr;

	vlapic_init(vlapic);

	return (vlapic);
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_VLAPIC);
}

struct vmm_ops vmm_ops_intel = {
	vmx_init,
	vmx_cleanup,
	vmx_restore,
	vmx_vminit,
	vmx_run,
	vmx_vmcleanup,
	vmx_getreg,
	vmx_setreg,
	vmx_getdesc,
	vmx_setdesc,
	vmx_getcap,
	vmx_setcap,
	ept_vmspace_alloc,
	ept_vmspace_free,
	vmx_vlapic_init,
	vmx_vlapic_cleanup,
};