svm.c revision 271348
/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271348 2014-09-10 03:13:40Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include <x86/apicreg.h>

#include "vmm_lapic.h"
#include "vmm_msr.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "npt.h"

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define AMD_CPUID_SVM_NP		BIT(0)	/* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)	/* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)	/* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)	/* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control. */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)	/* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */

#define VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID |	\
				VMCB_CACHE_IOPM	 |	\
				VMCB_CACHE_I	 |	\
				VMCB_CACHE_TPR	 |	\
				VMCB_CACHE_NP)

MALLOC_DEFINE(M_SVM, "svm", "svm");
MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc);

static uint32_t svm_feature;	/* AMD SVM features. */
/* Maximum ASIDs supported by the processor */
static uint32_t nasid;

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * S/w saved host context.
 */
static struct svm_regctx host_ctx[MAXCPU];

static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid VMCB EXITINTINFO");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "VMM pending exception injected");

/*
 * Common function to enable or disable SVM for a CPU.
 */
static int
cpu_svm_enable_disable(boolean_t enable)
{
	uint64_t efer_msr;

	efer_msr = rdmsr(MSR_EFER);

	if (enable)
		efer_msr |= EFER_SVM;
	else
		efer_msr &= ~EFER_SVM;

	wrmsr(MSR_EFER, efer_msr);

	return (0);
}

/*
 * Disable SVM on a CPU.
 */
static void
svm_disable(void *arg __unused)
{

	(void)cpu_svm_enable_disable(FALSE);
}

/*
 * Disable SVM for all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Check for the SVM features required by bhyve in a CPU.
 */
static int
svm_cpuid_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature = regs[3];

	printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]);
	nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	printf("SVM Features:0x%b\n", svm_feature,
		"\020"
		"\001NP"		/* Nested paging */
		"\002LbrVirt"		/* LBR virtualization */
		"\003SVML"		/* SVM lock */
		"\004NRIPS"		/* NRIP save */
		"\005TscRateMsr"	/* MSR based TSC rate control */
		"\006VmcbClean"		/* VMCB clean bits */
		"\007FlushByAsid"	/* Flush by ASID */
		"\010DecodeAssist"	/* Decode assist */
		"\011<b20>"
		"\012<b20>"
		"\013PauseFilter"
		"\014<b20>"
		"\015PauseFilterThreshold"
		"\016AVIC"
		);

	/* SVM Lock */
	if (!(svm_feature & AMD_CPUID_SVM_SVML)) {
		printf("SVM is disabled by BIOS, please enable in BIOS.\n");
		return (ENXIO);
	}

	/*
	 * bhyve needs RVI to work.
	 */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("Missing Nested paging or RVI SVM support in processor.\n");
		return (EIO);
	}

	if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE)
		return (0);

	return (EIO);
}

static __inline int
flush_by_asid(void)
{
	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

/*
 * Enable SVM for a CPU.
 */
static void
svm_enable(void *arg __unused)
{
	uint64_t hsave_pa;

	(void)cpu_svm_enable_disable(TRUE);

	hsave_pa = vtophys(hsave[curcpu]);
	wrmsr(MSR_VM_HSAVE_PA, hsave_pa);

	if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) {
		panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu);
	}
}

/*
 * Check if a processor supports SVM.
 */
static int
is_svm_enabled(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM is not supported on this processor.\n");
		return (ENXIO);
	}

	msr = rdmsr(MSR_VM_CR);
	/* Make sure SVM is not disabled by BIOS. */
	if ((msr & VM_CR_SVMDIS) == 0) {
		return svm_cpuid_features();
	}

	printf("SVM disabled by Key, consult TPM/BIOS manual.\n");
	return (ENXIO);
}
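/*
 * Editorial illustration (not part of the driver): the CPUID
 * Fn8000_000Ah probe performed by svm_cpuid_features() above can be
 * reproduced from a stand-alone user program; the bit positions are
 * those documented in APMv2.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t eax, ebx, ecx, edx;
 *
 *		__asm __volatile("cpuid"
 *		    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *		    : "0" (0x8000000A));
 *		printf("SVM rev %u, %u ASIDs, NRIP-save %s\n",
 *		    eax & 0xFF, ebx, (edx & (1 << 3)) ? "yes" : "no");
 *		return (0);
 *	}
 */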
/*
 * Enable SVM on CPU and initialize nested page table h/w.
 */
static int
svm_init(int ipinum)
{
	int err, cpu;

	err = is_svm_enabled();
	if (err)
		return (err);

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_npt_init(ipinum);

	/* Start SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{
	svm_enable(NULL);
}

/*
 * Get the index and bit position for an MSR in the MSR permission
 * bitmap. Two bits are used for each MSR: the lower bit is for read
 * and the higher bit is for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

/* Pentium compatible MSRs */
#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EIO);
}

/*
 * Give the virtual cpu complete access to an MSR (read & write).
 */
static int
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, err;

	err = svm_msr_index(msr, &index, &bit);
	if (err) {
		ERR("MSR 0x%lx is not writeable by guest.\n", msr);
		return (err);
	}

	if (index < 0 || index >= (SVM_MSR_BITMAP_SIZE)) {
		ERR("MSR 0x%lx index out of range(%d).\n", msr, index);
		return (EINVAL);
	}
	if (bit < 0 || bit > 8) {
		ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit);
		return (EINVAL);
	}

	/* Disable intercept for read and write. */
	if (read)
		perm_bitmap[index] &= ~(1UL << bit);
	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
	CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n",
	    (perm_bitmap[index] >> bit) & 0x3, msr);

	return (0);
}
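/*
 * Worked example (editorial): for MSR_LSTAR (0xC0000082),
 * svm_msr_index() computes
 *
 *	base  = 0x2000			(size of the Pentium range)
 *	off   = 0xC0000082 - 0xC0000000 = 0x82
 *	index = (0x82 + 0x2000) / 4 = 0x820
 *	bit   = (0x82 % 4) * 2 = 4
 *
 * so the read-intercept bit for MSR_LSTAR is bit 4 of
 * perm_bitmap[0x820] and the write-intercept bit is bit 5.  At 2 bits
 * per MSR the three 0x2000-MSR ranges consume 3 * 0x2000 * 2 / 8 =
 * 6144 bytes, which fits in the 8KB (two page) MSR permission map
 * defined by the architecture.
 */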
static int
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	return svm_msr_perm(perm_bitmap, msr, true, true);
}

static int
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	return svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline void
vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
	struct svm_vcpu *vcpustate;

	vcpustate = svm_get_vcpu(sc, vcpu);

	vcpustate->dirty |= dirtybits;
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{
	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{
	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/* Intercept Machine Check exceptions. */
	svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflag and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);
}
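/*
 * Worked example (editorial): in vmcb_init() above, each %crN has a
 * read-intercept bit in the low word and a write-intercept bit in the
 * high word of intercept[VMCB_CR_INTCPT].  For n = 5,
 *
 *	mask = (BIT(5) << 16) | BIT(5) = 0x00200020
 *
 * and because %cr5 is not shadowed in the VMCB both bits are enabled,
 * while for the shadowed registers (%cr0, %cr2, %cr3, %cr4 and %cr8)
 * the same mask is used to clear both bits instead.
 */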
/*
 * Initialise a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc),
	    M_SVM, M_WAITOK | M_ZERO);

	svm_sc->vm = vm;
	svm_sc->svm_feature = svm_feature;
	svm_sc->vcpu_cnt = VM_MAXCPU;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Start by intercepting accesses to all MSRs; selected MSRs such
	 * as GSBASE and FSBASE are exempted below.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));

	/*
	 * The following MSRs can be completely controlled by the virtual
	 * machine since accesses to them are translated into accesses to
	 * the VMCB.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);

	/* For Nested Paging/RVI only. */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

	/* Cache physical address for multiple vcpus. */
	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;

	for (i = 0; i < svm_sc->vcpu_cnt; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
	}
	return (svm_sc);
}
static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment *seg;
	struct vmcb_state *state;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
		/*
		 * Section 4.8.1 for APM2, check if Code Segment has
		 * Long attribute set in descriptor.
		 */
		if (seg->attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}
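/*
 * Worked example (editorial): for a one-byte non-string OUT to port
 * 0x3F8 from 64-bit code, the IOIO EXITINFO1 decoded by
 * svm_handle_io() below would be 0x03F80210:
 *
 *	bit  0     = 0		OUT rather than IN
 *	bit  2     = 0		not a string instruction
 *	bit  3     = 0		no rep prefix
 *	bits 6:4   = 1		one-byte operand
 *	bits 9:7   = 4		64-bit address size
 *				(see svm_inout_str_addrsize() above)
 *	bits 31:16 = 0x3F8	port number
 */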
/*
 * Handle guest I/O intercept.
 */
static bool
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);
	info1 = ctrl->exitinfo1;

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (vmexit->u.inout.string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (false);
}

static int
svm_npf_paging(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);

	return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment *seg;

	paging = &vmexit->u.inst_emul.paging;
	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	/*
	 * If the DecodeAssist SVM feature doesn't exist, we don't have the
	 * faulting instruction length for the NPF. RIP will be calculated
	 * based on the length determined by instruction emulation.
	 */
	vmexit->inst_length = VIE_INST_SIZE;

	seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
	switch (paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
}
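/*
 * Editorial summary of the EXITINFO1 tests above: a nested page fault
 * whose EXITINFO1 has VMCB_NPF_INFO1_GPA set but VMCB_NPF_INFO1_W,
 * VMCB_NPF_INFO1_ID and VMCB_NPF_INFO1_GPT all clear describes a data
 * read of an ordinary guest physical address.  svm_npf_paging() maps
 * it to VM_PROT_READ, and svm_npf_emul_fault() returns true, making
 * the access a candidate for instruction emulation (e.g. MMIO) when
 * the GPA is not backed by guest memory.
 */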
/*
 * Intercept access to MSR_EFER to prevent the guest from clearing the
 * SVM enable bit.
 */
static void
svm_write_efer(struct svm_softc *sc, int vcpu, uint32_t edx, uint32_t eax)
{
	struct vmcb_state *state;
	uint64_t oldval;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	state->efer = (uint64_t)edx << 32 | eax | EFER_SVM;
	if (state->efer != oldval) {
		VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx",
		    oldval, state->efer);
		vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR);
	}
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif

/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
 */
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}
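/*
 * Worked example (editorial): injecting #GP (vector 13) with error
 * code 0 through svm_eventinject() above yields
 *
 *	eventinj = 13 | (VMCB_EVENTINJ_TYPE_EXCEPTION << 8) |
 *	    VMCB_EVENTINJ_VALID | VMCB_EVENTINJ_EC_VALID |
 *	    ((uint64_t)0 << 32);
 *
 * i.e. vector in bits 7:0, type in bits 10:8, the valid bits, and the
 * error code in the upper half, matching the EVENTINJ layout in
 * section 15.20 of APMv2.
 */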
#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case VMCB_EXIT_INVALID:
		return ("invalvmcb");
	case VMCB_EXIT_SHUTDOWN:
		return ("shutdown");
	case VMCB_EXIT_NPF:
		return ("nptfault");
	case VMCB_EXIT_PAUSE:
		return ("pause");
	case VMCB_EXIT_HLT:
		return ("hlt");
	case VMCB_EXIT_CPUID:
		return ("cpuid");
	case VMCB_EXIT_IO:
		return ("inout");
	case VMCB_EXIT_MC:
		return ("mchk");
	case VMCB_EXIT_INTR:
		return ("extintr");
	case VMCB_EXIT_VINTR:
		return ("vintr");
	case VMCB_EXIT_MSR:
		return ("msr");
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

/*
 * Determine the cause of virtual cpu exit and handle the VMEXIT.
 * Return: false - Break vcpu execution loop and handle vmexit
 *		   in kernel or user space.
 *	   true  - Continue vcpu run.
 */
static bool
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	bool update_rip, loop, retu;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	update_rip = true;
	loop = true;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_MC:	/* Machine Check. */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		loop = false;
		break;

	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->e.g.sctx_rdx;

		if (ecx == MSR_EFER) {
			KASSERT(info1 != 0, ("rdmsr(MSR_EFER) is not "
			    "emulated: info1(%#lx) info2(%#lx)",
			    info1, info2));
			svm_write_efer(svm_sc, vcpu, edx, eax);
			break;
		}

		retu = false;
		if (info1) {
			/* VM exited because of write MSR */
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_WRMSR, 1);
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			val = (uint64_t)edx << 32 | eax;
			if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val,
			    &retu)) {
				vmexit->u.msr.wval = val;
				loop = false;
			} else
				loop = retu ? false : true;

			VCPU_CTR3(svm_sc->vm, vcpu,
			    "VMEXIT WRMSR(%s handling) 0x%lx @0x%x",
			    loop ? "kernel" : "user", val, ecx);
		} else {
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_RDMSR, 1);
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
			if (emulate_rdmsr(svm_sc->vm, vcpu, ecx,
			    &retu)) {
				loop = false;
			} else
				loop = retu ? false : true;
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR"
			    " MSB=0x%08x, LSB=%08x @0x%x",
			    ctx->e.g.sctx_rdx, state->rax, ecx);
		}

#define MSR_AMDK8_IPM	0xc0010055
		/*
		 * We can't hide the AMD C1E idle capability since it's
		 * based on CPU generation; for now, ignore access to
		 * this MSR by vcpus.
		 * XXX: special handling of AMD C1E - Ignore.
		 */
		if (ecx == MSR_AMDK8_IPM)
			loop = true;
		break;
	case VMCB_EXIT_INTR:
		/*
		 * Exit on external interrupt.
		 * Let the host interrupt handler run; if it is a guest
		 * interrupt, the local APIC will inject the event into
		 * the guest.
		 */
		update_rip = false;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		break;

	case VMCB_EXIT_IO:
		loop = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;

	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		(void)x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->e.g.sctx_rdx);
		break;

	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		loop = false;
		break;

	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;

	case VMCB_EXIT_NPF:
		loop = false;
		update_rip = false;

		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT"
			    " reserved bit is set,"
			    "INFO1:0x%lx INFO2:0x%lx .\n",
			    info1, info2);
			break;
		}

		/* EXITINFO2 has the physical fault address (GPA). */
		if (vm_mem_allocated(svm_sc->vm, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type =
			    svm_npf_paging(info1);
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu),
			    info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;

	case VMCB_EXIT_SHUTDOWN:
		loop = false;
		break;

	case VMCB_EXIT_INVALID:
		loop = false;
		break;

	default:
		/* Return to user space. */
		loop = false;
		update_rip = false;
		VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx"
		    " EXITINFO1: 0x%lx EXITINFO2:0x%lx\n",
		    ctrl->exitcode, info1, info2);
		VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx"
		    " Inst decoder len:%d\n", state->rip,
		    ctrl->nrip, ctrl->inst_decode_size);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx nrip %#lx",
	    loop ? "handled" : "unhandled", exit_reason_to_str(code),
	    state->rip, update_rip ? ctrl->nrip : state->rip);

	vmexit->rip = state->rip;
	if (update_rip) {
		if (ctrl->nrip == 0) {
			VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set "
			    "for RIP0x%lx.\n", state->rip);
			vmexit->exitcode = VM_EXITCODE_VMX;
		} else
			vmexit->rip = ctrl->nrip;
	}

	/* If vcpu execution is continued, update RIP. */
	if (loop) {
		state->rip = vmexit->rip;
	}

	return (loop);
}
/*
 * Inject NMI to virtual cpu.
 */
static int
svm_inject_nmi(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	/* Nothing to do if no NMI is pending. */
	if (!vm_nmi_pending(svm_sc->vm, vcpu))
		return (0);

	/* Inject NMI, vector number is not used. */
	svm_eventinject(svm_sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0,
	    false);

	/* Acknowledge the request is accepted. */
	vm_nmi_clear(svm_sc->vm, vcpu);

	VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n");

	return (1);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}
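/*
 * For reference (editorial): EXITINTINFO uses the same layout as
 * EVENTINJ, which is why svm_inj_intinfo() above can hand a saved
 * value back to svm_eventinject() field by field: vector in bits 7:0,
 * type in bits 10:8, error-code-valid in bit 11, valid in bit 31 and
 * the error code in bits 63:32 (APMv2, "Event Injection").
 */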
/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	int extint_pending;
	int vector;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	svm_inj_intinfo(svm_sc, vcpu);

	/* Can't inject multiple events at once. */
	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR1(svm_sc->vm, vcpu,
		    "SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj);
		return;
	}

	/* Wait for guest to come out of interrupt shadow. */
	if (ctrl->intr_shadow) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n");
		return;
	}

	/* NMI event has priority over interrupts. */
	if (svm_inject_nmi(svm_sc, vcpu)) {
		return;
	}

	extint_pending = vm_extint_pending(svm_sc->vm, vcpu);

	if (!extint_pending) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(svm_sc->vm, &vector);
	}

	if (vector < 32 || vector > 255) {
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection: "
		    "invalid vector=%d.\n", vector);
		ERR("SVM_ERR:Event injection invalid vector=%d.\n", vector);
		return;
	}

	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Interrupt is disabled\n");
		return;
	}

	svm_eventinject(svm_sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0,
	    false);

	if (!extint_pending) {
		/* Update the Local APIC ISR */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(svm_sc->vm, vcpu);
		vatpic_intr_accepted(svm_sc->vm, vector);

		/*
		 * XXX need to recheck extint_pending ala VT-x
		 */
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:event injected,vector=%d.\n", vector);
}

static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}
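/*
 * Editorial note on restore_host_tss() above: on amd64 the descriptor
 * type field distinguishes SDT_SYSTSS ("64-bit available TSS", type 9)
 * from SDT_SYSBSY ("64-bit busy TSS", type 11).  The host TSS
 * descriptor is busy because it is in use, and 'ltr' faults on a busy
 * descriptor, so the type is reset to SDT_SYSTSS before the task
 * register is reloaded.
 */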
static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen		Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */

	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		vcpu_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}
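/*
 * Worked example (editorial): with nasid = 4, a host cpu hands out
 * ASIDs 1, 2 and 3 in generation G.  The next allocation overflows
 * (++num == nasid), restarts at num = 1 and bumps the generation to
 * G + 1, implicitly invalidating every vcpu that still holds a
 * generation-G ASID on this cpu.  This is also why svm_init() seeds
 * asid[cpu] with { ~0UL, nasid - 1 }: the very first allocation takes
 * the rollover path and starts the sequence at {1,1}.
 */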
/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    void *rend_cookie, void *suspended_cookie)
{
	struct svm_regctx *hctx, *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	u_int thiscpu;
	bool loop;	/* Continue vcpu execution loop. */

	loop = true;
	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	/*
	 * Stash 'curcpu' on the stack as 'thiscpu'.
	 *
	 * The per-cpu data area is not accessible until MSR_GSBASE is restored
	 * after the #VMEXIT. Since VMRUN is executed inside a critical section
	 * 'curcpu' and 'thiscpu' are guaranteed to be identical.
	 */
	thiscpu = curcpu;

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	hctx = &host_ctx[thiscpu];
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != thiscpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		vcpu_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = thiscpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	/* Update Guest RIP */
	state->rip = rip;

	do {
		vmexit->inst_length = 0;

		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(suspended_cookie)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(rend_cookie)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to give up the cpu. */
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'thiscpu' */
		CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, thiscpu);

		ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_launch(vmcb_pa, gctx, hctx);

		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);

		/*
		 * Restore MSR_GSBASE to point to the pcpu data area.
		 *
		 * Note that accesses done via PCPU_GET/PCPU_SET will work
		 * only after MSR_GSBASE is restored.
		 *
		 * Also note that we don't bother restoring MSR_KGSBASE
		 * since it is not used in the kernel and will be restored
		 * when the VMRUN ioctl returns to userspace.
		 */
		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
		KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
		    thiscpu, curcpu));

		/*
		 * The host GDTR and IDTR is saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Handle #VMEXIT and if required return to user space. */
		loop = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (loop);

	return (0);
}

/*
 * Cleanup for virtual machine.
 */
static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *svm_sc;

	svm_sc = arg;

	VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n");

	free(svm_sc, M_SVM);
}
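/*
 * Editorial note on the register-state split used below: %rax, %rsp,
 * %rip, %rflags and the segment state live in the VMCB and are
 * accessed via vmcb_read()/vmcb_write(), while the remaining
 * general-purpose registers are software-saved in the svm_regctx that
 * swctx_regptr() indexes (saved and restored around VMRUN, presumably
 * by svm_launch()).  svm_getreg() and svm_setreg() therefore try the
 * VMCB first and fall back to the software context.
 */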
/*
 * Return pointer to hypervisor saved register state.
 */
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->e.g.sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->e.g.sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->e.g.sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		ERR("Unknown register requested, reg=%d.\n", reg);
		break;
	}

	return (NULL);
}

/*
 * Interface to read guest registers.
 * This can be SVM h/w saved or hypervisor saved register.
 */
static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	if (vmcb_read(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}

/*
 * Interface to write to guest registers.
 * This can be SVM h/w saved or hypervisor saved register.
 */
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);
	if (vmcb_write(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}
/*
 * Interface to set various descriptors.
 */
static int
svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	struct vmcb_segment *seg;
	uint16_t attrib;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);

	seg = vmcb_seg(vmcb, type);
	if (seg == NULL) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map seg_desc access to VMCB attribute format. */
	attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
	VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
	    type, desc->access, desc->limit);
	seg->attrib = attrib;
	seg->base = desc->base;
	seg->limit = desc->limit;

	return (0);
}

/*
 * Interface to get guest descriptor.
 */
static int
svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb_segment *seg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);

	seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
	if (!seg) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map VMCB attribute format back to seg_desc access. */
	desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
	desc->base = seg->base;
	desc->limit = seg->limit;

	/*
	 * VT-x uses bit 16 (Unusable) to indicate a segment that has been
	 * loaded with a NULL segment selector. The 'desc->access' field is
	 * interpreted in the VT-x format by the processor-independent code.
	 *
	 * SVM uses the 'P' bit to convey the same information so convert it
	 * into the VT-x format. For more details refer to section
	 * "Segment State in the VMCB" in APMv2.
	 */
	if (type == VM_REG_GUEST_CS || type == VM_REG_GUEST_TR)
		desc->access |= 0x80;		/* CS and TR are always present */

	if (!(desc->access & 0x80))
		desc->access |= 0x10000;	/* Unusable segment */

	return (0);
}
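/*
 * Worked example (editorial): a typical long-mode code segment with
 * seg_desc 'access' = 0xA09B (G=1, L=1, P=1, DPL=0, type 0xB) packs
 * into the 12-bit VMCB attribute format in svm_setdesc() above as
 *
 *	attrib = ((0xA09B & 0xF000) >> 4) | (0xA09B & 0xFF) = 0xA9B
 *
 * and svm_getdesc() inverts the transformation:
 *
 *	access = ((0xA9B & 0xF00) << 4) | (0xA9B & 0xFF) = 0xA09B
 */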
static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
	svm_init,
	svm_cleanup,
	svm_restore,
	svm_vminit,
	svm_vmrun,
	svm_vmcleanup,
	svm_getreg,
	svm_setreg,
	svm_getdesc,
	svm_setdesc,
	svm_getcap,
	svm_setcap,
	svm_npt_alloc,
	svm_npt_free,
	svm_vlapic_init,
	svm_vlapic_cleanup
};