svm.c revision 271694
1/*- 2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271694 2014-09-17 00:30:25Z neel $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/smp.h> 33#include <sys/kernel.h> 34#include <sys/malloc.h> 35#include <sys/pcpu.h> 36#include <sys/proc.h> 37#include <sys/sysctl.h> 38 39#include <vm/vm.h> 40#include <vm/pmap.h> 41 42#include <machine/cpufunc.h> 43#include <machine/psl.h> 44#include <machine/pmap.h> 45#include <machine/md_var.h> 46#include <machine/vmparam.h> 47#include <machine/specialreg.h> 48#include <machine/segments.h> 49#include <machine/smp.h> 50#include <machine/vmm.h> 51#include <machine/vmm_dev.h> 52#include <machine/vmm_instruction_emul.h> 53 54#include <x86/apicreg.h> 55 56#include "vmm_lapic.h" 57#include "vmm_msr.h" 58#include "vmm_stat.h" 59#include "vmm_ktr.h" 60#include "vmm_ioport.h" 61#include "vatpic.h" 62#include "vlapic.h" 63#include "vlapic_priv.h" 64 65#include "x86.h" 66#include "vmcb.h" 67#include "svm.h" 68#include "svm_softc.h" 69#include "npt.h" 70 71SYSCTL_DECL(_hw_vmm); 72SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); 73 74/* 75 * SVM CPUID function 0x8000_000A, edx bit decoding. 76 */ 77#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 78#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 79#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 80#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 81#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 82#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 83#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 84#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 85#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 86#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 87 88#define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 89 VMCB_CACHE_IOPM | \ 90 VMCB_CACHE_I | \ 91 VMCB_CACHE_TPR | \ 92 VMCB_CACHE_NP) 93 94MALLOC_DEFINE(M_SVM, "svm", "svm"); 95MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 96 97/* Per-CPU context area. 
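 * Referenced here so that MSR_GSBASE can be reloaded with the address of this cpu's pcpu data after a #VMEXIT (see svm_vmrun below).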
*/ 98extern struct pcpu __pcpu[]; 99 100static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc); 101 102static uint32_t svm_feature; /* AMD SVM features. */ 103SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0, 104 "SVM features advertised by CPUID.8000000AH:EDX"); 105 106static int disable_npf_assist; 107SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 108 &disable_npf_assist, 0, NULL); 109 110/* Maximum ASIDs supported by the processor */ 111static uint32_t nasid; 112SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0, 113 "Number of ASIDs supported by this processor"); 114 115/* Current ASID generation for each host cpu */ 116static struct asid asid[MAXCPU]; 117 118/* 119 * SVM host state saved area of size 4KB for each core. 120 */ 121static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 122 123/* 124 * S/w saved host context. 125 */ 126static struct svm_regctx host_ctx[MAXCPU]; 127 128static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 129static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 130static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 131 132/* 133 * Common function to enable or disabled SVM for a CPU. 134 */ 135static int 136cpu_svm_enable_disable(boolean_t enable) 137{ 138 uint64_t efer_msr; 139 140 efer_msr = rdmsr(MSR_EFER); 141 142 if (enable) 143 efer_msr |= EFER_SVM; 144 else 145 efer_msr &= ~EFER_SVM; 146 147 wrmsr(MSR_EFER, efer_msr); 148 149 return(0); 150} 151 152/* 153 * Disable SVM on a CPU. 154 */ 155static void 156svm_disable(void *arg __unused) 157{ 158 159 (void)cpu_svm_enable_disable(FALSE); 160} 161 162/* 163 * Disable SVM for all CPUs. 164 */ 165static int 166svm_cleanup(void) 167{ 168 169 smp_rendezvous(NULL, svm_disable, NULL, NULL); 170 return (0); 171} 172 173/* 174 * Check for required BHyVe SVM features in a CPU. 175 */ 176static int 177svm_cpuid_features(void) 178{ 179 u_int regs[4]; 180 181 /* CPUID Fn8000_000A is for SVM */ 182 do_cpuid(0x8000000A, regs); 183 svm_feature = regs[3]; 184 185 printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]); 186 nasid = regs[1]; 187 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 188 189 printf("SVM Features:0x%b\n", svm_feature, 190 "\020" 191 "\001NP" /* Nested paging */ 192 "\002LbrVirt" /* LBR virtualization */ 193 "\003SVML" /* SVM lock */ 194 "\004NRIPS" /* NRIP save */ 195 "\005TscRateMsr" /* MSR based TSC rate control */ 196 "\006VmcbClean" /* VMCB clean bits */ 197 "\007FlushByAsid" /* Flush by ASID */ 198 "\010DecodeAssist" /* Decode assist */ 199 "\011<b20>" 200 "\012<b20>" 201 "\013PauseFilter" 202 "\014<b20>" 203 "\015PauseFilterThreshold" 204 "\016AVIC" 205 ); 206 207 /* SVM Lock */ 208 if (!(svm_feature & AMD_CPUID_SVM_SVML)) { 209 printf("SVM is disabled by BIOS, please enable in BIOS.\n"); 210 return (ENXIO); 211 } 212 213 /* 214 * bhyve need RVI to work. 215 */ 216 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 217 printf("Missing Nested paging or RVI SVM support in processor.\n"); 218 return (EIO); 219 } 220 221 if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE) 222 return (0); 223 224 return (EIO); 225} 226 227static __inline int 228flush_by_asid(void) 229{ 230 231 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 232} 233 234static __inline int 235decode_assist(void) 236{ 237 238 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 239} 240 241/* 242 * Enable SVM for a CPU. 
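 * Sets EFER.SVME via cpu_svm_enable_disable() and points MSR_VM_HSAVE_PA at this cpu's host save area.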
243 */ 244static void 245svm_enable(void *arg __unused) 246{ 247 uint64_t hsave_pa; 248 249 (void)cpu_svm_enable_disable(TRUE); 250 251 hsave_pa = vtophys(hsave[curcpu]); 252 wrmsr(MSR_VM_HSAVE_PA, hsave_pa); 253 254 if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) { 255 panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu); 256 } 257} 258 259/* 260 * Check if a processor support SVM. 261 */ 262static int 263is_svm_enabled(void) 264{ 265 uint64_t msr; 266 267 /* Section 15.4 Enabling SVM from APM2. */ 268 if ((amd_feature2 & AMDID2_SVM) == 0) { 269 printf("SVM is not supported on this processor.\n"); 270 return (ENXIO); 271 } 272 273 msr = rdmsr(MSR_VM_CR); 274 /* Make sure SVM is not disabled by BIOS. */ 275 if ((msr & VM_CR_SVMDIS) == 0) { 276 return svm_cpuid_features(); 277 } 278 279 printf("SVM disabled by Key, consult TPM/BIOS manual.\n"); 280 return (ENXIO); 281} 282 283/* 284 * Enable SVM on CPU and initialize nested page table h/w. 285 */ 286static int 287svm_init(int ipinum) 288{ 289 int err, cpu; 290 291 err = is_svm_enabled(); 292 if (err) 293 return (err); 294 295 for (cpu = 0; cpu < MAXCPU; cpu++) { 296 /* 297 * Initialize the host ASIDs to their "highest" valid values. 298 * 299 * The next ASID allocation will rollover both 'gen' and 'num' 300 * and start off the sequence at {1,1}. 301 */ 302 asid[cpu].gen = ~0UL; 303 asid[cpu].num = nasid - 1; 304 } 305 306 svm_npt_init(ipinum); 307 308 /* Start SVM on all CPUs */ 309 smp_rendezvous(NULL, svm_enable, NULL, NULL); 310 311 return (0); 312} 313 314static void 315svm_restore(void) 316{ 317 svm_enable(NULL); 318} 319 320/* 321 * Get index and bit position for a MSR in MSR permission 322 * bitmap. Two bits are used for each MSR, lower bit is 323 * for read and higher bit is for write. 324 */ 325static int 326svm_msr_index(uint64_t msr, int *index, int *bit) 327{ 328 uint32_t base, off; 329 330/* Pentium compatible MSRs */ 331#define MSR_PENTIUM_START 0 332#define MSR_PENTIUM_END 0x1FFF 333/* AMD 6th generation and Intel compatible MSRs */ 334#define MSR_AMD6TH_START 0xC0000000UL 335#define MSR_AMD6TH_END 0xC0001FFFUL 336/* AMD 7th and 8th generation compatible MSRs */ 337#define MSR_AMD7TH_START 0xC0010000UL 338#define MSR_AMD7TH_END 0xC0011FFFUL 339 340 *index = -1; 341 *bit = (msr % 4) * 2; 342 base = 0; 343 344 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 345 *index = msr / 4; 346 return (0); 347 } 348 349 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 350 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 351 off = (msr - MSR_AMD6TH_START); 352 *index = (off + base) / 4; 353 return (0); 354 } 355 356 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 357 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 358 off = (msr - MSR_AMD7TH_START); 359 *index = (off + base) / 4; 360 return (0); 361 } 362 363 return (EIO); 364} 365 366/* 367 * Give virtual cpu the complete access to MSR(read & write). 368 */ 369static int 370svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 371{ 372 int index, bit, err; 373 374 err = svm_msr_index(msr, &index, &bit); 375 if (err) { 376 ERR("MSR 0x%lx is not writeable by guest.\n", msr); 377 return (err); 378 } 379 380 if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) { 381 ERR("MSR 0x%lx index out of range(%d).\n", msr, index); 382 return (EINVAL); 383 } 384 if (bit < 0 || bit > 8) { 385 ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit); 386 return (EINVAL); 387 } 388 389 /* Disable intercept for read and write. 
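 * Each MSR is represented by two bits in the permission bitmap: the low bit intercepts reads and the high bit intercepts writes, so clearing a bit grants the guest direct access.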
*/ 390 if (read) 391 perm_bitmap[index] &= ~(1UL << bit); 392 if (write) 393 perm_bitmap[index] &= ~(2UL << bit); 394 CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n", 395 (perm_bitmap[index] >> bit) & 0x3, msr); 396 397 return (0); 398} 399 400static int 401svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 402{ 403 return svm_msr_perm(perm_bitmap, msr, true, true); 404} 405 406static int 407svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 408{ 409 return svm_msr_perm(perm_bitmap, msr, true, false); 410} 411 412static __inline void 413vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits) 414{ 415 struct svm_vcpu *vcpustate; 416 417 vcpustate = svm_get_vcpu(sc, vcpu); 418 419 vcpustate->dirty |= dirtybits; 420} 421 422static __inline int 423svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 424{ 425 struct vmcb_ctrl *ctrl; 426 427 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 428 429 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 430 return (ctrl->intercept[idx] & bitmask ? 1 : 0); 431} 432 433static __inline void 434svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 435 int enabled) 436{ 437 struct vmcb_ctrl *ctrl; 438 uint32_t oldval; 439 440 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 441 442 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 443 oldval = ctrl->intercept[idx]; 444 445 if (enabled) 446 ctrl->intercept[idx] |= bitmask; 447 else 448 ctrl->intercept[idx] &= ~bitmask; 449 450 if (ctrl->intercept[idx] != oldval) { 451 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I); 452 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 453 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); 454 } 455} 456 457static __inline void 458svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 459{ 460 svm_set_intercept(sc, vcpu, off, bitmask, 0); 461} 462 463static __inline void 464svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 465{ 466 svm_set_intercept(sc, vcpu, off, bitmask, 1); 467} 468 469static void 470vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 471 uint64_t msrpm_base_pa, uint64_t np_pml4) 472{ 473 struct vmcb_ctrl *ctrl; 474 struct vmcb_state *state; 475 uint32_t mask; 476 int n; 477 478 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 479 state = svm_get_vmcb_state(sc, vcpu); 480 481 ctrl->iopm_base_pa = iopm_base_pa; 482 ctrl->msrpm_base_pa = msrpm_base_pa; 483 484 /* Enable nested paging */ 485 ctrl->np_enable = 1; 486 ctrl->n_cr3 = np_pml4; 487 488 /* 489 * Intercept accesses to the control registers that are not shadowed 490 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 491 */ 492 for (n = 0; n < 16; n++) { 493 mask = (BIT(n) << 16) | BIT(n); 494 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 495 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 496 else 497 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 498 } 499 500 /* Intercept Machine Check exceptions. */ 501 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 502 503 /* Intercept various events (for e.g. 
I/O, MSR and CPUID accesses) */ 504 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 505 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 506 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 507 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 508 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 509 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 510 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 511 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 512 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 513 VMCB_INTCPT_FERR_FREEZE); 514 515 /* 516 * From section "Canonicalization and Consistency Checks" in APMv2 517 * the VMRUN intercept bit must be set to pass the consistency check. 518 */ 519 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 520 521 /* 522 * The ASID will be set to a non-zero value just before VMRUN. 523 */ 524 ctrl->asid = 0; 525 526 /* 527 * Section 15.21.1, Interrupt Masking in EFLAGS 528 * Section 15.21.2, Virtualizing APIC.TPR 529 * 530 * This must be set for %rflag and %cr8 isolation of guest and host. 531 */ 532 ctrl->v_intr_masking = 1; 533 534 /* Enable Last Branch Record aka LBR for debugging */ 535 ctrl->lbr_virt_en = 1; 536 state->dbgctl = BIT(0); 537 538 /* EFER_SVM must always be set when the guest is executing */ 539 state->efer = EFER_SVM; 540 541 /* Set up the PAT to power-on state */ 542 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 543 PAT_VALUE(1, PAT_WRITE_THROUGH) | 544 PAT_VALUE(2, PAT_UNCACHED) | 545 PAT_VALUE(3, PAT_UNCACHEABLE) | 546 PAT_VALUE(4, PAT_WRITE_BACK) | 547 PAT_VALUE(5, PAT_WRITE_THROUGH) | 548 PAT_VALUE(6, PAT_UNCACHED) | 549 PAT_VALUE(7, PAT_UNCACHEABLE); 550} 551 552/* 553 * Initialise a virtual machine. 554 */ 555static void * 556svm_vminit(struct vm *vm, pmap_t pmap) 557{ 558 struct svm_softc *svm_sc; 559 struct svm_vcpu *vcpu; 560 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 561 int i; 562 563 svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc), 564 M_SVM, M_WAITOK | M_ZERO); 565 566 svm_sc->vm = vm; 567 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 568 569 /* 570 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc. 571 */ 572 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 573 574 /* 575 * Following MSR can be completely controlled by virtual machines 576 * since access to following are translated to access to VMCB. 577 */ 578 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 579 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 580 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 581 582 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 583 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 584 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 585 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 586 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 587 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 588 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 589 590 /* For Nested Paging/RVI only. */ 591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 592 593 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 594 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 595 596 /* Intercept access to all I/O ports. */ 597 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); 598 599 /* Cache physical address for multiple vcpus. 
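 * The I/O bitmap, MSR bitmap and nested PML4 are shared by all vcpus, so translate them to physical addresses once and reuse them in every VMCB.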
*/ 600 iopm_pa = vtophys(svm_sc->iopm_bitmap); 601 msrpm_pa = vtophys(svm_sc->msr_bitmap); 602 pml4_pa = svm_sc->nptp; 603 604 for (i = 0; i < VM_MAXCPU; i++) { 605 vcpu = svm_get_vcpu(svm_sc, i); 606 vcpu->lastcpu = NOCPU; 607 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 608 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 609 } 610 return (svm_sc); 611} 612 613static int 614svm_cpl(struct vmcb_state *state) 615{ 616 617 /* 618 * From APMv2: 619 * "Retrieve the CPL from the CPL field in the VMCB, not 620 * from any segment DPL" 621 */ 622 return (state->cpl); 623} 624 625static enum vm_cpu_mode 626svm_vcpu_mode(struct vmcb *vmcb) 627{ 628 struct vmcb_segment *seg; 629 struct vmcb_state *state; 630 631 state = &vmcb->state; 632 633 if (state->efer & EFER_LMA) { 634 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); 635 /* 636 * Section 4.8.1 for APM2, check if Code Segment has 637 * Long attribute set in descriptor. 638 */ 639 if (seg->attrib & VMCB_CS_ATTRIB_L) 640 return (CPU_MODE_64BIT); 641 else 642 return (CPU_MODE_COMPATIBILITY); 643 } else if (state->cr0 & CR0_PE) { 644 return (CPU_MODE_PROTECTED); 645 } else { 646 return (CPU_MODE_REAL); 647 } 648} 649 650static enum vm_paging_mode 651svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 652{ 653 654 if ((cr0 & CR0_PG) == 0) 655 return (PAGING_MODE_FLAT); 656 if ((cr4 & CR4_PAE) == 0) 657 return (PAGING_MODE_32); 658 if (efer & EFER_LME) 659 return (PAGING_MODE_64); 660 else 661 return (PAGING_MODE_PAE); 662} 663 664/* 665 * ins/outs utility routines 666 */ 667static uint64_t 668svm_inout_str_index(struct svm_regctx *regs, int in) 669{ 670 uint64_t val; 671 672 val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi; 673 674 return (val); 675} 676 677static uint64_t 678svm_inout_str_count(struct svm_regctx *regs, int rep) 679{ 680 uint64_t val; 681 682 val = rep ? regs->sctx_rcx : 1; 683 684 return (val); 685} 686 687static void 688svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 689 int in, struct vm_inout_str *vis) 690{ 691 int error, s; 692 693 if (in) { 694 vis->seg_name = VM_REG_GUEST_ES; 695 } else { 696 /* The segment field has standard encoding */ 697 s = (info1 >> 10) & 0x7; 698 vis->seg_name = vm_segment_name(s); 699 } 700 701 error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 702 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 703} 704 705static int 706svm_inout_str_addrsize(uint64_t info1) 707{ 708 uint32_t size; 709 710 size = (info1 >> 7) & 0x7; 711 switch (size) { 712 case 1: 713 return (2); /* 16 bit */ 714 case 2: 715 return (4); /* 32 bit */ 716 case 4: 717 return (8); /* 64 bit */ 718 default: 719 panic("%s: invalid size encoding %d", __func__, size); 720 } 721} 722 723static void 724svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 725{ 726 struct vmcb_state *state; 727 728 state = &vmcb->state; 729 paging->cr3 = state->cr3; 730 paging->cpl = svm_cpl(state); 731 paging->cpu_mode = svm_vcpu_mode(vmcb); 732 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 733 state->efer); 734} 735 736#define UNHANDLED 0 737 738/* 739 * Handle guest I/O intercept. 
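 * EXITINFO1 describes the access: bit 0 is the direction (IN), bit 2 the string flag, bit 3 REP, bits 6:4 the operand size in bytes, bits 9:7 the address size, bits 12:10 the effective segment (with decode assist) and bits 31:16 the port number.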
740 */ 741static int 742svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 743{ 744 struct vmcb_ctrl *ctrl; 745 struct vmcb_state *state; 746 struct svm_regctx *regs; 747 struct vm_inout_str *vis; 748 uint64_t info1; 749 int inout_string; 750 751 state = svm_get_vmcb_state(svm_sc, vcpu); 752 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 753 regs = svm_get_guest_regctx(svm_sc, vcpu); 754 755 info1 = ctrl->exitinfo1; 756 inout_string = info1 & BIT(2) ? 1 : 0; 757 758 /* 759 * The effective segment number in EXITINFO1[12:10] is populated 760 * only if the processor has the DecodeAssist capability. 761 * 762 * XXX this is not specified explicitly in APMv2 but can be verified 763 * empirically. 764 */ 765 if (inout_string && !decode_assist()) 766 return (UNHANDLED); 767 768 vmexit->exitcode = VM_EXITCODE_INOUT; 769 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 770 vmexit->u.inout.string = inout_string; 771 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 772 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 773 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 774 vmexit->u.inout.eax = (uint32_t)(state->rax); 775 776 if (inout_string) { 777 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 778 vis = &vmexit->u.inout_str; 779 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 780 vis->rflags = state->rflags; 781 vis->cr0 = state->cr0; 782 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 783 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 784 vis->addrsize = svm_inout_str_addrsize(info1); 785 svm_inout_str_seginfo(svm_sc, vcpu, info1, 786 vmexit->u.inout.in, vis); 787 } 788 789 return (UNHANDLED); 790} 791 792static int 793svm_npf_paging(uint64_t exitinfo1) 794{ 795 796 if (exitinfo1 & VMCB_NPF_INFO1_W) 797 return (VM_PROT_WRITE); 798 799 return (VM_PROT_READ); 800} 801 802static bool 803svm_npf_emul_fault(uint64_t exitinfo1) 804{ 805 806 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 807 return (false); 808 } 809 810 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 811 return (false); 812 } 813 814 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 815 return (false); 816 } 817 818 return (true); 819} 820 821static void 822svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 823{ 824 struct vm_guest_paging *paging; 825 struct vmcb_segment *seg; 826 struct vmcb_ctrl *ctrl; 827 char *inst_bytes; 828 int inst_len; 829 830 ctrl = &vmcb->ctrl; 831 paging = &vmexit->u.inst_emul.paging; 832 833 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 834 vmexit->u.inst_emul.gpa = gpa; 835 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 836 svm_paging_info(vmcb, paging); 837 838 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); 839 switch(paging->cpu_mode) { 840 case CPU_MODE_PROTECTED: 841 case CPU_MODE_COMPATIBILITY: 842 /* 843 * Section 4.8.1 of APM2, Default Operand Size or D bit. 844 */ 845 vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ? 846 1 : 0; 847 break; 848 default: 849 vmexit->u.inst_emul.cs_d = 0; 850 break; 851 } 852 853 /* 854 * Copy the instruction bytes into 'vie' if available. 855 */ 856 if (decode_assist() && !disable_npf_assist) { 857 inst_len = ctrl->inst_len; 858 inst_bytes = ctrl->inst_bytes; 859 } else { 860 inst_len = 0; 861 inst_bytes = NULL; 862 } 863 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 864} 865 866/* 867 * Intercept access to MSR_EFER to prevent the guest from clearing the 868 * SVM enable bit. 
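 * The write is emulated by svm_write_efer() below, which forces EFER_SVM on and copies the remaining bits into the VMCB.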
869 */ 870static void 871svm_write_efer(struct svm_softc *sc, int vcpu, uint32_t edx, uint32_t eax) 872{ 873 struct vmcb_state *state; 874 uint64_t oldval; 875 876 state = svm_get_vmcb_state(sc, vcpu); 877 878 oldval = state->efer; 879 state->efer = (uint64_t)edx << 32 | eax | EFER_SVM; 880 if (state->efer != oldval) { 881 VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx", 882 oldval, state->efer); 883 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR); 884 } 885} 886 887#ifdef KTR 888static const char * 889intrtype_to_str(int intr_type) 890{ 891 switch (intr_type) { 892 case VMCB_EVENTINJ_TYPE_INTR: 893 return ("hwintr"); 894 case VMCB_EVENTINJ_TYPE_NMI: 895 return ("nmi"); 896 case VMCB_EVENTINJ_TYPE_INTn: 897 return ("swintr"); 898 case VMCB_EVENTINJ_TYPE_EXCEPTION: 899 return ("exception"); 900 default: 901 panic("%s: unknown intr_type %d", __func__, intr_type); 902 } 903} 904#endif 905 906/* 907 * Inject an event to vcpu as described in section 15.20, "Event injection". 908 */ 909static void 910svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 911 uint32_t error, bool ec_valid) 912{ 913 struct vmcb_ctrl *ctrl; 914 915 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 916 917 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 918 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 919 920 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 921 __func__, vector)); 922 923 switch (intr_type) { 924 case VMCB_EVENTINJ_TYPE_INTR: 925 case VMCB_EVENTINJ_TYPE_NMI: 926 case VMCB_EVENTINJ_TYPE_INTn: 927 break; 928 case VMCB_EVENTINJ_TYPE_EXCEPTION: 929 if (vector >= 0 && vector <= 31 && vector != 2) 930 break; 931 /* FALLTHROUGH */ 932 default: 933 panic("%s: invalid intr_type/vector: %d/%d", __func__, 934 intr_type, vector); 935 } 936 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 937 if (ec_valid) { 938 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 939 ctrl->eventinj |= (uint64_t)error << 32; 940 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 941 intrtype_to_str(intr_type), vector, error); 942 } else { 943 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 944 intrtype_to_str(intr_type), vector); 945 } 946} 947 948static void 949svm_update_virqinfo(struct svm_softc *sc, int vcpu) 950{ 951 struct vm *vm; 952 struct vlapic *vlapic; 953 struct vmcb_ctrl *ctrl; 954 int pending; 955 956 vm = sc->vm; 957 vlapic = vm_lapic(vm, vcpu); 958 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 959 960 /* Update %cr8 in the emulated vlapic */ 961 vlapic_set_cr8(vlapic, ctrl->v_tpr); 962 963 /* 964 * If V_IRQ indicates that the interrupt injection attempted on then 965 * last VMRUN was successful then update the vlapic accordingly. 966 */ 967 if (ctrl->v_intr_vector != 0) { 968 pending = ctrl->v_irq; 969 KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid " 970 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 971 KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 972 VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector, 973 pending ? 
"pending" : "accepted"); 974 if (!pending) 975 vlapic_intr_accepted(vlapic, ctrl->v_intr_vector); 976 } 977} 978 979static void 980svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 981{ 982 struct vmcb_ctrl *ctrl; 983 uint64_t intinfo; 984 985 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 986 intinfo = ctrl->exitintinfo; 987 if (!VMCB_EXITINTINFO_VALID(intinfo)) 988 return; 989 990 /* 991 * From APMv2, Section "Intercepts during IDT interrupt delivery" 992 * 993 * If a #VMEXIT happened during event delivery then record the event 994 * that was being delivered. 995 */ 996 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 997 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 998 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 999 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 1000} 1001 1002static __inline int 1003vintr_intercept_enabled(struct svm_softc *sc, int vcpu) 1004{ 1005 1006 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1007 VMCB_INTCPT_VINTR)); 1008} 1009 1010static __inline void 1011enable_intr_window_exiting(struct svm_softc *sc, int vcpu) 1012{ 1013 struct vmcb_ctrl *ctrl; 1014 1015 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1016 1017 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 1018 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 1019 KASSERT(vintr_intercept_enabled(sc, vcpu), 1020 ("%s: vintr intercept should be enabled", __func__)); 1021 return; 1022 } 1023 1024 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); 1025 ctrl->v_irq = 1; 1026 ctrl->v_ign_tpr = 1; 1027 ctrl->v_intr_vector = 0; 1028 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1029 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1030} 1031 1032static __inline void 1033disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 1034{ 1035 struct vmcb_ctrl *ctrl; 1036 1037 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1038 1039 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1040 KASSERT(!vintr_intercept_enabled(sc, vcpu), 1041 ("%s: vintr intercept should be disabled", __func__)); 1042 return; 1043 } 1044 1045#ifdef KTR 1046 if (ctrl->v_intr_vector == 0) 1047 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 1048 else 1049 VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection"); 1050#endif 1051 ctrl->v_irq = 0; 1052 ctrl->v_intr_vector = 0; 1053 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1054 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1055} 1056 1057static int 1058svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, int running, 1059 uint64_t val) 1060{ 1061 struct vmcb_ctrl *ctrl; 1062 int oldval, newval; 1063 1064 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1065 oldval = ctrl->intr_shadow; 1066 newval = val ? 1 : 0; 1067 if (newval != oldval) { 1068 ctrl->intr_shadow = newval; 1069 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); 1070 } 1071 return (0); 1072} 1073 1074/* 1075 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1076 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1077 * to track when the vcpu is done handling the NMI. 
1078 */ 1079static int 1080nmi_blocked(struct svm_softc *sc, int vcpu) 1081{ 1082 int blocked; 1083 1084 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1085 VMCB_INTCPT_IRET); 1086 return (blocked); 1087} 1088 1089static void 1090enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1091{ 1092 1093 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1094 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1095 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1096} 1097 1098static void 1099clear_nmi_blocking(struct svm_softc *sc, int vcpu, int running) 1100{ 1101 int error; 1102 1103 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1104 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1105 /* 1106 * When the IRET intercept is cleared the vcpu will attempt to execute 1107 * the "iret" when it runs next. However, it is possible to inject 1108 * another NMI into the vcpu before the "iret" has actually executed. 1109 * 1110 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1111 * it will trap back into the hypervisor. If an NMI is pending for 1112 * the vcpu it will be injected into the guest. 1113 * 1114 * XXX this needs to be fixed 1115 */ 1116 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1117 1118 /* 1119 * Set 'intr_shadow' to prevent an NMI from being injected on the 1120 * immediate VMRUN. 1121 */ 1122 error = svm_modify_intr_shadow(sc, vcpu, running, 1); 1123 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1124} 1125 1126#ifdef KTR 1127static const char * 1128exit_reason_to_str(uint64_t reason) 1129{ 1130 static char reasonbuf[32]; 1131 1132 switch (reason) { 1133 case VMCB_EXIT_INVALID: 1134 return ("invalvmcb"); 1135 case VMCB_EXIT_SHUTDOWN: 1136 return ("shutdown"); 1137 case VMCB_EXIT_NPF: 1138 return ("nptfault"); 1139 case VMCB_EXIT_PAUSE: 1140 return ("pause"); 1141 case VMCB_EXIT_HLT: 1142 return ("hlt"); 1143 case VMCB_EXIT_CPUID: 1144 return ("cpuid"); 1145 case VMCB_EXIT_IO: 1146 return ("inout"); 1147 case VMCB_EXIT_MC: 1148 return ("mchk"); 1149 case VMCB_EXIT_INTR: 1150 return ("extintr"); 1151 case VMCB_EXIT_NMI: 1152 return ("nmi"); 1153 case VMCB_EXIT_VINTR: 1154 return ("vintr"); 1155 case VMCB_EXIT_MSR: 1156 return ("msr"); 1157 case VMCB_EXIT_IRET: 1158 return ("iret"); 1159 default: 1160 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1161 return (reasonbuf); 1162 } 1163} 1164#endif /* KTR */ 1165 1166/* 1167 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1168 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1169 * and exceptions caused by INT3, INTO and BOUND instructions. 1170 * 1171 * Return 1 if the nRIP is valid and 0 otherwise. 1172 */ 1173static int 1174nrip_valid(uint64_t exitcode) 1175{ 1176 switch (exitcode) { 1177 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1178 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1179 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1180 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1181 case 0x43: /* INT3 */ 1182 case 0x44: /* INTO */ 1183 case 0x45: /* BOUND */ 1184 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1185 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1186 return (1); 1187 default: 1188 return (0); 1189 } 1190} 1191 1192/* 1193 * Collateral for a generic SVM VM-exit. 
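 * The raw SVM exitcode and exitinfo values are passed to userland unmodified.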
1194 */ 1195static void 1196vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 1197{ 1198 1199 vme->exitcode = VM_EXITCODE_SVM; 1200 vme->u.svm.exitcode = code; 1201 vme->u.svm.exitinfo1 = info1; 1202 vme->u.svm.exitinfo2 = info2; 1203} 1204 1205static int 1206svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1207{ 1208 struct vmcb *vmcb; 1209 struct vmcb_state *state; 1210 struct vmcb_ctrl *ctrl; 1211 struct svm_regctx *ctx; 1212 uint64_t code, info1, info2, val; 1213 uint32_t eax, ecx, edx; 1214 int handled; 1215 bool retu; 1216 1217 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1218 vmcb = svm_get_vmcb(svm_sc, vcpu); 1219 state = &vmcb->state; 1220 ctrl = &vmcb->ctrl; 1221 1222 handled = 0; 1223 code = ctrl->exitcode; 1224 info1 = ctrl->exitinfo1; 1225 info2 = ctrl->exitinfo2; 1226 1227 vmexit->exitcode = VM_EXITCODE_BOGUS; 1228 vmexit->rip = state->rip; 1229 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1230 1231 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1232 1233 /* 1234 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1235 * in an inconsistent state and can trigger assertions that would 1236 * never happen otherwise. 1237 */ 1238 if (code == VMCB_EXIT_INVALID) { 1239 vm_exit_svm(vmexit, code, info1, info2); 1240 return (0); 1241 } 1242 1243 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1244 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1245 1246 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1247 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1248 vmexit->inst_length, code, info1, info2)); 1249 1250 svm_update_virqinfo(svm_sc, vcpu); 1251 svm_save_intinfo(svm_sc, vcpu); 1252 1253 switch (code) { 1254 case VMCB_EXIT_IRET: 1255 /* 1256 * Restart execution at "iret" but with the intercept cleared. 1257 */ 1258 vmexit->inst_length = 0; 1259 clear_nmi_blocking(svm_sc, vcpu, 1); 1260 handled = 1; 1261 break; 1262 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1263 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1264 handled = 1; 1265 break; 1266 case VMCB_EXIT_INTR: /* external interrupt */ 1267 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1268 handled = 1; 1269 break; 1270 case VMCB_EXIT_NMI: /* external NMI */ 1271 handled = 1; 1272 break; 1273 case VMCB_EXIT_MC: /* machine check */ 1274 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1275 break; 1276 case VMCB_EXIT_MSR: /* MSR access. */ 1277 eax = state->rax; 1278 ecx = ctx->sctx_rcx; 1279 edx = ctx->e.g.sctx_rdx; 1280 retu = false; 1281 1282 if (ecx == MSR_EFER) { 1283 KASSERT(info1 != 0, ("rdmsr(MSR_EFER) is not emulated: " 1284 "info1(%#lx) info2(%#lx)", info1, info2)); 1285 svm_write_efer(svm_sc, vcpu, edx, eax); 1286 handled = 1; 1287 break; 1288 } 1289 1290#define MSR_AMDK8_IPM 0xc0010055 1291 /* 1292 * Ignore access to the "Interrupt Pending Message" MSR. 
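 * Reads of this MSR return 0 and writes are silently dropped.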
1293 */ 1294 if (ecx == MSR_AMDK8_IPM) { 1295 if (!info1) 1296 state->rax = ctx->e.g.sctx_rdx = 0; 1297 handled = 1; 1298 break; 1299 } 1300 1301 if (info1) { 1302 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1303 val = (uint64_t)edx << 32 | eax; 1304 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1305 ecx, val); 1306 if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val, &retu)) { 1307 vmexit->exitcode = VM_EXITCODE_WRMSR; 1308 vmexit->u.msr.code = ecx; 1309 vmexit->u.msr.wval = val; 1310 } else if (!retu) { 1311 handled = 1; 1312 } else { 1313 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1314 ("emulate_wrmsr retu with bogus exitcode")); 1315 } 1316 } else { 1317 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1318 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1319 if (emulate_rdmsr(svm_sc->vm, vcpu, ecx, &retu)) { 1320 vmexit->exitcode = VM_EXITCODE_RDMSR; 1321 vmexit->u.msr.code = ecx; 1322 } else if (!retu) { 1323 handled = 1; 1324 } else { 1325 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1326 ("emulate_rdmsr retu with bogus exitcode")); 1327 } 1328 } 1329 break; 1330 case VMCB_EXIT_IO: 1331 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1332 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1333 break; 1334 case VMCB_EXIT_CPUID: 1335 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1336 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1337 (uint32_t *)&state->rax, 1338 (uint32_t *)&ctx->sctx_rbx, 1339 (uint32_t *)&ctx->sctx_rcx, 1340 (uint32_t *)&ctx->e.g.sctx_rdx); 1341 break; 1342 case VMCB_EXIT_HLT: 1343 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1344 vmexit->exitcode = VM_EXITCODE_HLT; 1345 vmexit->u.hlt.rflags = state->rflags; 1346 break; 1347 case VMCB_EXIT_PAUSE: 1348 vmexit->exitcode = VM_EXITCODE_PAUSE; 1349 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1350 break; 1351 case VMCB_EXIT_NPF: 1352 /* EXITINFO2 contains the faulting guest physical address */ 1353 if (info1 & VMCB_NPF_INFO1_RSV) { 1354 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1355 "reserved bits set: info1(%#lx) info2(%#lx)", 1356 info1, info2); 1357 } else if (vm_mem_allocated(svm_sc->vm, info2)) { 1358 vmexit->exitcode = VM_EXITCODE_PAGING; 1359 vmexit->u.paging.gpa = info2; 1360 vmexit->u.paging.fault_type = svm_npf_paging(info1); 1361 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1362 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1363 "on gpa %#lx/%#lx at rip %#lx", 1364 info2, info1, state->rip); 1365 } else if (svm_npf_emul_fault(info1)) { 1366 svm_handle_inst_emul(vmcb, info2, vmexit); 1367 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1368 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1369 "for gpa %#lx/%#lx at rip %#lx", 1370 info2, info1, state->rip); 1371 } 1372 break; 1373 default: 1374 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1375 break; 1376 } 1377 1378 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1379 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1380 vmexit->rip, vmexit->inst_length); 1381 1382 if (handled) { 1383 vmexit->rip += vmexit->inst_length; 1384 vmexit->inst_length = 0; 1385 state->rip = vmexit->rip; 1386 } else { 1387 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1388 /* 1389 * If this VM exit was not claimed by anybody then 1390 * treat it as a generic SVM exit. 1391 */ 1392 vm_exit_svm(vmexit, code, info1, info2); 1393 } else { 1394 /* 1395 * The exitcode and collateral have been populated. 1396 * The VM exit will be processed further in userland. 
1397 */ 1398 } 1399 } 1400 return (handled); 1401} 1402 1403static void 1404svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1405{ 1406 uint64_t intinfo; 1407 1408 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1409 return; 1410 1411 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1412 "valid: %#lx", __func__, intinfo)); 1413 1414 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1415 VMCB_EXITINTINFO_VECTOR(intinfo), 1416 VMCB_EXITINTINFO_EC(intinfo), 1417 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1418 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1419 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1420} 1421 1422/* 1423 * Inject event to virtual cpu. 1424 */ 1425static void 1426svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1427{ 1428 struct vmcb_ctrl *ctrl; 1429 struct vmcb_state *state; 1430 uint8_t v_tpr; 1431 int vector, need_intr_window, pending_apic_vector; 1432 1433 state = svm_get_vmcb_state(sc, vcpu); 1434 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1435 1436 need_intr_window = 0; 1437 pending_apic_vector = 0; 1438 1439 /* 1440 * Inject pending events or exceptions for this vcpu. 1441 * 1442 * An event might be pending because the previous #VMEXIT happened 1443 * during event delivery (i.e. ctrl->exitintinfo). 1444 * 1445 * An event might also be pending because an exception was injected 1446 * by the hypervisor (e.g. #PF during instruction emulation). 1447 */ 1448 svm_inj_intinfo(sc, vcpu); 1449 1450 /* NMI event has priority over interrupts. */ 1451 if (vm_nmi_pending(sc->vm, vcpu)) { 1452 if (nmi_blocked(sc, vcpu)) { 1453 /* 1454 * Can't inject another NMI if the guest has not 1455 * yet executed an "iret" after the last NMI. 1456 */ 1457 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1458 "to NMI-blocking"); 1459 } else if (ctrl->intr_shadow) { 1460 /* 1461 * Can't inject an NMI if the vcpu is in an intr_shadow. 1462 */ 1463 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1464 "interrupt shadow"); 1465 need_intr_window = 1; 1466 goto done; 1467 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1468 /* 1469 * If there is already an exception/interrupt pending 1470 * then defer the NMI until after that. 1471 */ 1472 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1473 "eventinj %#lx", ctrl->eventinj); 1474 1475 /* 1476 * Use self-IPI to trigger a VM-exit as soon as 1477 * possible after the event injection is completed. 1478 * 1479 * This works only if the external interrupt exiting 1480 * is at a lower priority than the event injection. 1481 * 1482 * Although not explicitly specified in APMv2 the 1483 * relative priorities were verified empirically. 1484 */ 1485 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1486 } else { 1487 vm_nmi_clear(sc->vm, vcpu); 1488 1489 /* Inject NMI, vector number is not used */ 1490 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1491 IDT_NMI, 0, false); 1492 1493 /* virtual NMI blocking is now in effect */ 1494 enable_nmi_blocking(sc, vcpu); 1495 1496 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1497 } 1498 } 1499 1500 if (!vm_extint_pending(sc->vm, vcpu)) { 1501 /* 1502 * APIC interrupts are delivered using the V_IRQ offload. 1503 * 1504 * The primary benefit is that the hypervisor doesn't need to 1505 * deal with the various conditions that inhibit interrupts. 1506 * It also means that TPR changes via CR8 will be handled 1507 * without any hypervisor involvement. 
1508 * 1509 * Note that the APIC vector must remain pending in the vIRR 1510 * until it is confirmed that it was delivered to the guest. 1511 * This can be confirmed based on the value of V_IRQ at the 1512 * next #VMEXIT (1 = pending, 0 = delivered). 1513 * 1514 * Also note that it is possible that another higher priority 1515 * vector can become pending before this vector is delivered 1516 * to the guest. This is alright because vcpu_notify_event() 1517 * will send an IPI and force the vcpu to trap back into the 1518 * hypervisor. The higher priority vector will be injected on 1519 * the next VMRUN. 1520 */ 1521 if (vlapic_pending_intr(vlapic, &vector)) { 1522 KASSERT(vector >= 16 && vector <= 255, 1523 ("invalid vector %d from local APIC", vector)); 1524 pending_apic_vector = vector; 1525 } 1526 goto done; 1527 } 1528 1529 /* Ask the legacy pic for a vector to inject */ 1530 vatpic_pending_intr(sc->vm, &vector); 1531 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", 1532 vector)); 1533 1534 /* 1535 * If the guest has disabled interrupts or is in an interrupt shadow 1536 * then we cannot inject the pending interrupt. 1537 */ 1538 if ((state->rflags & PSL_I) == 0) { 1539 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1540 "rflags %#lx", vector, state->rflags); 1541 need_intr_window = 1; 1542 goto done; 1543 } 1544 1545 if (ctrl->intr_shadow) { 1546 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1547 "interrupt shadow", vector); 1548 need_intr_window = 1; 1549 goto done; 1550 } 1551 1552 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1553 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1554 "eventinj %#lx", vector, ctrl->eventinj); 1555 need_intr_window = 1; 1556 goto done; 1557 } 1558 1559 /* 1560 * Legacy PIC interrupts are delivered via the event injection 1561 * mechanism. 1562 */ 1563 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1564 1565 vm_extint_clear(sc->vm, vcpu); 1566 vatpic_intr_accepted(sc->vm, vector); 1567 1568 /* 1569 * Force a VM-exit as soon as the vcpu is ready to accept another 1570 * interrupt. This is done because the PIC might have another vector 1571 * that it wants to inject. Also, if the APIC has a pending interrupt 1572 * that was preempted by the ExtInt then it allows us to inject the 1573 * APIC vector as soon as possible. 1574 */ 1575 need_intr_window = 1; 1576done: 1577 /* 1578 * The guest can modify the TPR by writing to %CR8. In guest mode 1579 * the processor reflects this write to V_TPR without hypervisor 1580 * intervention. 1581 * 1582 * The guest can also modify the TPR by writing to it via the memory 1583 * mapped APIC page. In this case, the write will be emulated by the 1584 * hypervisor. For this reason V_TPR must be updated before every 1585 * VMRUN. 1586 */ 1587 v_tpr = vlapic_get_cr8(vlapic); 1588 KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1589 if (ctrl->v_tpr != v_tpr) { 1590 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1591 ctrl->v_tpr, v_tpr); 1592 ctrl->v_tpr = v_tpr; 1593 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1594 } 1595 1596 if (pending_apic_vector) { 1597 /* 1598 * If an APIC vector is being injected then interrupt window 1599 * exiting is not possible on this VMRUN. 
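 * Both mechanisms program the same V_IRQ/V_INTR_VECTOR fields in the VMCB, so they cannot be used on the same VMRUN; the KASSERT below enforces this.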
1600 */ 1601 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1602 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1603 pending_apic_vector); 1604 1605 ctrl->v_irq = 1; 1606 ctrl->v_ign_tpr = 0; 1607 ctrl->v_intr_vector = pending_apic_vector; 1608 ctrl->v_intr_prio = pending_apic_vector >> 4; 1609 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1610 } else if (need_intr_window) { 1611 /* 1612 * We use V_IRQ in conjunction with the VINTR intercept to 1613 * trap into the hypervisor as soon as a virtual interrupt 1614 * can be delivered. 1615 * 1616 * Since injected events are not subject to intercept checks 1617 * we need to ensure that the V_IRQ is not actually going to 1618 * be delivered on VM entry. The KASSERT below enforces this. 1619 */ 1620 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1621 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1622 ("Bogus intr_window_exiting: eventinj (%#lx), " 1623 "intr_shadow (%u), rflags (%#lx)", 1624 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1625 enable_intr_window_exiting(sc, vcpu); 1626 } else { 1627 disable_intr_window_exiting(sc, vcpu); 1628 } 1629} 1630 1631static __inline void 1632restore_host_tss(void) 1633{ 1634 struct system_segment_descriptor *tss_sd; 1635 1636 /* 1637 * The TSS descriptor was in use prior to launching the guest so it 1638 * has been marked busy. 1639 * 1640 * 'ltr' requires the descriptor to be marked available so change the 1641 * type to "64-bit available TSS". 1642 */ 1643 tss_sd = PCPU_GET(tss); 1644 tss_sd->sd_type = SDT_SYSTSS; 1645 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1646} 1647 1648static void 1649check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1650{ 1651 struct svm_vcpu *vcpustate; 1652 struct vmcb_ctrl *ctrl; 1653 long eptgen; 1654 bool alloc_asid; 1655 1656 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1657 "active on cpu %u", __func__, thiscpu)); 1658 1659 vcpustate = svm_get_vcpu(sc, vcpuid); 1660 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1661 1662 /* 1663 * The TLB entries associated with the vcpu's ASID are not valid 1664 * if either of the following conditions is true: 1665 * 1666 * 1. The vcpu's ASID generation is different than the host cpu's 1667 * ASID generation. This happens when the vcpu migrates to a new 1668 * host cpu. It can also happen when the number of vcpus executing 1669 * on a host cpu is greater than the number of ASIDs available. 1670 * 1671 * 2. The pmap generation number is different than the value cached in 1672 * the 'vcpustate'. This happens when the host invalidates pages 1673 * belonging to the guest. 1674 * 1675 * asidgen eptgen Action 1676 * mismatch mismatch 1677 * 0 0 (a) 1678 * 0 1 (b1) or (b2) 1679 * 1 0 (c) 1680 * 1 1 (d) 1681 * 1682 * (a) There is no mismatch in eptgen or ASID generation and therefore 1683 * no further action is needed. 1684 * 1685 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1686 * retained and the TLB entries associated with this ASID 1687 * are flushed by VMRUN. 1688 * 1689 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1690 * allocated. 1691 * 1692 * (c) A new ASID is allocated. 1693 * 1694 * (d) A new ASID is allocated. 
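 * In cases (b2), (c) and (d) the new ASID is tagged with the current host generation, and 'eptgen' is re-cached below in all cases.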
1695 */ 1696 1697 alloc_asid = false; 1698 eptgen = pmap->pm_eptgen; 1699 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1700 1701 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1702 alloc_asid = true; /* (c) and (d) */ 1703 } else if (vcpustate->eptgen != eptgen) { 1704 if (flush_by_asid()) 1705 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1706 else 1707 alloc_asid = true; /* (b2) */ 1708 } else { 1709 /* 1710 * This is the common case (a). 1711 */ 1712 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1713 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1714 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1715 } 1716 1717 if (alloc_asid) { 1718 if (++asid[thiscpu].num >= nasid) { 1719 asid[thiscpu].num = 1; 1720 if (++asid[thiscpu].gen == 0) 1721 asid[thiscpu].gen = 1; 1722 /* 1723 * If this cpu does not support "flush-by-asid" 1724 * then flush the entire TLB on a generation 1725 * bump. Subsequent ASID allocation in this 1726 * generation can be done without a TLB flush. 1727 */ 1728 if (!flush_by_asid()) 1729 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1730 } 1731 vcpustate->asid.gen = asid[thiscpu].gen; 1732 vcpustate->asid.num = asid[thiscpu].num; 1733 1734 ctrl->asid = vcpustate->asid.num; 1735 vcpu_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1736 /* 1737 * If this cpu supports "flush-by-asid" then the TLB 1738 * was not flushed after the generation bump. The TLB 1739 * is flushed selectively after every new ASID allocation. 1740 */ 1741 if (flush_by_asid()) 1742 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1743 } 1744 vcpustate->eptgen = eptgen; 1745 1746 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1747 KASSERT(ctrl->asid == vcpustate->asid.num, 1748 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1749} 1750 1751/* 1752 * Start vcpu with specified RIP. 1753 */ 1754static int 1755svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1756 void *rend_cookie, void *suspended_cookie) 1757{ 1758 struct svm_regctx *hctx, *gctx; 1759 struct svm_softc *svm_sc; 1760 struct svm_vcpu *vcpustate; 1761 struct vmcb_state *state; 1762 struct vmcb_ctrl *ctrl; 1763 struct vm_exit *vmexit; 1764 struct vlapic *vlapic; 1765 struct vm *vm; 1766 uint64_t vmcb_pa; 1767 u_int thiscpu; 1768 int handled; 1769 1770 svm_sc = arg; 1771 vm = svm_sc->vm; 1772 1773 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1774 state = svm_get_vmcb_state(svm_sc, vcpu); 1775 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1776 vmexit = vm_exitinfo(vm, vcpu); 1777 vlapic = vm_lapic(vm, vcpu); 1778 1779 /* 1780 * Stash 'curcpu' on the stack as 'thiscpu'. 1781 * 1782 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1783 * after the #VMEXIT. Since VMRUN is executed inside a critical section 1784 * 'curcpu' and 'thiscpu' are guaranteed to identical. 1785 */ 1786 thiscpu = curcpu; 1787 1788 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1789 hctx = &host_ctx[thiscpu]; 1790 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1791 1792 if (vcpustate->lastcpu != thiscpu) { 1793 /* 1794 * Force new ASID allocation by invalidating the generation. 1795 */ 1796 vcpustate->asid.gen = 0; 1797 1798 /* 1799 * Invalidate the VMCB state cache by marking all fields dirty. 1800 */ 1801 vcpu_set_dirty(svm_sc, vcpu, 0xffffffff); 1802 1803 /* 1804 * XXX 1805 * Setting 'vcpustate->lastcpu' here is bit premature because 1806 * we may return from this function without actually executing 1807 * the VMRUN instruction. This could happen if a rendezvous 1808 * or an AST is pending on the first time through the loop. 
1809 * 1810 * This works for now but any new side-effects of vcpu 1811 * migration should take this case into account. 1812 */ 1813 vcpustate->lastcpu = thiscpu; 1814 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1815 } 1816 1817 /* Update Guest RIP */ 1818 state->rip = rip; 1819 1820 do { 1821 /* 1822 * Disable global interrupts to guarantee atomicity during 1823 * loading of guest state. This includes not only the state 1824 * loaded by the "vmrun" instruction but also software state 1825 * maintained by the hypervisor: suspended and rendezvous 1826 * state, NPT generation number, vlapic interrupts etc. 1827 */ 1828 disable_gintr(); 1829 1830 if (vcpu_suspended(suspended_cookie)) { 1831 enable_gintr(); 1832 vm_exit_suspended(vm, vcpu, state->rip); 1833 break; 1834 } 1835 1836 if (vcpu_rendezvous_pending(rend_cookie)) { 1837 enable_gintr(); 1838 vm_exit_rendezvous(vm, vcpu, state->rip); 1839 break; 1840 } 1841 1842 /* We are asked to give the cpu by scheduler. */ 1843 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1844 enable_gintr(); 1845 vm_exit_astpending(vm, vcpu, state->rip); 1846 break; 1847 } 1848 1849 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1850 1851 /* Activate the nested pmap on 'thiscpu' */ 1852 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1853 1854 /* 1855 * Check the pmap generation and the ASID generation to 1856 * ensure that the vcpu does not use stale TLB mappings. 1857 */ 1858 check_asid(svm_sc, vcpu, pmap, thiscpu); 1859 1860 ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty; 1861 vcpustate->dirty = 0; 1862 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 1863 1864 /* Launch Virtual Machine. */ 1865 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 1866 svm_launch(vmcb_pa, gctx, hctx); 1867 1868 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1869 1870 /* 1871 * Restore MSR_GSBASE to point to the pcpu data area. 1872 * 1873 * Note that accesses done via PCPU_GET/PCPU_SET will work 1874 * only after MSR_GSBASE is restored. 1875 * 1876 * Also note that we don't bother restoring MSR_KGSBASE 1877 * since it is not used in the kernel and will be restored 1878 * when the VMRUN ioctl returns to userspace. 1879 */ 1880 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1881 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1882 thiscpu, curcpu)); 1883 1884 /* 1885 * The host GDTR and IDTR is saved by VMRUN and restored 1886 * automatically on #VMEXIT. However, the host TSS needs 1887 * to be restored explicitly. 1888 */ 1889 restore_host_tss(); 1890 1891 /* #VMEXIT disables interrupts so re-enable them here. */ 1892 enable_gintr(); 1893 1894 /* Handle #VMEXIT and if required return to user space. */ 1895 handled = svm_vmexit(svm_sc, vcpu, vmexit); 1896 } while (handled); 1897 1898 return (0); 1899} 1900 1901/* 1902 * Cleanup for virtual machine. 1903 */ 1904static void 1905svm_vmcleanup(void *arg) 1906{ 1907 struct svm_softc *svm_sc; 1908 1909 svm_sc = arg; 1910 1911 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); 1912 1913 free(svm_sc, M_SVM); 1914} 1915 1916/* 1917 * Return pointer to hypervisor saved register state. 
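 * These are the registers that VMRUN/#VMEXIT does not save in the VMCB; they live in the software-maintained 'svm_regctx' instead.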
1918 */ 1919static register_t * 1920swctx_regptr(struct svm_regctx *regctx, int reg) 1921{ 1922 1923 switch (reg) { 1924 case VM_REG_GUEST_RBX: 1925 return (&regctx->sctx_rbx); 1926 case VM_REG_GUEST_RCX: 1927 return (&regctx->sctx_rcx); 1928 case VM_REG_GUEST_RDX: 1929 return (&regctx->e.g.sctx_rdx); 1930 case VM_REG_GUEST_RDI: 1931 return (&regctx->e.g.sctx_rdi); 1932 case VM_REG_GUEST_RSI: 1933 return (&regctx->e.g.sctx_rsi); 1934 case VM_REG_GUEST_RBP: 1935 return (&regctx->sctx_rbp); 1936 case VM_REG_GUEST_R8: 1937 return (&regctx->sctx_r8); 1938 case VM_REG_GUEST_R9: 1939 return (&regctx->sctx_r9); 1940 case VM_REG_GUEST_R10: 1941 return (&regctx->sctx_r10); 1942 case VM_REG_GUEST_R11: 1943 return (&regctx->sctx_r11); 1944 case VM_REG_GUEST_R12: 1945 return (&regctx->sctx_r12); 1946 case VM_REG_GUEST_R13: 1947 return (&regctx->sctx_r13); 1948 case VM_REG_GUEST_R14: 1949 return (&regctx->sctx_r14); 1950 case VM_REG_GUEST_R15: 1951 return (&regctx->sctx_r15); 1952 default: 1953 ERR("Unknown register requested, reg=%d.\n", reg); 1954 break; 1955 } 1956 1957 return (NULL); 1958} 1959 1960/* 1961 * Interface to read guest registers. 1962 * This can be SVM h/w saved or hypervisor saved register. 1963 */ 1964static int 1965svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 1966{ 1967 struct svm_softc *svm_sc; 1968 struct vmcb *vmcb; 1969 register_t *reg; 1970 1971 svm_sc = arg; 1972 vmcb = svm_get_vmcb(svm_sc, vcpu); 1973 1974 if (vmcb_read(vmcb, ident, val) == 0) { 1975 return (0); 1976 } 1977 1978 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1979 1980 if (reg != NULL) { 1981 *val = *reg; 1982 return (0); 1983 } 1984 1985 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 1986 return (EINVAL); 1987} 1988 1989/* 1990 * Interface to write to guest registers. 1991 * This can be SVM h/w saved or hypervisor saved register. 1992 */ 1993static int 1994svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 1995{ 1996 struct svm_softc *svm_sc; 1997 struct vmcb *vmcb; 1998 register_t *reg; 1999 2000 svm_sc = arg; 2001 vmcb = svm_get_vmcb(svm_sc, vcpu); 2002 if (vmcb_write(vmcb, ident, val) == 0) { 2003 return (0); 2004 } 2005 2006 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 2007 2008 if (reg != NULL) { 2009 *reg = val; 2010 return (0); 2011 } 2012 2013 /* 2014 * XXX deal with CR3 and invalidate TLB entries tagged with the 2015 * vcpu's ASID. This needs to be treated differently depending on 2016 * whether 'running' is true/false. 2017 */ 2018 2019 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 2020 return (EINVAL); 2021} 2022 2023 2024/* 2025 * Interface to set various descriptors. 2026 */ 2027static int 2028svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc) 2029{ 2030 struct svm_softc *svm_sc; 2031 struct vmcb *vmcb; 2032 struct vmcb_segment *seg; 2033 uint16_t attrib; 2034 2035 svm_sc = arg; 2036 vmcb = svm_get_vmcb(svm_sc, vcpu); 2037 2038 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type); 2039 2040 seg = vmcb_seg(vmcb, type); 2041 if (seg == NULL) { 2042 ERR("SVM_ERR:Unsupported segment type%d\n", type); 2043 return (EINVAL); 2044 } 2045 2046 /* Map seg_desc access to VMCB attribute format.*/ 2047 attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF); 2048 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n", 2049 type, desc->access, desc->limit); 2050 seg->attrib = attrib; 2051 seg->base = desc->base; 2052 seg->limit = desc->limit; 2053 2054 return (0); 2055} 2056 2057/* 2058 * Interface to get guest descriptor.
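 * Converts the VMCB segment attributes back into the seg_desc 'access' layout expected by the processor-independent code.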
2059 */ 2060static int 2061svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc) 2062{ 2063 struct svm_softc *svm_sc; 2064 struct vmcb_segment *seg; 2065 2066 svm_sc = arg; 2067 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type); 2068 2069 seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type); 2070 if (!seg) { 2071 ERR("SVM_ERR:Unsupported segment type%d\n", type); 2072 return (EINVAL); 2073 } 2074 2075 /* Map seg_desc access to VMCB attribute format.*/ 2076 desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF); 2077 desc->base = seg->base; 2078 desc->limit = seg->limit; 2079 2080 /* 2081 * VT-x uses bit 16 (Unusable) to indicate a segment that has been 2082 * loaded with a NULL segment selector. The 'desc->access' field is 2083 * interpreted in the VT-x format by the processor-independent code. 2084 * 2085 * SVM uses the 'P' bit to convey the same information so convert it 2086 * into the VT-x format. For more details refer to section 2087 * "Segment State in the VMCB" in APMv2. 2088 */ 2089 if (type == VM_REG_GUEST_CS || type == VM_REG_GUEST_TR) 2090 desc->access |= 0x80; /* CS and TR always present */ 2091 2092 if (!(desc->access & 0x80)) 2093 desc->access |= 0x10000; /* Unusable segment */ 2094 2095 return (0); 2096} 2097 2098static int 2099svm_setcap(void *arg, int vcpu, int type, int val) 2100{ 2101 struct svm_softc *sc; 2102 int error; 2103 2104 sc = arg; 2105 error = 0; 2106 switch (type) { 2107 case VM_CAP_HALT_EXIT: 2108 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2109 VMCB_INTCPT_HLT, val); 2110 break; 2111 case VM_CAP_PAUSE_EXIT: 2112 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2113 VMCB_INTCPT_PAUSE, val); 2114 break; 2115 case VM_CAP_UNRESTRICTED_GUEST: 2116 /* Unrestricted guest execution cannot be disabled in SVM */ 2117 if (val == 0) 2118 error = EINVAL; 2119 break; 2120 default: 2121 error = ENOENT; 2122 break; 2123 } 2124 return (error); 2125} 2126 2127static int 2128svm_getcap(void *arg, int vcpu, int type, int *retval) 2129{ 2130 struct svm_softc *sc; 2131 int error; 2132 2133 sc = arg; 2134 error = 0; 2135 2136 switch (type) { 2137 case VM_CAP_HALT_EXIT: 2138 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2139 VMCB_INTCPT_HLT); 2140 break; 2141 case VM_CAP_PAUSE_EXIT: 2142 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2143 VMCB_INTCPT_PAUSE); 2144 break; 2145 case VM_CAP_UNRESTRICTED_GUEST: 2146 *retval = 1; /* unrestricted guest is always enabled */ 2147 break; 2148 default: 2149 error = ENOENT; 2150 break; 2151 } 2152 return (error); 2153} 2154 2155static struct vlapic * 2156svm_vlapic_init(void *arg, int vcpuid) 2157{ 2158 struct svm_softc *svm_sc; 2159 struct vlapic *vlapic; 2160 2161 svm_sc = arg; 2162 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2163 vlapic->vm = svm_sc->vm; 2164 vlapic->vcpuid = vcpuid; 2165 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 2166 2167 vlapic_init(vlapic); 2168 2169 return (vlapic); 2170} 2171 2172static void 2173svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2174{ 2175 2176 vlapic_cleanup(vlapic); 2177 free(vlapic, M_SVM_VLAPIC); 2178} 2179 2180struct vmm_ops vmm_ops_amd = { 2181 svm_init, 2182 svm_cleanup, 2183 svm_restore, 2184 svm_vminit, 2185 svm_vmrun, 2186 svm_vmcleanup, 2187 svm_getreg, 2188 svm_setreg, 2189 svm_getdesc, 2190 svm_setdesc, 2191 svm_getcap, 2192 svm_setcap, 2193 svm_npt_alloc, 2194 svm_npt_free, 2195 svm_vlapic_init, 2196 svm_vlapic_cleanup 2197}; 2198