svm.c revision 272929
1/*- 2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 272929 2014-10-11 04:41:21Z neel $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/smp.h> 33#include <sys/kernel.h> 34#include <sys/malloc.h> 35#include <sys/pcpu.h> 36#include <sys/proc.h> 37#include <sys/sysctl.h> 38 39#include <vm/vm.h> 40#include <vm/pmap.h> 41 42#include <machine/cpufunc.h> 43#include <machine/psl.h> 44#include <machine/pmap.h> 45#include <machine/md_var.h> 46#include <machine/specialreg.h> 47#include <machine/smp.h> 48#include <machine/vmm.h> 49#include <machine/vmm_instruction_emul.h> 50 51#include "vmm_lapic.h" 52#include "vmm_stat.h" 53#include "vmm_ktr.h" 54#include "vmm_ioport.h" 55#include "vatpic.h" 56#include "vlapic.h" 57#include "vlapic_priv.h" 58 59#include "x86.h" 60#include "vmcb.h" 61#include "svm.h" 62#include "svm_softc.h" 63#include "svm_msr.h" 64#include "npt.h" 65 66SYSCTL_DECL(_hw_vmm); 67SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); 68 69/* 70 * SVM CPUID function 0x8000_000A, edx bit decoding. 71 */ 72#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 73#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 74#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 75#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 76#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 77#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 78#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 79#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 80#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 81#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 82 83#define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 84 VMCB_CACHE_IOPM | \ 85 VMCB_CACHE_I | \ 86 VMCB_CACHE_TPR | \ 87 VMCB_CACHE_CR2 | \ 88 VMCB_CACHE_CR | \ 89 VMCB_CACHE_DT | \ 90 VMCB_CACHE_SEG | \ 91 VMCB_CACHE_NP) 92 93static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 94SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 95 0, NULL); 96 97static MALLOC_DEFINE(M_SVM, "svm", "svm"); 98static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 99 100/* Per-CPU context area. */ 101extern struct pcpu __pcpu[]; 102 103static uint32_t svm_feature; /* AMD SVM features. */ 104SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0, 105 "SVM features advertised by CPUID.8000000AH:EDX"); 106 107static int disable_npf_assist; 108SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 109 &disable_npf_assist, 0, NULL); 110 111/* Maximum ASIDs supported by the processor */ 112static uint32_t nasid; 113SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0, 114 "Number of ASIDs supported by this processor"); 115 116/* Current ASID generation for each host cpu */ 117static struct asid asid[MAXCPU]; 118 119/* 120 * SVM host state saved area of size 4KB for each core. 121 */ 122static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 123 124static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 125static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 126static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 127 128static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); 129 130static __inline int 131flush_by_asid(void) 132{ 133 134 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 135} 136 137static __inline int 138decode_assist(void) 139{ 140 141 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 142} 143 144static void 145svm_disable(void *arg __unused) 146{ 147 uint64_t efer; 148 149 efer = rdmsr(MSR_EFER); 150 efer &= ~EFER_SVM; 151 wrmsr(MSR_EFER, efer); 152} 153 154/* 155 * Disable SVM on all CPUs. 156 */ 157static int 158svm_cleanup(void) 159{ 160 161 smp_rendezvous(NULL, svm_disable, NULL, NULL); 162 return (0); 163} 164 165/* 166 * Verify that all the features required by bhyve are available. 167 */ 168static int 169check_svm_features(void) 170{ 171 u_int regs[4]; 172 173 /* CPUID Fn8000_000A is for SVM */ 174 do_cpuid(0x8000000A, regs); 175 svm_feature = regs[3]; 176 177 printf("SVM: Revision %d\n", regs[0] & 0xFF); 178 printf("SVM: NumASID %u\n", regs[1]); 179 180 nasid = regs[1]; 181 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 182 183 printf("SVM: Features 0x%b\n", svm_feature, 184 "\020" 185 "\001NP" /* Nested paging */ 186 "\002LbrVirt" /* LBR virtualization */ 187 "\003SVML" /* SVM lock */ 188 "\004NRIPS" /* NRIP save */ 189 "\005TscRateMsr" /* MSR based TSC rate control */ 190 "\006VmcbClean" /* VMCB clean bits */ 191 "\007FlushByAsid" /* Flush by ASID */ 192 "\010DecodeAssist" /* Decode assist */ 193 "\011<b8>" 194 "\012<b9>" 195 "\013PauseFilter" 196 "\014<b11>" 197 "\015PauseFilterThreshold" 198 "\016AVIC" 199 ); 200 201 /* bhyve requires the Nested Paging feature */ 202 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 203 printf("SVM: Nested Paging feature not available.\n"); 204 return (ENXIO); 205 } 206 207 /* bhyve requires the NRIP Save feature */ 208 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 209 printf("SVM: NRIP Save feature not available.\n"); 210 return (ENXIO); 211 } 212 213 return (0); 214} 215 216static void 217svm_enable(void *arg __unused) 218{ 219 uint64_t efer; 220 221 efer = rdmsr(MSR_EFER); 222 efer |= EFER_SVM; 223 wrmsr(MSR_EFER, efer); 224 225 wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu])); 226} 227 228/* 229 * Return 1 if SVM is enabled on this processor and 0 otherwise. 230 */ 231static int 232svm_available(void) 233{ 234 uint64_t msr; 235 236 /* Section 15.4 Enabling SVM from APM2. */ 237 if ((amd_feature2 & AMDID2_SVM) == 0) { 238 printf("SVM: not available.\n"); 239 return (0); 240 } 241 242 msr = rdmsr(MSR_VM_CR); 243 if ((msr & VM_CR_SVMDIS) != 0) { 244 printf("SVM: disabled by BIOS.\n"); 245 return (0); 246 } 247 248 return (1); 249} 250 251static int 252svm_init(int ipinum) 253{ 254 int error, cpu; 255 256 if (!svm_available()) 257 return (ENXIO); 258 259 error = check_svm_features(); 260 if (error) 261 return (error); 262 263 vmcb_clean &= VMCB_CACHE_DEFAULT; 264 265 for (cpu = 0; cpu < MAXCPU; cpu++) { 266 /* 267 * Initialize the host ASIDs to their "highest" valid values. 268 * 269 * The next ASID allocation will rollover both 'gen' and 'num' 270 * and start off the sequence at {1,1}. 271 */ 272 asid[cpu].gen = ~0UL; 273 asid[cpu].num = nasid - 1; 274 } 275 276 svm_msr_init(); 277 svm_npt_init(ipinum); 278 279 /* Enable SVM on all CPUs */ 280 smp_rendezvous(NULL, svm_enable, NULL, NULL); 281 282 return (0); 283} 284 285static void 286svm_restore(void) 287{ 288 289 svm_enable(NULL); 290} 291 292/* Pentium compatible MSRs */ 293#define MSR_PENTIUM_START 0 294#define MSR_PENTIUM_END 0x1FFF 295/* AMD 6th generation and Intel compatible MSRs */ 296#define MSR_AMD6TH_START 0xC0000000UL 297#define MSR_AMD6TH_END 0xC0001FFFUL 298/* AMD 7th and 8th generation compatible MSRs */ 299#define MSR_AMD7TH_START 0xC0010000UL 300#define MSR_AMD7TH_END 0xC0011FFFUL 301 302/* 303 * Get the index and bit position for a MSR in permission bitmap. 304 * Two bits are used for each MSR: lower bit for read and higher bit for write. 305 */ 306static int 307svm_msr_index(uint64_t msr, int *index, int *bit) 308{ 309 uint32_t base, off; 310 311 *index = -1; 312 *bit = (msr % 4) * 2; 313 base = 0; 314 315 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 316 *index = msr / 4; 317 return (0); 318 } 319 320 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 321 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 322 off = (msr - MSR_AMD6TH_START); 323 *index = (off + base) / 4; 324 return (0); 325 } 326 327 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 328 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 329 off = (msr - MSR_AMD7TH_START); 330 *index = (off + base) / 4; 331 return (0); 332 } 333 334 return (EINVAL); 335} 336 337/* 338 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 339 */ 340static void 341svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 342{ 343 int index, bit, error; 344 345 error = svm_msr_index(msr, &index, &bit); 346 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 347 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 348 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 349 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 350 "msr %#lx", __func__, bit, msr)); 351 352 if (read) 353 perm_bitmap[index] &= ~(1UL << bit); 354 355 if (write) 356 perm_bitmap[index] &= ~(2UL << bit); 357} 358 359static void 360svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 361{ 362 363 svm_msr_perm(perm_bitmap, msr, true, true); 364} 365 366static void 367svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 368{ 369 370 svm_msr_perm(perm_bitmap, msr, true, false); 371} 372 373static __inline int 374svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 375{ 376 struct vmcb_ctrl *ctrl; 377 378 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 379 380 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 381 return (ctrl->intercept[idx] & bitmask ? 1 : 0); 382} 383 384static __inline void 385svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 386 int enabled) 387{ 388 struct vmcb_ctrl *ctrl; 389 uint32_t oldval; 390 391 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 392 393 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 394 oldval = ctrl->intercept[idx]; 395 396 if (enabled) 397 ctrl->intercept[idx] |= bitmask; 398 else 399 ctrl->intercept[idx] &= ~bitmask; 400 401 if (ctrl->intercept[idx] != oldval) { 402 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 403 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 404 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); 405 } 406} 407 408static __inline void 409svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 410{ 411 412 svm_set_intercept(sc, vcpu, off, bitmask, 0); 413} 414 415static __inline void 416svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 417{ 418 419 svm_set_intercept(sc, vcpu, off, bitmask, 1); 420} 421 422static void 423vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 424 uint64_t msrpm_base_pa, uint64_t np_pml4) 425{ 426 struct vmcb_ctrl *ctrl; 427 struct vmcb_state *state; 428 uint32_t mask; 429 int n; 430 431 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 432 state = svm_get_vmcb_state(sc, vcpu); 433 434 ctrl->iopm_base_pa = iopm_base_pa; 435 ctrl->msrpm_base_pa = msrpm_base_pa; 436 437 /* Enable nested paging */ 438 ctrl->np_enable = 1; 439 ctrl->n_cr3 = np_pml4; 440 441 /* 442 * Intercept accesses to the control registers that are not shadowed 443 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 444 */ 445 for (n = 0; n < 16; n++) { 446 mask = (BIT(n) << 16) | BIT(n); 447 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 448 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 449 else 450 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 451 } 452 453 /* Intercept Machine Check exceptions. */ 454 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 455 456 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 457 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 458 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 459 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 460 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 461 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 462 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 463 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 464 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 465 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 466 VMCB_INTCPT_FERR_FREEZE); 467 468 /* 469 * From section "Canonicalization and Consistency Checks" in APMv2 470 * the VMRUN intercept bit must be set to pass the consistency check. 471 */ 472 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 473 474 /* 475 * The ASID will be set to a non-zero value just before VMRUN. 476 */ 477 ctrl->asid = 0; 478 479 /* 480 * Section 15.21.1, Interrupt Masking in EFLAGS 481 * Section 15.21.2, Virtualizing APIC.TPR 482 * 483 * This must be set for %rflag and %cr8 isolation of guest and host. 484 */ 485 ctrl->v_intr_masking = 1; 486 487 /* Enable Last Branch Record aka LBR for debugging */ 488 ctrl->lbr_virt_en = 1; 489 state->dbgctl = BIT(0); 490 491 /* EFER_SVM must always be set when the guest is executing */ 492 state->efer = EFER_SVM; 493 494 /* Set up the PAT to power-on state */ 495 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 496 PAT_VALUE(1, PAT_WRITE_THROUGH) | 497 PAT_VALUE(2, PAT_UNCACHED) | 498 PAT_VALUE(3, PAT_UNCACHEABLE) | 499 PAT_VALUE(4, PAT_WRITE_BACK) | 500 PAT_VALUE(5, PAT_WRITE_THROUGH) | 501 PAT_VALUE(6, PAT_UNCACHED) | 502 PAT_VALUE(7, PAT_UNCACHEABLE); 503} 504 505/* 506 * Initialize a virtual machine. 507 */ 508static void * 509svm_vminit(struct vm *vm, pmap_t pmap) 510{ 511 struct svm_softc *svm_sc; 512 struct svm_vcpu *vcpu; 513 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 514 int i; 515 516 svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO); 517 svm_sc->vm = vm; 518 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 519 520 /* 521 * Intercept read and write accesses to all MSRs. 522 */ 523 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 524 525 /* 526 * Access to the following MSRs is redirected to the VMCB when the 527 * guest is executing. Therefore it is safe to allow the guest to 528 * read/write these MSRs directly without hypervisor involvement. 529 */ 530 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 531 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 532 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 533 534 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 535 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 536 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 537 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 538 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 539 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 540 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 541 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 542 543 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 544 545 /* 546 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 547 */ 548 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 549 550 /* Intercept access to all I/O ports. */ 551 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); 552 553 iopm_pa = vtophys(svm_sc->iopm_bitmap); 554 msrpm_pa = vtophys(svm_sc->msr_bitmap); 555 pml4_pa = svm_sc->nptp; 556 for (i = 0; i < VM_MAXCPU; i++) { 557 vcpu = svm_get_vcpu(svm_sc, i); 558 vcpu->lastcpu = NOCPU; 559 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 560 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 561 svm_msr_guest_init(svm_sc, i); 562 } 563 return (svm_sc); 564} 565 566static int 567svm_cpl(struct vmcb_state *state) 568{ 569 570 /* 571 * From APMv2: 572 * "Retrieve the CPL from the CPL field in the VMCB, not 573 * from any segment DPL" 574 */ 575 return (state->cpl); 576} 577 578static enum vm_cpu_mode 579svm_vcpu_mode(struct vmcb *vmcb) 580{ 581 struct vmcb_segment seg; 582 struct vmcb_state *state; 583 int error; 584 585 state = &vmcb->state; 586 587 if (state->efer & EFER_LMA) { 588 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 589 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 590 error)); 591 592 /* 593 * Section 4.8.1 for APM2, check if Code Segment has 594 * Long attribute set in descriptor. 595 */ 596 if (seg.attrib & VMCB_CS_ATTRIB_L) 597 return (CPU_MODE_64BIT); 598 else 599 return (CPU_MODE_COMPATIBILITY); 600 } else if (state->cr0 & CR0_PE) { 601 return (CPU_MODE_PROTECTED); 602 } else { 603 return (CPU_MODE_REAL); 604 } 605} 606 607static enum vm_paging_mode 608svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 609{ 610 611 if ((cr0 & CR0_PG) == 0) 612 return (PAGING_MODE_FLAT); 613 if ((cr4 & CR4_PAE) == 0) 614 return (PAGING_MODE_32); 615 if (efer & EFER_LME) 616 return (PAGING_MODE_64); 617 else 618 return (PAGING_MODE_PAE); 619} 620 621/* 622 * ins/outs utility routines 623 */ 624static uint64_t 625svm_inout_str_index(struct svm_regctx *regs, int in) 626{ 627 uint64_t val; 628 629 val = in ? regs->sctx_rdi : regs->sctx_rsi; 630 631 return (val); 632} 633 634static uint64_t 635svm_inout_str_count(struct svm_regctx *regs, int rep) 636{ 637 uint64_t val; 638 639 val = rep ? regs->sctx_rcx : 1; 640 641 return (val); 642} 643 644static void 645svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 646 int in, struct vm_inout_str *vis) 647{ 648 int error, s; 649 650 if (in) { 651 vis->seg_name = VM_REG_GUEST_ES; 652 } else { 653 /* The segment field has standard encoding */ 654 s = (info1 >> 10) & 0x7; 655 vis->seg_name = vm_segment_name(s); 656 } 657 658 error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 659 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 660} 661 662static int 663svm_inout_str_addrsize(uint64_t info1) 664{ 665 uint32_t size; 666 667 size = (info1 >> 7) & 0x7; 668 switch (size) { 669 case 1: 670 return (2); /* 16 bit */ 671 case 2: 672 return (4); /* 32 bit */ 673 case 4: 674 return (8); /* 64 bit */ 675 default: 676 panic("%s: invalid size encoding %d", __func__, size); 677 } 678} 679 680static void 681svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 682{ 683 struct vmcb_state *state; 684 685 state = &vmcb->state; 686 paging->cr3 = state->cr3; 687 paging->cpl = svm_cpl(state); 688 paging->cpu_mode = svm_vcpu_mode(vmcb); 689 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 690 state->efer); 691} 692 693#define UNHANDLED 0 694 695/* 696 * Handle guest I/O intercept. 697 */ 698static int 699svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 700{ 701 struct vmcb_ctrl *ctrl; 702 struct vmcb_state *state; 703 struct svm_regctx *regs; 704 struct vm_inout_str *vis; 705 uint64_t info1; 706 int inout_string; 707 708 state = svm_get_vmcb_state(svm_sc, vcpu); 709 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 710 regs = svm_get_guest_regctx(svm_sc, vcpu); 711 712 info1 = ctrl->exitinfo1; 713 inout_string = info1 & BIT(2) ? 1 : 0; 714 715 /* 716 * The effective segment number in EXITINFO1[12:10] is populated 717 * only if the processor has the DecodeAssist capability. 718 * 719 * XXX this is not specified explicitly in APMv2 but can be verified 720 * empirically. 721 */ 722 if (inout_string && !decode_assist()) 723 return (UNHANDLED); 724 725 vmexit->exitcode = VM_EXITCODE_INOUT; 726 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 727 vmexit->u.inout.string = inout_string; 728 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 729 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 730 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 731 vmexit->u.inout.eax = (uint32_t)(state->rax); 732 733 if (inout_string) { 734 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 735 vis = &vmexit->u.inout_str; 736 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 737 vis->rflags = state->rflags; 738 vis->cr0 = state->cr0; 739 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 740 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 741 vis->addrsize = svm_inout_str_addrsize(info1); 742 svm_inout_str_seginfo(svm_sc, vcpu, info1, 743 vmexit->u.inout.in, vis); 744 } 745 746 return (UNHANDLED); 747} 748 749static int 750npf_fault_type(uint64_t exitinfo1) 751{ 752 753 if (exitinfo1 & VMCB_NPF_INFO1_W) 754 return (VM_PROT_WRITE); 755 else 756 return (VM_PROT_READ); 757} 758 759static bool 760svm_npf_emul_fault(uint64_t exitinfo1) 761{ 762 763 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 764 return (false); 765 } 766 767 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 768 return (false); 769 } 770 771 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 772 return (false); 773 } 774 775 return (true); 776} 777 778static void 779svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 780{ 781 struct vm_guest_paging *paging; 782 struct vmcb_segment seg; 783 struct vmcb_ctrl *ctrl; 784 char *inst_bytes; 785 int error, inst_len; 786 787 ctrl = &vmcb->ctrl; 788 paging = &vmexit->u.inst_emul.paging; 789 790 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 791 vmexit->u.inst_emul.gpa = gpa; 792 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 793 svm_paging_info(vmcb, paging); 794 795 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 796 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 797 798 switch(paging->cpu_mode) { 799 case CPU_MODE_PROTECTED: 800 case CPU_MODE_COMPATIBILITY: 801 /* 802 * Section 4.8.1 of APM2, Default Operand Size or D bit. 803 */ 804 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 805 1 : 0; 806 break; 807 default: 808 vmexit->u.inst_emul.cs_d = 0; 809 break; 810 } 811 812 /* 813 * Copy the instruction bytes into 'vie' if available. 814 */ 815 if (decode_assist() && !disable_npf_assist) { 816 inst_len = ctrl->inst_len; 817 inst_bytes = ctrl->inst_bytes; 818 } else { 819 inst_len = 0; 820 inst_bytes = NULL; 821 } 822 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 823} 824 825#ifdef KTR 826static const char * 827intrtype_to_str(int intr_type) 828{ 829 switch (intr_type) { 830 case VMCB_EVENTINJ_TYPE_INTR: 831 return ("hwintr"); 832 case VMCB_EVENTINJ_TYPE_NMI: 833 return ("nmi"); 834 case VMCB_EVENTINJ_TYPE_INTn: 835 return ("swintr"); 836 case VMCB_EVENTINJ_TYPE_EXCEPTION: 837 return ("exception"); 838 default: 839 panic("%s: unknown intr_type %d", __func__, intr_type); 840 } 841} 842#endif 843 844/* 845 * Inject an event to vcpu as described in section 15.20, "Event injection". 846 */ 847static void 848svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 849 uint32_t error, bool ec_valid) 850{ 851 struct vmcb_ctrl *ctrl; 852 853 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 854 855 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 856 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 857 858 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 859 __func__, vector)); 860 861 switch (intr_type) { 862 case VMCB_EVENTINJ_TYPE_INTR: 863 case VMCB_EVENTINJ_TYPE_NMI: 864 case VMCB_EVENTINJ_TYPE_INTn: 865 break; 866 case VMCB_EVENTINJ_TYPE_EXCEPTION: 867 if (vector >= 0 && vector <= 31 && vector != 2) 868 break; 869 /* FALLTHROUGH */ 870 default: 871 panic("%s: invalid intr_type/vector: %d/%d", __func__, 872 intr_type, vector); 873 } 874 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 875 if (ec_valid) { 876 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 877 ctrl->eventinj |= (uint64_t)error << 32; 878 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 879 intrtype_to_str(intr_type), vector, error); 880 } else { 881 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 882 intrtype_to_str(intr_type), vector); 883 } 884} 885 886static void 887svm_update_virqinfo(struct svm_softc *sc, int vcpu) 888{ 889 struct vm *vm; 890 struct vlapic *vlapic; 891 struct vmcb_ctrl *ctrl; 892 int pending; 893 894 vm = sc->vm; 895 vlapic = vm_lapic(vm, vcpu); 896 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 897 898 /* Update %cr8 in the emulated vlapic */ 899 vlapic_set_cr8(vlapic, ctrl->v_tpr); 900 901 /* 902 * If V_IRQ indicates that the interrupt injection attempted on then 903 * last VMRUN was successful then update the vlapic accordingly. 904 */ 905 if (ctrl->v_intr_vector != 0) { 906 pending = ctrl->v_irq; 907 KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid " 908 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 909 KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 910 VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector, 911 pending ? "pending" : "accepted"); 912 if (!pending) 913 vlapic_intr_accepted(vlapic, ctrl->v_intr_vector); 914 } 915} 916 917static void 918svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 919{ 920 struct vmcb_ctrl *ctrl; 921 uint64_t intinfo; 922 923 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 924 intinfo = ctrl->exitintinfo; 925 if (!VMCB_EXITINTINFO_VALID(intinfo)) 926 return; 927 928 /* 929 * From APMv2, Section "Intercepts during IDT interrupt delivery" 930 * 931 * If a #VMEXIT happened during event delivery then record the event 932 * that was being delivered. 933 */ 934 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 935 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 936 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 937 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 938} 939 940static __inline int 941vintr_intercept_enabled(struct svm_softc *sc, int vcpu) 942{ 943 944 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 945 VMCB_INTCPT_VINTR)); 946} 947 948static __inline void 949enable_intr_window_exiting(struct svm_softc *sc, int vcpu) 950{ 951 struct vmcb_ctrl *ctrl; 952 953 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 954 955 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 956 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 957 KASSERT(vintr_intercept_enabled(sc, vcpu), 958 ("%s: vintr intercept should be enabled", __func__)); 959 return; 960 } 961 962 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); 963 ctrl->v_irq = 1; 964 ctrl->v_ign_tpr = 1; 965 ctrl->v_intr_vector = 0; 966 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 967 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 968} 969 970static __inline void 971disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 972{ 973 struct vmcb_ctrl *ctrl; 974 975 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 976 977 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 978 KASSERT(!vintr_intercept_enabled(sc, vcpu), 979 ("%s: vintr intercept should be disabled", __func__)); 980 return; 981 } 982 983#ifdef KTR 984 if (ctrl->v_intr_vector == 0) 985 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 986 else 987 VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection"); 988#endif 989 ctrl->v_irq = 0; 990 ctrl->v_intr_vector = 0; 991 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 992 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 993} 994 995static int 996svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val) 997{ 998 struct vmcb_ctrl *ctrl; 999 int oldval, newval; 1000 1001 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1002 oldval = ctrl->intr_shadow; 1003 newval = val ? 1 : 0; 1004 if (newval != oldval) { 1005 ctrl->intr_shadow = newval; 1006 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); 1007 } 1008 return (0); 1009} 1010 1011static int 1012svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val) 1013{ 1014 struct vmcb_ctrl *ctrl; 1015 1016 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1017 *val = ctrl->intr_shadow; 1018 return (0); 1019} 1020 1021/* 1022 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1023 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1024 * to track when the vcpu is done handling the NMI. 1025 */ 1026static int 1027nmi_blocked(struct svm_softc *sc, int vcpu) 1028{ 1029 int blocked; 1030 1031 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1032 VMCB_INTCPT_IRET); 1033 return (blocked); 1034} 1035 1036static void 1037enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1038{ 1039 1040 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1041 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1042 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1043} 1044 1045static void 1046clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1047{ 1048 int error; 1049 1050 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1051 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1052 /* 1053 * When the IRET intercept is cleared the vcpu will attempt to execute 1054 * the "iret" when it runs next. However, it is possible to inject 1055 * another NMI into the vcpu before the "iret" has actually executed. 1056 * 1057 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1058 * it will trap back into the hypervisor. If an NMI is pending for 1059 * the vcpu it will be injected into the guest. 1060 * 1061 * XXX this needs to be fixed 1062 */ 1063 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1064 1065 /* 1066 * Set 'intr_shadow' to prevent an NMI from being injected on the 1067 * immediate VMRUN. 1068 */ 1069 error = svm_modify_intr_shadow(sc, vcpu, 1); 1070 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1071} 1072 1073static int 1074emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1075 bool *retu) 1076{ 1077 int error; 1078 1079 if (lapic_msr(num)) 1080 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1081 else if (num == MSR_EFER) 1082 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val); 1083 else 1084 error = svm_wrmsr(sc, vcpu, num, val, retu); 1085 1086 return (error); 1087} 1088 1089static int 1090emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1091{ 1092 struct vmcb_state *state; 1093 struct svm_regctx *ctx; 1094 uint64_t result; 1095 int error; 1096 1097 if (lapic_msr(num)) 1098 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1099 else 1100 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1101 1102 if (error == 0) { 1103 state = svm_get_vmcb_state(sc, vcpu); 1104 ctx = svm_get_guest_regctx(sc, vcpu); 1105 state->rax = result & 0xffffffff; 1106 ctx->sctx_rdx = result >> 32; 1107 } 1108 1109 return (error); 1110} 1111 1112#ifdef KTR 1113static const char * 1114exit_reason_to_str(uint64_t reason) 1115{ 1116 static char reasonbuf[32]; 1117 1118 switch (reason) { 1119 case VMCB_EXIT_INVALID: 1120 return ("invalvmcb"); 1121 case VMCB_EXIT_SHUTDOWN: 1122 return ("shutdown"); 1123 case VMCB_EXIT_NPF: 1124 return ("nptfault"); 1125 case VMCB_EXIT_PAUSE: 1126 return ("pause"); 1127 case VMCB_EXIT_HLT: 1128 return ("hlt"); 1129 case VMCB_EXIT_CPUID: 1130 return ("cpuid"); 1131 case VMCB_EXIT_IO: 1132 return ("inout"); 1133 case VMCB_EXIT_MC: 1134 return ("mchk"); 1135 case VMCB_EXIT_INTR: 1136 return ("extintr"); 1137 case VMCB_EXIT_NMI: 1138 return ("nmi"); 1139 case VMCB_EXIT_VINTR: 1140 return ("vintr"); 1141 case VMCB_EXIT_MSR: 1142 return ("msr"); 1143 case VMCB_EXIT_IRET: 1144 return ("iret"); 1145 default: 1146 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1147 return (reasonbuf); 1148 } 1149} 1150#endif /* KTR */ 1151 1152/* 1153 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1154 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1155 * and exceptions caused by INT3, INTO and BOUND instructions. 1156 * 1157 * Return 1 if the nRIP is valid and 0 otherwise. 1158 */ 1159static int 1160nrip_valid(uint64_t exitcode) 1161{ 1162 switch (exitcode) { 1163 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1164 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1165 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1166 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1167 case 0x43: /* INT3 */ 1168 case 0x44: /* INTO */ 1169 case 0x45: /* BOUND */ 1170 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1171 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1172 return (1); 1173 default: 1174 return (0); 1175 } 1176} 1177 1178/* 1179 * Collateral for a generic SVM VM-exit. 1180 */ 1181static void 1182vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 1183{ 1184 1185 vme->exitcode = VM_EXITCODE_SVM; 1186 vme->u.svm.exitcode = code; 1187 vme->u.svm.exitinfo1 = info1; 1188 vme->u.svm.exitinfo2 = info2; 1189} 1190 1191static int 1192svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1193{ 1194 struct vmcb *vmcb; 1195 struct vmcb_state *state; 1196 struct vmcb_ctrl *ctrl; 1197 struct svm_regctx *ctx; 1198 uint64_t code, info1, info2, val; 1199 uint32_t eax, ecx, edx; 1200 int handled; 1201 bool retu; 1202 1203 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1204 vmcb = svm_get_vmcb(svm_sc, vcpu); 1205 state = &vmcb->state; 1206 ctrl = &vmcb->ctrl; 1207 1208 handled = 0; 1209 code = ctrl->exitcode; 1210 info1 = ctrl->exitinfo1; 1211 info2 = ctrl->exitinfo2; 1212 1213 vmexit->exitcode = VM_EXITCODE_BOGUS; 1214 vmexit->rip = state->rip; 1215 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1216 1217 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1218 1219 /* 1220 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1221 * in an inconsistent state and can trigger assertions that would 1222 * never happen otherwise. 1223 */ 1224 if (code == VMCB_EXIT_INVALID) { 1225 vm_exit_svm(vmexit, code, info1, info2); 1226 return (0); 1227 } 1228 1229 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1230 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1231 1232 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1233 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1234 vmexit->inst_length, code, info1, info2)); 1235 1236 svm_update_virqinfo(svm_sc, vcpu); 1237 svm_save_intinfo(svm_sc, vcpu); 1238 1239 switch (code) { 1240 case VMCB_EXIT_IRET: 1241 /* 1242 * Restart execution at "iret" but with the intercept cleared. 1243 */ 1244 vmexit->inst_length = 0; 1245 clear_nmi_blocking(svm_sc, vcpu); 1246 handled = 1; 1247 break; 1248 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1249 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1250 handled = 1; 1251 break; 1252 case VMCB_EXIT_INTR: /* external interrupt */ 1253 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1254 handled = 1; 1255 break; 1256 case VMCB_EXIT_NMI: /* external NMI */ 1257 handled = 1; 1258 break; 1259 case VMCB_EXIT_MC: /* machine check */ 1260 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1261 break; 1262 case VMCB_EXIT_MSR: /* MSR access. */ 1263 eax = state->rax; 1264 ecx = ctx->sctx_rcx; 1265 edx = ctx->sctx_rdx; 1266 retu = false; 1267 1268 if (info1) { 1269 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1270 val = (uint64_t)edx << 32 | eax; 1271 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1272 ecx, val); 1273 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1274 vmexit->exitcode = VM_EXITCODE_WRMSR; 1275 vmexit->u.msr.code = ecx; 1276 vmexit->u.msr.wval = val; 1277 } else if (!retu) { 1278 handled = 1; 1279 } else { 1280 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1281 ("emulate_wrmsr retu with bogus exitcode")); 1282 } 1283 } else { 1284 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1285 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1286 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1287 vmexit->exitcode = VM_EXITCODE_RDMSR; 1288 vmexit->u.msr.code = ecx; 1289 } else if (!retu) { 1290 handled = 1; 1291 } else { 1292 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1293 ("emulate_rdmsr retu with bogus exitcode")); 1294 } 1295 } 1296 break; 1297 case VMCB_EXIT_IO: 1298 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1299 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1300 break; 1301 case VMCB_EXIT_CPUID: 1302 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1303 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1304 (uint32_t *)&state->rax, 1305 (uint32_t *)&ctx->sctx_rbx, 1306 (uint32_t *)&ctx->sctx_rcx, 1307 (uint32_t *)&ctx->sctx_rdx); 1308 break; 1309 case VMCB_EXIT_HLT: 1310 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1311 vmexit->exitcode = VM_EXITCODE_HLT; 1312 vmexit->u.hlt.rflags = state->rflags; 1313 break; 1314 case VMCB_EXIT_PAUSE: 1315 vmexit->exitcode = VM_EXITCODE_PAUSE; 1316 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1317 break; 1318 case VMCB_EXIT_NPF: 1319 /* EXITINFO2 contains the faulting guest physical address */ 1320 if (info1 & VMCB_NPF_INFO1_RSV) { 1321 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1322 "reserved bits set: info1(%#lx) info2(%#lx)", 1323 info1, info2); 1324 } else if (vm_mem_allocated(svm_sc->vm, info2)) { 1325 vmexit->exitcode = VM_EXITCODE_PAGING; 1326 vmexit->u.paging.gpa = info2; 1327 vmexit->u.paging.fault_type = npf_fault_type(info1); 1328 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1329 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1330 "on gpa %#lx/%#lx at rip %#lx", 1331 info2, info1, state->rip); 1332 } else if (svm_npf_emul_fault(info1)) { 1333 svm_handle_inst_emul(vmcb, info2, vmexit); 1334 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1335 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1336 "for gpa %#lx/%#lx at rip %#lx", 1337 info2, info1, state->rip); 1338 } 1339 break; 1340 default: 1341 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1342 break; 1343 } 1344 1345 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1346 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1347 vmexit->rip, vmexit->inst_length); 1348 1349 if (handled) { 1350 vmexit->rip += vmexit->inst_length; 1351 vmexit->inst_length = 0; 1352 state->rip = vmexit->rip; 1353 } else { 1354 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1355 /* 1356 * If this VM exit was not claimed by anybody then 1357 * treat it as a generic SVM exit. 1358 */ 1359 vm_exit_svm(vmexit, code, info1, info2); 1360 } else { 1361 /* 1362 * The exitcode and collateral have been populated. 1363 * The VM exit will be processed further in userland. 1364 */ 1365 } 1366 } 1367 return (handled); 1368} 1369 1370static void 1371svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1372{ 1373 uint64_t intinfo; 1374 1375 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1376 return; 1377 1378 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1379 "valid: %#lx", __func__, intinfo)); 1380 1381 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1382 VMCB_EXITINTINFO_VECTOR(intinfo), 1383 VMCB_EXITINTINFO_EC(intinfo), 1384 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1385 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1386 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1387} 1388 1389/* 1390 * Inject event to virtual cpu. 1391 */ 1392static void 1393svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1394{ 1395 struct vmcb_ctrl *ctrl; 1396 struct vmcb_state *state; 1397 uint8_t v_tpr; 1398 int vector, need_intr_window, pending_apic_vector; 1399 1400 state = svm_get_vmcb_state(sc, vcpu); 1401 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1402 1403 need_intr_window = 0; 1404 pending_apic_vector = 0; 1405 1406 /* 1407 * Inject pending events or exceptions for this vcpu. 1408 * 1409 * An event might be pending because the previous #VMEXIT happened 1410 * during event delivery (i.e. ctrl->exitintinfo). 1411 * 1412 * An event might also be pending because an exception was injected 1413 * by the hypervisor (e.g. #PF during instruction emulation). 1414 */ 1415 svm_inj_intinfo(sc, vcpu); 1416 1417 /* NMI event has priority over interrupts. */ 1418 if (vm_nmi_pending(sc->vm, vcpu)) { 1419 if (nmi_blocked(sc, vcpu)) { 1420 /* 1421 * Can't inject another NMI if the guest has not 1422 * yet executed an "iret" after the last NMI. 1423 */ 1424 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1425 "to NMI-blocking"); 1426 } else if (ctrl->intr_shadow) { 1427 /* 1428 * Can't inject an NMI if the vcpu is in an intr_shadow. 1429 */ 1430 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1431 "interrupt shadow"); 1432 need_intr_window = 1; 1433 goto done; 1434 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1435 /* 1436 * If there is already an exception/interrupt pending 1437 * then defer the NMI until after that. 1438 */ 1439 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1440 "eventinj %#lx", ctrl->eventinj); 1441 1442 /* 1443 * Use self-IPI to trigger a VM-exit as soon as 1444 * possible after the event injection is completed. 1445 * 1446 * This works only if the external interrupt exiting 1447 * is at a lower priority than the event injection. 1448 * 1449 * Although not explicitly specified in APMv2 the 1450 * relative priorities were verified empirically. 1451 */ 1452 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1453 } else { 1454 vm_nmi_clear(sc->vm, vcpu); 1455 1456 /* Inject NMI, vector number is not used */ 1457 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1458 IDT_NMI, 0, false); 1459 1460 /* virtual NMI blocking is now in effect */ 1461 enable_nmi_blocking(sc, vcpu); 1462 1463 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1464 } 1465 } 1466 1467 if (!vm_extint_pending(sc->vm, vcpu)) { 1468 /* 1469 * APIC interrupts are delivered using the V_IRQ offload. 1470 * 1471 * The primary benefit is that the hypervisor doesn't need to 1472 * deal with the various conditions that inhibit interrupts. 1473 * It also means that TPR changes via CR8 will be handled 1474 * without any hypervisor involvement. 1475 * 1476 * Note that the APIC vector must remain pending in the vIRR 1477 * until it is confirmed that it was delivered to the guest. 1478 * This can be confirmed based on the value of V_IRQ at the 1479 * next #VMEXIT (1 = pending, 0 = delivered). 1480 * 1481 * Also note that it is possible that another higher priority 1482 * vector can become pending before this vector is delivered 1483 * to the guest. This is alright because vcpu_notify_event() 1484 * will send an IPI and force the vcpu to trap back into the 1485 * hypervisor. The higher priority vector will be injected on 1486 * the next VMRUN. 1487 */ 1488 if (vlapic_pending_intr(vlapic, &vector)) { 1489 KASSERT(vector >= 16 && vector <= 255, 1490 ("invalid vector %d from local APIC", vector)); 1491 pending_apic_vector = vector; 1492 } 1493 goto done; 1494 } 1495 1496 /* Ask the legacy pic for a vector to inject */ 1497 vatpic_pending_intr(sc->vm, &vector); 1498 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", 1499 vector)); 1500 1501 /* 1502 * If the guest has disabled interrupts or is in an interrupt shadow 1503 * then we cannot inject the pending interrupt. 1504 */ 1505 if ((state->rflags & PSL_I) == 0) { 1506 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1507 "rflags %#lx", vector, state->rflags); 1508 need_intr_window = 1; 1509 goto done; 1510 } 1511 1512 if (ctrl->intr_shadow) { 1513 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1514 "interrupt shadow", vector); 1515 need_intr_window = 1; 1516 goto done; 1517 } 1518 1519 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1520 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1521 "eventinj %#lx", vector, ctrl->eventinj); 1522 need_intr_window = 1; 1523 goto done; 1524 } 1525 1526 /* 1527 * Legacy PIC interrupts are delivered via the event injection 1528 * mechanism. 1529 */ 1530 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1531 1532 vm_extint_clear(sc->vm, vcpu); 1533 vatpic_intr_accepted(sc->vm, vector); 1534 1535 /* 1536 * Force a VM-exit as soon as the vcpu is ready to accept another 1537 * interrupt. This is done because the PIC might have another vector 1538 * that it wants to inject. Also, if the APIC has a pending interrupt 1539 * that was preempted by the ExtInt then it allows us to inject the 1540 * APIC vector as soon as possible. 1541 */ 1542 need_intr_window = 1; 1543done: 1544 /* 1545 * The guest can modify the TPR by writing to %CR8. In guest mode 1546 * the processor reflects this write to V_TPR without hypervisor 1547 * intervention. 1548 * 1549 * The guest can also modify the TPR by writing to it via the memory 1550 * mapped APIC page. In this case, the write will be emulated by the 1551 * hypervisor. For this reason V_TPR must be updated before every 1552 * VMRUN. 1553 */ 1554 v_tpr = vlapic_get_cr8(vlapic); 1555 KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1556 if (ctrl->v_tpr != v_tpr) { 1557 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1558 ctrl->v_tpr, v_tpr); 1559 ctrl->v_tpr = v_tpr; 1560 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1561 } 1562 1563 if (pending_apic_vector) { 1564 /* 1565 * If an APIC vector is being injected then interrupt window 1566 * exiting is not possible on this VMRUN. 1567 */ 1568 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1569 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1570 pending_apic_vector); 1571 1572 ctrl->v_irq = 1; 1573 ctrl->v_ign_tpr = 0; 1574 ctrl->v_intr_vector = pending_apic_vector; 1575 ctrl->v_intr_prio = pending_apic_vector >> 4; 1576 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1577 } else if (need_intr_window) { 1578 /* 1579 * We use V_IRQ in conjunction with the VINTR intercept to 1580 * trap into the hypervisor as soon as a virtual interrupt 1581 * can be delivered. 1582 * 1583 * Since injected events are not subject to intercept checks 1584 * we need to ensure that the V_IRQ is not actually going to 1585 * be delivered on VM entry. The KASSERT below enforces this. 1586 */ 1587 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1588 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1589 ("Bogus intr_window_exiting: eventinj (%#lx), " 1590 "intr_shadow (%u), rflags (%#lx)", 1591 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1592 enable_intr_window_exiting(sc, vcpu); 1593 } else { 1594 disable_intr_window_exiting(sc, vcpu); 1595 } 1596} 1597 1598static __inline void 1599restore_host_tss(void) 1600{ 1601 struct system_segment_descriptor *tss_sd; 1602 1603 /* 1604 * The TSS descriptor was in use prior to launching the guest so it 1605 * has been marked busy. 1606 * 1607 * 'ltr' requires the descriptor to be marked available so change the 1608 * type to "64-bit available TSS". 1609 */ 1610 tss_sd = PCPU_GET(tss); 1611 tss_sd->sd_type = SDT_SYSTSS; 1612 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1613} 1614 1615static void 1616check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1617{ 1618 struct svm_vcpu *vcpustate; 1619 struct vmcb_ctrl *ctrl; 1620 long eptgen; 1621 bool alloc_asid; 1622 1623 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1624 "active on cpu %u", __func__, thiscpu)); 1625 1626 vcpustate = svm_get_vcpu(sc, vcpuid); 1627 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1628 1629 /* 1630 * The TLB entries associated with the vcpu's ASID are not valid 1631 * if either of the following conditions is true: 1632 * 1633 * 1. The vcpu's ASID generation is different than the host cpu's 1634 * ASID generation. This happens when the vcpu migrates to a new 1635 * host cpu. It can also happen when the number of vcpus executing 1636 * on a host cpu is greater than the number of ASIDs available. 1637 * 1638 * 2. The pmap generation number is different than the value cached in 1639 * the 'vcpustate'. This happens when the host invalidates pages 1640 * belonging to the guest. 1641 * 1642 * asidgen eptgen Action 1643 * mismatch mismatch 1644 * 0 0 (a) 1645 * 0 1 (b1) or (b2) 1646 * 1 0 (c) 1647 * 1 1 (d) 1648 * 1649 * (a) There is no mismatch in eptgen or ASID generation and therefore 1650 * no further action is needed. 1651 * 1652 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1653 * retained and the TLB entries associated with this ASID 1654 * are flushed by VMRUN. 1655 * 1656 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1657 * allocated. 1658 * 1659 * (c) A new ASID is allocated. 1660 * 1661 * (d) A new ASID is allocated. 1662 */ 1663 1664 alloc_asid = false; 1665 eptgen = pmap->pm_eptgen; 1666 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1667 1668 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1669 alloc_asid = true; /* (c) and (d) */ 1670 } else if (vcpustate->eptgen != eptgen) { 1671 if (flush_by_asid()) 1672 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1673 else 1674 alloc_asid = true; /* (b2) */ 1675 } else { 1676 /* 1677 * This is the common case (a). 1678 */ 1679 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1680 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1681 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1682 } 1683 1684 if (alloc_asid) { 1685 if (++asid[thiscpu].num >= nasid) { 1686 asid[thiscpu].num = 1; 1687 if (++asid[thiscpu].gen == 0) 1688 asid[thiscpu].gen = 1; 1689 /* 1690 * If this cpu does not support "flush-by-asid" 1691 * then flush the entire TLB on a generation 1692 * bump. Subsequent ASID allocation in this 1693 * generation can be done without a TLB flush. 1694 */ 1695 if (!flush_by_asid()) 1696 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1697 } 1698 vcpustate->asid.gen = asid[thiscpu].gen; 1699 vcpustate->asid.num = asid[thiscpu].num; 1700 1701 ctrl->asid = vcpustate->asid.num; 1702 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1703 /* 1704 * If this cpu supports "flush-by-asid" then the TLB 1705 * was not flushed after the generation bump. The TLB 1706 * is flushed selectively after every new ASID allocation. 1707 */ 1708 if (flush_by_asid()) 1709 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1710 } 1711 vcpustate->eptgen = eptgen; 1712 1713 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1714 KASSERT(ctrl->asid == vcpustate->asid.num, 1715 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1716} 1717 1718static __inline void 1719disable_gintr(void) 1720{ 1721 1722 __asm __volatile("clgi" : : :); 1723} 1724 1725static __inline void 1726enable_gintr(void) 1727{ 1728 1729 __asm __volatile("stgi" : : :); 1730} 1731 1732/* 1733 * Start vcpu with specified RIP. 1734 */ 1735static int 1736svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1737 void *rend_cookie, void *suspended_cookie) 1738{ 1739 struct svm_regctx *gctx; 1740 struct svm_softc *svm_sc; 1741 struct svm_vcpu *vcpustate; 1742 struct vmcb_state *state; 1743 struct vmcb_ctrl *ctrl; 1744 struct vm_exit *vmexit; 1745 struct vlapic *vlapic; 1746 struct vm *vm; 1747 uint64_t vmcb_pa; 1748 u_int thiscpu; 1749 int handled; 1750 1751 svm_sc = arg; 1752 vm = svm_sc->vm; 1753 1754 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1755 state = svm_get_vmcb_state(svm_sc, vcpu); 1756 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1757 vmexit = vm_exitinfo(vm, vcpu); 1758 vlapic = vm_lapic(vm, vcpu); 1759 1760 /* 1761 * Stash 'curcpu' on the stack as 'thiscpu'. 1762 * 1763 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1764 * after the #VMEXIT. Since VMRUN is executed inside a critical section 1765 * 'curcpu' and 'thiscpu' are guaranteed to identical. 1766 */ 1767 thiscpu = curcpu; 1768 1769 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1770 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1771 1772 if (vcpustate->lastcpu != thiscpu) { 1773 /* 1774 * Force new ASID allocation by invalidating the generation. 1775 */ 1776 vcpustate->asid.gen = 0; 1777 1778 /* 1779 * Invalidate the VMCB state cache by marking all fields dirty. 1780 */ 1781 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 1782 1783 /* 1784 * XXX 1785 * Setting 'vcpustate->lastcpu' here is bit premature because 1786 * we may return from this function without actually executing 1787 * the VMRUN instruction. This could happen if a rendezvous 1788 * or an AST is pending on the first time through the loop. 1789 * 1790 * This works for now but any new side-effects of vcpu 1791 * migration should take this case into account. 1792 */ 1793 vcpustate->lastcpu = thiscpu; 1794 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1795 } 1796 1797 svm_msr_guest_enter(svm_sc, vcpu); 1798 1799 /* Update Guest RIP */ 1800 state->rip = rip; 1801 1802 do { 1803 /* 1804 * Disable global interrupts to guarantee atomicity during 1805 * loading of guest state. This includes not only the state 1806 * loaded by the "vmrun" instruction but also software state 1807 * maintained by the hypervisor: suspended and rendezvous 1808 * state, NPT generation number, vlapic interrupts etc. 1809 */ 1810 disable_gintr(); 1811 1812 if (vcpu_suspended(suspended_cookie)) { 1813 enable_gintr(); 1814 vm_exit_suspended(vm, vcpu, state->rip); 1815 break; 1816 } 1817 1818 if (vcpu_rendezvous_pending(rend_cookie)) { 1819 enable_gintr(); 1820 vm_exit_rendezvous(vm, vcpu, state->rip); 1821 break; 1822 } 1823 1824 /* We are asked to give the cpu by scheduler. */ 1825 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1826 enable_gintr(); 1827 vm_exit_astpending(vm, vcpu, state->rip); 1828 break; 1829 } 1830 1831 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1832 1833 /* Activate the nested pmap on 'thiscpu' */ 1834 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1835 1836 /* 1837 * Check the pmap generation and the ASID generation to 1838 * ensure that the vcpu does not use stale TLB mappings. 1839 */ 1840 check_asid(svm_sc, vcpu, pmap, thiscpu); 1841 1842 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; 1843 vcpustate->dirty = 0; 1844 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 1845 1846 /* Launch Virtual Machine. */ 1847 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 1848 svm_launch(vmcb_pa, gctx); 1849 1850 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1851 1852 /* 1853 * Restore MSR_GSBASE to point to the pcpu data area. 1854 * 1855 * Note that accesses done via PCPU_GET/PCPU_SET will work 1856 * only after MSR_GSBASE is restored. 1857 * 1858 * Also note that we don't bother restoring MSR_KGSBASE 1859 * since it is not used in the kernel and will be restored 1860 * when the VMRUN ioctl returns to userspace. 1861 */ 1862 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1863 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1864 thiscpu, curcpu)); 1865 1866 /* 1867 * The host GDTR and IDTR is saved by VMRUN and restored 1868 * automatically on #VMEXIT. However, the host TSS needs 1869 * to be restored explicitly. 1870 */ 1871 restore_host_tss(); 1872 1873 /* #VMEXIT disables interrupts so re-enable them here. */ 1874 enable_gintr(); 1875 1876 /* Handle #VMEXIT and if required return to user space. */ 1877 handled = svm_vmexit(svm_sc, vcpu, vmexit); 1878 } while (handled); 1879 1880 svm_msr_guest_exit(svm_sc, vcpu); 1881 1882 return (0); 1883} 1884 1885static void 1886svm_vmcleanup(void *arg) 1887{ 1888 struct svm_softc *sc = arg; 1889 1890 free(sc, M_SVM); 1891} 1892 1893static register_t * 1894swctx_regptr(struct svm_regctx *regctx, int reg) 1895{ 1896 1897 switch (reg) { 1898 case VM_REG_GUEST_RBX: 1899 return (®ctx->sctx_rbx); 1900 case VM_REG_GUEST_RCX: 1901 return (®ctx->sctx_rcx); 1902 case VM_REG_GUEST_RDX: 1903 return (®ctx->sctx_rdx); 1904 case VM_REG_GUEST_RDI: 1905 return (®ctx->sctx_rdi); 1906 case VM_REG_GUEST_RSI: 1907 return (®ctx->sctx_rsi); 1908 case VM_REG_GUEST_RBP: 1909 return (®ctx->sctx_rbp); 1910 case VM_REG_GUEST_R8: 1911 return (®ctx->sctx_r8); 1912 case VM_REG_GUEST_R9: 1913 return (®ctx->sctx_r9); 1914 case VM_REG_GUEST_R10: 1915 return (®ctx->sctx_r10); 1916 case VM_REG_GUEST_R11: 1917 return (®ctx->sctx_r11); 1918 case VM_REG_GUEST_R12: 1919 return (®ctx->sctx_r12); 1920 case VM_REG_GUEST_R13: 1921 return (®ctx->sctx_r13); 1922 case VM_REG_GUEST_R14: 1923 return (®ctx->sctx_r14); 1924 case VM_REG_GUEST_R15: 1925 return (®ctx->sctx_r15); 1926 default: 1927 return (NULL); 1928 } 1929} 1930 1931static int 1932svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 1933{ 1934 struct svm_softc *svm_sc; 1935 register_t *reg; 1936 1937 svm_sc = arg; 1938 1939 if (ident == VM_REG_GUEST_INTR_SHADOW) { 1940 return (svm_get_intr_shadow(svm_sc, vcpu, val)); 1941 } 1942 1943 if (vmcb_read(svm_sc, vcpu, ident, val) == 0) { 1944 return (0); 1945 } 1946 1947 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1948 1949 if (reg != NULL) { 1950 *val = *reg; 1951 return (0); 1952 } 1953 1954 VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident); 1955 return (EINVAL); 1956} 1957 1958static int 1959svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 1960{ 1961 struct svm_softc *svm_sc; 1962 register_t *reg; 1963 1964 svm_sc = arg; 1965 1966 if (ident == VM_REG_GUEST_INTR_SHADOW) { 1967 return (svm_modify_intr_shadow(svm_sc, vcpu, val)); 1968 } 1969 1970 if (vmcb_write(svm_sc, vcpu, ident, val) == 0) { 1971 return (0); 1972 } 1973 1974 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1975 1976 if (reg != NULL) { 1977 *reg = val; 1978 return (0); 1979 } 1980 1981 /* 1982 * XXX deal with CR3 and invalidate TLB entries tagged with the 1983 * vcpu's ASID. This needs to be treated differently depending on 1984 * whether 'running' is true/false. 1985 */ 1986 1987 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident); 1988 return (EINVAL); 1989} 1990 1991static int 1992svm_setcap(void *arg, int vcpu, int type, int val) 1993{ 1994 struct svm_softc *sc; 1995 int error; 1996 1997 sc = arg; 1998 error = 0; 1999 switch (type) { 2000 case VM_CAP_HALT_EXIT: 2001 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2002 VMCB_INTCPT_HLT, val); 2003 break; 2004 case VM_CAP_PAUSE_EXIT: 2005 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2006 VMCB_INTCPT_PAUSE, val); 2007 break; 2008 case VM_CAP_UNRESTRICTED_GUEST: 2009 /* Unrestricted guest execution cannot be disabled in SVM */ 2010 if (val == 0) 2011 error = EINVAL; 2012 break; 2013 default: 2014 error = ENOENT; 2015 break; 2016 } 2017 return (error); 2018} 2019 2020static int 2021svm_getcap(void *arg, int vcpu, int type, int *retval) 2022{ 2023 struct svm_softc *sc; 2024 int error; 2025 2026 sc = arg; 2027 error = 0; 2028 2029 switch (type) { 2030 case VM_CAP_HALT_EXIT: 2031 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2032 VMCB_INTCPT_HLT); 2033 break; 2034 case VM_CAP_PAUSE_EXIT: 2035 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2036 VMCB_INTCPT_PAUSE); 2037 break; 2038 case VM_CAP_UNRESTRICTED_GUEST: 2039 *retval = 1; /* unrestricted guest is always enabled */ 2040 break; 2041 default: 2042 error = ENOENT; 2043 break; 2044 } 2045 return (error); 2046} 2047 2048static struct vlapic * 2049svm_vlapic_init(void *arg, int vcpuid) 2050{ 2051 struct svm_softc *svm_sc; 2052 struct vlapic *vlapic; 2053 2054 svm_sc = arg; 2055 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2056 vlapic->vm = svm_sc->vm; 2057 vlapic->vcpuid = vcpuid; 2058 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 2059 2060 vlapic_init(vlapic); 2061 2062 return (vlapic); 2063} 2064 2065static void 2066svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2067{ 2068 2069 vlapic_cleanup(vlapic); 2070 free(vlapic, M_SVM_VLAPIC); 2071} 2072 2073struct vmm_ops vmm_ops_amd = { 2074 svm_init, 2075 svm_cleanup, 2076 svm_restore, 2077 svm_vminit, 2078 svm_vmrun, 2079 svm_vmcleanup, 2080 svm_getreg, 2081 svm_setreg, 2082 vmcb_getdesc, 2083 vmcb_setdesc, 2084 svm_getcap, 2085 svm_setcap, 2086 svm_npt_alloc, 2087 svm_npt_free, 2088 svm_vlapic_init, 2089 svm_vlapic_cleanup 2090}; 2091