head/sys/amd64/vmm/amd/svm.c revision 283657
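Before the listing: a minimal, self-contained sketch (not part of svm.c; names and the output format are made up for illustration) that reads the same CPUID leaf 0x8000_000A consulted by check_svm_features() below, where EBX reports the number of ASIDs and EDX carries the SVM feature bits bhyve requires.

#include <stdio.h>
#include <stdint.h>

/*
 * Raw CPUID; a real probe would first verify that the extended leaves reach
 * 0x8000000A and that CPUID.80000001H:ECX[2] (SVM) is set, which is what
 * svm_available() checks via amd_feature2.
 */
static void
cpuid_leaf(uint32_t leaf, uint32_t regs[4])
{
	__asm__ __volatile__("cpuid"
	    : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3])
	    : "a" (leaf));
}

int
main(void)
{
	uint32_t regs[4];

	cpuid_leaf(0x8000000A, regs);
	printf("ASIDs supported:    %u\n", regs[1]);	/* what svm.c calls nasid */
	printf("Nested paging (NP): %s\n", (regs[3] & 0x1) ? "yes" : "no");
	printf("NRIP save:          %s\n", (regs[3] & 0x8) ? "yes" : "no");
	return (0);
}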
1/*- 2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/amd64/vmm/amd/svm.c 283657 2015-05-28 17:37:01Z neel $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/smp.h> 33#include <sys/kernel.h> 34#include <sys/malloc.h> 35#include <sys/pcpu.h> 36#include <sys/proc.h> 37#include <sys/sysctl.h> 38 39#include <vm/vm.h> 40#include <vm/pmap.h> 41 42#include <machine/cpufunc.h> 43#include <machine/psl.h> 44#include <machine/pmap.h> 45#include <machine/md_var.h> 46#include <machine/specialreg.h> 47#include <machine/smp.h> 48#include <machine/vmm.h> 49#include <machine/vmm_dev.h> 50#include <machine/vmm_instruction_emul.h> 51 52#include "vmm_lapic.h" 53#include "vmm_stat.h" 54#include "vmm_ktr.h" 55#include "vmm_ioport.h" 56#include "vatpic.h" 57#include "vlapic.h" 58#include "vlapic_priv.h" 59 60#include "x86.h" 61#include "vmcb.h" 62#include "svm.h" 63#include "svm_softc.h" 64#include "svm_msr.h" 65#include "npt.h" 66 67SYSCTL_DECL(_hw_vmm); 68SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); 69 70/* 71 * SVM CPUID function 0x8000_000A, edx bit decoding. 72 */ 73#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 74#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 75#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 76#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 77#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 78#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 79#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 80#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 81#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 82#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 83#define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 84 85#define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 86 VMCB_CACHE_IOPM | \ 87 VMCB_CACHE_I | \ 88 VMCB_CACHE_TPR | \ 89 VMCB_CACHE_CR2 | \ 90 VMCB_CACHE_CR | \ 91 VMCB_CACHE_DT | \ 92 VMCB_CACHE_SEG | \ 93 VMCB_CACHE_NP) 94 95static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 96SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 97 0, NULL); 98 99static MALLOC_DEFINE(M_SVM, "svm", "svm"); 100static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 101 102/* Per-CPU context area. */ 103extern struct pcpu __pcpu[]; 104 105static uint32_t svm_feature; /* AMD SVM features. */ 106SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0, 107 "SVM features advertised by CPUID.8000000AH:EDX"); 108 109static int disable_npf_assist; 110SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 111 &disable_npf_assist, 0, NULL); 112 113/* Maximum ASIDs supported by the processor */ 114static uint32_t nasid; 115SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0, 116 "Number of ASIDs supported by this processor"); 117 118/* Current ASID generation for each host cpu */ 119static struct asid asid[MAXCPU]; 120 121/* 122 * SVM host state saved area of size 4KB for each core. 123 */ 124static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 125 126static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 127static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 128static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 129 130static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); 131 132static __inline int 133flush_by_asid(void) 134{ 135 136 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 137} 138 139static __inline int 140decode_assist(void) 141{ 142 143 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 144} 145 146static void 147svm_disable(void *arg __unused) 148{ 149 uint64_t efer; 150 151 efer = rdmsr(MSR_EFER); 152 efer &= ~EFER_SVM; 153 wrmsr(MSR_EFER, efer); 154} 155 156/* 157 * Disable SVM on all CPUs. 158 */ 159static int 160svm_cleanup(void) 161{ 162 163 smp_rendezvous(NULL, svm_disable, NULL, NULL); 164 return (0); 165} 166 167/* 168 * Verify that all the features required by bhyve are available. 169 */ 170static int 171check_svm_features(void) 172{ 173 u_int regs[4]; 174 175 /* CPUID Fn8000_000A is for SVM */ 176 do_cpuid(0x8000000A, regs); 177 svm_feature = regs[3]; 178 179 nasid = regs[1]; 180 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 181 182 /* bhyve requires the Nested Paging feature */ 183 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 184 printf("SVM: Nested Paging feature not available.\n"); 185 return (ENXIO); 186 } 187 188 /* bhyve requires the NRIP Save feature */ 189 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 190 printf("SVM: NRIP Save feature not available.\n"); 191 return (ENXIO); 192 } 193 194 return (0); 195} 196 197static void 198svm_enable(void *arg __unused) 199{ 200 uint64_t efer; 201 202 efer = rdmsr(MSR_EFER); 203 efer |= EFER_SVM; 204 wrmsr(MSR_EFER, efer); 205 206 wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu])); 207} 208 209/* 210 * Return 1 if SVM is enabled on this processor and 0 otherwise. 211 */ 212static int 213svm_available(void) 214{ 215 uint64_t msr; 216 217 /* Section 15.4 Enabling SVM from APM2. 
*/ 218 if ((amd_feature2 & AMDID2_SVM) == 0) { 219 printf("SVM: not available.\n"); 220 return (0); 221 } 222 223 msr = rdmsr(MSR_VM_CR); 224 if ((msr & VM_CR_SVMDIS) != 0) { 225 printf("SVM: disabled by BIOS.\n"); 226 return (0); 227 } 228 229 return (1); 230} 231 232static int 233svm_init(int ipinum) 234{ 235 int error, cpu; 236 237 if (!svm_available()) 238 return (ENXIO); 239 240 error = check_svm_features(); 241 if (error) 242 return (error); 243 244 vmcb_clean &= VMCB_CACHE_DEFAULT; 245 246 for (cpu = 0; cpu < MAXCPU; cpu++) { 247 /* 248 * Initialize the host ASIDs to their "highest" valid values. 249 * 250 * The next ASID allocation will rollover both 'gen' and 'num' 251 * and start off the sequence at {1,1}. 252 */ 253 asid[cpu].gen = ~0UL; 254 asid[cpu].num = nasid - 1; 255 } 256 257 svm_msr_init(); 258 svm_npt_init(ipinum); 259 260 /* Enable SVM on all CPUs */ 261 smp_rendezvous(NULL, svm_enable, NULL, NULL); 262 263 return (0); 264} 265 266static void 267svm_restore(void) 268{ 269 270 svm_enable(NULL); 271} 272 273/* Pentium compatible MSRs */ 274#define MSR_PENTIUM_START 0 275#define MSR_PENTIUM_END 0x1FFF 276/* AMD 6th generation and Intel compatible MSRs */ 277#define MSR_AMD6TH_START 0xC0000000UL 278#define MSR_AMD6TH_END 0xC0001FFFUL 279/* AMD 7th and 8th generation compatible MSRs */ 280#define MSR_AMD7TH_START 0xC0010000UL 281#define MSR_AMD7TH_END 0xC0011FFFUL 282 283/* 284 * Get the index and bit position for a MSR in permission bitmap. 285 * Two bits are used for each MSR: lower bit for read and higher bit for write. 286 */ 287static int 288svm_msr_index(uint64_t msr, int *index, int *bit) 289{ 290 uint32_t base, off; 291 292 *index = -1; 293 *bit = (msr % 4) * 2; 294 base = 0; 295 296 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 297 *index = msr / 4; 298 return (0); 299 } 300 301 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 302 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 303 off = (msr - MSR_AMD6TH_START); 304 *index = (off + base) / 4; 305 return (0); 306 } 307 308 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 309 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 310 off = (msr - MSR_AMD7TH_START); 311 *index = (off + base) / 4; 312 return (0); 313 } 314 315 return (EINVAL); 316} 317 318/* 319 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 320 */ 321static void 322svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 323{ 324 int index, bit, error; 325 326 error = svm_msr_index(msr, &index, &bit); 327 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 328 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 329 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 330 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 331 "msr %#lx", __func__, bit, msr)); 332 333 if (read) 334 perm_bitmap[index] &= ~(1UL << bit); 335 336 if (write) 337 perm_bitmap[index] &= ~(2UL << bit); 338} 339 340static void 341svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 342{ 343 344 svm_msr_perm(perm_bitmap, msr, true, true); 345} 346 347static void 348svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 349{ 350 351 svm_msr_perm(perm_bitmap, msr, true, false); 352} 353 354static __inline int 355svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 356{ 357 struct vmcb_ctrl *ctrl; 358 359 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 360 361 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 362 return (ctrl->intercept[idx] & bitmask ? 
1 : 0); 363} 364 365static __inline void 366svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 367 int enabled) 368{ 369 struct vmcb_ctrl *ctrl; 370 uint32_t oldval; 371 372 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 373 374 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 375 oldval = ctrl->intercept[idx]; 376 377 if (enabled) 378 ctrl->intercept[idx] |= bitmask; 379 else 380 ctrl->intercept[idx] &= ~bitmask; 381 382 if (ctrl->intercept[idx] != oldval) { 383 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 384 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 385 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); 386 } 387} 388 389static __inline void 390svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 391{ 392 393 svm_set_intercept(sc, vcpu, off, bitmask, 0); 394} 395 396static __inline void 397svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 398{ 399 400 svm_set_intercept(sc, vcpu, off, bitmask, 1); 401} 402 403static void 404vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 405 uint64_t msrpm_base_pa, uint64_t np_pml4) 406{ 407 struct vmcb_ctrl *ctrl; 408 struct vmcb_state *state; 409 uint32_t mask; 410 int n; 411 412 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 413 state = svm_get_vmcb_state(sc, vcpu); 414 415 ctrl->iopm_base_pa = iopm_base_pa; 416 ctrl->msrpm_base_pa = msrpm_base_pa; 417 418 /* Enable nested paging */ 419 ctrl->np_enable = 1; 420 ctrl->n_cr3 = np_pml4; 421 422 /* 423 * Intercept accesses to the control registers that are not shadowed 424 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 425 */ 426 for (n = 0; n < 16; n++) { 427 mask = (BIT(n) << 16) | BIT(n); 428 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 429 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 430 else 431 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 432 } 433 434 435 /* 436 * Intercept everything when tracing guest exceptions otherwise 437 * just intercept machine check exception. 438 */ 439 if (vcpu_trace_exceptions(sc->vm, vcpu)) { 440 for (n = 0; n < 32; n++) { 441 /* 442 * Skip unimplemented vectors in the exception bitmap. 443 */ 444 if (n == 2 || n == 9) { 445 continue; 446 } 447 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n)); 448 } 449 } else { 450 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 451 } 452 453 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 454 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 455 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 456 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 457 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 458 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 459 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 460 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 461 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 462 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 463 VMCB_INTCPT_FERR_FREEZE); 464 465 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 466 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 467 468 /* 469 * From section "Canonicalization and Consistency Checks" in APMv2 470 * the VMRUN intercept bit must be set to pass the consistency check. 
471 */ 472 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 473 474 /* 475 * The ASID will be set to a non-zero value just before VMRUN. 476 */ 477 ctrl->asid = 0; 478 479 /* 480 * Section 15.21.1, Interrupt Masking in EFLAGS 481 * Section 15.21.2, Virtualizing APIC.TPR 482 * 483 * This must be set for %rflag and %cr8 isolation of guest and host. 484 */ 485 ctrl->v_intr_masking = 1; 486 487 /* Enable Last Branch Record aka LBR for debugging */ 488 ctrl->lbr_virt_en = 1; 489 state->dbgctl = BIT(0); 490 491 /* EFER_SVM must always be set when the guest is executing */ 492 state->efer = EFER_SVM; 493 494 /* Set up the PAT to power-on state */ 495 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 496 PAT_VALUE(1, PAT_WRITE_THROUGH) | 497 PAT_VALUE(2, PAT_UNCACHED) | 498 PAT_VALUE(3, PAT_UNCACHEABLE) | 499 PAT_VALUE(4, PAT_WRITE_BACK) | 500 PAT_VALUE(5, PAT_WRITE_THROUGH) | 501 PAT_VALUE(6, PAT_UNCACHED) | 502 PAT_VALUE(7, PAT_UNCACHEABLE); 503} 504 505/* 506 * Initialize a virtual machine. 507 */ 508static void * 509svm_vminit(struct vm *vm, pmap_t pmap) 510{ 511 struct svm_softc *svm_sc; 512 struct svm_vcpu *vcpu; 513 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 514 int i; 515 516 svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO); 517 svm_sc->vm = vm; 518 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 519 520 /* 521 * Intercept read and write accesses to all MSRs. 522 */ 523 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 524 525 /* 526 * Access to the following MSRs is redirected to the VMCB when the 527 * guest is executing. Therefore it is safe to allow the guest to 528 * read/write these MSRs directly without hypervisor involvement. 529 */ 530 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 531 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 532 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 533 534 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 535 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 536 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 537 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 538 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 539 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 540 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 541 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 542 543 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 544 545 /* 546 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 547 */ 548 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 549 550 /* Intercept access to all I/O ports. */ 551 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); 552 553 iopm_pa = vtophys(svm_sc->iopm_bitmap); 554 msrpm_pa = vtophys(svm_sc->msr_bitmap); 555 pml4_pa = svm_sc->nptp; 556 for (i = 0; i < VM_MAXCPU; i++) { 557 vcpu = svm_get_vcpu(svm_sc, i); 558 vcpu->nextrip = ~0; 559 vcpu->lastcpu = NOCPU; 560 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 561 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 562 svm_msr_guest_init(svm_sc, i); 563 } 564 return (svm_sc); 565} 566 567/* 568 * Collateral for a generic SVM VM-exit. 
569 */ 570static void 571vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 572{ 573 574 vme->exitcode = VM_EXITCODE_SVM; 575 vme->u.svm.exitcode = code; 576 vme->u.svm.exitinfo1 = info1; 577 vme->u.svm.exitinfo2 = info2; 578} 579 580static int 581svm_cpl(struct vmcb_state *state) 582{ 583 584 /* 585 * From APMv2: 586 * "Retrieve the CPL from the CPL field in the VMCB, not 587 * from any segment DPL" 588 */ 589 return (state->cpl); 590} 591 592static enum vm_cpu_mode 593svm_vcpu_mode(struct vmcb *vmcb) 594{ 595 struct vmcb_segment seg; 596 struct vmcb_state *state; 597 int error; 598 599 state = &vmcb->state; 600 601 if (state->efer & EFER_LMA) { 602 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 603 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 604 error)); 605 606 /* 607 * Section 4.8.1 for APM2, check if Code Segment has 608 * Long attribute set in descriptor. 609 */ 610 if (seg.attrib & VMCB_CS_ATTRIB_L) 611 return (CPU_MODE_64BIT); 612 else 613 return (CPU_MODE_COMPATIBILITY); 614 } else if (state->cr0 & CR0_PE) { 615 return (CPU_MODE_PROTECTED); 616 } else { 617 return (CPU_MODE_REAL); 618 } 619} 620 621static enum vm_paging_mode 622svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 623{ 624 625 if ((cr0 & CR0_PG) == 0) 626 return (PAGING_MODE_FLAT); 627 if ((cr4 & CR4_PAE) == 0) 628 return (PAGING_MODE_32); 629 if (efer & EFER_LME) 630 return (PAGING_MODE_64); 631 else 632 return (PAGING_MODE_PAE); 633} 634 635/* 636 * ins/outs utility routines 637 */ 638static uint64_t 639svm_inout_str_index(struct svm_regctx *regs, int in) 640{ 641 uint64_t val; 642 643 val = in ? regs->sctx_rdi : regs->sctx_rsi; 644 645 return (val); 646} 647 648static uint64_t 649svm_inout_str_count(struct svm_regctx *regs, int rep) 650{ 651 uint64_t val; 652 653 val = rep ? regs->sctx_rcx : 1; 654 655 return (val); 656} 657 658static void 659svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 660 int in, struct vm_inout_str *vis) 661{ 662 int error, s; 663 664 if (in) { 665 vis->seg_name = VM_REG_GUEST_ES; 666 } else { 667 /* The segment field has standard encoding */ 668 s = (info1 >> 10) & 0x7; 669 vis->seg_name = vm_segment_name(s); 670 } 671 672 error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 673 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 674} 675 676static int 677svm_inout_str_addrsize(uint64_t info1) 678{ 679 uint32_t size; 680 681 size = (info1 >> 7) & 0x7; 682 switch (size) { 683 case 1: 684 return (2); /* 16 bit */ 685 case 2: 686 return (4); /* 32 bit */ 687 case 4: 688 return (8); /* 64 bit */ 689 default: 690 panic("%s: invalid size encoding %d", __func__, size); 691 } 692} 693 694static void 695svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 696{ 697 struct vmcb_state *state; 698 699 state = &vmcb->state; 700 paging->cr3 = state->cr3; 701 paging->cpl = svm_cpl(state); 702 paging->cpu_mode = svm_vcpu_mode(vmcb); 703 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 704 state->efer); 705} 706 707#define UNHANDLED 0 708 709/* 710 * Handle guest I/O intercept. 
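 *
 * Illustrative decode, mirroring the field extraction done in the function
 * body below (the concrete value is made up): for EXITINFO1 = 0x03d40011,
 * bit 0 = 1 selects IN, bit 2 = 0 means not a string (ins/outs) variant,
 * bit 3 = 0 means no REP prefix, bits 6:4 = 1 give a one-byte access and
 * bits 31:16 = 0x03d4 give the port, i.e. an intercepted "inb" from port
 * 0x3d4.  The address-size (bits 9:7) and segment (bits 12:10) fields are
 * only consulted for the string variants.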
711 */ 712static int 713svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 714{ 715 struct vmcb_ctrl *ctrl; 716 struct vmcb_state *state; 717 struct svm_regctx *regs; 718 struct vm_inout_str *vis; 719 uint64_t info1; 720 int inout_string; 721 722 state = svm_get_vmcb_state(svm_sc, vcpu); 723 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 724 regs = svm_get_guest_regctx(svm_sc, vcpu); 725 726 info1 = ctrl->exitinfo1; 727 inout_string = info1 & BIT(2) ? 1 : 0; 728 729 /* 730 * The effective segment number in EXITINFO1[12:10] is populated 731 * only if the processor has the DecodeAssist capability. 732 * 733 * XXX this is not specified explicitly in APMv2 but can be verified 734 * empirically. 735 */ 736 if (inout_string && !decode_assist()) 737 return (UNHANDLED); 738 739 vmexit->exitcode = VM_EXITCODE_INOUT; 740 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 741 vmexit->u.inout.string = inout_string; 742 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 743 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 744 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 745 vmexit->u.inout.eax = (uint32_t)(state->rax); 746 747 if (inout_string) { 748 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 749 vis = &vmexit->u.inout_str; 750 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 751 vis->rflags = state->rflags; 752 vis->cr0 = state->cr0; 753 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 754 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 755 vis->addrsize = svm_inout_str_addrsize(info1); 756 svm_inout_str_seginfo(svm_sc, vcpu, info1, 757 vmexit->u.inout.in, vis); 758 } 759 760 return (UNHANDLED); 761} 762 763static int 764npf_fault_type(uint64_t exitinfo1) 765{ 766 767 if (exitinfo1 & VMCB_NPF_INFO1_W) 768 return (VM_PROT_WRITE); 769 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 770 return (VM_PROT_EXECUTE); 771 else 772 return (VM_PROT_READ); 773} 774 775static bool 776svm_npf_emul_fault(uint64_t exitinfo1) 777{ 778 779 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 780 return (false); 781 } 782 783 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 784 return (false); 785 } 786 787 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 788 return (false); 789 } 790 791 return (true); 792} 793 794static void 795svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 796{ 797 struct vm_guest_paging *paging; 798 struct vmcb_segment seg; 799 struct vmcb_ctrl *ctrl; 800 char *inst_bytes; 801 int error, inst_len; 802 803 ctrl = &vmcb->ctrl; 804 paging = &vmexit->u.inst_emul.paging; 805 806 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 807 vmexit->u.inst_emul.gpa = gpa; 808 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 809 svm_paging_info(vmcb, paging); 810 811 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 812 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 813 814 switch(paging->cpu_mode) { 815 case CPU_MODE_REAL: 816 vmexit->u.inst_emul.cs_base = seg.base; 817 vmexit->u.inst_emul.cs_d = 0; 818 break; 819 case CPU_MODE_PROTECTED: 820 case CPU_MODE_COMPATIBILITY: 821 vmexit->u.inst_emul.cs_base = seg.base; 822 823 /* 824 * Section 4.8.1 of APM2, Default Operand Size or D bit. 825 */ 826 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 827 1 : 0; 828 break; 829 default: 830 vmexit->u.inst_emul.cs_base = 0; 831 vmexit->u.inst_emul.cs_d = 0; 832 break; 833 } 834 835 /* 836 * Copy the instruction bytes into 'vie' if available. 
837 */ 838 if (decode_assist() && !disable_npf_assist) { 839 inst_len = ctrl->inst_len; 840 inst_bytes = ctrl->inst_bytes; 841 } else { 842 inst_len = 0; 843 inst_bytes = NULL; 844 } 845 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 846} 847 848#ifdef KTR 849static const char * 850intrtype_to_str(int intr_type) 851{ 852 switch (intr_type) { 853 case VMCB_EVENTINJ_TYPE_INTR: 854 return ("hwintr"); 855 case VMCB_EVENTINJ_TYPE_NMI: 856 return ("nmi"); 857 case VMCB_EVENTINJ_TYPE_INTn: 858 return ("swintr"); 859 case VMCB_EVENTINJ_TYPE_EXCEPTION: 860 return ("exception"); 861 default: 862 panic("%s: unknown intr_type %d", __func__, intr_type); 863 } 864} 865#endif 866 867/* 868 * Inject an event to vcpu as described in section 15.20, "Event injection". 869 */ 870static void 871svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 872 uint32_t error, bool ec_valid) 873{ 874 struct vmcb_ctrl *ctrl; 875 876 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 877 878 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 879 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 880 881 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 882 __func__, vector)); 883 884 switch (intr_type) { 885 case VMCB_EVENTINJ_TYPE_INTR: 886 case VMCB_EVENTINJ_TYPE_NMI: 887 case VMCB_EVENTINJ_TYPE_INTn: 888 break; 889 case VMCB_EVENTINJ_TYPE_EXCEPTION: 890 if (vector >= 0 && vector <= 31 && vector != 2) 891 break; 892 /* FALLTHROUGH */ 893 default: 894 panic("%s: invalid intr_type/vector: %d/%d", __func__, 895 intr_type, vector); 896 } 897 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 898 if (ec_valid) { 899 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 900 ctrl->eventinj |= (uint64_t)error << 32; 901 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 902 intrtype_to_str(intr_type), vector, error); 903 } else { 904 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 905 intrtype_to_str(intr_type), vector); 906 } 907} 908 909static void 910svm_update_virqinfo(struct svm_softc *sc, int vcpu) 911{ 912 struct vm *vm; 913 struct vlapic *vlapic; 914 struct vmcb_ctrl *ctrl; 915 int pending; 916 917 vm = sc->vm; 918 vlapic = vm_lapic(vm, vcpu); 919 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 920 921 /* Update %cr8 in the emulated vlapic */ 922 vlapic_set_cr8(vlapic, ctrl->v_tpr); 923 924 /* 925 * If V_IRQ indicates that the interrupt injection attempted on then 926 * last VMRUN was successful then update the vlapic accordingly. 927 */ 928 if (ctrl->v_intr_vector != 0) { 929 pending = ctrl->v_irq; 930 KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid " 931 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 932 KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 933 VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector, 934 pending ? "pending" : "accepted"); 935 if (!pending) 936 vlapic_intr_accepted(vlapic, ctrl->v_intr_vector); 937 } 938} 939 940static void 941svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 942{ 943 struct vmcb_ctrl *ctrl; 944 uint64_t intinfo; 945 946 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 947 intinfo = ctrl->exitintinfo; 948 if (!VMCB_EXITINTINFO_VALID(intinfo)) 949 return; 950 951 /* 952 * From APMv2, Section "Intercepts during IDT interrupt delivery" 953 * 954 * If a #VMEXIT happened during event delivery then record the event 955 * that was being delivered. 
956 */ 957 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 958 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 959 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 960 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 961} 962 963static __inline int 964vintr_intercept_enabled(struct svm_softc *sc, int vcpu) 965{ 966 967 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 968 VMCB_INTCPT_VINTR)); 969} 970 971static __inline void 972enable_intr_window_exiting(struct svm_softc *sc, int vcpu) 973{ 974 struct vmcb_ctrl *ctrl; 975 976 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 977 978 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 979 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 980 KASSERT(vintr_intercept_enabled(sc, vcpu), 981 ("%s: vintr intercept should be enabled", __func__)); 982 return; 983 } 984 985 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); 986 ctrl->v_irq = 1; 987 ctrl->v_ign_tpr = 1; 988 ctrl->v_intr_vector = 0; 989 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 990 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 991} 992 993static __inline void 994disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 995{ 996 struct vmcb_ctrl *ctrl; 997 998 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 999 1000 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1001 KASSERT(!vintr_intercept_enabled(sc, vcpu), 1002 ("%s: vintr intercept should be disabled", __func__)); 1003 return; 1004 } 1005 1006#ifdef KTR 1007 if (ctrl->v_intr_vector == 0) 1008 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 1009 else 1010 VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection"); 1011#endif 1012 ctrl->v_irq = 0; 1013 ctrl->v_intr_vector = 0; 1014 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1015 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1016} 1017 1018static int 1019svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val) 1020{ 1021 struct vmcb_ctrl *ctrl; 1022 int oldval, newval; 1023 1024 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1025 oldval = ctrl->intr_shadow; 1026 newval = val ? 1 : 0; 1027 if (newval != oldval) { 1028 ctrl->intr_shadow = newval; 1029 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); 1030 } 1031 return (0); 1032} 1033 1034static int 1035svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val) 1036{ 1037 struct vmcb_ctrl *ctrl; 1038 1039 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1040 *val = ctrl->intr_shadow; 1041 return (0); 1042} 1043 1044/* 1045 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1046 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1047 * to track when the vcpu is done handling the NMI. 
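 *
 * Illustrative sequence, summarizing the helpers that follow rather than
 * adding behavior:
 *
 *	1. svm_inj_interrupts() injects the NMI and calls
 *	   enable_nmi_blocking(), so the IRET intercept is set and
 *	   nmi_blocked() returns true.
 *	2. The guest NMI handler finishes with "iret", causing #VMEXIT(IRET).
 *	3. svm_vmexit() calls clear_nmi_blocking(), which drops the intercept
 *	   and sets the interrupt shadow so that a new NMI is not injected
 *	   before the guest has actually re-executed the "iret".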
1048 */ 1049static int 1050nmi_blocked(struct svm_softc *sc, int vcpu) 1051{ 1052 int blocked; 1053 1054 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1055 VMCB_INTCPT_IRET); 1056 return (blocked); 1057} 1058 1059static void 1060enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1061{ 1062 1063 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1064 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1065 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1066} 1067 1068static void 1069clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1070{ 1071 int error; 1072 1073 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1074 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1075 /* 1076 * When the IRET intercept is cleared the vcpu will attempt to execute 1077 * the "iret" when it runs next. However, it is possible to inject 1078 * another NMI into the vcpu before the "iret" has actually executed. 1079 * 1080 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1081 * it will trap back into the hypervisor. If an NMI is pending for 1082 * the vcpu it will be injected into the guest. 1083 * 1084 * XXX this needs to be fixed 1085 */ 1086 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1087 1088 /* 1089 * Set 'intr_shadow' to prevent an NMI from being injected on the 1090 * immediate VMRUN. 1091 */ 1092 error = svm_modify_intr_shadow(sc, vcpu, 1); 1093 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1094} 1095 1096#define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1097 1098static int 1099svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu) 1100{ 1101 struct vm_exit *vme; 1102 struct vmcb_state *state; 1103 uint64_t changed, lma, oldval; 1104 int error; 1105 1106 state = svm_get_vmcb_state(sc, vcpu); 1107 1108 oldval = state->efer; 1109 VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1110 1111 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1112 changed = oldval ^ newval; 1113 1114 if (newval & EFER_MBZ_BITS) 1115 goto gpf; 1116 1117 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1118 if (changed & EFER_LME) { 1119 if (state->cr0 & CR0_PG) 1120 goto gpf; 1121 } 1122 1123 /* EFER.LMA = EFER.LME & CR0.PG */ 1124 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1125 lma = EFER_LMA; 1126 else 1127 lma = 0; 1128 1129 if ((newval & EFER_LMA) != lma) 1130 goto gpf; 1131 1132 if (newval & EFER_NXE) { 1133 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) 1134 goto gpf; 1135 } 1136 1137 /* 1138 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1139 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 
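 *
 * (Worked example for the consistency checks above, with made-up values:
 * take a guest running with CR0.PG = 1 and EFER = 0x500, i.e. LMA | LME.
 * A wrmsr of 0x901 (NXE | LME | SCE) is rejected with #GP because it
 * clears LMA while LME and CR0.PG require it to be set, whereas a wrmsr
 * of 0xd01 (NXE | LMA | LME | SCE) passes, provided the guest's CPUID
 * exposes the NX capability.)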
1140 */ 1141 if (newval & EFER_LMSLE) { 1142 vme = vm_exitinfo(sc->vm, vcpu); 1143 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1144 *retu = true; 1145 return (0); 1146 } 1147 1148 if (newval & EFER_FFXSR) { 1149 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) 1150 goto gpf; 1151 } 1152 1153 if (newval & EFER_TCE) { 1154 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) 1155 goto gpf; 1156 } 1157 1158 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval); 1159 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1160 return (0); 1161gpf: 1162 vm_inject_gp(sc->vm, vcpu); 1163 return (0); 1164} 1165 1166static int 1167emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1168 bool *retu) 1169{ 1170 int error; 1171 1172 if (lapic_msr(num)) 1173 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1174 else if (num == MSR_EFER) 1175 error = svm_write_efer(sc, vcpu, val, retu); 1176 else 1177 error = svm_wrmsr(sc, vcpu, num, val, retu); 1178 1179 return (error); 1180} 1181 1182static int 1183emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1184{ 1185 struct vmcb_state *state; 1186 struct svm_regctx *ctx; 1187 uint64_t result; 1188 int error; 1189 1190 if (lapic_msr(num)) 1191 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1192 else 1193 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1194 1195 if (error == 0) { 1196 state = svm_get_vmcb_state(sc, vcpu); 1197 ctx = svm_get_guest_regctx(sc, vcpu); 1198 state->rax = result & 0xffffffff; 1199 ctx->sctx_rdx = result >> 32; 1200 } 1201 1202 return (error); 1203} 1204 1205#ifdef KTR 1206static const char * 1207exit_reason_to_str(uint64_t reason) 1208{ 1209 static char reasonbuf[32]; 1210 1211 switch (reason) { 1212 case VMCB_EXIT_INVALID: 1213 return ("invalvmcb"); 1214 case VMCB_EXIT_SHUTDOWN: 1215 return ("shutdown"); 1216 case VMCB_EXIT_NPF: 1217 return ("nptfault"); 1218 case VMCB_EXIT_PAUSE: 1219 return ("pause"); 1220 case VMCB_EXIT_HLT: 1221 return ("hlt"); 1222 case VMCB_EXIT_CPUID: 1223 return ("cpuid"); 1224 case VMCB_EXIT_IO: 1225 return ("inout"); 1226 case VMCB_EXIT_MC: 1227 return ("mchk"); 1228 case VMCB_EXIT_INTR: 1229 return ("extintr"); 1230 case VMCB_EXIT_NMI: 1231 return ("nmi"); 1232 case VMCB_EXIT_VINTR: 1233 return ("vintr"); 1234 case VMCB_EXIT_MSR: 1235 return ("msr"); 1236 case VMCB_EXIT_IRET: 1237 return ("iret"); 1238 case VMCB_EXIT_MONITOR: 1239 return ("monitor"); 1240 case VMCB_EXIT_MWAIT: 1241 return ("mwait"); 1242 default: 1243 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1244 return (reasonbuf); 1245 } 1246} 1247#endif /* KTR */ 1248 1249/* 1250 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1251 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1252 * and exceptions caused by INT3, INTO and BOUND instructions. 1253 * 1254 * Return 1 if the nRIP is valid and 0 otherwise. 1255 */ 1256static int 1257nrip_valid(uint64_t exitcode) 1258{ 1259 switch (exitcode) { 1260 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1261 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1262 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1263 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1264 case 0x43: /* INT3 */ 1265 case 0x44: /* INTO */ 1266 case 0x45: /* BOUND */ 1267 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1268 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... 
VMEXIT_XSETBV */ 1269 return (1); 1270 default: 1271 return (0); 1272 } 1273} 1274 1275static int 1276svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1277{ 1278 struct vmcb *vmcb; 1279 struct vmcb_state *state; 1280 struct vmcb_ctrl *ctrl; 1281 struct svm_regctx *ctx; 1282 uint64_t code, info1, info2, val; 1283 uint32_t eax, ecx, edx; 1284 int error, errcode_valid, handled, idtvec, reflect; 1285 bool retu; 1286 1287 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1288 vmcb = svm_get_vmcb(svm_sc, vcpu); 1289 state = &vmcb->state; 1290 ctrl = &vmcb->ctrl; 1291 1292 handled = 0; 1293 code = ctrl->exitcode; 1294 info1 = ctrl->exitinfo1; 1295 info2 = ctrl->exitinfo2; 1296 1297 vmexit->exitcode = VM_EXITCODE_BOGUS; 1298 vmexit->rip = state->rip; 1299 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1300 1301 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1302 1303 /* 1304 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1305 * in an inconsistent state and can trigger assertions that would 1306 * never happen otherwise. 1307 */ 1308 if (code == VMCB_EXIT_INVALID) { 1309 vm_exit_svm(vmexit, code, info1, info2); 1310 return (0); 1311 } 1312 1313 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1314 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1315 1316 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1317 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1318 vmexit->inst_length, code, info1, info2)); 1319 1320 svm_update_virqinfo(svm_sc, vcpu); 1321 svm_save_intinfo(svm_sc, vcpu); 1322 1323 switch (code) { 1324 case VMCB_EXIT_IRET: 1325 /* 1326 * Restart execution at "iret" but with the intercept cleared. 1327 */ 1328 vmexit->inst_length = 0; 1329 clear_nmi_blocking(svm_sc, vcpu); 1330 handled = 1; 1331 break; 1332 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1333 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1334 handled = 1; 1335 break; 1336 case VMCB_EXIT_INTR: /* external interrupt */ 1337 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1338 handled = 1; 1339 break; 1340 case VMCB_EXIT_NMI: /* external NMI */ 1341 handled = 1; 1342 break; 1343 case 0x40 ... 0x5F: 1344 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1345 reflect = 1; 1346 idtvec = code - 0x40; 1347 switch (idtvec) { 1348 case IDT_MC: 1349 /* 1350 * Call the machine check handler by hand. Also don't 1351 * reflect the machine check back into the guest. 1352 */ 1353 reflect = 0; 1354 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); 1355 __asm __volatile("int $18"); 1356 break; 1357 case IDT_PF: 1358 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2, 1359 info2); 1360 KASSERT(error == 0, ("%s: error %d updating cr2", 1361 __func__, error)); 1362 /* fallthru */ 1363 case IDT_NP: 1364 case IDT_SS: 1365 case IDT_GP: 1366 case IDT_AC: 1367 case IDT_TS: 1368 errcode_valid = 1; 1369 break; 1370 1371 case IDT_DF: 1372 errcode_valid = 1; 1373 info1 = 0; 1374 break; 1375 1376 case IDT_BP: 1377 case IDT_OF: 1378 case IDT_BR: 1379 /* 1380 * The 'nrip' field is populated for INT3, INTO and 1381 * BOUND exceptions and this also implies that 1382 * 'inst_length' is non-zero. 1383 * 1384 * Reset 'inst_length' to zero so the guest %rip at 1385 * event injection is identical to what it was when 1386 * the exception originally happened. 
1387 */ 1388 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d " 1389 "to zero before injecting exception %d", 1390 vmexit->inst_length, idtvec); 1391 vmexit->inst_length = 0; 1392 /* fallthru */ 1393 default: 1394 errcode_valid = 0; 1395 info1 = 0; 1396 break; 1397 } 1398 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1399 "when reflecting exception %d into guest", 1400 vmexit->inst_length, idtvec)); 1401 1402 if (reflect) { 1403 /* Reflect the exception back into the guest */ 1404 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception " 1405 "%d/%#x into the guest", idtvec, (int)info1); 1406 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec, 1407 errcode_valid, info1, 0); 1408 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1409 __func__, error)); 1410 } 1411 handled = 1; 1412 break; 1413 case VMCB_EXIT_MSR: /* MSR access. */ 1414 eax = state->rax; 1415 ecx = ctx->sctx_rcx; 1416 edx = ctx->sctx_rdx; 1417 retu = false; 1418 1419 if (info1) { 1420 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1421 val = (uint64_t)edx << 32 | eax; 1422 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1423 ecx, val); 1424 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1425 vmexit->exitcode = VM_EXITCODE_WRMSR; 1426 vmexit->u.msr.code = ecx; 1427 vmexit->u.msr.wval = val; 1428 } else if (!retu) { 1429 handled = 1; 1430 } else { 1431 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1432 ("emulate_wrmsr retu with bogus exitcode")); 1433 } 1434 } else { 1435 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1436 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1437 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1438 vmexit->exitcode = VM_EXITCODE_RDMSR; 1439 vmexit->u.msr.code = ecx; 1440 } else if (!retu) { 1441 handled = 1; 1442 } else { 1443 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1444 ("emulate_rdmsr retu with bogus exitcode")); 1445 } 1446 } 1447 break; 1448 case VMCB_EXIT_IO: 1449 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1450 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1451 break; 1452 case VMCB_EXIT_CPUID: 1453 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1454 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1455 (uint32_t *)&state->rax, 1456 (uint32_t *)&ctx->sctx_rbx, 1457 (uint32_t *)&ctx->sctx_rcx, 1458 (uint32_t *)&ctx->sctx_rdx); 1459 break; 1460 case VMCB_EXIT_HLT: 1461 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1462 vmexit->exitcode = VM_EXITCODE_HLT; 1463 vmexit->u.hlt.rflags = state->rflags; 1464 break; 1465 case VMCB_EXIT_PAUSE: 1466 vmexit->exitcode = VM_EXITCODE_PAUSE; 1467 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1468 break; 1469 case VMCB_EXIT_NPF: 1470 /* EXITINFO2 contains the faulting guest physical address */ 1471 if (info1 & VMCB_NPF_INFO1_RSV) { 1472 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1473 "reserved bits set: info1(%#lx) info2(%#lx)", 1474 info1, info2); 1475 } else if (vm_mem_allocated(svm_sc->vm, info2)) { 1476 vmexit->exitcode = VM_EXITCODE_PAGING; 1477 vmexit->u.paging.gpa = info2; 1478 vmexit->u.paging.fault_type = npf_fault_type(info1); 1479 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1480 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1481 "on gpa %#lx/%#lx at rip %#lx", 1482 info2, info1, state->rip); 1483 } else if (svm_npf_emul_fault(info1)) { 1484 svm_handle_inst_emul(vmcb, info2, vmexit); 1485 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1486 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1487 "for gpa %#lx/%#lx at rip %#lx", 1488 info2, info1, state->rip); 1489 
} 1490 break; 1491 case VMCB_EXIT_MONITOR: 1492 vmexit->exitcode = VM_EXITCODE_MONITOR; 1493 break; 1494 case VMCB_EXIT_MWAIT: 1495 vmexit->exitcode = VM_EXITCODE_MWAIT; 1496 break; 1497 default: 1498 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1499 break; 1500 } 1501 1502 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1503 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1504 vmexit->rip, vmexit->inst_length); 1505 1506 if (handled) { 1507 vmexit->rip += vmexit->inst_length; 1508 vmexit->inst_length = 0; 1509 state->rip = vmexit->rip; 1510 } else { 1511 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1512 /* 1513 * If this VM exit was not claimed by anybody then 1514 * treat it as a generic SVM exit. 1515 */ 1516 vm_exit_svm(vmexit, code, info1, info2); 1517 } else { 1518 /* 1519 * The exitcode and collateral have been populated. 1520 * The VM exit will be processed further in userland. 1521 */ 1522 } 1523 } 1524 return (handled); 1525} 1526 1527static void 1528svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1529{ 1530 uint64_t intinfo; 1531 1532 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1533 return; 1534 1535 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1536 "valid: %#lx", __func__, intinfo)); 1537 1538 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1539 VMCB_EXITINTINFO_VECTOR(intinfo), 1540 VMCB_EXITINTINFO_EC(intinfo), 1541 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1542 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1543 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1544} 1545 1546/* 1547 * Inject event to virtual cpu. 1548 */ 1549static void 1550svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1551{ 1552 struct vmcb_ctrl *ctrl; 1553 struct vmcb_state *state; 1554 struct svm_vcpu *vcpustate; 1555 uint8_t v_tpr; 1556 int vector, need_intr_window, pending_apic_vector; 1557 1558 state = svm_get_vmcb_state(sc, vcpu); 1559 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1560 vcpustate = svm_get_vcpu(sc, vcpu); 1561 1562 need_intr_window = 0; 1563 pending_apic_vector = 0; 1564 1565 if (vcpustate->nextrip != state->rip) { 1566 ctrl->intr_shadow = 0; 1567 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking " 1568 "cleared due to rip change: %#lx/%#lx", 1569 vcpustate->nextrip, state->rip); 1570 } 1571 1572 /* 1573 * Inject pending events or exceptions for this vcpu. 1574 * 1575 * An event might be pending because the previous #VMEXIT happened 1576 * during event delivery (i.e. ctrl->exitintinfo). 1577 * 1578 * An event might also be pending because an exception was injected 1579 * by the hypervisor (e.g. #PF during instruction emulation). 1580 */ 1581 svm_inj_intinfo(sc, vcpu); 1582 1583 /* NMI event has priority over interrupts. */ 1584 if (vm_nmi_pending(sc->vm, vcpu)) { 1585 if (nmi_blocked(sc, vcpu)) { 1586 /* 1587 * Can't inject another NMI if the guest has not 1588 * yet executed an "iret" after the last NMI. 1589 */ 1590 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1591 "to NMI-blocking"); 1592 } else if (ctrl->intr_shadow) { 1593 /* 1594 * Can't inject an NMI if the vcpu is in an intr_shadow. 1595 */ 1596 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1597 "interrupt shadow"); 1598 need_intr_window = 1; 1599 goto done; 1600 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1601 /* 1602 * If there is already an exception/interrupt pending 1603 * then defer the NMI until after that. 
1604 */ 1605 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1606 "eventinj %#lx", ctrl->eventinj); 1607 1608 /* 1609 * Use self-IPI to trigger a VM-exit as soon as 1610 * possible after the event injection is completed. 1611 * 1612 * This works only if the external interrupt exiting 1613 * is at a lower priority than the event injection. 1614 * 1615 * Although not explicitly specified in APMv2 the 1616 * relative priorities were verified empirically. 1617 */ 1618 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1619 } else { 1620 vm_nmi_clear(sc->vm, vcpu); 1621 1622 /* Inject NMI, vector number is not used */ 1623 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1624 IDT_NMI, 0, false); 1625 1626 /* virtual NMI blocking is now in effect */ 1627 enable_nmi_blocking(sc, vcpu); 1628 1629 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1630 } 1631 } 1632 1633 if (!vm_extint_pending(sc->vm, vcpu)) { 1634 /* 1635 * APIC interrupts are delivered using the V_IRQ offload. 1636 * 1637 * The primary benefit is that the hypervisor doesn't need to 1638 * deal with the various conditions that inhibit interrupts. 1639 * It also means that TPR changes via CR8 will be handled 1640 * without any hypervisor involvement. 1641 * 1642 * Note that the APIC vector must remain pending in the vIRR 1643 * until it is confirmed that it was delivered to the guest. 1644 * This can be confirmed based on the value of V_IRQ at the 1645 * next #VMEXIT (1 = pending, 0 = delivered). 1646 * 1647 * Also note that it is possible that another higher priority 1648 * vector can become pending before this vector is delivered 1649 * to the guest. This is alright because vcpu_notify_event() 1650 * will send an IPI and force the vcpu to trap back into the 1651 * hypervisor. The higher priority vector will be injected on 1652 * the next VMRUN. 1653 */ 1654 if (vlapic_pending_intr(vlapic, &vector)) { 1655 KASSERT(vector >= 16 && vector <= 255, 1656 ("invalid vector %d from local APIC", vector)); 1657 pending_apic_vector = vector; 1658 } 1659 goto done; 1660 } 1661 1662 /* Ask the legacy pic for a vector to inject */ 1663 vatpic_pending_intr(sc->vm, &vector); 1664 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", 1665 vector)); 1666 1667 /* 1668 * If the guest has disabled interrupts or is in an interrupt shadow 1669 * then we cannot inject the pending interrupt. 1670 */ 1671 if ((state->rflags & PSL_I) == 0) { 1672 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1673 "rflags %#lx", vector, state->rflags); 1674 need_intr_window = 1; 1675 goto done; 1676 } 1677 1678 if (ctrl->intr_shadow) { 1679 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1680 "interrupt shadow", vector); 1681 need_intr_window = 1; 1682 goto done; 1683 } 1684 1685 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1686 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1687 "eventinj %#lx", vector, ctrl->eventinj); 1688 need_intr_window = 1; 1689 goto done; 1690 } 1691 1692 /* 1693 * Legacy PIC interrupts are delivered via the event injection 1694 * mechanism. 1695 */ 1696 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1697 1698 vm_extint_clear(sc->vm, vcpu); 1699 vatpic_intr_accepted(sc->vm, vector); 1700 1701 /* 1702 * Force a VM-exit as soon as the vcpu is ready to accept another 1703 * interrupt. This is done because the PIC might have another vector 1704 * that it wants to inject. 
Also, if the APIC has a pending interrupt 1705 * that was preempted by the ExtInt then it allows us to inject the 1706 * APIC vector as soon as possible. 1707 */ 1708 need_intr_window = 1; 1709done: 1710 /* 1711 * The guest can modify the TPR by writing to %CR8. In guest mode 1712 * the processor reflects this write to V_TPR without hypervisor 1713 * intervention. 1714 * 1715 * The guest can also modify the TPR by writing to it via the memory 1716 * mapped APIC page. In this case, the write will be emulated by the 1717 * hypervisor. For this reason V_TPR must be updated before every 1718 * VMRUN. 1719 */ 1720 v_tpr = vlapic_get_cr8(vlapic); 1721 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1722 if (ctrl->v_tpr != v_tpr) { 1723 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1724 ctrl->v_tpr, v_tpr); 1725 ctrl->v_tpr = v_tpr; 1726 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1727 } 1728 1729 if (pending_apic_vector) { 1730 /* 1731 * If an APIC vector is being injected then interrupt window 1732 * exiting is not possible on this VMRUN. 1733 */ 1734 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1735 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1736 pending_apic_vector); 1737 1738 ctrl->v_irq = 1; 1739 ctrl->v_ign_tpr = 0; 1740 ctrl->v_intr_vector = pending_apic_vector; 1741 ctrl->v_intr_prio = pending_apic_vector >> 4; 1742 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1743 } else if (need_intr_window) { 1744 /* 1745 * We use V_IRQ in conjunction with the VINTR intercept to 1746 * trap into the hypervisor as soon as a virtual interrupt 1747 * can be delivered. 1748 * 1749 * Since injected events are not subject to intercept checks 1750 * we need to ensure that the V_IRQ is not actually going to 1751 * be delivered on VM entry. The KASSERT below enforces this. 1752 */ 1753 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1754 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1755 ("Bogus intr_window_exiting: eventinj (%#lx), " 1756 "intr_shadow (%u), rflags (%#lx)", 1757 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1758 enable_intr_window_exiting(sc, vcpu); 1759 } else { 1760 disable_intr_window_exiting(sc, vcpu); 1761 } 1762} 1763 1764static __inline void 1765restore_host_tss(void) 1766{ 1767 struct system_segment_descriptor *tss_sd; 1768 1769 /* 1770 * The TSS descriptor was in use prior to launching the guest so it 1771 * has been marked busy. 1772 * 1773 * 'ltr' requires the descriptor to be marked available so change the 1774 * type to "64-bit available TSS". 1775 */ 1776 tss_sd = PCPU_GET(tss); 1777 tss_sd->sd_type = SDT_SYSTSS; 1778 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1779} 1780 1781static void 1782check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1783{ 1784 struct svm_vcpu *vcpustate; 1785 struct vmcb_ctrl *ctrl; 1786 long eptgen; 1787 bool alloc_asid; 1788 1789 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1790 "active on cpu %u", __func__, thiscpu)); 1791 1792 vcpustate = svm_get_vcpu(sc, vcpuid); 1793 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1794 1795 /* 1796 * The TLB entries associated with the vcpu's ASID are not valid 1797 * if either of the following conditions is true: 1798 * 1799 * 1. The vcpu's ASID generation is different than the host cpu's 1800 * ASID generation. This happens when the vcpu migrates to a new 1801 * host cpu. It can also happen when the number of vcpus executing 1802 * on a host cpu is greater than the number of ASIDs available. 1803 * 1804 * 2. 
The pmap generation number is different than the value cached in 1805 * the 'vcpustate'. This happens when the host invalidates pages 1806 * belonging to the guest. 1807 * 1808 * asidgen eptgen Action 1809 * mismatch mismatch 1810 * 0 0 (a) 1811 * 0 1 (b1) or (b2) 1812 * 1 0 (c) 1813 * 1 1 (d) 1814 * 1815 * (a) There is no mismatch in eptgen or ASID generation and therefore 1816 * no further action is needed. 1817 * 1818 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1819 * retained and the TLB entries associated with this ASID 1820 * are flushed by VMRUN. 1821 * 1822 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1823 * allocated. 1824 * 1825 * (c) A new ASID is allocated. 1826 * 1827 * (d) A new ASID is allocated. 1828 */ 1829 1830 alloc_asid = false; 1831 eptgen = pmap->pm_eptgen; 1832 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1833 1834 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1835 alloc_asid = true; /* (c) and (d) */ 1836 } else if (vcpustate->eptgen != eptgen) { 1837 if (flush_by_asid()) 1838 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1839 else 1840 alloc_asid = true; /* (b2) */ 1841 } else { 1842 /* 1843 * This is the common case (a). 1844 */ 1845 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1846 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1847 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1848 } 1849 1850 if (alloc_asid) { 1851 if (++asid[thiscpu].num >= nasid) { 1852 asid[thiscpu].num = 1; 1853 if (++asid[thiscpu].gen == 0) 1854 asid[thiscpu].gen = 1; 1855 /* 1856 * If this cpu does not support "flush-by-asid" 1857 * then flush the entire TLB on a generation 1858 * bump. Subsequent ASID allocation in this 1859 * generation can be done without a TLB flush. 1860 */ 1861 if (!flush_by_asid()) 1862 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1863 } 1864 vcpustate->asid.gen = asid[thiscpu].gen; 1865 vcpustate->asid.num = asid[thiscpu].num; 1866 1867 ctrl->asid = vcpustate->asid.num; 1868 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1869 /* 1870 * If this cpu supports "flush-by-asid" then the TLB 1871 * was not flushed after the generation bump. The TLB 1872 * is flushed selectively after every new ASID allocation. 1873 */ 1874 if (flush_by_asid()) 1875 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1876 } 1877 vcpustate->eptgen = eptgen; 1878 1879 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1880 KASSERT(ctrl->asid == vcpustate->asid.num, 1881 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1882} 1883 1884static __inline void 1885disable_gintr(void) 1886{ 1887 1888 __asm __volatile("clgi"); 1889} 1890 1891static __inline void 1892enable_gintr(void) 1893{ 1894 1895 __asm __volatile("stgi"); 1896} 1897 1898/* 1899 * Start vcpu with specified RIP. 1900 */ 1901static int 1902svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1903 struct vm_eventinfo *evinfo) 1904{ 1905 struct svm_regctx *gctx; 1906 struct svm_softc *svm_sc; 1907 struct svm_vcpu *vcpustate; 1908 struct vmcb_state *state; 1909 struct vmcb_ctrl *ctrl; 1910 struct vm_exit *vmexit; 1911 struct vlapic *vlapic; 1912 struct vm *vm; 1913 uint64_t vmcb_pa; 1914 u_int thiscpu; 1915 int handled; 1916 1917 svm_sc = arg; 1918 vm = svm_sc->vm; 1919 1920 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1921 state = svm_get_vmcb_state(svm_sc, vcpu); 1922 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1923 vmexit = vm_exitinfo(vm, vcpu); 1924 vlapic = vm_lapic(vm, vcpu); 1925 1926 /* 1927 * Stash 'curcpu' on the stack as 'thiscpu'. 
1928 * 1929 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1930 * after the #VMEXIT. Since VMRUN is executed inside a critical section 1931 * 'curcpu' and 'thiscpu' are guaranteed to identical. 1932 */ 1933 thiscpu = curcpu; 1934 1935 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1936 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1937 1938 if (vcpustate->lastcpu != thiscpu) { 1939 /* 1940 * Force new ASID allocation by invalidating the generation. 1941 */ 1942 vcpustate->asid.gen = 0; 1943 1944 /* 1945 * Invalidate the VMCB state cache by marking all fields dirty. 1946 */ 1947 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 1948 1949 /* 1950 * XXX 1951 * Setting 'vcpustate->lastcpu' here is bit premature because 1952 * we may return from this function without actually executing 1953 * the VMRUN instruction. This could happen if a rendezvous 1954 * or an AST is pending on the first time through the loop. 1955 * 1956 * This works for now but any new side-effects of vcpu 1957 * migration should take this case into account. 1958 */ 1959 vcpustate->lastcpu = thiscpu; 1960 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1961 } 1962 1963 svm_msr_guest_enter(svm_sc, vcpu); 1964 1965 /* Update Guest RIP */ 1966 state->rip = rip; 1967 1968 do { 1969 /* 1970 * Disable global interrupts to guarantee atomicity during 1971 * loading of guest state. This includes not only the state 1972 * loaded by the "vmrun" instruction but also software state 1973 * maintained by the hypervisor: suspended and rendezvous 1974 * state, NPT generation number, vlapic interrupts etc. 1975 */ 1976 disable_gintr(); 1977 1978 if (vcpu_suspended(evinfo)) { 1979 enable_gintr(); 1980 vm_exit_suspended(vm, vcpu, state->rip); 1981 break; 1982 } 1983 1984 if (vcpu_rendezvous_pending(evinfo)) { 1985 enable_gintr(); 1986 vm_exit_rendezvous(vm, vcpu, state->rip); 1987 break; 1988 } 1989 1990 if (vcpu_reqidle(evinfo)) { 1991 enable_gintr(); 1992 vm_exit_reqidle(vm, vcpu, state->rip); 1993 break; 1994 } 1995 1996 /* We are asked to give the cpu by scheduler. */ 1997 if (vcpu_should_yield(vm, vcpu)) { 1998 enable_gintr(); 1999 vm_exit_astpending(vm, vcpu, state->rip); 2000 break; 2001 } 2002 2003 svm_inj_interrupts(svm_sc, vcpu, vlapic); 2004 2005 /* Activate the nested pmap on 'thiscpu' */ 2006 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 2007 2008 /* 2009 * Check the pmap generation and the ASID generation to 2010 * ensure that the vcpu does not use stale TLB mappings. 2011 */ 2012 check_asid(svm_sc, vcpu, pmap, thiscpu); 2013 2014 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; 2015 vcpustate->dirty = 0; 2016 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2017 2018 /* Launch Virtual Machine. */ 2019 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 2020 svm_launch(vmcb_pa, gctx); 2021 2022 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 2023 2024 /* 2025 * Restore MSR_GSBASE to point to the pcpu data area. 2026 * 2027 * Note that accesses done via PCPU_GET/PCPU_SET will work 2028 * only after MSR_GSBASE is restored. 2029 * 2030 * Also note that we don't bother restoring MSR_KGSBASE 2031 * since it is not used in the kernel and will be restored 2032 * when the VMRUN ioctl returns to userspace. 2033 */ 2034 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 2035 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 2036 thiscpu, curcpu)); 2037 2038 /* 2039 * The host GDTR and IDTR is saved by VMRUN and restored 2040 * automatically on #VMEXIT. However, the host TSS needs 2041 * to be restored explicitly. 

                /* Launch Virtual Machine. */
                VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
                svm_launch(vmcb_pa, gctx);

                CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);

                /*
                 * Restore MSR_GSBASE to point to the pcpu data area.
                 *
                 * Note that accesses done via PCPU_GET/PCPU_SET will work
                 * only after MSR_GSBASE is restored.
                 *
                 * Also note that we don't bother restoring MSR_KGSBASE
                 * since it is not used in the kernel and will be restored
                 * when the VMRUN ioctl returns to userspace.
                 */
                wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
                KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
                    thiscpu, curcpu));

                /*
                 * The host GDTR and IDTR are saved by VMRUN and restored
                 * automatically on #VMEXIT. However, the host TSS needs
                 * to be restored explicitly.
                 */
                restore_host_tss();

                /* #VMEXIT disables interrupts so re-enable them here. */
                enable_gintr();

                /* Update 'nextrip' */
                vcpustate->nextrip = state->rip;

                /* Handle #VMEXIT and if required return to user space. */
                handled = svm_vmexit(svm_sc, vcpu, vmexit);
        } while (handled);

        svm_msr_guest_exit(svm_sc, vcpu);

        return (0);
}

static void
svm_vmcleanup(void *arg)
{
        struct svm_softc *sc = arg;

        free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

        switch (reg) {
        case VM_REG_GUEST_RBX:
                return (&regctx->sctx_rbx);
        case VM_REG_GUEST_RCX:
                return (&regctx->sctx_rcx);
        case VM_REG_GUEST_RDX:
                return (&regctx->sctx_rdx);
        case VM_REG_GUEST_RDI:
                return (&regctx->sctx_rdi);
        case VM_REG_GUEST_RSI:
                return (&regctx->sctx_rsi);
        case VM_REG_GUEST_RBP:
                return (&regctx->sctx_rbp);
        case VM_REG_GUEST_R8:
                return (&regctx->sctx_r8);
        case VM_REG_GUEST_R9:
                return (&regctx->sctx_r9);
        case VM_REG_GUEST_R10:
                return (&regctx->sctx_r10);
        case VM_REG_GUEST_R11:
                return (&regctx->sctx_r11);
        case VM_REG_GUEST_R12:
                return (&regctx->sctx_r12);
        case VM_REG_GUEST_R13:
                return (&regctx->sctx_r13);
        case VM_REG_GUEST_R14:
                return (&regctx->sctx_r14);
        case VM_REG_GUEST_R15:
                return (&regctx->sctx_r15);
        default:
                return (NULL);
        }
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
        struct svm_softc *svm_sc;
        register_t *reg;

        svm_sc = arg;

        if (ident == VM_REG_GUEST_INTR_SHADOW) {
                return (svm_get_intr_shadow(svm_sc, vcpu, val));
        }

        if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
                return (0);
        }

        reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

        if (reg != NULL) {
                *val = *reg;
                return (0);
        }

        VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
        return (EINVAL);
}
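
/*
 * Register lookups are layered: the interrupt shadow is synthesized from the
 * VMCB control area, registers that VMRUN/#VMEXIT save in the VMCB state area
 * (e.g. RAX, RSP, RIP, RFLAGS and the segment registers) are resolved by
 * vmcb_read()/vmcb_write(), and the remaining general purpose registers,
 * which svm_launch() saves and restores in software, are resolved from the
 * 'svm_regctx' by swctx_regptr().  A hypothetical caller, for illustration
 * only:
 *
 *      uint64_t rip, rbx;
 *      int error;
 *
 *      error = svm_getreg(svm_sc, vcpu, VM_REG_GUEST_RIP, &rip);
 *      error = svm_getreg(svm_sc, vcpu, VM_REG_GUEST_RBX, &rbx);
 *
 * The first lookup is satisfied by vmcb_read() while the second falls through
 * to the software register context.
 */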

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
        struct svm_softc *svm_sc;
        register_t *reg;

        svm_sc = arg;

        if (ident == VM_REG_GUEST_INTR_SHADOW) {
                return (svm_modify_intr_shadow(svm_sc, vcpu, val));
        }

        if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
                return (0);
        }

        reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

        if (reg != NULL) {
                *reg = val;
                return (0);
        }

        /*
         * XXX deal with CR3 and invalidate TLB entries tagged with the
         * vcpu's ASID. This needs to be treated differently depending on
         * whether 'running' is true/false.
         */

        VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
        return (EINVAL);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
        struct svm_softc *sc;
        int error;

        sc = arg;
        error = 0;
        switch (type) {
        case VM_CAP_HALT_EXIT:
                svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_HLT, val);
                break;
        case VM_CAP_PAUSE_EXIT:
                svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_PAUSE, val);
                break;
        case VM_CAP_UNRESTRICTED_GUEST:
                /* Unrestricted guest execution cannot be disabled in SVM */
                if (val == 0)
                        error = EINVAL;
                break;
        default:
                error = ENOENT;
                break;
        }
        return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
        struct svm_softc *sc;
        int error;

        sc = arg;
        error = 0;

        switch (type) {
        case VM_CAP_HALT_EXIT:
                *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_HLT);
                break;
        case VM_CAP_PAUSE_EXIT:
                *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
                    VMCB_INTCPT_PAUSE);
                break;
        case VM_CAP_UNRESTRICTED_GUEST:
                *retval = 1;    /* unrestricted guest is always enabled */
                break;
        default:
                error = ENOENT;
                break;
        }
        return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
        struct svm_softc *svm_sc;
        struct vlapic *vlapic;

        svm_sc = arg;
        vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
        vlapic->vm = svm_sc->vm;
        vlapic->vcpuid = vcpuid;
        vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

        vlapic_init(vlapic);

        return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

        vlapic_cleanup(vlapic);
        free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
        svm_init,
        svm_cleanup,
        svm_restore,
        svm_vminit,
        svm_vmrun,
        svm_vmcleanup,
        svm_getreg,
        svm_setreg,
        vmcb_getdesc,
        vmcb_setdesc,
        svm_getcap,
        svm_setcap,
        svm_npt_alloc,
        svm_npt_free,
        svm_vlapic_init,
        svm_vlapic_cleanup
};
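
/*
 * 'vmm_ops_amd' is the dispatch table through which the machine-independent
 * vmm(4) layer drives this backend; the function pointers above are listed
 * positionally in the order declared by 'struct vmm_ops' in <machine/vmm.h>.
 * A hypothetical sketch of such a caller (the member names below are for
 * illustration only and are not the actual vmm.h field names):
 *
 *      struct vmm_ops *ops = &vmm_ops_amd;
 *      void *cookie;
 *
 *      (*ops->init)(ipinum);
 *      cookie = (*ops->vminit)(vm, pmap);
 *      error = (*ops->vmrun)(cookie, vcpuid, rip, pmap, evinfo);
 *      (*ops->vmcleanup)(cookie);
 *
 * so svm_init(), svm_vminit(), svm_vmrun() and svm_vmcleanup() are reached
 * without the generic layer knowing anything about SVM.
 */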