/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm.c 330069 2018-02-27 14:47:56Z avg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
#define AMD_CPUID_SVM_AVIC		BIT(13) /* AVIC present */

#define VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
	 */
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}
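
/*
 * MSR_VM_HSAVE_PA tells the processor where to save host state across
 * VMRUN/#VMEXIT; see "SVM Related MSRs" in APMv2. Each host cpu points
 * it at its own page-aligned 'hsave' area declared above.
 */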

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{

	svm_enable(NULL);
}

/* Pentium compatible MSRs */
#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for an MSR in the permission bitmap.
 * Two bits are used for each MSR: the lower bit for read and the higher
 * bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}
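
/*
 * For example, MSR_LSTAR (0xC0000082) falls in the AMD 6th generation
 * range: off = 0x82 and base = 0x2000, so *index = (0x82 + 0x2000) / 4 =
 * 0x820 and *bit = (0xC0000082 % 4) * 2 = 4; read permission is bit 4 and
 * write permission is bit 5 of perm_bitmap[0x820].
 */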

/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions, otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
		panic("malloc of svm_softc not aligned on page boundary");

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");
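
	/*
	 * Both bitmaps are handed to the hardware by physical address
	 * ('iopm_base_pa' and 'msrpm_base_pa' in the VMCB), which is why
	 * they are allocated with contigmalloc() to get physically
	 * contiguous, page-aligned memory.
	 */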

	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2: check if the Code Segment has
		 * the Long attribute set in its descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0
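
/*
 * EXITINFO1 for an IOIO intercept, as decoded below: bit 0 is the
 * direction (1 = in), bit 2 flags string instructions, bit 3 a REP
 * prefix, bits 6:4 hold the operand size in bytes, bits 9:7 the address
 * size encoding, bits 12:10 the effective segment and bits 31:16 the
 * port number.
 */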

/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch (paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif
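
/*
 * Layout of the EVENTINJ field as assembled below: bits 7:0 hold the
 * vector, bits 10:8 the event type, bit 11 the "error code valid" flag
 * (VMCB_EVENTINJ_EC_VALID), bit 31 the "valid" flag (VMCB_EVENTINJ_VALID)
 * and bits 63:32 the error code itself.
 */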

/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
 */
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/* Virtual interrupt injection is not used. */
	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}
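
/*
 * Overview of the virtual NMI blocking logic below: injecting an NMI
 * (svm_inj_interrupts()) enables the IRET intercept; the resulting
 * #VMEXIT(IRET) is handled in svm_vmexit() by restarting the "iret" with
 * the intercept cleared and the interrupt shadow set, at which point NMI
 * injection is unblocked again.
 */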

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL
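
/*
 * EFER_MBZ_BITS covers bit 9 and bits 63:16, which are must-be-zero in
 * EFER. Bits 7:1 are read-as-zero and are stripped separately by the
 * 'newval &= ~0xFE' below before the must-be-zero check is applied.
 */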

static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;	/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
	 */
	if (newval & EFER_LMSLE) {
		vme = vm_exitinfo(sc->vm, vcpu);
		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
		*retu = true;
		return (0);
	}

	if (newval & EFER_FFXSR) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
			goto gpf;
	}

	if (newval & EFER_TCE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
			goto gpf;
	}

	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
	return (0);
gpf:
	vm_inject_gp(sc->vm, vcpu);
	return (0);
}

static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_write_efer(sc, vcpu, val, retu);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
	else
		error = svm_rdmsr(sc, vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(sc, vcpu);
		ctx = svm_get_guest_regctx(sc, vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case VMCB_EXIT_INVALID:
		return ("invalvmcb");
	case VMCB_EXIT_SHUTDOWN:
		return ("shutdown");
	case VMCB_EXIT_NPF:
		return ("nptfault");
	case VMCB_EXIT_PAUSE:
		return ("pause");
	case VMCB_EXIT_HLT:
		return ("hlt");
	case VMCB_EXIT_CPUID:
		return ("cpuid");
	case VMCB_EXIT_IO:
		return ("inout");
	case VMCB_EXIT_MC:
		return ("mchk");
	case VMCB_EXIT_INTR:
		return ("extintr");
	case VMCB_EXIT_NMI:
		return ("nmi");
	case VMCB_EXIT_VINTR:
		return ("vintr");
	case VMCB_EXIT_MSR:
		return ("msr");
	case VMCB_EXIT_IRET:
		return ("iret");
	case VMCB_EXIT_MONITOR:
		return ("monitor");
	case VMCB_EXIT_MWAIT:
		return ("mwait");
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts, as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}

static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}
		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}

/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint8_t v_tpr;
	int vector, need_intr_window;
	int extint_pending;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);

	need_intr_window = 0;

	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
		    "cleared due to rip change: %#lx/%#lx",
		    vcpustate->nextrip, state->rip);
	}

	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	svm_inj_intinfo(sc, vcpu);

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu)) {
		if (nmi_blocked(sc, vcpu)) {
			/*
			 * Can't inject another NMI if the guest has not
			 * yet executed an "iret" after the last NMI.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
			    "to NMI-blocking");
		} else if (ctrl->intr_shadow) {
			/*
			 * Can't inject an NMI if the vcpu is in an intr_shadow.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
			    "interrupt shadow");
			need_intr_window = 1;
			goto done;
		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
			/*
			 * If there is already an exception/interrupt pending
			 * then defer the NMI until after that.
			 */
			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
			    "eventinj %#lx", ctrl->eventinj);

			/*
			 * Use self-IPI to trigger a VM-exit as soon as
			 * possible after the event injection is completed.
			 *
			 * This works only if the external interrupt exiting
			 * is at a lower priority than the event injection.
			 *
			 * Although not explicitly specified in APMv2 the
			 * relative priorities were verified empirically.
			 */
			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}

	extint_pending = vm_extint_pending(sc->vm, vcpu);
	if (!extint_pending) {
		if (!vlapic_pending_intr(vlapic, &vector))
			goto done;
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(sc->vm, &vector);
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(sc->vm, vcpu);
		vatpic_intr_accepted(sc->vm, vector);
	}

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
		    ("Bogus intr_window_exiting: eventinj (%#lx), "
		    "intr_shadow (%u), rflags (%#lx)",
		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
		enable_intr_window_exiting(sc, vcpu);
	} else {
		disable_intr_window_exiting(sc, vcpu);
	}
}

static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}

static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen	      Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */

	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}
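
/*
 * Example of the rollover behavior above: svm_init() seeds each host cpu
 * with {gen = ~0UL, num = nasid - 1}, so the first allocation increments
 * 'num' to 'nasid', wraps it to 1 and bumps 'gen' past 0, starting the
 * sequence at {gen = 1, num = 1}.
 */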

static __inline void
disable_gintr(void)
{

	__asm __volatile("clgi");
}

static __inline void
enable_gintr(void)
{

	__asm __volatile("stgi");
}

/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    struct vm_eventinfo *evinfo)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	int handled;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to give up the cpu. */
		if (vcpu_should_yield(vm, vcpu)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'curcpu' */
		CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, curcpu);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);

		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and, if required, return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}
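
/*
 * Both svm_getreg() above and svm_setreg() below resolve a register in
 * three steps: the VM_REG_GUEST_INTR_SHADOW pseudo-register is serviced
 * from the VMCB control area, registers kept in the VMCB are accessed via
 * vmcb_read()/vmcb_write(), and the remaining general purpose registers
 * live in the software 'svm_regctx' saved and restored by svm_launch().
 */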

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
	svm_init,
	svm_cleanup,
	svm_restore,
	svm_vminit,
	svm_vmrun,
	svm_vmcleanup,
	svm_getreg,
	svm_setreg,
	vmcb_getdesc,
	vmcb_setdesc,
	svm_getcap,
	svm_setcap,
	svm_npt_alloc,
	svm_npt_free,
	svm_vlapic_init,
	svm_vlapic_cleanup
};
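
/*
 * The ops vector above is the glue between the machine-independent vmm(4)
 * layer and this SVM backend; a VM_RUN request, for example, is dispatched
 * through it to svm_vmrun().
 */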