sys/amd64/vmm/amd/svm.c, revision 272926 (FreeBSD projects/bhyve_svm branch)
1/*- 2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 272926 2014-10-11 03:09:34Z neel $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/smp.h> 33#include <sys/kernel.h> 34#include <sys/malloc.h> 35#include <sys/pcpu.h> 36#include <sys/proc.h> 37#include <sys/sysctl.h> 38 39#include <vm/vm.h> 40#include <vm/pmap.h> 41 42#include <machine/cpufunc.h> 43#include <machine/psl.h> 44#include <machine/pmap.h> 45#include <machine/md_var.h> 46#include <machine/vmparam.h> 47#include <machine/specialreg.h> 48#include <machine/segments.h> 49#include <machine/smp.h> 50#include <machine/vmm.h> 51#include <machine/vmm_dev.h> 52#include <machine/vmm_instruction_emul.h> 53 54#include <x86/apicreg.h> 55 56#include "vmm_lapic.h" 57#include "vmm_stat.h" 58#include "vmm_ktr.h" 59#include "vmm_ioport.h" 60#include "vatpic.h" 61#include "vlapic.h" 62#include "vlapic_priv.h" 63 64#include "x86.h" 65#include "vmcb.h" 66#include "svm.h" 67#include "svm_softc.h" 68#include "svm_msr.h" 69#include "npt.h" 70 71SYSCTL_DECL(_hw_vmm); 72SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); 73 74/* 75 * SVM CPUID function 0x8000_000A, edx bit decoding. 76 */ 77#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 78#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 79#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 80#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 81#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 82#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 83#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 84#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 85#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 86#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 87 88#define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 89 VMCB_CACHE_IOPM | \ 90 VMCB_CACHE_I | \ 91 VMCB_CACHE_TPR | \ 92 VMCB_CACHE_CR2 | \ 93 VMCB_CACHE_CR | \ 94 VMCB_CACHE_DT | \ 95 VMCB_CACHE_SEG | \ 96 VMCB_CACHE_NP) 97 98static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 99SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 100 0, NULL); 101 102MALLOC_DEFINE(M_SVM, "svm", "svm"); 103MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 104 105/* Per-CPU context area. */ 106extern struct pcpu __pcpu[]; 107 108static uint32_t svm_feature; /* AMD SVM features. */ 109SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0, 110 "SVM features advertised by CPUID.8000000AH:EDX"); 111 112static int disable_npf_assist; 113SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 114 &disable_npf_assist, 0, NULL); 115 116/* Maximum ASIDs supported by the processor */ 117static uint32_t nasid; 118SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0, 119 "Number of ASIDs supported by this processor"); 120 121/* Current ASID generation for each host cpu */ 122static struct asid asid[MAXCPU]; 123 124/* 125 * SVM host state saved area of size 4KB for each core. 126 */ 127static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 128 129static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 130static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 131static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 132 133static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); 134 135/* 136 * Common function to enable or disabled SVM for a CPU. 137 */ 138static int 139cpu_svm_enable_disable(boolean_t enable) 140{ 141 uint64_t efer_msr; 142 143 efer_msr = rdmsr(MSR_EFER); 144 145 if (enable) 146 efer_msr |= EFER_SVM; 147 else 148 efer_msr &= ~EFER_SVM; 149 150 wrmsr(MSR_EFER, efer_msr); 151 152 return(0); 153} 154 155/* 156 * Disable SVM on a CPU. 157 */ 158static void 159svm_disable(void *arg __unused) 160{ 161 162 (void)cpu_svm_enable_disable(FALSE); 163} 164 165/* 166 * Disable SVM for all CPUs. 167 */ 168static int 169svm_cleanup(void) 170{ 171 172 smp_rendezvous(NULL, svm_disable, NULL, NULL); 173 return (0); 174} 175 176/* 177 * Verify that all the features required by bhyve are available. 
178 */ 179static int 180check_svm_features(void) 181{ 182 u_int regs[4]; 183 184 /* CPUID Fn8000_000A is for SVM */ 185 do_cpuid(0x8000000A, regs); 186 svm_feature = regs[3]; 187 188 printf("SVM: Revision %d\n", regs[0] & 0xFF); 189 printf("SVM: NumASID %u\n", regs[1]); 190 191 nasid = regs[1]; 192 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 193 194 printf("SVM: Features 0x%b\n", svm_feature, 195 "\020" 196 "\001NP" /* Nested paging */ 197 "\002LbrVirt" /* LBR virtualization */ 198 "\003SVML" /* SVM lock */ 199 "\004NRIPS" /* NRIP save */ 200 "\005TscRateMsr" /* MSR based TSC rate control */ 201 "\006VmcbClean" /* VMCB clean bits */ 202 "\007FlushByAsid" /* Flush by ASID */ 203 "\010DecodeAssist" /* Decode assist */ 204 "\011<b8>" 205 "\012<b9>" 206 "\013PauseFilter" 207 "\014<b11>" 208 "\015PauseFilterThreshold" 209 "\016AVIC" 210 ); 211 212 /* bhyve requires the Nested Paging feature */ 213 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 214 printf("SVM: Nested Paging feature not available.\n"); 215 return (ENXIO); 216 } 217 218 /* bhyve requires the NRIP Save feature */ 219 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 220 printf("SVM: NRIP Save feature not available.\n"); 221 return (ENXIO); 222 } 223 224 return (0); 225} 226 227static __inline int 228flush_by_asid(void) 229{ 230 231 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 232} 233 234static __inline int 235decode_assist(void) 236{ 237 238 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 239} 240 241/* 242 * Enable SVM for a CPU. 243 */ 244static void 245svm_enable(void *arg __unused) 246{ 247 uint64_t hsave_pa; 248 249 (void)cpu_svm_enable_disable(TRUE); 250 251 hsave_pa = vtophys(hsave[curcpu]); 252 wrmsr(MSR_VM_HSAVE_PA, hsave_pa); 253 254 if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) { 255 panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu); 256 } 257} 258 259/* 260 * Verify that SVM is enabled and the processor has all the required features. 261 */ 262static int 263is_svm_enabled(void) 264{ 265 uint64_t msr; 266 267 /* Section 15.4 Enabling SVM from APM2. */ 268 if ((amd_feature2 & AMDID2_SVM) == 0) { 269 printf("SVM: not available.\n"); 270 return (ENXIO); 271 } 272 273 msr = rdmsr(MSR_VM_CR); 274 if ((msr & VM_CR_SVMDIS) != 0) { 275 printf("SVM: disabled by BIOS.\n"); 276 return (ENXIO); 277 } 278 279 return (check_svm_features()); 280} 281 282/* 283 * Enable SVM on CPU and initialize nested page table h/w. 284 */ 285static int 286svm_init(int ipinum) 287{ 288 int err, cpu; 289 290 err = is_svm_enabled(); 291 if (err) 292 return (err); 293 294 vmcb_clean &= VMCB_CACHE_DEFAULT; 295 296 for (cpu = 0; cpu < MAXCPU; cpu++) { 297 /* 298 * Initialize the host ASIDs to their "highest" valid values. 299 * 300 * The next ASID allocation will rollover both 'gen' and 'num' 301 * and start off the sequence at {1,1}. 302 */ 303 asid[cpu].gen = ~0UL; 304 asid[cpu].num = nasid - 1; 305 } 306 307 svm_msr_init(); 308 svm_npt_init(ipinum); 309 310 /* Start SVM on all CPUs */ 311 smp_rendezvous(NULL, svm_enable, NULL, NULL); 312 313 return (0); 314} 315 316static void 317svm_restore(void) 318{ 319 svm_enable(NULL); 320} 321 322/* 323 * Get index and bit position for a MSR in MSR permission 324 * bitmap. Two bits are used for each MSR, lower bit is 325 * for read and higher bit is for write. 
326 */ 327static int 328svm_msr_index(uint64_t msr, int *index, int *bit) 329{ 330 uint32_t base, off; 331 332/* Pentium compatible MSRs */ 333#define MSR_PENTIUM_START 0 334#define MSR_PENTIUM_END 0x1FFF 335/* AMD 6th generation and Intel compatible MSRs */ 336#define MSR_AMD6TH_START 0xC0000000UL 337#define MSR_AMD6TH_END 0xC0001FFFUL 338/* AMD 7th and 8th generation compatible MSRs */ 339#define MSR_AMD7TH_START 0xC0010000UL 340#define MSR_AMD7TH_END 0xC0011FFFUL 341 342 *index = -1; 343 *bit = (msr % 4) * 2; 344 base = 0; 345 346 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 347 *index = msr / 4; 348 return (0); 349 } 350 351 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 352 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 353 off = (msr - MSR_AMD6TH_START); 354 *index = (off + base) / 4; 355 return (0); 356 } 357 358 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 359 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 360 off = (msr - MSR_AMD7TH_START); 361 *index = (off + base) / 4; 362 return (0); 363 } 364 365 return (EIO); 366} 367 368/* 369 * Give virtual cpu the complete access to MSR(read & write). 370 */ 371static int 372svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 373{ 374 int index, bit, err; 375 376 err = svm_msr_index(msr, &index, &bit); 377 if (err) { 378 ERR("MSR 0x%lx is not writeable by guest.\n", msr); 379 return (err); 380 } 381 382 if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) { 383 ERR("MSR 0x%lx index out of range(%d).\n", msr, index); 384 return (EINVAL); 385 } 386 if (bit < 0 || bit > 8) { 387 ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit); 388 return (EINVAL); 389 } 390 391 /* Disable intercept for read and write. */ 392 if (read) 393 perm_bitmap[index] &= ~(1UL << bit); 394 if (write) 395 perm_bitmap[index] &= ~(2UL << bit); 396 CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n", 397 (perm_bitmap[index] >> bit) & 0x3, msr); 398 399 return (0); 400} 401 402static int 403svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 404{ 405 return svm_msr_perm(perm_bitmap, msr, true, true); 406} 407 408static int 409svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 410{ 411 return svm_msr_perm(perm_bitmap, msr, true, false); 412} 413 414static __inline int 415svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 416{ 417 struct vmcb_ctrl *ctrl; 418 419 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 420 421 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 422 return (ctrl->intercept[idx] & bitmask ? 
1 : 0); 423} 424 425static __inline void 426svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 427 int enabled) 428{ 429 struct vmcb_ctrl *ctrl; 430 uint32_t oldval; 431 432 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 433 434 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 435 oldval = ctrl->intercept[idx]; 436 437 if (enabled) 438 ctrl->intercept[idx] |= bitmask; 439 else 440 ctrl->intercept[idx] &= ~bitmask; 441 442 if (ctrl->intercept[idx] != oldval) { 443 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 444 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 445 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); 446 } 447} 448 449static __inline void 450svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 451{ 452 svm_set_intercept(sc, vcpu, off, bitmask, 0); 453} 454 455static __inline void 456svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 457{ 458 svm_set_intercept(sc, vcpu, off, bitmask, 1); 459} 460 461static void 462vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 463 uint64_t msrpm_base_pa, uint64_t np_pml4) 464{ 465 struct vmcb_ctrl *ctrl; 466 struct vmcb_state *state; 467 uint32_t mask; 468 int n; 469 470 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 471 state = svm_get_vmcb_state(sc, vcpu); 472 473 ctrl->iopm_base_pa = iopm_base_pa; 474 ctrl->msrpm_base_pa = msrpm_base_pa; 475 476 /* Enable nested paging */ 477 ctrl->np_enable = 1; 478 ctrl->n_cr3 = np_pml4; 479 480 /* 481 * Intercept accesses to the control registers that are not shadowed 482 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 483 */ 484 for (n = 0; n < 16; n++) { 485 mask = (BIT(n) << 16) | BIT(n); 486 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 487 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 488 else 489 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 490 } 491 492 /* Intercept Machine Check exceptions. */ 493 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 494 495 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 496 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 497 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 498 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 499 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 500 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 501 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 502 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 503 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 504 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 505 VMCB_INTCPT_FERR_FREEZE); 506 507 /* 508 * From section "Canonicalization and Consistency Checks" in APMv2 509 * the VMRUN intercept bit must be set to pass the consistency check. 510 */ 511 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 512 513 /* 514 * The ASID will be set to a non-zero value just before VMRUN. 515 */ 516 ctrl->asid = 0; 517 518 /* 519 * Section 15.21.1, Interrupt Masking in EFLAGS 520 * Section 15.21.2, Virtualizing APIC.TPR 521 * 522 * This must be set for %rflag and %cr8 isolation of guest and host. 
523 */ 524 ctrl->v_intr_masking = 1; 525 526 /* Enable Last Branch Record aka LBR for debugging */ 527 ctrl->lbr_virt_en = 1; 528 state->dbgctl = BIT(0); 529 530 /* EFER_SVM must always be set when the guest is executing */ 531 state->efer = EFER_SVM; 532 533 /* Set up the PAT to power-on state */ 534 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 535 PAT_VALUE(1, PAT_WRITE_THROUGH) | 536 PAT_VALUE(2, PAT_UNCACHED) | 537 PAT_VALUE(3, PAT_UNCACHEABLE) | 538 PAT_VALUE(4, PAT_WRITE_BACK) | 539 PAT_VALUE(5, PAT_WRITE_THROUGH) | 540 PAT_VALUE(6, PAT_UNCACHED) | 541 PAT_VALUE(7, PAT_UNCACHEABLE); 542} 543 544/* 545 * Initialise a virtual machine. 546 */ 547static void * 548svm_vminit(struct vm *vm, pmap_t pmap) 549{ 550 struct svm_softc *svm_sc; 551 struct svm_vcpu *vcpu; 552 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 553 int i; 554 555 svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc), 556 M_SVM, M_WAITOK | M_ZERO); 557 558 svm_sc->vm = vm; 559 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 560 561 /* 562 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc. 563 */ 564 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 565 566 /* 567 * Following MSR can be completely controlled by virtual machines 568 * since access to following are translated to access to VMCB. 569 */ 570 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 571 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 572 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 573 574 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 575 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 576 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 577 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 578 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 579 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 580 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 581 582 /* For Nested Paging/RVI only. */ 583 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 584 585 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 586 587 /* 588 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 589 */ 590 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 591 592 /* Intercept access to all I/O ports. */ 593 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); 594 595 /* Cache physical address for multiple vcpus. */ 596 iopm_pa = vtophys(svm_sc->iopm_bitmap); 597 msrpm_pa = vtophys(svm_sc->msr_bitmap); 598 pml4_pa = svm_sc->nptp; 599 600 for (i = 0; i < VM_MAXCPU; i++) { 601 vcpu = svm_get_vcpu(svm_sc, i); 602 vcpu->lastcpu = NOCPU; 603 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 604 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 605 svm_msr_guest_init(svm_sc, i); 606 } 607 return (svm_sc); 608} 609 610static int 611svm_cpl(struct vmcb_state *state) 612{ 613 614 /* 615 * From APMv2: 616 * "Retrieve the CPL from the CPL field in the VMCB, not 617 * from any segment DPL" 618 */ 619 return (state->cpl); 620} 621 622static enum vm_cpu_mode 623svm_vcpu_mode(struct vmcb *vmcb) 624{ 625 struct vmcb_segment seg; 626 struct vmcb_state *state; 627 int error; 628 629 state = &vmcb->state; 630 631 if (state->efer & EFER_LMA) { 632 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 633 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 634 error)); 635 636 /* 637 * Section 4.8.1 for APM2, check if Code Segment has 638 * Long attribute set in descriptor. 
639 */ 640 if (seg.attrib & VMCB_CS_ATTRIB_L) 641 return (CPU_MODE_64BIT); 642 else 643 return (CPU_MODE_COMPATIBILITY); 644 } else if (state->cr0 & CR0_PE) { 645 return (CPU_MODE_PROTECTED); 646 } else { 647 return (CPU_MODE_REAL); 648 } 649} 650 651static enum vm_paging_mode 652svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 653{ 654 655 if ((cr0 & CR0_PG) == 0) 656 return (PAGING_MODE_FLAT); 657 if ((cr4 & CR4_PAE) == 0) 658 return (PAGING_MODE_32); 659 if (efer & EFER_LME) 660 return (PAGING_MODE_64); 661 else 662 return (PAGING_MODE_PAE); 663} 664 665/* 666 * ins/outs utility routines 667 */ 668static uint64_t 669svm_inout_str_index(struct svm_regctx *regs, int in) 670{ 671 uint64_t val; 672 673 val = in ? regs->sctx_rdi : regs->sctx_rsi; 674 675 return (val); 676} 677 678static uint64_t 679svm_inout_str_count(struct svm_regctx *regs, int rep) 680{ 681 uint64_t val; 682 683 val = rep ? regs->sctx_rcx : 1; 684 685 return (val); 686} 687 688static void 689svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 690 int in, struct vm_inout_str *vis) 691{ 692 int error, s; 693 694 if (in) { 695 vis->seg_name = VM_REG_GUEST_ES; 696 } else { 697 /* The segment field has standard encoding */ 698 s = (info1 >> 10) & 0x7; 699 vis->seg_name = vm_segment_name(s); 700 } 701 702 error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 703 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 704} 705 706static int 707svm_inout_str_addrsize(uint64_t info1) 708{ 709 uint32_t size; 710 711 size = (info1 >> 7) & 0x7; 712 switch (size) { 713 case 1: 714 return (2); /* 16 bit */ 715 case 2: 716 return (4); /* 32 bit */ 717 case 4: 718 return (8); /* 64 bit */ 719 default: 720 panic("%s: invalid size encoding %d", __func__, size); 721 } 722} 723 724static void 725svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 726{ 727 struct vmcb_state *state; 728 729 state = &vmcb->state; 730 paging->cr3 = state->cr3; 731 paging->cpl = svm_cpl(state); 732 paging->cpu_mode = svm_vcpu_mode(vmcb); 733 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 734 state->efer); 735} 736 737#define UNHANDLED 0 738 739/* 740 * Handle guest I/O intercept. 741 */ 742static int 743svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 744{ 745 struct vmcb_ctrl *ctrl; 746 struct vmcb_state *state; 747 struct svm_regctx *regs; 748 struct vm_inout_str *vis; 749 uint64_t info1; 750 int inout_string; 751 752 state = svm_get_vmcb_state(svm_sc, vcpu); 753 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 754 regs = svm_get_guest_regctx(svm_sc, vcpu); 755 756 info1 = ctrl->exitinfo1; 757 inout_string = info1 & BIT(2) ? 1 : 0; 758 759 /* 760 * The effective segment number in EXITINFO1[12:10] is populated 761 * only if the processor has the DecodeAssist capability. 762 * 763 * XXX this is not specified explicitly in APMv2 but can be verified 764 * empirically. 765 */ 766 if (inout_string && !decode_assist()) 767 return (UNHANDLED); 768 769 vmexit->exitcode = VM_EXITCODE_INOUT; 770 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 771 vmexit->u.inout.string = inout_string; 772 vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; 773 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 774 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 775 vmexit->u.inout.eax = (uint32_t)(state->rax); 776 777 if (inout_string) { 778 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 779 vis = &vmexit->u.inout_str; 780 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 781 vis->rflags = state->rflags; 782 vis->cr0 = state->cr0; 783 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 784 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 785 vis->addrsize = svm_inout_str_addrsize(info1); 786 svm_inout_str_seginfo(svm_sc, vcpu, info1, 787 vmexit->u.inout.in, vis); 788 } 789 790 return (UNHANDLED); 791} 792 793static int 794svm_npf_paging(uint64_t exitinfo1) 795{ 796 797 if (exitinfo1 & VMCB_NPF_INFO1_W) 798 return (VM_PROT_WRITE); 799 800 return (VM_PROT_READ); 801} 802 803static bool 804svm_npf_emul_fault(uint64_t exitinfo1) 805{ 806 807 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 808 return (false); 809 } 810 811 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 812 return (false); 813 } 814 815 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 816 return (false); 817 } 818 819 return (true); 820} 821 822static void 823svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 824{ 825 struct vm_guest_paging *paging; 826 struct vmcb_segment seg; 827 struct vmcb_ctrl *ctrl; 828 char *inst_bytes; 829 int error, inst_len; 830 831 ctrl = &vmcb->ctrl; 832 paging = &vmexit->u.inst_emul.paging; 833 834 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 835 vmexit->u.inst_emul.gpa = gpa; 836 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 837 svm_paging_info(vmcb, paging); 838 839 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 840 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 841 842 switch(paging->cpu_mode) { 843 case CPU_MODE_PROTECTED: 844 case CPU_MODE_COMPATIBILITY: 845 /* 846 * Section 4.8.1 of APM2, Default Operand Size or D bit. 847 */ 848 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 849 1 : 0; 850 break; 851 default: 852 vmexit->u.inst_emul.cs_d = 0; 853 break; 854 } 855 856 /* 857 * Copy the instruction bytes into 'vie' if available. 858 */ 859 if (decode_assist() && !disable_npf_assist) { 860 inst_len = ctrl->inst_len; 861 inst_bytes = ctrl->inst_bytes; 862 } else { 863 inst_len = 0; 864 inst_bytes = NULL; 865 } 866 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 867} 868 869#ifdef KTR 870static const char * 871intrtype_to_str(int intr_type) 872{ 873 switch (intr_type) { 874 case VMCB_EVENTINJ_TYPE_INTR: 875 return ("hwintr"); 876 case VMCB_EVENTINJ_TYPE_NMI: 877 return ("nmi"); 878 case VMCB_EVENTINJ_TYPE_INTn: 879 return ("swintr"); 880 case VMCB_EVENTINJ_TYPE_EXCEPTION: 881 return ("exception"); 882 default: 883 panic("%s: unknown intr_type %d", __func__, intr_type); 884 } 885} 886#endif 887 888/* 889 * Inject an event to vcpu as described in section 15.20, "Event injection". 
890 */ 891static void 892svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 893 uint32_t error, bool ec_valid) 894{ 895 struct vmcb_ctrl *ctrl; 896 897 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 898 899 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 900 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 901 902 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 903 __func__, vector)); 904 905 switch (intr_type) { 906 case VMCB_EVENTINJ_TYPE_INTR: 907 case VMCB_EVENTINJ_TYPE_NMI: 908 case VMCB_EVENTINJ_TYPE_INTn: 909 break; 910 case VMCB_EVENTINJ_TYPE_EXCEPTION: 911 if (vector >= 0 && vector <= 31 && vector != 2) 912 break; 913 /* FALLTHROUGH */ 914 default: 915 panic("%s: invalid intr_type/vector: %d/%d", __func__, 916 intr_type, vector); 917 } 918 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 919 if (ec_valid) { 920 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 921 ctrl->eventinj |= (uint64_t)error << 32; 922 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 923 intrtype_to_str(intr_type), vector, error); 924 } else { 925 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 926 intrtype_to_str(intr_type), vector); 927 } 928} 929 930static void 931svm_update_virqinfo(struct svm_softc *sc, int vcpu) 932{ 933 struct vm *vm; 934 struct vlapic *vlapic; 935 struct vmcb_ctrl *ctrl; 936 int pending; 937 938 vm = sc->vm; 939 vlapic = vm_lapic(vm, vcpu); 940 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 941 942 /* Update %cr8 in the emulated vlapic */ 943 vlapic_set_cr8(vlapic, ctrl->v_tpr); 944 945 /* 946 * If V_IRQ indicates that the interrupt injection attempted on then 947 * last VMRUN was successful then update the vlapic accordingly. 948 */ 949 if (ctrl->v_intr_vector != 0) { 950 pending = ctrl->v_irq; 951 KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid " 952 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 953 KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 954 VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector, 955 pending ? "pending" : "accepted"); 956 if (!pending) 957 vlapic_intr_accepted(vlapic, ctrl->v_intr_vector); 958 } 959} 960 961static void 962svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 963{ 964 struct vmcb_ctrl *ctrl; 965 uint64_t intinfo; 966 967 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 968 intinfo = ctrl->exitintinfo; 969 if (!VMCB_EXITINTINFO_VALID(intinfo)) 970 return; 971 972 /* 973 * From APMv2, Section "Intercepts during IDT interrupt delivery" 974 * 975 * If a #VMEXIT happened during event delivery then record the event 976 * that was being delivered. 
977 */ 978 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 979 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 980 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 981 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 982} 983 984static __inline int 985vintr_intercept_enabled(struct svm_softc *sc, int vcpu) 986{ 987 988 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 989 VMCB_INTCPT_VINTR)); 990} 991 992static __inline void 993enable_intr_window_exiting(struct svm_softc *sc, int vcpu) 994{ 995 struct vmcb_ctrl *ctrl; 996 997 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 998 999 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 1000 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 1001 KASSERT(vintr_intercept_enabled(sc, vcpu), 1002 ("%s: vintr intercept should be enabled", __func__)); 1003 return; 1004 } 1005 1006 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); 1007 ctrl->v_irq = 1; 1008 ctrl->v_ign_tpr = 1; 1009 ctrl->v_intr_vector = 0; 1010 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1011 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1012} 1013 1014static __inline void 1015disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 1016{ 1017 struct vmcb_ctrl *ctrl; 1018 1019 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1020 1021 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1022 KASSERT(!vintr_intercept_enabled(sc, vcpu), 1023 ("%s: vintr intercept should be disabled", __func__)); 1024 return; 1025 } 1026 1027#ifdef KTR 1028 if (ctrl->v_intr_vector == 0) 1029 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 1030 else 1031 VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection"); 1032#endif 1033 ctrl->v_irq = 0; 1034 ctrl->v_intr_vector = 0; 1035 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1036 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1037} 1038 1039static int 1040svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val) 1041{ 1042 struct vmcb_ctrl *ctrl; 1043 int oldval, newval; 1044 1045 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1046 oldval = ctrl->intr_shadow; 1047 newval = val ? 1 : 0; 1048 if (newval != oldval) { 1049 ctrl->intr_shadow = newval; 1050 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); 1051 } 1052 return (0); 1053} 1054 1055static int 1056svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val) 1057{ 1058 struct vmcb_ctrl *ctrl; 1059 1060 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1061 *val = ctrl->intr_shadow; 1062 return (0); 1063} 1064 1065/* 1066 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1067 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1068 * to track when the vcpu is done handling the NMI. 
1069 */ 1070static int 1071nmi_blocked(struct svm_softc *sc, int vcpu) 1072{ 1073 int blocked; 1074 1075 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1076 VMCB_INTCPT_IRET); 1077 return (blocked); 1078} 1079 1080static void 1081enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1082{ 1083 1084 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1085 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1086 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1087} 1088 1089static void 1090clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1091{ 1092 int error; 1093 1094 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1095 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1096 /* 1097 * When the IRET intercept is cleared the vcpu will attempt to execute 1098 * the "iret" when it runs next. However, it is possible to inject 1099 * another NMI into the vcpu before the "iret" has actually executed. 1100 * 1101 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1102 * it will trap back into the hypervisor. If an NMI is pending for 1103 * the vcpu it will be injected into the guest. 1104 * 1105 * XXX this needs to be fixed 1106 */ 1107 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1108 1109 /* 1110 * Set 'intr_shadow' to prevent an NMI from being injected on the 1111 * immediate VMRUN. 1112 */ 1113 error = svm_modify_intr_shadow(sc, vcpu, 1); 1114 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1115} 1116 1117static int 1118emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1119 bool *retu) 1120{ 1121 int error; 1122 1123 if (lapic_msr(num)) 1124 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1125 else if (num == MSR_EFER) 1126 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val); 1127 else 1128 error = svm_wrmsr(sc, vcpu, num, val, retu); 1129 1130 return (error); 1131} 1132 1133static int 1134emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1135{ 1136 struct vmcb_state *state; 1137 struct svm_regctx *ctx; 1138 uint64_t result; 1139 int error; 1140 1141 if (lapic_msr(num)) 1142 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1143 else 1144 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1145 1146 if (error == 0) { 1147 state = svm_get_vmcb_state(sc, vcpu); 1148 ctx = svm_get_guest_regctx(sc, vcpu); 1149 state->rax = result & 0xffffffff; 1150 ctx->sctx_rdx = result >> 32; 1151 } 1152 1153 return (error); 1154} 1155 1156#ifdef KTR 1157static const char * 1158exit_reason_to_str(uint64_t reason) 1159{ 1160 static char reasonbuf[32]; 1161 1162 switch (reason) { 1163 case VMCB_EXIT_INVALID: 1164 return ("invalvmcb"); 1165 case VMCB_EXIT_SHUTDOWN: 1166 return ("shutdown"); 1167 case VMCB_EXIT_NPF: 1168 return ("nptfault"); 1169 case VMCB_EXIT_PAUSE: 1170 return ("pause"); 1171 case VMCB_EXIT_HLT: 1172 return ("hlt"); 1173 case VMCB_EXIT_CPUID: 1174 return ("cpuid"); 1175 case VMCB_EXIT_IO: 1176 return ("inout"); 1177 case VMCB_EXIT_MC: 1178 return ("mchk"); 1179 case VMCB_EXIT_INTR: 1180 return ("extintr"); 1181 case VMCB_EXIT_NMI: 1182 return ("nmi"); 1183 case VMCB_EXIT_VINTR: 1184 return ("vintr"); 1185 case VMCB_EXIT_MSR: 1186 return ("msr"); 1187 case VMCB_EXIT_IRET: 1188 return ("iret"); 1189 default: 1190 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1191 return (reasonbuf); 1192 } 1193} 1194#endif /* KTR */ 1195 1196/* 1197 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1198 * that are due to 
instruction intercepts as well as MSR and IOIO intercepts 1199 * and exceptions caused by INT3, INTO and BOUND instructions. 1200 * 1201 * Return 1 if the nRIP is valid and 0 otherwise. 1202 */ 1203static int 1204nrip_valid(uint64_t exitcode) 1205{ 1206 switch (exitcode) { 1207 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1208 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1209 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1210 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1211 case 0x43: /* INT3 */ 1212 case 0x44: /* INTO */ 1213 case 0x45: /* BOUND */ 1214 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1215 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1216 return (1); 1217 default: 1218 return (0); 1219 } 1220} 1221 1222/* 1223 * Collateral for a generic SVM VM-exit. 1224 */ 1225static void 1226vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 1227{ 1228 1229 vme->exitcode = VM_EXITCODE_SVM; 1230 vme->u.svm.exitcode = code; 1231 vme->u.svm.exitinfo1 = info1; 1232 vme->u.svm.exitinfo2 = info2; 1233} 1234 1235static int 1236svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1237{ 1238 struct vmcb *vmcb; 1239 struct vmcb_state *state; 1240 struct vmcb_ctrl *ctrl; 1241 struct svm_regctx *ctx; 1242 uint64_t code, info1, info2, val; 1243 uint32_t eax, ecx, edx; 1244 int handled; 1245 bool retu; 1246 1247 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1248 vmcb = svm_get_vmcb(svm_sc, vcpu); 1249 state = &vmcb->state; 1250 ctrl = &vmcb->ctrl; 1251 1252 handled = 0; 1253 code = ctrl->exitcode; 1254 info1 = ctrl->exitinfo1; 1255 info2 = ctrl->exitinfo2; 1256 1257 vmexit->exitcode = VM_EXITCODE_BOGUS; 1258 vmexit->rip = state->rip; 1259 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1260 1261 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1262 1263 /* 1264 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1265 * in an inconsistent state and can trigger assertions that would 1266 * never happen otherwise. 1267 */ 1268 if (code == VMCB_EXIT_INVALID) { 1269 vm_exit_svm(vmexit, code, info1, info2); 1270 return (0); 1271 } 1272 1273 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1274 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1275 1276 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1277 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1278 vmexit->inst_length, code, info1, info2)); 1279 1280 svm_update_virqinfo(svm_sc, vcpu); 1281 svm_save_intinfo(svm_sc, vcpu); 1282 1283 switch (code) { 1284 case VMCB_EXIT_IRET: 1285 /* 1286 * Restart execution at "iret" but with the intercept cleared. 1287 */ 1288 vmexit->inst_length = 0; 1289 clear_nmi_blocking(svm_sc, vcpu); 1290 handled = 1; 1291 break; 1292 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1293 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1294 handled = 1; 1295 break; 1296 case VMCB_EXIT_INTR: /* external interrupt */ 1297 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1298 handled = 1; 1299 break; 1300 case VMCB_EXIT_NMI: /* external NMI */ 1301 handled = 1; 1302 break; 1303 case VMCB_EXIT_MC: /* machine check */ 1304 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1305 break; 1306 case VMCB_EXIT_MSR: /* MSR access. 
*/ 1307 eax = state->rax; 1308 ecx = ctx->sctx_rcx; 1309 edx = ctx->sctx_rdx; 1310 retu = false; 1311 1312 if (info1) { 1313 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1314 val = (uint64_t)edx << 32 | eax; 1315 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1316 ecx, val); 1317 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1318 vmexit->exitcode = VM_EXITCODE_WRMSR; 1319 vmexit->u.msr.code = ecx; 1320 vmexit->u.msr.wval = val; 1321 } else if (!retu) { 1322 handled = 1; 1323 } else { 1324 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1325 ("emulate_wrmsr retu with bogus exitcode")); 1326 } 1327 } else { 1328 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1329 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1330 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1331 vmexit->exitcode = VM_EXITCODE_RDMSR; 1332 vmexit->u.msr.code = ecx; 1333 } else if (!retu) { 1334 handled = 1; 1335 } else { 1336 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1337 ("emulate_rdmsr retu with bogus exitcode")); 1338 } 1339 } 1340 break; 1341 case VMCB_EXIT_IO: 1342 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1343 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1344 break; 1345 case VMCB_EXIT_CPUID: 1346 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1347 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1348 (uint32_t *)&state->rax, 1349 (uint32_t *)&ctx->sctx_rbx, 1350 (uint32_t *)&ctx->sctx_rcx, 1351 (uint32_t *)&ctx->sctx_rdx); 1352 break; 1353 case VMCB_EXIT_HLT: 1354 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1355 vmexit->exitcode = VM_EXITCODE_HLT; 1356 vmexit->u.hlt.rflags = state->rflags; 1357 break; 1358 case VMCB_EXIT_PAUSE: 1359 vmexit->exitcode = VM_EXITCODE_PAUSE; 1360 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1361 break; 1362 case VMCB_EXIT_NPF: 1363 /* EXITINFO2 contains the faulting guest physical address */ 1364 if (info1 & VMCB_NPF_INFO1_RSV) { 1365 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1366 "reserved bits set: info1(%#lx) info2(%#lx)", 1367 info1, info2); 1368 } else if (vm_mem_allocated(svm_sc->vm, info2)) { 1369 vmexit->exitcode = VM_EXITCODE_PAGING; 1370 vmexit->u.paging.gpa = info2; 1371 vmexit->u.paging.fault_type = svm_npf_paging(info1); 1372 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1373 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1374 "on gpa %#lx/%#lx at rip %#lx", 1375 info2, info1, state->rip); 1376 } else if (svm_npf_emul_fault(info1)) { 1377 svm_handle_inst_emul(vmcb, info2, vmexit); 1378 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1379 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1380 "for gpa %#lx/%#lx at rip %#lx", 1381 info2, info1, state->rip); 1382 } 1383 break; 1384 default: 1385 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1386 break; 1387 } 1388 1389 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1390 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1391 vmexit->rip, vmexit->inst_length); 1392 1393 if (handled) { 1394 vmexit->rip += vmexit->inst_length; 1395 vmexit->inst_length = 0; 1396 state->rip = vmexit->rip; 1397 } else { 1398 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1399 /* 1400 * If this VM exit was not claimed by anybody then 1401 * treat it as a generic SVM exit. 1402 */ 1403 vm_exit_svm(vmexit, code, info1, info2); 1404 } else { 1405 /* 1406 * The exitcode and collateral have been populated. 1407 * The VM exit will be processed further in userland. 
1408 */ 1409 } 1410 } 1411 return (handled); 1412} 1413 1414static void 1415svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1416{ 1417 uint64_t intinfo; 1418 1419 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1420 return; 1421 1422 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1423 "valid: %#lx", __func__, intinfo)); 1424 1425 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1426 VMCB_EXITINTINFO_VECTOR(intinfo), 1427 VMCB_EXITINTINFO_EC(intinfo), 1428 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1429 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1430 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1431} 1432 1433/* 1434 * Inject event to virtual cpu. 1435 */ 1436static void 1437svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1438{ 1439 struct vmcb_ctrl *ctrl; 1440 struct vmcb_state *state; 1441 uint8_t v_tpr; 1442 int vector, need_intr_window, pending_apic_vector; 1443 1444 state = svm_get_vmcb_state(sc, vcpu); 1445 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1446 1447 need_intr_window = 0; 1448 pending_apic_vector = 0; 1449 1450 /* 1451 * Inject pending events or exceptions for this vcpu. 1452 * 1453 * An event might be pending because the previous #VMEXIT happened 1454 * during event delivery (i.e. ctrl->exitintinfo). 1455 * 1456 * An event might also be pending because an exception was injected 1457 * by the hypervisor (e.g. #PF during instruction emulation). 1458 */ 1459 svm_inj_intinfo(sc, vcpu); 1460 1461 /* NMI event has priority over interrupts. */ 1462 if (vm_nmi_pending(sc->vm, vcpu)) { 1463 if (nmi_blocked(sc, vcpu)) { 1464 /* 1465 * Can't inject another NMI if the guest has not 1466 * yet executed an "iret" after the last NMI. 1467 */ 1468 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1469 "to NMI-blocking"); 1470 } else if (ctrl->intr_shadow) { 1471 /* 1472 * Can't inject an NMI if the vcpu is in an intr_shadow. 1473 */ 1474 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1475 "interrupt shadow"); 1476 need_intr_window = 1; 1477 goto done; 1478 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1479 /* 1480 * If there is already an exception/interrupt pending 1481 * then defer the NMI until after that. 1482 */ 1483 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1484 "eventinj %#lx", ctrl->eventinj); 1485 1486 /* 1487 * Use self-IPI to trigger a VM-exit as soon as 1488 * possible after the event injection is completed. 1489 * 1490 * This works only if the external interrupt exiting 1491 * is at a lower priority than the event injection. 1492 * 1493 * Although not explicitly specified in APMv2 the 1494 * relative priorities were verified empirically. 1495 */ 1496 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1497 } else { 1498 vm_nmi_clear(sc->vm, vcpu); 1499 1500 /* Inject NMI, vector number is not used */ 1501 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1502 IDT_NMI, 0, false); 1503 1504 /* virtual NMI blocking is now in effect */ 1505 enable_nmi_blocking(sc, vcpu); 1506 1507 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1508 } 1509 } 1510 1511 if (!vm_extint_pending(sc->vm, vcpu)) { 1512 /* 1513 * APIC interrupts are delivered using the V_IRQ offload. 1514 * 1515 * The primary benefit is that the hypervisor doesn't need to 1516 * deal with the various conditions that inhibit interrupts. 1517 * It also means that TPR changes via CR8 will be handled 1518 * without any hypervisor involvement. 
1519 * 1520 * Note that the APIC vector must remain pending in the vIRR 1521 * until it is confirmed that it was delivered to the guest. 1522 * This can be confirmed based on the value of V_IRQ at the 1523 * next #VMEXIT (1 = pending, 0 = delivered). 1524 * 1525 * Also note that it is possible that another higher priority 1526 * vector can become pending before this vector is delivered 1527 * to the guest. This is alright because vcpu_notify_event() 1528 * will send an IPI and force the vcpu to trap back into the 1529 * hypervisor. The higher priority vector will be injected on 1530 * the next VMRUN. 1531 */ 1532 if (vlapic_pending_intr(vlapic, &vector)) { 1533 KASSERT(vector >= 16 && vector <= 255, 1534 ("invalid vector %d from local APIC", vector)); 1535 pending_apic_vector = vector; 1536 } 1537 goto done; 1538 } 1539 1540 /* Ask the legacy pic for a vector to inject */ 1541 vatpic_pending_intr(sc->vm, &vector); 1542 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", 1543 vector)); 1544 1545 /* 1546 * If the guest has disabled interrupts or is in an interrupt shadow 1547 * then we cannot inject the pending interrupt. 1548 */ 1549 if ((state->rflags & PSL_I) == 0) { 1550 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1551 "rflags %#lx", vector, state->rflags); 1552 need_intr_window = 1; 1553 goto done; 1554 } 1555 1556 if (ctrl->intr_shadow) { 1557 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1558 "interrupt shadow", vector); 1559 need_intr_window = 1; 1560 goto done; 1561 } 1562 1563 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1564 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1565 "eventinj %#lx", vector, ctrl->eventinj); 1566 need_intr_window = 1; 1567 goto done; 1568 } 1569 1570 /* 1571 * Legacy PIC interrupts are delivered via the event injection 1572 * mechanism. 1573 */ 1574 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1575 1576 vm_extint_clear(sc->vm, vcpu); 1577 vatpic_intr_accepted(sc->vm, vector); 1578 1579 /* 1580 * Force a VM-exit as soon as the vcpu is ready to accept another 1581 * interrupt. This is done because the PIC might have another vector 1582 * that it wants to inject. Also, if the APIC has a pending interrupt 1583 * that was preempted by the ExtInt then it allows us to inject the 1584 * APIC vector as soon as possible. 1585 */ 1586 need_intr_window = 1; 1587done: 1588 /* 1589 * The guest can modify the TPR by writing to %CR8. In guest mode 1590 * the processor reflects this write to V_TPR without hypervisor 1591 * intervention. 1592 * 1593 * The guest can also modify the TPR by writing to it via the memory 1594 * mapped APIC page. In this case, the write will be emulated by the 1595 * hypervisor. For this reason V_TPR must be updated before every 1596 * VMRUN. 1597 */ 1598 v_tpr = vlapic_get_cr8(vlapic); 1599 KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1600 if (ctrl->v_tpr != v_tpr) { 1601 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1602 ctrl->v_tpr, v_tpr); 1603 ctrl->v_tpr = v_tpr; 1604 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1605 } 1606 1607 if (pending_apic_vector) { 1608 /* 1609 * If an APIC vector is being injected then interrupt window 1610 * exiting is not possible on this VMRUN. 
1611 */ 1612 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1613 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1614 pending_apic_vector); 1615 1616 ctrl->v_irq = 1; 1617 ctrl->v_ign_tpr = 0; 1618 ctrl->v_intr_vector = pending_apic_vector; 1619 ctrl->v_intr_prio = pending_apic_vector >> 4; 1620 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1621 } else if (need_intr_window) { 1622 /* 1623 * We use V_IRQ in conjunction with the VINTR intercept to 1624 * trap into the hypervisor as soon as a virtual interrupt 1625 * can be delivered. 1626 * 1627 * Since injected events are not subject to intercept checks 1628 * we need to ensure that the V_IRQ is not actually going to 1629 * be delivered on VM entry. The KASSERT below enforces this. 1630 */ 1631 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1632 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1633 ("Bogus intr_window_exiting: eventinj (%#lx), " 1634 "intr_shadow (%u), rflags (%#lx)", 1635 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1636 enable_intr_window_exiting(sc, vcpu); 1637 } else { 1638 disable_intr_window_exiting(sc, vcpu); 1639 } 1640} 1641 1642static __inline void 1643restore_host_tss(void) 1644{ 1645 struct system_segment_descriptor *tss_sd; 1646 1647 /* 1648 * The TSS descriptor was in use prior to launching the guest so it 1649 * has been marked busy. 1650 * 1651 * 'ltr' requires the descriptor to be marked available so change the 1652 * type to "64-bit available TSS". 1653 */ 1654 tss_sd = PCPU_GET(tss); 1655 tss_sd->sd_type = SDT_SYSTSS; 1656 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1657} 1658 1659static void 1660check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1661{ 1662 struct svm_vcpu *vcpustate; 1663 struct vmcb_ctrl *ctrl; 1664 long eptgen; 1665 bool alloc_asid; 1666 1667 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1668 "active on cpu %u", __func__, thiscpu)); 1669 1670 vcpustate = svm_get_vcpu(sc, vcpuid); 1671 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1672 1673 /* 1674 * The TLB entries associated with the vcpu's ASID are not valid 1675 * if either of the following conditions is true: 1676 * 1677 * 1. The vcpu's ASID generation is different than the host cpu's 1678 * ASID generation. This happens when the vcpu migrates to a new 1679 * host cpu. It can also happen when the number of vcpus executing 1680 * on a host cpu is greater than the number of ASIDs available. 1681 * 1682 * 2. The pmap generation number is different than the value cached in 1683 * the 'vcpustate'. This happens when the host invalidates pages 1684 * belonging to the guest. 1685 * 1686 * asidgen eptgen Action 1687 * mismatch mismatch 1688 * 0 0 (a) 1689 * 0 1 (b1) or (b2) 1690 * 1 0 (c) 1691 * 1 1 (d) 1692 * 1693 * (a) There is no mismatch in eptgen or ASID generation and therefore 1694 * no further action is needed. 1695 * 1696 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1697 * retained and the TLB entries associated with this ASID 1698 * are flushed by VMRUN. 1699 * 1700 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1701 * allocated. 1702 * 1703 * (c) A new ASID is allocated. 1704 * 1705 * (d) A new ASID is allocated. 
1706 */ 1707 1708 alloc_asid = false; 1709 eptgen = pmap->pm_eptgen; 1710 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1711 1712 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1713 alloc_asid = true; /* (c) and (d) */ 1714 } else if (vcpustate->eptgen != eptgen) { 1715 if (flush_by_asid()) 1716 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1717 else 1718 alloc_asid = true; /* (b2) */ 1719 } else { 1720 /* 1721 * This is the common case (a). 1722 */ 1723 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1724 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1725 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1726 } 1727 1728 if (alloc_asid) { 1729 if (++asid[thiscpu].num >= nasid) { 1730 asid[thiscpu].num = 1; 1731 if (++asid[thiscpu].gen == 0) 1732 asid[thiscpu].gen = 1; 1733 /* 1734 * If this cpu does not support "flush-by-asid" 1735 * then flush the entire TLB on a generation 1736 * bump. Subsequent ASID allocation in this 1737 * generation can be done without a TLB flush. 1738 */ 1739 if (!flush_by_asid()) 1740 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1741 } 1742 vcpustate->asid.gen = asid[thiscpu].gen; 1743 vcpustate->asid.num = asid[thiscpu].num; 1744 1745 ctrl->asid = vcpustate->asid.num; 1746 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1747 /* 1748 * If this cpu supports "flush-by-asid" then the TLB 1749 * was not flushed after the generation bump. The TLB 1750 * is flushed selectively after every new ASID allocation. 1751 */ 1752 if (flush_by_asid()) 1753 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1754 } 1755 vcpustate->eptgen = eptgen; 1756 1757 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1758 KASSERT(ctrl->asid == vcpustate->asid.num, 1759 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1760} 1761 1762/* 1763 * Start vcpu with specified RIP. 1764 */ 1765static int 1766svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1767 void *rend_cookie, void *suspended_cookie) 1768{ 1769 struct svm_regctx *gctx; 1770 struct svm_softc *svm_sc; 1771 struct svm_vcpu *vcpustate; 1772 struct vmcb_state *state; 1773 struct vmcb_ctrl *ctrl; 1774 struct vm_exit *vmexit; 1775 struct vlapic *vlapic; 1776 struct vm *vm; 1777 uint64_t vmcb_pa; 1778 u_int thiscpu; 1779 int handled; 1780 1781 svm_sc = arg; 1782 vm = svm_sc->vm; 1783 1784 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1785 state = svm_get_vmcb_state(svm_sc, vcpu); 1786 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1787 vmexit = vm_exitinfo(vm, vcpu); 1788 vlapic = vm_lapic(vm, vcpu); 1789 1790 /* 1791 * Stash 'curcpu' on the stack as 'thiscpu'. 1792 * 1793 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1794 * after the #VMEXIT. Since VMRUN is executed inside a critical section 1795 * 'curcpu' and 'thiscpu' are guaranteed to identical. 1796 */ 1797 thiscpu = curcpu; 1798 1799 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1800 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1801 1802 if (vcpustate->lastcpu != thiscpu) { 1803 /* 1804 * Force new ASID allocation by invalidating the generation. 1805 */ 1806 vcpustate->asid.gen = 0; 1807 1808 /* 1809 * Invalidate the VMCB state cache by marking all fields dirty. 1810 */ 1811 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 1812 1813 /* 1814 * XXX 1815 * Setting 'vcpustate->lastcpu' here is bit premature because 1816 * we may return from this function without actually executing 1817 * the VMRUN instruction. This could happen if a rendezvous 1818 * or an AST is pending on the first time through the loop. 
1819 * 1820 * This works for now but any new side-effects of vcpu 1821 * migration should take this case into account. 1822 */ 1823 vcpustate->lastcpu = thiscpu; 1824 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1825 } 1826 1827 svm_msr_guest_enter(svm_sc, vcpu); 1828 1829 /* Update Guest RIP */ 1830 state->rip = rip; 1831 1832 do { 1833 /* 1834 * Disable global interrupts to guarantee atomicity during 1835 * loading of guest state. This includes not only the state 1836 * loaded by the "vmrun" instruction but also software state 1837 * maintained by the hypervisor: suspended and rendezvous 1838 * state, NPT generation number, vlapic interrupts etc. 1839 */ 1840 disable_gintr(); 1841 1842 if (vcpu_suspended(suspended_cookie)) { 1843 enable_gintr(); 1844 vm_exit_suspended(vm, vcpu, state->rip); 1845 break; 1846 } 1847 1848 if (vcpu_rendezvous_pending(rend_cookie)) { 1849 enable_gintr(); 1850 vm_exit_rendezvous(vm, vcpu, state->rip); 1851 break; 1852 } 1853 1854 /* We are asked to give the cpu by scheduler. */ 1855 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1856 enable_gintr(); 1857 vm_exit_astpending(vm, vcpu, state->rip); 1858 break; 1859 } 1860 1861 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1862 1863 /* Activate the nested pmap on 'thiscpu' */ 1864 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1865 1866 /* 1867 * Check the pmap generation and the ASID generation to 1868 * ensure that the vcpu does not use stale TLB mappings. 1869 */ 1870 check_asid(svm_sc, vcpu, pmap, thiscpu); 1871 1872 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; 1873 vcpustate->dirty = 0; 1874 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 1875 1876 /* Launch Virtual Machine. */ 1877 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 1878 svm_launch(vmcb_pa, gctx); 1879 1880 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1881 1882 /* 1883 * Restore MSR_GSBASE to point to the pcpu data area. 1884 * 1885 * Note that accesses done via PCPU_GET/PCPU_SET will work 1886 * only after MSR_GSBASE is restored. 1887 * 1888 * Also note that we don't bother restoring MSR_KGSBASE 1889 * since it is not used in the kernel and will be restored 1890 * when the VMRUN ioctl returns to userspace. 1891 */ 1892 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1893 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1894 thiscpu, curcpu)); 1895 1896 /* 1897 * The host GDTR and IDTR is saved by VMRUN and restored 1898 * automatically on #VMEXIT. However, the host TSS needs 1899 * to be restored explicitly. 1900 */ 1901 restore_host_tss(); 1902 1903 /* #VMEXIT disables interrupts so re-enable them here. */ 1904 enable_gintr(); 1905 1906 /* Handle #VMEXIT and if required return to user space. */ 1907 handled = svm_vmexit(svm_sc, vcpu, vmexit); 1908 } while (handled); 1909 1910 svm_msr_guest_exit(svm_sc, vcpu); 1911 1912 return (0); 1913} 1914 1915/* 1916 * Cleanup for virtual machine. 1917 */ 1918static void 1919svm_vmcleanup(void *arg) 1920{ 1921 struct svm_softc *svm_sc; 1922 1923 svm_sc = arg; 1924 1925 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); 1926 1927 free(svm_sc, M_SVM); 1928} 1929 1930/* 1931 * Return pointer to hypervisor saved register state. 
1932 */ 1933static register_t * 1934swctx_regptr(struct svm_regctx *regctx, int reg) 1935{ 1936 1937 switch (reg) { 1938 case VM_REG_GUEST_RBX: 1939 return (®ctx->sctx_rbx); 1940 case VM_REG_GUEST_RCX: 1941 return (®ctx->sctx_rcx); 1942 case VM_REG_GUEST_RDX: 1943 return (®ctx->sctx_rdx); 1944 case VM_REG_GUEST_RDI: 1945 return (®ctx->sctx_rdi); 1946 case VM_REG_GUEST_RSI: 1947 return (®ctx->sctx_rsi); 1948 case VM_REG_GUEST_RBP: 1949 return (®ctx->sctx_rbp); 1950 case VM_REG_GUEST_R8: 1951 return (®ctx->sctx_r8); 1952 case VM_REG_GUEST_R9: 1953 return (®ctx->sctx_r9); 1954 case VM_REG_GUEST_R10: 1955 return (®ctx->sctx_r10); 1956 case VM_REG_GUEST_R11: 1957 return (®ctx->sctx_r11); 1958 case VM_REG_GUEST_R12: 1959 return (®ctx->sctx_r12); 1960 case VM_REG_GUEST_R13: 1961 return (®ctx->sctx_r13); 1962 case VM_REG_GUEST_R14: 1963 return (®ctx->sctx_r14); 1964 case VM_REG_GUEST_R15: 1965 return (®ctx->sctx_r15); 1966 default: 1967 ERR("Unknown register requested, reg=%d.\n", reg); 1968 break; 1969 } 1970 1971 return (NULL); 1972} 1973 1974/* 1975 * Interface to read guest registers. 1976 * This can be SVM h/w saved or hypervisor saved register. 1977 */ 1978static int 1979svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 1980{ 1981 struct svm_softc *svm_sc; 1982 register_t *reg; 1983 1984 svm_sc = arg; 1985 1986 if (ident == VM_REG_GUEST_INTR_SHADOW) { 1987 return (svm_get_intr_shadow(svm_sc, vcpu, val)); 1988 } 1989 1990 if (vmcb_read(svm_sc, vcpu, ident, val) == 0) { 1991 return (0); 1992 } 1993 1994 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1995 1996 if (reg != NULL) { 1997 *val = *reg; 1998 return (0); 1999 } 2000 2001 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 2002 return (EINVAL); 2003} 2004 2005/* 2006 * Interface to write to guest registers. 2007 * This can be SVM h/w saved or hypervisor saved register. 2008 */ 2009static int 2010svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 2011{ 2012 struct svm_softc *svm_sc; 2013 register_t *reg; 2014 2015 svm_sc = arg; 2016 2017 if (ident == VM_REG_GUEST_INTR_SHADOW) { 2018 return (svm_modify_intr_shadow(svm_sc, vcpu, val)); 2019 } 2020 2021 if (vmcb_write(svm_sc, vcpu, ident, val) == 0) { 2022 return (0); 2023 } 2024 2025 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 2026 2027 if (reg != NULL) { 2028 *reg = val; 2029 return (0); 2030 } 2031 2032 /* 2033 * XXX deal with CR3 and invalidate TLB entries tagged with the 2034 * vcpu's ASID. This needs to be treated differently depending on 2035 * whether 'running' is true/false. 
2036 */ 2037 2038 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 2039 return (EINVAL); 2040} 2041 2042static int 2043svm_setcap(void *arg, int vcpu, int type, int val) 2044{ 2045 struct svm_softc *sc; 2046 int error; 2047 2048 sc = arg; 2049 error = 0; 2050 switch (type) { 2051 case VM_CAP_HALT_EXIT: 2052 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2053 VMCB_INTCPT_HLT, val); 2054 break; 2055 case VM_CAP_PAUSE_EXIT: 2056 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2057 VMCB_INTCPT_PAUSE, val); 2058 break; 2059 case VM_CAP_UNRESTRICTED_GUEST: 2060 /* Unrestricted guest execution cannot be disabled in SVM */ 2061 if (val == 0) 2062 error = EINVAL; 2063 break; 2064 default: 2065 error = ENOENT; 2066 break; 2067 } 2068 return (error); 2069} 2070 2071static int 2072svm_getcap(void *arg, int vcpu, int type, int *retval) 2073{ 2074 struct svm_softc *sc; 2075 int error; 2076 2077 sc = arg; 2078 error = 0; 2079 2080 switch (type) { 2081 case VM_CAP_HALT_EXIT: 2082 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2083 VMCB_INTCPT_HLT); 2084 break; 2085 case VM_CAP_PAUSE_EXIT: 2086 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2087 VMCB_INTCPT_PAUSE); 2088 break; 2089 case VM_CAP_UNRESTRICTED_GUEST: 2090 *retval = 1; /* unrestricted guest is always enabled */ 2091 break; 2092 default: 2093 error = ENOENT; 2094 break; 2095 } 2096 return (error); 2097} 2098 2099static struct vlapic * 2100svm_vlapic_init(void *arg, int vcpuid) 2101{ 2102 struct svm_softc *svm_sc; 2103 struct vlapic *vlapic; 2104 2105 svm_sc = arg; 2106 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2107 vlapic->vm = svm_sc->vm; 2108 vlapic->vcpuid = vcpuid; 2109 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 2110 2111 vlapic_init(vlapic); 2112 2113 return (vlapic); 2114} 2115 2116static void 2117svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2118{ 2119 2120 vlapic_cleanup(vlapic); 2121 free(vlapic, M_SVM_VLAPIC); 2122} 2123 2124struct vmm_ops vmm_ops_amd = { 2125 svm_init, 2126 svm_cleanup, 2127 svm_restore, 2128 svm_vminit, 2129 svm_vmrun, 2130 svm_vmcleanup, 2131 svm_getreg, 2132 svm_setreg, 2133 vmcb_getdesc, 2134 vmcb_setdesc, 2135 svm_getcap, 2136 svm_setcap, 2137 svm_npt_alloc, 2138 svm_npt_free, 2139 svm_vlapic_init, 2140 svm_vlapic_cleanup 2141}; 2142
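To make the permission-bitmap layout used by svm_msr_index() above concrete, here is a minimal standalone sketch of the same byte/bit arithmetic: two bits per MSR (read intercept in the low bit, write intercept in the high bit), so four MSRs share one byte, with the three architectural MSR ranges packed back to back. The msr_index() helper and the main() driver are illustrative only; the MSR_* range constants are copied from the function above.

#include <stdint.h>
#include <stdio.h>

/* MSR ranges covered by the permission bitmap (same as svm_msr_index()). */
#define MSR_PENTIUM_START	0x0UL
#define MSR_PENTIUM_END		0x1FFFUL
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

/* Map an MSR number to a byte index and bit position in the bitmap. */
static int
msr_index(uint64_t msr, int *index, int *bit)
{
	uint64_t base = 0;

	*bit = (msr % 4) * 2;	/* 2 bits per MSR => 4 MSRs per byte */

	if (msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}
	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		*index = (msr - MSR_AMD6TH_START + base) / 4;
		return (0);
	}
	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		*index = (msr - MSR_AMD7TH_START + base) / 4;
		return (0);
	}
	return (-1);		/* MSR is not covered by the bitmap */
}

int
main(void)
{
	uint64_t msrs[] = { 0x10, 0xC0000080, 0xC0010000 };
	int i, index, bit;

	for (i = 0; i < 3; i++) {
		if (msr_index(msrs[i], &index, &bit) == 0)
			printf("MSR %#lx -> byte %d, read bit %d, write bit %d\n",
			    (unsigned long)msrs[i], index, bit, bit + 1);
	}
	return (0);
}

MSR_EFER (0xC0000080), for example, lands in byte 2080 of the bitmap; svm_vminit() only clears the read bit there (via svm_msr_rd_ok) so that guest writes to EFER are still intercepted and EFER_SVM cannot be cleared.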
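The IOIO exit handling in svm_handle_io() above pulls several fields out of EXITINFO1. The short sketch below shows that decoding on a hypothetical exit, using the field positions as they appear in the code (direction in bit 0, string in bit 2, REP in bit 3, operand size in bits 6:4, port in bits 31:16).

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical EXITINFO1 for a "rep outsw" to port 0x3f8. */
	uint64_t info1 = ((uint64_t)0x3F8 << 16) |	/* port */
	    (1 << 7) |		/* 16-bit address size (one-hot field) */
	    (2 << 4) |		/* 2-byte operands (one-hot field) */
	    (1 << 3) |		/* REP prefix present */
	    (1 << 2);		/* string instruction; bit 0 clear => OUT */

	printf("in=%d string=%d rep=%d bytes=%d port=%#x\n",
	    (int)(info1 & 0x1),
	    (int)((info1 >> 2) & 0x1),
	    (int)((info1 >> 3) & 0x1),
	    (int)((info1 >> 4) & 0x7),
	    (unsigned int)(uint16_t)(info1 >> 16));
	return (0);
}

For a string instruction like this one, svm_handle_io() additionally needs the segment field in bits 12:10, which the processor only populates when the DecodeAssist feature is present; hence the early UNHANDLED return when it is not.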
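svm_eventinject() above builds the VMCB EVENTINJ field by hand. The following sketch shows the packing for an exception with an error code; the EVENTINJ_* constants are local stand-ins assumed to match the VMCB_EVENTINJ_* definitions in vmcb.h (valid bit 31, error-code-valid bit 11, type in bits 10:8, error code in bits 63:32).

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins assumed to match VMCB_EVENTINJ_* in vmcb.h. */
#define EVENTINJ_VALID		(1UL << 31)	/* bit 31: injection valid */
#define EVENTINJ_EC_VALID	(1UL << 11)	/* bit 11: error code valid */
#define EVENTINJ_TYPE_EXCEPTION	3		/* bits 10:8: event type */

static uint64_t
eventinj_pack(int type, int vector, uint32_t errcode, int ec_valid)
{
	uint64_t v;

	v = (uint8_t)vector | ((uint64_t)type << 8) | EVENTINJ_VALID;
	if (ec_valid)
		v |= EVENTINJ_EC_VALID | ((uint64_t)errcode << 32);
	return (v);
}

int
main(void)
{
	/* Inject #GP (vector 13) as a hardware exception with error code 0. */
	printf("EVENTINJ = %#llx\n", (unsigned long long)
	    eventinj_pack(EVENTINJ_TYPE_EXCEPTION, 13, 0, 1));
	return (0);
}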
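The ASID handling in check_asid() above depends on the per-host-cpu {gen, num} counters initialized in svm_init(). This standalone sketch models just that allocator: 'num' cycles through 1..nasid-1 and a wrap bumps 'gen', which is what forces vcpus with a stale generation to take a new ASID (and, without flush-by-ASID support, a full TLB flush at the wrap). The asid_alloc() helper is illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct asid {
	uint64_t gen;	/* valid range is [1, ~0UL] */
	uint32_t num;	/* valid range is [1, nasid - 1] */
};

static uint32_t nasid = 8;	/* artificially small to force a wrap */
static struct asid host_asid = {
	.gen = ~0UL,		/* "highest" values, as set up in svm_init() */
	.num = 8 - 1,
};

/* Hand out the next ASID; returns true when the generation was bumped. */
static bool
asid_alloc(struct asid *vcpu_asid)
{
	bool gen_bumped = false;

	if (++host_asid.num >= nasid) {
		host_asid.num = 1;
		if (++host_asid.gen == 0)	/* generation 0 is reserved */
			host_asid.gen = 1;
		gen_bumped = true;
	}
	vcpu_asid->gen = host_asid.gen;
	vcpu_asid->num = host_asid.num;
	return (gen_bumped);
}

int
main(void)
{
	struct asid a;
	int i;

	for (i = 0; i < 10; i++) {
		bool bumped = asid_alloc(&a);
		printf("alloc %d: gen %lu num %u%s\n", i,
		    (unsigned long)a.gen, a.num,
		    bumped ? "  (generation bump => TLB flush needed)" : "");
	}
	return (0);
}

The first allocation rolls both counters over and starts the sequence at {1,1}, matching the comment in svm_init() above.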