/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/amd64/vmm/amd/svm.c 365777 2020-09-15 21:28:47Z emaste $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13) /* AVIC present */
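
/*
 * VMCB state caching: each bit set in 'vmcb_clean' tells the processor
 * that the corresponding guest state in the VMCB is unchanged since the
 * last VMRUN and may be served from its internal cache (see "VMCB State
 * Caching" in APMv2).  svm_set_dirty() records hypervisor modifications
 * in a per-vcpu dirty mask that is cleared out of this value before
 * every VMRUN.
 */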
#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}
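
/*
 * CPUID Fn8000_000A reports the SVM revision in %eax, the number of
 * ASIDs supported by the hardware in %ebx and the feature bits decoded
 * above in %edx (APMv2); the code below relies only on %ebx and %edx.
 */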
/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
	 */
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{

	svm_enable(NULL);
}

/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for a MSR in permission bitmap.
 * Two bits are used for each MSR: lower bit for read and higher bit for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}
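
/*
 * Worked example (illustrative): for MSR_EFER (0xC0000080) the second
 * range above matches, so off = 0x80 and base = 0x2000, giving a byte
 * index of (0x80 + 0x2000) / 4 = 0x820; the read bit within that byte
 * is (0xC0000080 % 4) * 2 = 0 and the write bit is 1.
 */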
/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}
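
/*
 * In the first VMCB intercept word the low 16 bits are the CR0-CR15
 * read intercepts and the high 16 bits are the corresponding write
 * intercepts, which is why vmcb_init() below builds its per-register
 * mask as (BIT(n) << 16) | BIT(n).
 */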
static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions, otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * Intercept the SVM instructions since they are enabled for guests
	 * by default.  VMMCALL is skipped because a non-intercepted VMMCALL
	 * already raises #UD in the guest.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);

	/*
	 * From the section "Canonicalization and Consistency Checks" in
	 * APMv2: the VMRUN intercept bit must be set to pass the consistency
	 * check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}
/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;
	uint16_t maxcpus;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
	if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
		panic("malloc of svm_softc not aligned on page boundary");

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	maxcpus = vm_get_maxcpus(svm_sc->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->nextrip = ~0;
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}
static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2, check if Code Segment has
		 * Long attribute set in descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: vmcb_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0
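
/*
 * Layout of EXITINFO1 for an I/O intercept, as decoded below: bit 0 is
 * the direction (1 = IN), bit 2 the string flag, bit 3 the REP prefix,
 * bits 6:4 the operand size, bits 9:7 the address size, bits 12:10 the
 * effective segment and bits 31:16 the port number.
 */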
/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch (paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif
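
/*
 * The EVENTINJ field constructed by svm_eventinject() below packs the
 * vector in bits 7:0, the event type in bits 10:8, the error-code-valid
 * bit at bit 11, the valid bit at bit 31 and the error code itself in
 * bits 63:32 (APMv2, "Event Injection").
 */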
/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
 */
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/* Virtual interrupt injection is not used. */
	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
}
static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}
static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL
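
/*
 * EFER bits 63:16 and bit 9 are must-be-zero; a write that sets any of
 * them is rejected with #GP by svm_write_efer() below.  Bits 7:1 are
 * read-as-zero and are simply masked off before the checks.
 */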
static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;		/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
	 */
	if (newval & EFER_LMSLE) {
		vme = vm_exitinfo(sc->vm, vcpu);
		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
		*retu = true;
		return (0);
	}

	if (newval & EFER_FFXSR) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
			goto gpf;
	}

	if (newval & EFER_TCE) {
		if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
			goto gpf;
	}

	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
	return (0);
gpf:
	vm_inject_gp(sc->vm, vcpu);
	return (0);
}

static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_write_efer(sc, vcpu, val, retu);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
	else
		error = svm_rdmsr(sc, vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(sc, vcpu);
		ctx = svm_get_guest_regctx(sc, vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	int i;
	static char reasonbuf[32];
	static const struct {
		int reason;
		const char *str;
	} reasons[] = {
		{ .reason = VMCB_EXIT_INVALID,	.str = "invalvmcb" },
		{ .reason = VMCB_EXIT_SHUTDOWN,	.str = "shutdown" },
		{ .reason = VMCB_EXIT_NPF,	.str = "nptfault" },
		{ .reason = VMCB_EXIT_PAUSE,	.str = "pause" },
		{ .reason = VMCB_EXIT_HLT,	.str = "hlt" },
		{ .reason = VMCB_EXIT_CPUID,	.str = "cpuid" },
		{ .reason = VMCB_EXIT_IO,	.str = "inout" },
		{ .reason = VMCB_EXIT_MC,	.str = "mchk" },
		{ .reason = VMCB_EXIT_INTR,	.str = "extintr" },
		{ .reason = VMCB_EXIT_NMI,	.str = "nmi" },
		{ .reason = VMCB_EXIT_VINTR,	.str = "vintr" },
		{ .reason = VMCB_EXIT_MSR,	.str = "msr" },
		{ .reason = VMCB_EXIT_IRET,	.str = "iret" },
		{ .reason = VMCB_EXIT_MONITOR,	.str = "monitor" },
		{ .reason = VMCB_EXIT_MWAIT,	.str = "mwait" },
		{ .reason = VMCB_EXIT_VMRUN,	.str = "vmrun" },
		{ .reason = VMCB_EXIT_VMMCALL,	.str = "vmmcall" },
		{ .reason = VMCB_EXIT_VMLOAD,	.str = "vmload" },
		{ .reason = VMCB_EXIT_VMSAVE,	.str = "vmsave" },
		{ .reason = VMCB_EXIT_STGI,	.str = "stgi" },
		{ .reason = VMCB_EXIT_CLGI,	.str = "clgi" },
		{ .reason = VMCB_EXIT_SKINIT,	.str = "skinit" },
		{ .reason = VMCB_EXIT_ICEBP,	.str = "icebp" },
		{ .reason = VMCB_EXIT_INVD,	.str = "invd" },
		{ .reason = VMCB_EXIT_INVLPGA,	.str = "invlpga" },
	};

	for (i = 0; i < nitems(reasons); i++) {
		if (reasons[i].reason == reason)
			return (reasons[i].str);
	}
	snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
	return (reasonbuf);
}
#endif	/* KTR */
/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}

static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}
		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
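	/*
	 * For a nested page fault EXITINFO1 carries the fault code: the W
	 * bit flags a write access, the ID bit an instruction fetch, RSV a
	 * fault caused by reserved bits in the NPT entry; GPT marks a fault
	 * taken during the guest page-table walk and GPA indicates that the
	 * faulting guest-physical address is valid (see npf_fault_type()
	 * and svm_npf_emul_fault() above).
	 */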
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	case VMCB_EXIT_SHUTDOWN:
	case VMCB_EXIT_VMRUN:
	case VMCB_EXIT_VMMCALL:
	case VMCB_EXIT_VMLOAD:
	case VMCB_EXIT_VMSAVE:
	case VMCB_EXIT_STGI:
	case VMCB_EXIT_CLGI:
	case VMCB_EXIT_SKINIT:
	case VMCB_EXIT_ICEBP:
	case VMCB_EXIT_INVD:
	case VMCB_EXIT_INVLPGA:
		vm_inject_ud(svm_sc->vm, vcpu);
		handled = 1;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}
/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint8_t v_tpr;
	int vector, need_intr_window;
	int extint_pending;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);

	need_intr_window = 0;

	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
		    "cleared due to rip change: %#lx/%#lx",
		    vcpustate->nextrip, state->rip);
	}

	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	svm_inj_intinfo(sc, vcpu);

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu)) {
		if (nmi_blocked(sc, vcpu)) {
			/*
			 * Can't inject another NMI if the guest has not
			 * yet executed an "iret" after the last NMI.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
			    "to NMI-blocking");
		} else if (ctrl->intr_shadow) {
			/*
			 * Can't inject an NMI if the vcpu is in an intr_shadow.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
			    "interrupt shadow");
			need_intr_window = 1;
			goto done;
		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
			/*
			 * If there is already an exception/interrupt pending
			 * then defer the NMI until after that.
			 */
			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
			    "eventinj %#lx", ctrl->eventinj);

			/*
			 * Use self-IPI to trigger a VM-exit as soon as
			 * possible after the event injection is completed.
			 *
			 * This works only if the external interrupt exiting
			 * is at a lower priority than the event injection.
			 *
			 * Although not explicitly specified in APMv2 the
			 * relative priorities were verified empirically.
			 */
			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}
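
	/*
	 * Pick the highest-priority pending vector: from the local APIC in
	 * the common case, or from the legacy PIC when an ExtInt is pending.
	 */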
	extint_pending = vm_extint_pending(sc->vm, vcpu);
	if (!extint_pending) {
		if (!vlapic_pending_intr(vlapic, &vector))
			goto done;
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(sc->vm, &vector);
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(sc->vm, vcpu);
		vatpic_intr_accepted(sc->vm, vector);
	}

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
		    ("Bogus intr_window_exiting: eventinj (%#lx), "
		    "intr_shadow (%u), rflags (%#lx)",
		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
		enable_intr_window_exiting(sc, vcpu);
	} else {
		disable_intr_window_exiting(sc, vcpu);
	}
}
static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}

static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen	      Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */

	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}

static __inline void
disable_gintr(void)
{

	__asm __volatile("clgi");
}

static __inline void
enable_gintr(void)
{

	__asm __volatile("stgi");
}

static __inline void
svm_dr_enter_guest(struct svm_regctx *gctx)
{

	/* Save host control debug registers. */
	gctx->host_dr7 = rdr7();
	gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

	/*
	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
	 * exceptions in the host based on the guest DRx values. The
	 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
	 * VMCB.
	 */
	load_dr7(0);
	wrmsr(MSR_DEBUGCTLMSR, 0);

	/* Save host debug registers. */
	gctx->host_dr0 = rdr0();
	gctx->host_dr1 = rdr1();
	gctx->host_dr2 = rdr2();
	gctx->host_dr3 = rdr3();
	gctx->host_dr6 = rdr6();

	/* Restore guest debug registers. */
	load_dr0(gctx->sctx_dr0);
	load_dr1(gctx->sctx_dr1);
	load_dr2(gctx->sctx_dr2);
	load_dr3(gctx->sctx_dr3);
}

static __inline void
svm_dr_leave_guest(struct svm_regctx *gctx)
{

	/* Save guest debug registers. */
	gctx->sctx_dr0 = rdr0();
	gctx->sctx_dr1 = rdr1();
	gctx->sctx_dr2 = rdr2();
	gctx->sctx_dr3 = rdr3();

	/*
	 * Restore host debug registers. Restore DR7 and DEBUGCTL
	 * last.
	 */
	load_dr0(gctx->host_dr0);
	load_dr1(gctx->host_dr1);
	load_dr2(gctx->host_dr2);
	load_dr3(gctx->host_dr3);
	load_dr6(gctx->host_dr6);
	wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
	load_dr7(gctx->host_dr7);
}

/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    struct vm_eventinfo *evinfo)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	int handled;
	uint16_t ldt_sel;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to give up the cpu. */
		if (vcpu_should_yield(vm, vcpu)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so it can be restored
		 * after an exit. The userspace hypervisor probably
		 * doesn't use a LDT, but save and restore it to be
		 * safe.
		 */
		ldt_sel = sldt();

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'curcpu' */
		CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, curcpu);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
		svm_dr_leave_guest(gctx);

		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/*
		 * Handle #VMEXIT and, if required, return to user space.
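		 *
		 * A non-zero return from svm_vmexit() means the exit was
		 * handled in the kernel and the loop re-enters the guest;
		 * a zero return falls out of the loop so that 'vmexit'
		 * can be delivered to the userspace hypervisor.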
		 */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	/* Do not permit user write access to VMCB fields by offset. */
	if (!VMCB_ACCESS_OK(ident)) {
		if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
			return (0);
		}
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
2223 */ 2224 2225 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident); 2226 return (EINVAL); 2227} 2228 2229static int 2230svm_setcap(void *arg, int vcpu, int type, int val) 2231{ 2232 struct svm_softc *sc; 2233 int error; 2234 2235 sc = arg; 2236 error = 0; 2237 switch (type) { 2238 case VM_CAP_HALT_EXIT: 2239 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2240 VMCB_INTCPT_HLT, val); 2241 break; 2242 case VM_CAP_PAUSE_EXIT: 2243 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2244 VMCB_INTCPT_PAUSE, val); 2245 break; 2246 case VM_CAP_UNRESTRICTED_GUEST: 2247 /* Unrestricted guest execution cannot be disabled in SVM */ 2248 if (val == 0) 2249 error = EINVAL; 2250 break; 2251 default: 2252 error = ENOENT; 2253 break; 2254 } 2255 return (error); 2256} 2257 2258static int 2259svm_getcap(void *arg, int vcpu, int type, int *retval) 2260{ 2261 struct svm_softc *sc; 2262 int error; 2263 2264 sc = arg; 2265 error = 0; 2266 2267 switch (type) { 2268 case VM_CAP_HALT_EXIT: 2269 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2270 VMCB_INTCPT_HLT); 2271 break; 2272 case VM_CAP_PAUSE_EXIT: 2273 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 2274 VMCB_INTCPT_PAUSE); 2275 break; 2276 case VM_CAP_UNRESTRICTED_GUEST: 2277 *retval = 1; /* unrestricted guest is always enabled */ 2278 break; 2279 default: 2280 error = ENOENT; 2281 break; 2282 } 2283 return (error); 2284} 2285 2286static struct vlapic * 2287svm_vlapic_init(void *arg, int vcpuid) 2288{ 2289 struct svm_softc *svm_sc; 2290 struct vlapic *vlapic; 2291 2292 svm_sc = arg; 2293 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2294 vlapic->vm = svm_sc->vm; 2295 vlapic->vcpuid = vcpuid; 2296 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 2297 2298 vlapic_init(vlapic); 2299 2300 return (vlapic); 2301} 2302 2303static void 2304svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 2305{ 2306 2307 vlapic_cleanup(vlapic); 2308 free(vlapic, M_SVM_VLAPIC); 2309} 2310 2311struct vmm_ops vmm_ops_amd = { 2312 svm_init, 2313 svm_cleanup, 2314 svm_restore, 2315 svm_vminit, 2316 svm_vmrun, 2317 svm_vmcleanup, 2318 svm_getreg, 2319 svm_setreg, 2320 vmcb_getdesc, 2321 vmcb_setdesc, 2322 svm_getcap, 2323 svm_setcap, 2324 svm_npt_alloc, 2325 svm_npt_free, 2326 svm_vlapic_init, 2327 svm_vlapic_cleanup 2328}; 2329