svm.c revision 271559
1/*- 2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271559 2014-09-13 23:48:43Z neel $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/smp.h> 33#include <sys/kernel.h> 34#include <sys/malloc.h> 35#include <sys/pcpu.h> 36#include <sys/proc.h> 37#include <sys/sysctl.h> 38 39#include <vm/vm.h> 40#include <vm/pmap.h> 41 42#include <machine/cpufunc.h> 43#include <machine/psl.h> 44#include <machine/pmap.h> 45#include <machine/md_var.h> 46#include <machine/vmparam.h> 47#include <machine/specialreg.h> 48#include <machine/segments.h> 49#include <machine/smp.h> 50#include <machine/vmm.h> 51#include <machine/vmm_dev.h> 52#include <machine/vmm_instruction_emul.h> 53 54#include <x86/apicreg.h> 55 56#include "vmm_lapic.h" 57#include "vmm_msr.h" 58#include "vmm_stat.h" 59#include "vmm_ktr.h" 60#include "vmm_ioport.h" 61#include "vatpic.h" 62#include "vlapic.h" 63#include "vlapic_priv.h" 64 65#include "x86.h" 66#include "vmcb.h" 67#include "svm.h" 68#include "svm_softc.h" 69#include "npt.h" 70 71SYSCTL_DECL(_hw_vmm); 72SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); 73 74/* 75 * SVM CPUID function 0x8000_000A, edx bit decoding. 76 */ 77#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 78#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 79#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 80#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 81#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 82#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 83#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 84#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 85#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 86#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 87 88#define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 89 VMCB_CACHE_IOPM | \ 90 VMCB_CACHE_I | \ 91 VMCB_CACHE_TPR | \ 92 VMCB_CACHE_NP) 93 94MALLOC_DEFINE(M_SVM, "svm", "svm"); 95MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 96 97/* Per-CPU context area. */ 98extern struct pcpu __pcpu[]; 99 100static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc); 101 102static uint32_t svm_feature; /* AMD SVM features. */ 103SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0, 104 "SVM features advertised by CPUID.8000000AH:EDX"); 105 106static int disable_npf_assist; 107SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 108 &disable_npf_assist, 0, NULL); 109 110/* Maximum ASIDs supported by the processor */ 111static uint32_t nasid; 112SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0, 113 "Number of ASIDs supported by this processor"); 114 115/* Current ASID generation for each host cpu */ 116static struct asid asid[MAXCPU]; 117 118/* 119 * SVM host state saved area of size 4KB for each core. 120 */ 121static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 122 123/* 124 * S/w saved host context. 125 */ 126static struct svm_regctx host_ctx[MAXCPU]; 127 128static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 129static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 130static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 131 132/* 133 * Common function to enable or disabled SVM for a CPU. 134 */ 135static int 136cpu_svm_enable_disable(boolean_t enable) 137{ 138 uint64_t efer_msr; 139 140 efer_msr = rdmsr(MSR_EFER); 141 142 if (enable) 143 efer_msr |= EFER_SVM; 144 else 145 efer_msr &= ~EFER_SVM; 146 147 wrmsr(MSR_EFER, efer_msr); 148 149 return(0); 150} 151 152/* 153 * Disable SVM on a CPU. 154 */ 155static void 156svm_disable(void *arg __unused) 157{ 158 159 (void)cpu_svm_enable_disable(FALSE); 160} 161 162/* 163 * Disable SVM for all CPUs. 164 */ 165static int 166svm_cleanup(void) 167{ 168 169 smp_rendezvous(NULL, svm_disable, NULL, NULL); 170 return (0); 171} 172 173/* 174 * Check for required BHyVe SVM features in a CPU. 175 */ 176static int 177svm_cpuid_features(void) 178{ 179 u_int regs[4]; 180 181 /* CPUID Fn8000_000A is for SVM */ 182 do_cpuid(0x8000000A, regs); 183 svm_feature = regs[3]; 184 185 printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]); 186 nasid = regs[1]; 187 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 188 189 printf("SVM Features:0x%b\n", svm_feature, 190 "\020" 191 "\001NP" /* Nested paging */ 192 "\002LbrVirt" /* LBR virtualization */ 193 "\003SVML" /* SVM lock */ 194 "\004NRIPS" /* NRIP save */ 195 "\005TscRateMsr" /* MSR based TSC rate control */ 196 "\006VmcbClean" /* VMCB clean bits */ 197 "\007FlushByAsid" /* Flush by ASID */ 198 "\010DecodeAssist" /* Decode assist */ 199 "\011<b20>" 200 "\012<b20>" 201 "\013PauseFilter" 202 "\014<b20>" 203 "\015PauseFilterThreshold" 204 "\016AVIC" 205 ); 206 207 /* SVM Lock */ 208 if (!(svm_feature & AMD_CPUID_SVM_SVML)) { 209 printf("SVM is disabled by BIOS, please enable in BIOS.\n"); 210 return (ENXIO); 211 } 212 213 /* 214 * bhyve need RVI to work. 215 */ 216 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 217 printf("Missing Nested paging or RVI SVM support in processor.\n"); 218 return (EIO); 219 } 220 221 if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE) 222 return (0); 223 224 return (EIO); 225} 226 227static __inline int 228flush_by_asid(void) 229{ 230 231 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 232} 233 234static __inline int 235decode_assist(void) 236{ 237 238 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 239} 240 241/* 242 * Enable SVM for a CPU. 243 */ 244static void 245svm_enable(void *arg __unused) 246{ 247 uint64_t hsave_pa; 248 249 (void)cpu_svm_enable_disable(TRUE); 250 251 hsave_pa = vtophys(hsave[curcpu]); 252 wrmsr(MSR_VM_HSAVE_PA, hsave_pa); 253 254 if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) { 255 panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu); 256 } 257} 258 259/* 260 * Check if a processor support SVM. 261 */ 262static int 263is_svm_enabled(void) 264{ 265 uint64_t msr; 266 267 /* Section 15.4 Enabling SVM from APM2. */ 268 if ((amd_feature2 & AMDID2_SVM) == 0) { 269 printf("SVM is not supported on this processor.\n"); 270 return (ENXIO); 271 } 272 273 msr = rdmsr(MSR_VM_CR); 274 /* Make sure SVM is not disabled by BIOS. */ 275 if ((msr & VM_CR_SVMDIS) == 0) { 276 return svm_cpuid_features(); 277 } 278 279 printf("SVM disabled by Key, consult TPM/BIOS manual.\n"); 280 return (ENXIO); 281} 282 283/* 284 * Enable SVM on CPU and initialize nested page table h/w. 285 */ 286static int 287svm_init(int ipinum) 288{ 289 int err, cpu; 290 291 err = is_svm_enabled(); 292 if (err) 293 return (err); 294 295 for (cpu = 0; cpu < MAXCPU; cpu++) { 296 /* 297 * Initialize the host ASIDs to their "highest" valid values. 298 * 299 * The next ASID allocation will rollover both 'gen' and 'num' 300 * and start off the sequence at {1,1}. 301 */ 302 asid[cpu].gen = ~0UL; 303 asid[cpu].num = nasid - 1; 304 } 305 306 svm_npt_init(ipinum); 307 308 /* Start SVM on all CPUs */ 309 smp_rendezvous(NULL, svm_enable, NULL, NULL); 310 311 return (0); 312} 313 314static void 315svm_restore(void) 316{ 317 svm_enable(NULL); 318} 319 320/* 321 * Get index and bit position for a MSR in MSR permission 322 * bitmap. Two bits are used for each MSR, lower bit is 323 * for read and higher bit is for write. 324 */ 325static int 326svm_msr_index(uint64_t msr, int *index, int *bit) 327{ 328 uint32_t base, off; 329 330/* Pentium compatible MSRs */ 331#define MSR_PENTIUM_START 0 332#define MSR_PENTIUM_END 0x1FFF 333/* AMD 6th generation and Intel compatible MSRs */ 334#define MSR_AMD6TH_START 0xC0000000UL 335#define MSR_AMD6TH_END 0xC0001FFFUL 336/* AMD 7th and 8th generation compatible MSRs */ 337#define MSR_AMD7TH_START 0xC0010000UL 338#define MSR_AMD7TH_END 0xC0011FFFUL 339 340 *index = -1; 341 *bit = (msr % 4) * 2; 342 base = 0; 343 344 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 345 *index = msr / 4; 346 return (0); 347 } 348 349 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 350 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 351 off = (msr - MSR_AMD6TH_START); 352 *index = (off + base) / 4; 353 return (0); 354 } 355 356 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 357 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 358 off = (msr - MSR_AMD7TH_START); 359 *index = (off + base) / 4; 360 return (0); 361 } 362 363 return (EIO); 364} 365 366/* 367 * Give virtual cpu the complete access to MSR(read & write). 368 */ 369static int 370svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 371{ 372 int index, bit, err; 373 374 err = svm_msr_index(msr, &index, &bit); 375 if (err) { 376 ERR("MSR 0x%lx is not writeable by guest.\n", msr); 377 return (err); 378 } 379 380 if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) { 381 ERR("MSR 0x%lx index out of range(%d).\n", msr, index); 382 return (EINVAL); 383 } 384 if (bit < 0 || bit > 8) { 385 ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit); 386 return (EINVAL); 387 } 388 389 /* Disable intercept for read and write. */ 390 if (read) 391 perm_bitmap[index] &= ~(1UL << bit); 392 if (write) 393 perm_bitmap[index] &= ~(2UL << bit); 394 CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n", 395 (perm_bitmap[index] >> bit) & 0x3, msr); 396 397 return (0); 398} 399 400static int 401svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 402{ 403 return svm_msr_perm(perm_bitmap, msr, true, true); 404} 405 406static int 407svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 408{ 409 return svm_msr_perm(perm_bitmap, msr, true, false); 410} 411 412static __inline void 413vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits) 414{ 415 struct svm_vcpu *vcpustate; 416 417 vcpustate = svm_get_vcpu(sc, vcpu); 418 419 vcpustate->dirty |= dirtybits; 420} 421 422static __inline int 423svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 424{ 425 struct vmcb_ctrl *ctrl; 426 427 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 428 429 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 430 return (ctrl->intercept[idx] & bitmask ? 1 : 0); 431} 432 433static __inline void 434svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 435 int enabled) 436{ 437 struct vmcb_ctrl *ctrl; 438 uint32_t oldval; 439 440 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 441 442 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 443 oldval = ctrl->intercept[idx]; 444 445 if (enabled) 446 ctrl->intercept[idx] |= bitmask; 447 else 448 ctrl->intercept[idx] &= ~bitmask; 449 450 if (ctrl->intercept[idx] != oldval) { 451 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I); 452 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 453 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); 454 } 455} 456 457static __inline void 458svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 459{ 460 svm_set_intercept(sc, vcpu, off, bitmask, 0); 461} 462 463static __inline void 464svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 465{ 466 svm_set_intercept(sc, vcpu, off, bitmask, 1); 467} 468 469static void 470vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 471 uint64_t msrpm_base_pa, uint64_t np_pml4) 472{ 473 struct vmcb_ctrl *ctrl; 474 struct vmcb_state *state; 475 uint32_t mask; 476 int n; 477 478 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 479 state = svm_get_vmcb_state(sc, vcpu); 480 481 ctrl->iopm_base_pa = iopm_base_pa; 482 ctrl->msrpm_base_pa = msrpm_base_pa; 483 484 /* Enable nested paging */ 485 ctrl->np_enable = 1; 486 ctrl->n_cr3 = np_pml4; 487 488 /* 489 * Intercept accesses to the control registers that are not shadowed 490 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 491 */ 492 for (n = 0; n < 16; n++) { 493 mask = (BIT(n) << 16) | BIT(n); 494 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 495 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 496 else 497 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 498 } 499 500 /* Intercept Machine Check exceptions. */ 501 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 502 503 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 504 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 505 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 506 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 507 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 508 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 509 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 510 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 511 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 512 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 513 VMCB_INTCPT_FERR_FREEZE); 514 515 /* 516 * From section "Canonicalization and Consistency Checks" in APMv2 517 * the VMRUN intercept bit must be set to pass the consistency check. 518 */ 519 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 520 521 /* 522 * The ASID will be set to a non-zero value just before VMRUN. 523 */ 524 ctrl->asid = 0; 525 526 /* 527 * Section 15.21.1, Interrupt Masking in EFLAGS 528 * Section 15.21.2, Virtualizing APIC.TPR 529 * 530 * This must be set for %rflag and %cr8 isolation of guest and host. 531 */ 532 ctrl->v_intr_masking = 1; 533 534 /* Enable Last Branch Record aka LBR for debugging */ 535 ctrl->lbr_virt_en = 1; 536 state->dbgctl = BIT(0); 537 538 /* EFER_SVM must always be set when the guest is executing */ 539 state->efer = EFER_SVM; 540 541 /* Set up the PAT to power-on state */ 542 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 543 PAT_VALUE(1, PAT_WRITE_THROUGH) | 544 PAT_VALUE(2, PAT_UNCACHED) | 545 PAT_VALUE(3, PAT_UNCACHEABLE) | 546 PAT_VALUE(4, PAT_WRITE_BACK) | 547 PAT_VALUE(5, PAT_WRITE_THROUGH) | 548 PAT_VALUE(6, PAT_UNCACHED) | 549 PAT_VALUE(7, PAT_UNCACHEABLE); 550} 551 552/* 553 * Initialise a virtual machine. 554 */ 555static void * 556svm_vminit(struct vm *vm, pmap_t pmap) 557{ 558 struct svm_softc *svm_sc; 559 struct svm_vcpu *vcpu; 560 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 561 int i; 562 563 svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc), 564 M_SVM, M_WAITOK | M_ZERO); 565 566 svm_sc->vm = vm; 567 svm_sc->svm_feature = svm_feature; 568 svm_sc->vcpu_cnt = VM_MAXCPU; 569 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 570 571 /* 572 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc. 573 */ 574 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 575 576 /* 577 * Following MSR can be completely controlled by virtual machines 578 * since access to following are translated to access to VMCB. 579 */ 580 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 581 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 582 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 583 584 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 585 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 586 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 587 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 588 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 589 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 591 592 /* For Nested Paging/RVI only. */ 593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 594 595 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 596 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 597 598 /* Intercept access to all I/O ports. */ 599 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); 600 601 /* Cache physical address for multiple vcpus. */ 602 iopm_pa = vtophys(svm_sc->iopm_bitmap); 603 msrpm_pa = vtophys(svm_sc->msr_bitmap); 604 pml4_pa = svm_sc->nptp; 605 606 for (i = 0; i < svm_sc->vcpu_cnt; i++) { 607 vcpu = svm_get_vcpu(svm_sc, i); 608 vcpu->lastcpu = NOCPU; 609 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 610 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 611 } 612 return (svm_sc); 613} 614 615static int 616svm_cpl(struct vmcb_state *state) 617{ 618 619 /* 620 * From APMv2: 621 * "Retrieve the CPL from the CPL field in the VMCB, not 622 * from any segment DPL" 623 */ 624 return (state->cpl); 625} 626 627static enum vm_cpu_mode 628svm_vcpu_mode(struct vmcb *vmcb) 629{ 630 struct vmcb_segment *seg; 631 struct vmcb_state *state; 632 633 state = &vmcb->state; 634 635 if (state->efer & EFER_LMA) { 636 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); 637 /* 638 * Section 4.8.1 for APM2, check if Code Segment has 639 * Long attribute set in descriptor. 640 */ 641 if (seg->attrib & VMCB_CS_ATTRIB_L) 642 return (CPU_MODE_64BIT); 643 else 644 return (CPU_MODE_COMPATIBILITY); 645 } else if (state->cr0 & CR0_PE) { 646 return (CPU_MODE_PROTECTED); 647 } else { 648 return (CPU_MODE_REAL); 649 } 650} 651 652static enum vm_paging_mode 653svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 654{ 655 656 if ((cr0 & CR0_PG) == 0) 657 return (PAGING_MODE_FLAT); 658 if ((cr4 & CR4_PAE) == 0) 659 return (PAGING_MODE_32); 660 if (efer & EFER_LME) 661 return (PAGING_MODE_64); 662 else 663 return (PAGING_MODE_PAE); 664} 665 666/* 667 * ins/outs utility routines 668 */ 669static uint64_t 670svm_inout_str_index(struct svm_regctx *regs, int in) 671{ 672 uint64_t val; 673 674 val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi; 675 676 return (val); 677} 678 679static uint64_t 680svm_inout_str_count(struct svm_regctx *regs, int rep) 681{ 682 uint64_t val; 683 684 val = rep ? regs->sctx_rcx : 1; 685 686 return (val); 687} 688 689static void 690svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 691 int in, struct vm_inout_str *vis) 692{ 693 int error, s; 694 695 if (in) { 696 vis->seg_name = VM_REG_GUEST_ES; 697 } else { 698 /* The segment field has standard encoding */ 699 s = (info1 >> 10) & 0x7; 700 vis->seg_name = vm_segment_name(s); 701 } 702 703 error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 704 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 705} 706 707static int 708svm_inout_str_addrsize(uint64_t info1) 709{ 710 uint32_t size; 711 712 size = (info1 >> 7) & 0x7; 713 switch (size) { 714 case 1: 715 return (2); /* 16 bit */ 716 case 2: 717 return (4); /* 32 bit */ 718 case 4: 719 return (8); /* 64 bit */ 720 default: 721 panic("%s: invalid size encoding %d", __func__, size); 722 } 723} 724 725static void 726svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 727{ 728 struct vmcb_state *state; 729 730 state = &vmcb->state; 731 paging->cr3 = state->cr3; 732 paging->cpl = svm_cpl(state); 733 paging->cpu_mode = svm_vcpu_mode(vmcb); 734 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 735 state->efer); 736} 737 738 739/* 740 * Handle guest I/O intercept. 741 */ 742static bool 743svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 744{ 745 struct vmcb_ctrl *ctrl; 746 struct vmcb_state *state; 747 struct svm_regctx *regs; 748 struct vm_inout_str *vis; 749 uint64_t info1; 750 751 state = svm_get_vmcb_state(svm_sc, vcpu); 752 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 753 regs = svm_get_guest_regctx(svm_sc, vcpu); 754 info1 = ctrl->exitinfo1; 755 756 vmexit->exitcode = VM_EXITCODE_INOUT; 757 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 758 vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0; 759 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 760 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 761 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 762 vmexit->u.inout.eax = (uint32_t)(state->rax); 763 764 if (vmexit->u.inout.string) { 765 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 766 vis = &vmexit->u.inout_str; 767 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 768 vis->rflags = state->rflags; 769 vis->cr0 = state->cr0; 770 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 771 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 772 vis->addrsize = svm_inout_str_addrsize(info1); 773 svm_inout_str_seginfo(svm_sc, vcpu, info1, 774 vmexit->u.inout.in, vis); 775 } 776 777 return (false); 778} 779 780static int 781svm_npf_paging(uint64_t exitinfo1) 782{ 783 784 if (exitinfo1 & VMCB_NPF_INFO1_W) 785 return (VM_PROT_WRITE); 786 787 return (VM_PROT_READ); 788} 789 790static bool 791svm_npf_emul_fault(uint64_t exitinfo1) 792{ 793 794 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 795 return (false); 796 } 797 798 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 799 return (false); 800 } 801 802 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 803 return (false); 804 } 805 806 return (true); 807} 808 809static void 810svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 811{ 812 struct vm_guest_paging *paging; 813 struct vmcb_segment *seg; 814 struct vmcb_ctrl *ctrl; 815 char *inst_bytes; 816 int inst_len; 817 818 ctrl = &vmcb->ctrl; 819 paging = &vmexit->u.inst_emul.paging; 820 821 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 822 vmexit->u.inst_emul.gpa = gpa; 823 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 824 svm_paging_info(vmcb, paging); 825 826 /* 827 * The inst_length will be determined by decoding the instruction. 828 */ 829 vmexit->inst_length = 0; 830 831 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); 832 switch(paging->cpu_mode) { 833 case CPU_MODE_PROTECTED: 834 case CPU_MODE_COMPATIBILITY: 835 /* 836 * Section 4.8.1 of APM2, Default Operand Size or D bit. 837 */ 838 vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ? 839 1 : 0; 840 break; 841 default: 842 vmexit->u.inst_emul.cs_d = 0; 843 break; 844 } 845 846 /* 847 * Copy the instruction bytes into 'vie' if available. 848 */ 849 if (decode_assist() && !disable_npf_assist) { 850 inst_len = ctrl->inst_len; 851 inst_bytes = ctrl->inst_bytes; 852 } else { 853 inst_len = 0; 854 inst_bytes = NULL; 855 } 856 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 857} 858 859/* 860 * Intercept access to MSR_EFER to prevent the guest from clearing the 861 * SVM enable bit. 862 */ 863static void 864svm_write_efer(struct svm_softc *sc, int vcpu, uint32_t edx, uint32_t eax) 865{ 866 struct vmcb_state *state; 867 uint64_t oldval; 868 869 state = svm_get_vmcb_state(sc, vcpu); 870 871 oldval = state->efer; 872 state->efer = (uint64_t)edx << 32 | eax | EFER_SVM; 873 if (state->efer != oldval) { 874 VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx", 875 oldval, state->efer); 876 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR); 877 } 878} 879 880#ifdef KTR 881static const char * 882intrtype_to_str(int intr_type) 883{ 884 switch (intr_type) { 885 case VMCB_EVENTINJ_TYPE_INTR: 886 return ("hwintr"); 887 case VMCB_EVENTINJ_TYPE_NMI: 888 return ("nmi"); 889 case VMCB_EVENTINJ_TYPE_INTn: 890 return ("swintr"); 891 case VMCB_EVENTINJ_TYPE_EXCEPTION: 892 return ("exception"); 893 default: 894 panic("%s: unknown intr_type %d", __func__, intr_type); 895 } 896} 897#endif 898 899/* 900 * Inject an event to vcpu as described in section 15.20, "Event injection". 901 */ 902static void 903svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, 904 uint32_t error, bool ec_valid) 905{ 906 struct vmcb_ctrl *ctrl; 907 908 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 909 910 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 911 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 912 913 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 914 __func__, vector)); 915 916 switch (intr_type) { 917 case VMCB_EVENTINJ_TYPE_INTR: 918 case VMCB_EVENTINJ_TYPE_NMI: 919 case VMCB_EVENTINJ_TYPE_INTn: 920 break; 921 case VMCB_EVENTINJ_TYPE_EXCEPTION: 922 if (vector >= 0 && vector <= 31 && vector != 2) 923 break; 924 /* FALLTHROUGH */ 925 default: 926 panic("%s: invalid intr_type/vector: %d/%d", __func__, 927 intr_type, vector); 928 } 929 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 930 if (ec_valid) { 931 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 932 ctrl->eventinj |= (uint64_t)error << 32; 933 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", 934 intrtype_to_str(intr_type), vector, error); 935 } else { 936 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", 937 intrtype_to_str(intr_type), vector); 938 } 939} 940 941static void 942svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 943{ 944 struct vmcb_ctrl *ctrl; 945 uint64_t intinfo; 946 947 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 948 intinfo = ctrl->exitintinfo; 949 if (!VMCB_EXITINTINFO_VALID(intinfo)) 950 return; 951 952 /* 953 * From APMv2, Section "Intercepts during IDT interrupt delivery" 954 * 955 * If a #VMEXIT happened during event delivery then record the event 956 * that was being delivered. 957 */ 958 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 959 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 960 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 961 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 962} 963 964static __inline void 965enable_intr_window_exiting(struct svm_softc *sc, int vcpu) 966{ 967 struct vmcb_ctrl *ctrl; 968 969 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 970 971 if (ctrl->v_irq == 0) { 972 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); 973 ctrl->v_irq = 1; 974 ctrl->v_ign_tpr = 1; 975 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 976 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 977 VMCB_INTCPT_VINTR); 978 } 979} 980 981static __inline void 982disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 983{ 984 struct vmcb_ctrl *ctrl; 985 986 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 987 988 if (ctrl->v_irq) { 989 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 990 ctrl->v_irq = 0; 991 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 992 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 993 VMCB_INTCPT_VINTR); 994 } 995} 996 997static int 998nmi_blocked(struct svm_softc *sc, int vcpu) 999{ 1000 /* XXX need to track NMI blocking */ 1001 return (0); 1002} 1003 1004static void 1005enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1006{ 1007 /* XXX enable iret intercept */ 1008} 1009 1010#ifdef notyet 1011static void 1012clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1013{ 1014 /* XXX disable iret intercept */ 1015} 1016#endif 1017 1018#ifdef KTR 1019static const char * 1020exit_reason_to_str(uint64_t reason) 1021{ 1022 static char reasonbuf[32]; 1023 1024 switch (reason) { 1025 case VMCB_EXIT_INVALID: 1026 return ("invalvmcb"); 1027 case VMCB_EXIT_SHUTDOWN: 1028 return ("shutdown"); 1029 case VMCB_EXIT_NPF: 1030 return ("nptfault"); 1031 case VMCB_EXIT_PAUSE: 1032 return ("pause"); 1033 case VMCB_EXIT_HLT: 1034 return ("hlt"); 1035 case VMCB_EXIT_CPUID: 1036 return ("cpuid"); 1037 case VMCB_EXIT_IO: 1038 return ("inout"); 1039 case VMCB_EXIT_MC: 1040 return ("mchk"); 1041 case VMCB_EXIT_INTR: 1042 return ("extintr"); 1043 case VMCB_EXIT_NMI: 1044 return ("nmi"); 1045 case VMCB_EXIT_VINTR: 1046 return ("vintr"); 1047 case VMCB_EXIT_MSR: 1048 return ("msr"); 1049 default: 1050 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1051 return (reasonbuf); 1052 } 1053} 1054#endif /* KTR */ 1055 1056/* 1057 * Determine the cause of virtual cpu exit and handle VMEXIT. 1058 * Return: false - Break vcpu execution loop and handle vmexit 1059 * in kernel or user space. 1060 * true - Continue vcpu run. 1061 */ 1062static bool 1063svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1064{ 1065 struct vmcb_state *state; 1066 struct vmcb_ctrl *ctrl; 1067 struct svm_regctx *ctx; 1068 uint64_t code, info1, info2, val; 1069 uint32_t eax, ecx, edx; 1070 bool update_rip, loop, retu; 1071 1072 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1073 1074 state = svm_get_vmcb_state(svm_sc, vcpu); 1075 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1076 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1077 code = ctrl->exitcode; 1078 info1 = ctrl->exitinfo1; 1079 info2 = ctrl->exitinfo2; 1080 1081 update_rip = true; 1082 loop = true; 1083 vmexit->exitcode = VM_EXITCODE_VMX; 1084 vmexit->u.vmx.status = 0; 1085 1086 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1087 1088 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1089 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1090 1091 svm_save_intinfo(svm_sc, vcpu); 1092 1093 switch (code) { 1094 case VMCB_EXIT_VINTR: 1095 update_rip = false; 1096 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1097 break; 1098 case VMCB_EXIT_MC: /* Machine Check. */ 1099 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1); 1100 vmexit->exitcode = VM_EXITCODE_MTRAP; 1101 loop = false; 1102 break; 1103 case VMCB_EXIT_MSR: /* MSR access. */ 1104 eax = state->rax; 1105 ecx = ctx->sctx_rcx; 1106 edx = ctx->e.g.sctx_rdx; 1107 1108 if (ecx == MSR_EFER) { 1109 KASSERT(info1 != 0, ("rdmsr(MSR_EFER) is not " 1110 "emulated: info1(%#lx) info2(%#lx)", 1111 info1, info2)); 1112 svm_write_efer(svm_sc, vcpu, edx, eax); 1113 break; 1114 } 1115 1116 retu = false; 1117 if (info1) { 1118 /* VM exited because of write MSR */ 1119 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1120 vmexit->exitcode = VM_EXITCODE_WRMSR; 1121 vmexit->u.msr.code = ecx; 1122 val = (uint64_t)edx << 32 | eax; 1123 if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val, &retu)) { 1124 vmexit->u.msr.wval = val; 1125 loop = false; 1126 } else 1127 loop = retu ? false : true; 1128 VCPU_CTR3(svm_sc->vm, vcpu, 1129 "VMEXIT WRMSR(%s handling) 0x%lx @0x%x", 1130 loop ? "kernel" : "user", val, ecx); 1131 } else { 1132 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1133 vmexit->exitcode = VM_EXITCODE_RDMSR; 1134 vmexit->u.msr.code = ecx; 1135 if (emulate_rdmsr(svm_sc->vm, vcpu, ecx, &retu)) 1136 loop = false; 1137 else 1138 loop = retu ? false : true; 1139 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR" 1140 " MSB=0x%08x, LSB=%08x @0x%x", 1141 ctx->e.g.sctx_rdx, state->rax, ecx); 1142 } 1143 1144#define MSR_AMDK8_IPM 0xc0010055 1145 /* 1146 * We can't hide AMD C1E idle capability since its 1147 * based on CPU generation, for now ignore access to 1148 * this MSR by vcpus 1149 * XXX: special handling of AMD C1E - Ignore. 1150 */ 1151 if (ecx == MSR_AMDK8_IPM) 1152 loop = true; 1153 break; 1154 case VMCB_EXIT_INTR: 1155 /* 1156 * Exit on External Interrupt. 1157 * Give host interrupt handler to run and if its guest 1158 * interrupt, local APIC will inject event in guest. 1159 */ 1160 update_rip = false; 1161 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1162 break; 1163 case VMCB_EXIT_NMI: 1164 update_rip = false; 1165 break; 1166 case VMCB_EXIT_IO: 1167 loop = svm_handle_io(svm_sc, vcpu, vmexit); 1168 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1169 break; 1170 case VMCB_EXIT_CPUID: 1171 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1172 loop = x86_emulate_cpuid(svm_sc->vm, vcpu, 1173 (uint32_t *)&state->rax, 1174 (uint32_t *)&ctx->sctx_rbx, 1175 (uint32_t *)&ctx->sctx_rcx, 1176 (uint32_t *)&ctx->e.g.sctx_rdx); 1177 break; 1178 case VMCB_EXIT_HLT: 1179 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1180 vmexit->exitcode = VM_EXITCODE_HLT; 1181 vmexit->u.hlt.rflags = state->rflags; 1182 loop = false; 1183 break; 1184 case VMCB_EXIT_PAUSE: 1185 vmexit->exitcode = VM_EXITCODE_PAUSE; 1186 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1187 loop = false; 1188 break; 1189 case VMCB_EXIT_NPF: 1190 loop = false; 1191 update_rip = false; 1192 if (info1 & VMCB_NPF_INFO1_RSV) { 1193 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1194 "reserved bits set: info1(%#lx) info2(%#lx)", 1195 info1, info2); 1196 break; 1197 } 1198 1199 /* EXITINFO2 has the physical fault address (GPA). */ 1200 if(vm_mem_allocated(svm_sc->vm, info2)) { 1201 vmexit->exitcode = VM_EXITCODE_PAGING; 1202 vmexit->u.paging.gpa = info2; 1203 vmexit->u.paging.fault_type = svm_npf_paging(info1); 1204 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1205 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1206 "on gpa %#lx/%#lx at rip %#lx", 1207 info2, info1, state->rip); 1208 } else if (svm_npf_emul_fault(info1)) { 1209 svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu), 1210 info2, vmexit); 1211 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1212 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1213 "for gpa %#lx/%#lx at rip %#lx", 1214 info2, info1, state->rip); 1215 } 1216 break; 1217 case VMCB_EXIT_SHUTDOWN: 1218 loop = false; 1219 break; 1220 case VMCB_EXIT_INVALID: 1221 loop = false; 1222 break; 1223 default: 1224 /* Return to user space. */ 1225 loop = false; 1226 update_rip = false; 1227 VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx" 1228 " EXITINFO1: 0x%lx EXITINFO2:0x%lx\n", 1229 ctrl->exitcode, info1, info2); 1230 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx" 1231 " Inst decoder len:%d\n", state->rip, 1232 ctrl->nrip, ctrl->inst_len); 1233 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1234 break; 1235 } 1236 1237 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx nrip %#lx", 1238 loop ? "handled" : "unhandled", exit_reason_to_str(code), 1239 state->rip, update_rip ? ctrl->nrip : state->rip); 1240 1241 vmexit->rip = state->rip; 1242 if (update_rip) { 1243 if (ctrl->nrip == 0) { 1244 VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set " 1245 "for RIP0x%lx.\n", state->rip); 1246 vmexit->exitcode = VM_EXITCODE_VMX; 1247 } else 1248 vmexit->rip = ctrl->nrip; 1249 } 1250 1251 /* If vcpu execution is continued, update RIP. */ 1252 if (loop) { 1253 state->rip = vmexit->rip; 1254 } 1255 1256 return (loop); 1257} 1258 1259static void 1260svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1261{ 1262 uint64_t intinfo; 1263 1264 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1265 return; 1266 1267 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1268 "valid: %#lx", __func__, intinfo)); 1269 1270 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1271 VMCB_EXITINTINFO_VECTOR(intinfo), 1272 VMCB_EXITINTINFO_EC(intinfo), 1273 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1274 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1275 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1276} 1277 1278/* 1279 * Inject event to virtual cpu. 1280 */ 1281static void 1282svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1283{ 1284 struct vmcb_ctrl *ctrl; 1285 struct vmcb_state *state; 1286 int extint_pending; 1287 int vector, need_intr_window; 1288 1289 state = svm_get_vmcb_state(sc, vcpu); 1290 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1291 1292 need_intr_window = 0; 1293 1294 /* 1295 * Inject pending events or exceptions for this vcpu. 1296 * 1297 * An event might be pending because the previous #VMEXIT happened 1298 * during event delivery (i.e. ctrl->exitintinfo). 1299 * 1300 * An event might also be pending because an exception was injected 1301 * by the hypervisor (e.g. #PF during instruction emulation). 1302 */ 1303 svm_inj_intinfo(sc, vcpu); 1304 1305 /* NMI event has priority over interrupts. */ 1306 if (vm_nmi_pending(sc->vm, vcpu)) { 1307 if (nmi_blocked(sc, vcpu)) { 1308 /* 1309 * Can't inject another NMI if the guest has not 1310 * yet executed an "iret" after the last NMI. 1311 */ 1312 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1313 "to NMI-blocking"); 1314 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1315 /* 1316 * If there is already an exception/interrupt pending 1317 * then defer the NMI until after that. 1318 */ 1319 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1320 "eventinj %#lx", ctrl->eventinj); 1321 1322 /* 1323 * Use self-IPI to trigger a VM-exit as soon as 1324 * possible after the event injection is completed. 1325 * 1326 * This works only if the external interrupt exiting 1327 * is at a lower priority than the event injection. 1328 * 1329 * Although not explicitly specified in APMv2 the 1330 * relative priorities were verified empirically. 1331 */ 1332 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1333 } else { 1334 vm_nmi_clear(sc->vm, vcpu); 1335 1336 /* Inject NMI, vector number is not used */ 1337 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1338 IDT_NMI, 0, false); 1339 1340 /* virtual NMI blocking is now in effect */ 1341 enable_nmi_blocking(sc, vcpu); 1342 1343 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1344 } 1345 } 1346 1347 extint_pending = vm_extint_pending(sc->vm, vcpu); 1348 1349 if (!extint_pending) { 1350 /* Ask the local apic for a vector to inject */ 1351 if (!vlapic_pending_intr(vlapic, &vector)) { 1352 goto done; /* nothing to inject */ 1353 } 1354 KASSERT(vector >= 16 && vector <= 255, 1355 ("invalid vector %d from local APIC", vector)); 1356 } else { 1357 /* Ask the legacy pic for a vector to inject */ 1358 vatpic_pending_intr(sc->vm, &vector); 1359 KASSERT(vector >= 0 && vector <= 255, 1360 ("invalid vector %d from local APIC", vector)); 1361 } 1362 1363 /* 1364 * If the guest has disabled interrupts or is in an interrupt shadow 1365 * then we cannot inject the pending interrupt. 1366 */ 1367 if ((state->rflags & PSL_I) == 0) { 1368 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1369 "rflags %#lx", vector, state->rflags); 1370 need_intr_window = 1; 1371 goto done; 1372 } 1373 1374 if (ctrl->intr_shadow) { 1375 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1376 "interrupt shadow", vector); 1377 need_intr_window = 1; 1378 goto done; 1379 } 1380 1381 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1382 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1383 "eventinj %#lx", vector, ctrl->eventinj); 1384 need_intr_window = 1; 1385 goto done; 1386 } 1387 1388 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1389 1390 if (!extint_pending) { 1391 /* Update the Local APIC ISR */ 1392 vlapic_intr_accepted(vlapic, vector); 1393 } else { 1394 vm_extint_clear(sc->vm, vcpu); 1395 vatpic_intr_accepted(sc->vm, vector); 1396 /* 1397 * Force a VM-exit as soon as the vcpu is ready to accept 1398 * another interrupt. This is done because the PIC might 1399 * have another vector that it wants to inject. Also, if 1400 * the vlapic has a pending interrupt that was preempted 1401 * by the ExtInt then it allows us to inject the APIC 1402 * vector as soon as possible. 1403 */ 1404 need_intr_window = 1; 1405 } 1406done: 1407 if (need_intr_window) { 1408 /* 1409 * We use V_IRQ in conjunction with the VINTR intercept to 1410 * trap into the hypervisor as soon as a virtual interrupt 1411 * can be delivered. 1412 * 1413 * Since injected events are not subject to intercept checks 1414 * we need to ensure that the V_IRQ is not actually going to 1415 * be delivered on VM entry. The KASSERT below enforces this. 1416 */ 1417 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1418 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1419 ("Bogus intr_window_exiting: eventinj (%#lx), " 1420 "intr_shadow (%u), rflags (%#lx)", 1421 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1422 enable_intr_window_exiting(sc, vcpu); 1423 } else { 1424 disable_intr_window_exiting(sc, vcpu); 1425 } 1426} 1427 1428static __inline void 1429restore_host_tss(void) 1430{ 1431 struct system_segment_descriptor *tss_sd; 1432 1433 /* 1434 * The TSS descriptor was in use prior to launching the guest so it 1435 * has been marked busy. 1436 * 1437 * 'ltr' requires the descriptor to be marked available so change the 1438 * type to "64-bit available TSS". 1439 */ 1440 tss_sd = PCPU_GET(tss); 1441 tss_sd->sd_type = SDT_SYSTSS; 1442 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1443} 1444 1445static void 1446check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1447{ 1448 struct svm_vcpu *vcpustate; 1449 struct vmcb_ctrl *ctrl; 1450 long eptgen; 1451 bool alloc_asid; 1452 1453 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1454 "active on cpu %u", __func__, thiscpu)); 1455 1456 vcpustate = svm_get_vcpu(sc, vcpuid); 1457 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1458 1459 /* 1460 * The TLB entries associated with the vcpu's ASID are not valid 1461 * if either of the following conditions is true: 1462 * 1463 * 1. The vcpu's ASID generation is different than the host cpu's 1464 * ASID generation. This happens when the vcpu migrates to a new 1465 * host cpu. It can also happen when the number of vcpus executing 1466 * on a host cpu is greater than the number of ASIDs available. 1467 * 1468 * 2. The pmap generation number is different than the value cached in 1469 * the 'vcpustate'. This happens when the host invalidates pages 1470 * belonging to the guest. 1471 * 1472 * asidgen eptgen Action 1473 * mismatch mismatch 1474 * 0 0 (a) 1475 * 0 1 (b1) or (b2) 1476 * 1 0 (c) 1477 * 1 1 (d) 1478 * 1479 * (a) There is no mismatch in eptgen or ASID generation and therefore 1480 * no further action is needed. 1481 * 1482 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1483 * retained and the TLB entries associated with this ASID 1484 * are flushed by VMRUN. 1485 * 1486 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1487 * allocated. 1488 * 1489 * (c) A new ASID is allocated. 1490 * 1491 * (d) A new ASID is allocated. 1492 */ 1493 1494 alloc_asid = false; 1495 eptgen = pmap->pm_eptgen; 1496 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1497 1498 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1499 alloc_asid = true; /* (c) and (d) */ 1500 } else if (vcpustate->eptgen != eptgen) { 1501 if (flush_by_asid()) 1502 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1503 else 1504 alloc_asid = true; /* (b2) */ 1505 } else { 1506 /* 1507 * This is the common case (a). 1508 */ 1509 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1510 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1511 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1512 } 1513 1514 if (alloc_asid) { 1515 if (++asid[thiscpu].num >= nasid) { 1516 asid[thiscpu].num = 1; 1517 if (++asid[thiscpu].gen == 0) 1518 asid[thiscpu].gen = 1; 1519 /* 1520 * If this cpu does not support "flush-by-asid" 1521 * then flush the entire TLB on a generation 1522 * bump. Subsequent ASID allocation in this 1523 * generation can be done without a TLB flush. 1524 */ 1525 if (!flush_by_asid()) 1526 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1527 } 1528 vcpustate->asid.gen = asid[thiscpu].gen; 1529 vcpustate->asid.num = asid[thiscpu].num; 1530 1531 ctrl->asid = vcpustate->asid.num; 1532 vcpu_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1533 /* 1534 * If this cpu supports "flush-by-asid" then the TLB 1535 * was not flushed after the generation bump. The TLB 1536 * is flushed selectively after every new ASID allocation. 1537 */ 1538 if (flush_by_asid()) 1539 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1540 } 1541 vcpustate->eptgen = eptgen; 1542 1543 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1544 KASSERT(ctrl->asid == vcpustate->asid.num, 1545 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1546} 1547 1548/* 1549 * Start vcpu with specified RIP. 1550 */ 1551static int 1552svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1553 void *rend_cookie, void *suspended_cookie) 1554{ 1555 struct svm_regctx *hctx, *gctx; 1556 struct svm_softc *svm_sc; 1557 struct svm_vcpu *vcpustate; 1558 struct vmcb_state *state; 1559 struct vmcb_ctrl *ctrl; 1560 struct vm_exit *vmexit; 1561 struct vlapic *vlapic; 1562 struct vm *vm; 1563 uint64_t vmcb_pa; 1564 u_int thiscpu; 1565 bool loop; /* Continue vcpu execution loop. */ 1566 1567 loop = true; 1568 svm_sc = arg; 1569 vm = svm_sc->vm; 1570 1571 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1572 state = svm_get_vmcb_state(svm_sc, vcpu); 1573 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1574 vmexit = vm_exitinfo(vm, vcpu); 1575 vlapic = vm_lapic(vm, vcpu); 1576 1577 /* 1578 * Stash 'curcpu' on the stack as 'thiscpu'. 1579 * 1580 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1581 * after the #VMEXIT. Since VMRUN is executed inside a critical section 1582 * 'curcpu' and 'thiscpu' are guaranteed to identical. 1583 */ 1584 thiscpu = curcpu; 1585 1586 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1587 hctx = &host_ctx[thiscpu]; 1588 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1589 1590 if (vcpustate->lastcpu != thiscpu) { 1591 /* 1592 * Force new ASID allocation by invalidating the generation. 1593 */ 1594 vcpustate->asid.gen = 0; 1595 1596 /* 1597 * Invalidate the VMCB state cache by marking all fields dirty. 1598 */ 1599 vcpu_set_dirty(svm_sc, vcpu, 0xffffffff); 1600 1601 /* 1602 * XXX 1603 * Setting 'vcpustate->lastcpu' here is bit premature because 1604 * we may return from this function without actually executing 1605 * the VMRUN instruction. This could happen if a rendezvous 1606 * or an AST is pending on the first time through the loop. 1607 * 1608 * This works for now but any new side-effects of vcpu 1609 * migration should take this case into account. 1610 */ 1611 vcpustate->lastcpu = thiscpu; 1612 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1613 } 1614 1615 /* Update Guest RIP */ 1616 state->rip = rip; 1617 1618 do { 1619 vmexit->inst_length = 0; 1620 1621 /* 1622 * Disable global interrupts to guarantee atomicity during 1623 * loading of guest state. This includes not only the state 1624 * loaded by the "vmrun" instruction but also software state 1625 * maintained by the hypervisor: suspended and rendezvous 1626 * state, NPT generation number, vlapic interrupts etc. 1627 */ 1628 disable_gintr(); 1629 1630 if (vcpu_suspended(suspended_cookie)) { 1631 enable_gintr(); 1632 vm_exit_suspended(vm, vcpu, state->rip); 1633 break; 1634 } 1635 1636 if (vcpu_rendezvous_pending(rend_cookie)) { 1637 enable_gintr(); 1638 vm_exit_rendezvous(vm, vcpu, state->rip); 1639 break; 1640 } 1641 1642 /* We are asked to give the cpu by scheduler. */ 1643 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1644 enable_gintr(); 1645 vm_exit_astpending(vm, vcpu, state->rip); 1646 break; 1647 } 1648 1649 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1650 1651 /* Activate the nested pmap on 'thiscpu' */ 1652 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1653 1654 /* 1655 * Check the pmap generation and the ASID generation to 1656 * ensure that the vcpu does not use stale TLB mappings. 1657 */ 1658 check_asid(svm_sc, vcpu, pmap, thiscpu); 1659 1660 ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty; 1661 vcpustate->dirty = 0; 1662 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 1663 1664 /* Launch Virtual Machine. */ 1665 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 1666 svm_launch(vmcb_pa, gctx, hctx); 1667 1668 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1669 1670 /* 1671 * Restore MSR_GSBASE to point to the pcpu data area. 1672 * 1673 * Note that accesses done via PCPU_GET/PCPU_SET will work 1674 * only after MSR_GSBASE is restored. 1675 * 1676 * Also note that we don't bother restoring MSR_KGSBASE 1677 * since it is not used in the kernel and will be restored 1678 * when the VMRUN ioctl returns to userspace. 1679 */ 1680 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1681 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1682 thiscpu, curcpu)); 1683 1684 /* 1685 * The host GDTR and IDTR is saved by VMRUN and restored 1686 * automatically on #VMEXIT. However, the host TSS needs 1687 * to be restored explicitly. 1688 */ 1689 restore_host_tss(); 1690 1691 /* #VMEXIT disables interrupts so re-enable them here. */ 1692 enable_gintr(); 1693 1694 /* Handle #VMEXIT and if required return to user space. */ 1695 loop = svm_vmexit(svm_sc, vcpu, vmexit); 1696 } while (loop); 1697 1698 return (0); 1699} 1700 1701/* 1702 * Cleanup for virtual machine. 1703 */ 1704static void 1705svm_vmcleanup(void *arg) 1706{ 1707 struct svm_softc *svm_sc; 1708 1709 svm_sc = arg; 1710 1711 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); 1712 1713 free(svm_sc, M_SVM); 1714} 1715 1716/* 1717 * Return pointer to hypervisor saved register state. 1718 */ 1719static register_t * 1720swctx_regptr(struct svm_regctx *regctx, int reg) 1721{ 1722 1723 switch (reg) { 1724 case VM_REG_GUEST_RBX: 1725 return (®ctx->sctx_rbx); 1726 case VM_REG_GUEST_RCX: 1727 return (®ctx->sctx_rcx); 1728 case VM_REG_GUEST_RDX: 1729 return (®ctx->e.g.sctx_rdx); 1730 case VM_REG_GUEST_RDI: 1731 return (®ctx->e.g.sctx_rdi); 1732 case VM_REG_GUEST_RSI: 1733 return (®ctx->e.g.sctx_rsi); 1734 case VM_REG_GUEST_RBP: 1735 return (®ctx->sctx_rbp); 1736 case VM_REG_GUEST_R8: 1737 return (®ctx->sctx_r8); 1738 case VM_REG_GUEST_R9: 1739 return (®ctx->sctx_r9); 1740 case VM_REG_GUEST_R10: 1741 return (®ctx->sctx_r10); 1742 case VM_REG_GUEST_R11: 1743 return (®ctx->sctx_r11); 1744 case VM_REG_GUEST_R12: 1745 return (®ctx->sctx_r12); 1746 case VM_REG_GUEST_R13: 1747 return (®ctx->sctx_r13); 1748 case VM_REG_GUEST_R14: 1749 return (®ctx->sctx_r14); 1750 case VM_REG_GUEST_R15: 1751 return (®ctx->sctx_r15); 1752 default: 1753 ERR("Unknown register requested, reg=%d.\n", reg); 1754 break; 1755 } 1756 1757 return (NULL); 1758} 1759 1760/* 1761 * Interface to read guest registers. 1762 * This can be SVM h/w saved or hypervisor saved register. 1763 */ 1764static int 1765svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 1766{ 1767 struct svm_softc *svm_sc; 1768 struct vmcb *vmcb; 1769 register_t *reg; 1770 1771 svm_sc = arg; 1772 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1773 1774 vmcb = svm_get_vmcb(svm_sc, vcpu); 1775 1776 if (vmcb_read(vmcb, ident, val) == 0) { 1777 return (0); 1778 } 1779 1780 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1781 1782 if (reg != NULL) { 1783 *val = *reg; 1784 return (0); 1785 } 1786 1787 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 1788 return (EINVAL); 1789} 1790 1791/* 1792 * Interface to write to guest registers. 1793 * This can be SVM h/w saved or hypervisor saved register. 1794 */ 1795static int 1796svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 1797{ 1798 struct svm_softc *svm_sc; 1799 struct vmcb *vmcb; 1800 register_t *reg; 1801 1802 svm_sc = arg; 1803 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1804 1805 vmcb = svm_get_vmcb(svm_sc, vcpu); 1806 if (vmcb_write(vmcb, ident, val) == 0) { 1807 return (0); 1808 } 1809 1810 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1811 1812 if (reg != NULL) { 1813 *reg = val; 1814 return (0); 1815 } 1816 1817 /* 1818 * XXX deal with CR3 and invalidate TLB entries tagged with the 1819 * vcpu's ASID. This needs to be treated differently depending on 1820 * whether 'running' is true/false. 1821 */ 1822 1823 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 1824 return (EINVAL); 1825} 1826 1827 1828/* 1829 * Inteface to set various descriptors. 1830 */ 1831static int 1832svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc) 1833{ 1834 struct svm_softc *svm_sc; 1835 struct vmcb *vmcb; 1836 struct vmcb_segment *seg; 1837 uint16_t attrib; 1838 1839 svm_sc = arg; 1840 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1841 1842 vmcb = svm_get_vmcb(svm_sc, vcpu); 1843 1844 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type); 1845 1846 seg = vmcb_seg(vmcb, type); 1847 if (seg == NULL) { 1848 ERR("SVM_ERR:Unsupported segment type%d\n", type); 1849 return (EINVAL); 1850 } 1851 1852 /* Map seg_desc access to VMCB attribute format.*/ 1853 attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF); 1854 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n", 1855 type, desc->access, desc->limit); 1856 seg->attrib = attrib; 1857 seg->base = desc->base; 1858 seg->limit = desc->limit; 1859 1860 return (0); 1861} 1862 1863/* 1864 * Interface to get guest descriptor. 1865 */ 1866static int 1867svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc) 1868{ 1869 struct svm_softc *svm_sc; 1870 struct vmcb_segment *seg; 1871 1872 svm_sc = arg; 1873 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1874 1875 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type); 1876 1877 seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type); 1878 if (!seg) { 1879 ERR("SVM_ERR:Unsupported segment type%d\n", type); 1880 return (EINVAL); 1881 } 1882 1883 /* Map seg_desc access to VMCB attribute format.*/ 1884 desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF); 1885 desc->base = seg->base; 1886 desc->limit = seg->limit; 1887 1888 /* 1889 * VT-x uses bit 16 (Unusable) to indicate a segment that has been 1890 * loaded with a NULL segment selector. The 'desc->access' field is 1891 * interpreted in the VT-x format by the processor-independent code. 1892 * 1893 * SVM uses the 'P' bit to convey the same information so convert it 1894 * into the VT-x format. For more details refer to section 1895 * "Segment State in the VMCB" in APMv2. 1896 */ 1897 if (type == VM_REG_GUEST_CS && type == VM_REG_GUEST_TR) 1898 desc->access |= 0x80; /* CS and TS always present */ 1899 1900 if (!(desc->access & 0x80)) 1901 desc->access |= 0x10000; /* Unusable segment */ 1902 1903 return (0); 1904} 1905 1906static int 1907svm_setcap(void *arg, int vcpu, int type, int val) 1908{ 1909 struct svm_softc *sc; 1910 int error; 1911 1912 sc = arg; 1913 error = 0; 1914 switch (type) { 1915 case VM_CAP_HALT_EXIT: 1916 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1917 VMCB_INTCPT_HLT, val); 1918 break; 1919 case VM_CAP_PAUSE_EXIT: 1920 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1921 VMCB_INTCPT_PAUSE, val); 1922 break; 1923 case VM_CAP_UNRESTRICTED_GUEST: 1924 /* Unrestricted guest execution cannot be disabled in SVM */ 1925 if (val == 0) 1926 error = EINVAL; 1927 break; 1928 default: 1929 error = ENOENT; 1930 break; 1931 } 1932 return (error); 1933} 1934 1935static int 1936svm_getcap(void *arg, int vcpu, int type, int *retval) 1937{ 1938 struct svm_softc *sc; 1939 int error; 1940 1941 sc = arg; 1942 error = 0; 1943 1944 switch (type) { 1945 case VM_CAP_HALT_EXIT: 1946 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1947 VMCB_INTCPT_HLT); 1948 break; 1949 case VM_CAP_PAUSE_EXIT: 1950 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1951 VMCB_INTCPT_PAUSE); 1952 break; 1953 case VM_CAP_UNRESTRICTED_GUEST: 1954 *retval = 1; /* unrestricted guest is always enabled */ 1955 break; 1956 default: 1957 error = ENOENT; 1958 break; 1959 } 1960 return (error); 1961} 1962 1963static struct vlapic * 1964svm_vlapic_init(void *arg, int vcpuid) 1965{ 1966 struct svm_softc *svm_sc; 1967 struct vlapic *vlapic; 1968 1969 svm_sc = arg; 1970 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 1971 vlapic->vm = svm_sc->vm; 1972 vlapic->vcpuid = vcpuid; 1973 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 1974 1975 vlapic_init(vlapic); 1976 1977 return (vlapic); 1978} 1979 1980static void 1981svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 1982{ 1983 1984 vlapic_cleanup(vlapic); 1985 free(vlapic, M_SVM_VLAPIC); 1986} 1987 1988struct vmm_ops vmm_ops_amd = { 1989 svm_init, 1990 svm_cleanup, 1991 svm_restore, 1992 svm_vminit, 1993 svm_vmrun, 1994 svm_vmcleanup, 1995 svm_getreg, 1996 svm_setreg, 1997 svm_getdesc, 1998 svm_setdesc, 1999 svm_getcap, 2000 svm_setcap, 2001 svm_npt_alloc, 2002 svm_npt_free, 2003 svm_vlapic_init, 2004 svm_vlapic_cleanup 2005}; 2006