svm.c revision 271343
/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271343 2014-09-10 01:37:32Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include <x86/apicreg.h>

#include "vmm_lapic.h"
#include "vmm_msr.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "npt.h"

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */

#define VMCB_CACHE_DEFAULT	\
	(VMCB_CACHE_ASID | VMCB_CACHE_IOPM | VMCB_CACHE_NP)

MALLOC_DEFINE(M_SVM, "svm", "svm");
MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc);

static uint32_t svm_feature;	/* AMD SVM features. */
/* Maximum ASIDs supported by the processor */
static uint32_t nasid;

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * S/w saved host context.
 */
static struct svm_regctx host_ctx[MAXCPU];

static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid VMCB EXITINTINFO");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "VMM pending exception injected");

/*
 * Common function to enable or disable SVM for a CPU.
 */
static int
cpu_svm_enable_disable(boolean_t enable)
{
	uint64_t efer_msr;

	efer_msr = rdmsr(MSR_EFER);

	if (enable)
		efer_msr |= EFER_SVM;
	else
		efer_msr &= ~EFER_SVM;

	wrmsr(MSR_EFER, efer_msr);

	return (0);
}

/*
 * Disable SVM on a CPU.
 */
static void
svm_disable(void *arg __unused)
{

	(void)cpu_svm_enable_disable(FALSE);
}

/*
 * Disable SVM for all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Check for required bhyve SVM features in a CPU.
 */
static int
svm_cpuid_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature = regs[3];

	printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]);
	nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	printf("SVM Features:0x%b\n", svm_feature,
		"\020"
		"\001NP"		/* Nested paging */
		"\002LbrVirt"		/* LBR virtualization */
		"\003SVML"		/* SVM lock */
		"\004NRIPS"		/* NRIP save */
		"\005TscRateMsr"	/* MSR based TSC rate control */
		"\006VmcbClean"		/* VMCB clean bits */
		"\007FlushByAsid"	/* Flush by ASID */
		"\010DecodeAssist"	/* Decode assist */
		"\011<b20>"
		"\012<b20>"
		"\013PauseFilter"
		"\014<b20>"
		"\015PauseFilterThreshold"
		"\016AVIC"
		);

	/* SVM Lock */
	if (!(svm_feature & AMD_CPUID_SVM_SVML)) {
		printf("SVM is disabled by BIOS, please enable in BIOS.\n");
		return (ENXIO);
	}

	/*
	 * bhyve needs RVI to work.
	 */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("Missing Nested paging or RVI SVM support in processor.\n");
		return (EIO);
	}

	if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE)
		return (0);

	return (EIO);
}

static __inline int
flush_by_asid(void)
{
	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

/*
 * Enable SVM for a CPU.
 */
static void
svm_enable(void *arg __unused)
{
	uint64_t hsave_pa;

	(void)cpu_svm_enable_disable(TRUE);

	hsave_pa = vtophys(hsave[curcpu]);
	wrmsr(MSR_VM_HSAVE_PA, hsave_pa);

	if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) {
		panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu);
	}
}

/*
 * Check if a processor supports SVM.
 */
static int
is_svm_enabled(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM is not supported on this processor.\n");
		return (ENXIO);
	}

	msr = rdmsr(MSR_VM_CR);
	/* Make sure SVM is not disabled by BIOS. */
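	/*
	 * Note (per APM2 "Enabling SVM"): when VM_CR.SVMDIS is set together
	 * with the SVM lock, EFER.SVME cannot be enabled without a
	 * BIOS-provided key, which is why the failure path below refers to
	 * the key.
	 */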
	if ((msr & VM_CR_SVMDIS) == 0) {
		return svm_cpuid_features();
	}

	printf("SVM disabled by Key, consult TPM/BIOS manual.\n");
	return (ENXIO);
}

/*
 * Enable SVM on CPU and initialize nested page table h/w.
 */
static int
svm_init(int ipinum)
{
	int err, cpu;

	err = is_svm_enabled();
	if (err)
		return (err);

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_npt_init(ipinum);

	/* Start SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{
	svm_enable(NULL);
}

/*
 * Get the index and bit position for an MSR in the MSR permission
 * bitmap. Two bits are used for each MSR: the lower bit is for read
 * and the higher bit is for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

/* Pentium compatible MSRs */
#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EIO);
}

/*
 * Give the virtual cpu complete access to an MSR (read and/or write).
 */
static int
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, err;

	err = svm_msr_index(msr, &index, &bit);
	if (err) {
		ERR("MSR 0x%lx is not writeable by guest.\n", msr);
		return (err);
	}

	if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) {
		ERR("MSR 0x%lx index out of range(%d).\n", msr, index);
		return (EINVAL);
	}
	if (bit < 0 || bit > 8) {
		ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit);
		return (EINVAL);
	}

	/* Disable intercept for read and write. */
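	/*
	 * Each MSR gets two adjacent bits in the permission bitmap: 'bit'
	 * controls the read intercept and 'bit + 1' the write intercept.
	 * A cleared bit means the access is not intercepted, i.e. the guest
	 * accesses the MSR directly.
	 */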
	if (read)
		perm_bitmap[index] &= ~(1UL << bit);
	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
	CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n",
		(perm_bitmap[index] >> bit) & 0x3, msr);

	return (0);
}

static int
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	return svm_msr_perm(perm_bitmap, msr, true, true);
}

static int
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	return svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline void
vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
	struct svm_vcpu *vcpustate;

	vcpustate = svm_get_vcpu(sc, vcpu);

	vcpustate->dirty |= dirtybits;
}

/*
 * Initialise a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc),
			M_SVM, M_WAITOK | M_ZERO);

	svm_sc->vm = vm;
	svm_sc->svm_feature = svm_feature;
	svm_sc->vcpu_cnt = VM_MAXCPU;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

	/*
	 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));

	/*
	 * The following MSRs can be completely controlled by the virtual
	 * machine since accesses to them are translated into accesses to
	 * the VMCB.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);

	/* For Nested Paging/RVI only. */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

	/* Cache physical address for multiple vcpus. */
	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;

	for (i = 0; i < svm_sc->vcpu_cnt; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);
		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		svm_init_vmcb(&vcpu->vmcb, iopm_pa, msrpm_pa, pml4_pa);
	}
	return (svm_sc);
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment *seg;
	struct vmcb_state *state;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
		/*
		 * Section 4.8.1 of APM2, check if the Code Segment has the
		 * Long attribute set in its descriptor.
		 */
		if (seg->attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

/*
 * Handle guest I/O intercept.
 */
static bool
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);
	info1 = ctrl->exitinfo1;

	vmexit->exitcode = VM_EXITCODE_INOUT;
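	/*
	 * EXITINFO1 layout for the IN/OUT intercept (decoded below): bit 0
	 * is the direction (1 = in), bit 2 the string flag, bit 3 the REP
	 * prefix, bits 4:6 the operand size and bits 16:31 the port number.
	 */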
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (vmexit->u.inout.string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (false);
}

static int
svm_npf_paging(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);

	return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment *seg;

	paging = &vmexit->u.inst_emul.paging;
	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	/*
	 * If the DecodeAssist SVM feature doesn't exist, we don't have the
	 * NPF instruction length. RIP will be calculated based on the length
	 * determined by instruction emulation.
	 */
	vmexit->inst_length = VIE_INST_SIZE;

	seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
	switch (paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
}

/*
 * Intercept access to MSR_EFER to prevent the guest from clearing the
 * SVM enable bit.
 */
static void
svm_write_efer(struct svm_softc *sc, int vcpu, uint32_t edx, uint32_t eax)
{
	struct vmcb_state *state;
	uint64_t oldval;

	state = svm_get_vmcb_state(sc, vcpu);

	oldval = state->efer;
	state->efer = (uint64_t)edx << 32 | eax | EFER_SVM;
	if (state->efer != oldval) {
		VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx",
		    oldval, state->efer);
		vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR);
	}
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
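	/*
	 * The event is stashed with the VM so that it can be re-injected on
	 * the next entry into the guest (see svm_inj_intinfo()).
	 */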
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case VMCB_EXIT_INVALID:
		return ("invalvmcb");
	case VMCB_EXIT_SHUTDOWN:
		return ("shutdown");
	case VMCB_EXIT_NPF:
		return ("nptfault");
	case VMCB_EXIT_PAUSE:
		return ("pause");
	case VMCB_EXIT_HLT:
		return ("hlt");
	case VMCB_EXIT_CPUID:
		return ("cpuid");
	case VMCB_EXIT_IO:
		return ("inout");
	case VMCB_EXIT_MC:
		return ("mchk");
	case VMCB_EXIT_INTR:
		return ("extintr");
	case VMCB_EXIT_VINTR:
		return ("vintr");
	case VMCB_EXIT_MSR:
		return ("msr");
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
		return (reasonbuf);
	}
}
#endif	/* KTR */

/*
 * Determine the cause of the virtual cpu exit and handle the #VMEXIT.
 * Return: false - Break the vcpu execution loop and handle the vmexit
 *		   in kernel or user space.
 *	   true  - Continue the vcpu run.
 */
static bool
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	bool update_rip, loop, retu;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	update_rip = true;
	loop = true;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_MC:	/* Machine Check. */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		loop = false;
		break;

	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->e.g.sctx_rdx;

		if (ecx == MSR_EFER) {
			KASSERT(info1 != 0, ("rdmsr(MSR_EFER) is not "
			    "emulated: info1(%#lx) info2(%#lx)",
			    info1, info2));
			svm_write_efer(svm_sc, vcpu, edx, eax);
			break;
		}

		retu = false;
		if (info1) {
			/* VM exited because of a write to an MSR. */
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_WRMSR, 1);
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			val = (uint64_t)edx << 32 | eax;
			if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val,
			    &retu)) {
				vmexit->u.msr.wval = val;
				loop = false;
			} else
				loop = retu ? false : true;

			VCPU_CTR3(svm_sc->vm, vcpu,
			    "VMEXIT WRMSR(%s handling) 0x%lx @0x%x",
			    loop ? "kernel" : "user", val, ecx);
		} else {
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_RDMSR, 1);
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
			if (emulate_rdmsr(svm_sc->vm, vcpu, ecx,
			    &retu)) {
				loop = false;
			} else
				loop = retu ? false : true;
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR"
			    " MSB=0x%08x, LSB=%08x @0x%x",
			    ctx->e.g.sctx_rdx, state->rax, ecx);
		}

#define MSR_AMDK8_IPM	0xc0010055
		/*
		 * We can't hide the AMD C1E idle capability since it is
		 * based on CPU generation; for now ignore accesses to
		 * this MSR by vcpus.
		 * XXX: special handling of AMD C1E - Ignore.
		 */
		if (ecx == MSR_AMDK8_IPM)
			loop = true;
		break;

	case VMCB_EXIT_INTR:
		/*
		 * Exit on an external interrupt. Let the host interrupt
		 * handler run; if it is a guest interrupt the local APIC
		 * will inject the event into the guest.
		 */
		update_rip = false;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		break;

	case VMCB_EXIT_IO:
		loop = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;

	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		(void)x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->e.g.sctx_rdx);
		break;

	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		loop = false;
		break;

	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);

		break;

	case VMCB_EXIT_NPF:
		loop = false;
		update_rip = false;

		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT"
			    " reserved bit is set,"
			    "INFO1:0x%lx INFO2:0x%lx .\n",
			    info1, info2);
			break;
		}

		/* EXITINFO2 has the physical fault address (GPA). */
		if (vm_mem_allocated(svm_sc->vm, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type =
			    svm_npf_paging(info1);
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu),
			    info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;

	case VMCB_EXIT_SHUTDOWN:
		loop = false;
		break;

	case VMCB_EXIT_INVALID:
		loop = false;
		break;

	default:
		/* Return to user space. */
		loop = false;
		update_rip = false;
		VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx"
		    " EXITINFO1: 0x%lx EXITINFO2:0x%lx\n",
		    ctrl->exitcode, info1, info2);
		VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx"
		    " Inst decoder len:%d\n", state->rip,
		    ctrl->nrip, ctrl->inst_decode_size);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx nrip %#lx",
	    loop ? "handled" : "unhandled", exit_reason_to_str(code),
	    state->rip, update_rip ? ctrl->nrip : state->rip);

	vmexit->rip = state->rip;
	if (update_rip) {
		if (ctrl->nrip == 0) {
			VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set "
			    "for RIP0x%lx.\n", state->rip);
			vmexit->exitcode = VM_EXITCODE_VMX;
		} else
			vmexit->rip = ctrl->nrip;
	}

	/* If vcpu execution is continued, update RIP. */
	if (loop) {
		state->rip = vmexit->rip;
	}

	if (state->rip == 0) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM_ERR:RIP is NULL\n");
		vmexit->exitcode = VM_EXITCODE_VMX;
	}

	return (loop);
}

/*
 * Inject an NMI into the virtual cpu.
 */
static int
svm_inject_nmi(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	/* Nothing to do if no NMI is pending. */
	if (!vm_nmi_pending(svm_sc->vm, vcpu))
		return (0);

	/* Inject NMI, vector number is not used. */
	vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false);

	/* Acknowledge that the request is accepted. */
	vm_nmi_clear(svm_sc->vm, vcpu);

	VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n");

	return (1);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}

/*
 * Inject an event into the virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	int extint_pending;
	int vector;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	svm_inj_intinfo(svm_sc, vcpu);

	/* Can't inject multiple events at once. */
	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR1(svm_sc->vm, vcpu,
		    "SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj);
		return;
	}

	/* Wait for guest to come out of interrupt shadow. */
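	/*
	 * The interrupt shadow covers the instruction immediately following
	 * a MOV SS or STI; no event may be injected while it is active.
	 */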
	if (ctrl->intr_shadow) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n");
		return;
	}

	/* An NMI event has priority over interrupts. */
	if (svm_inject_nmi(svm_sc, vcpu)) {
		return;
	}

	extint_pending = vm_extint_pending(svm_sc->vm, vcpu);

	if (!extint_pending) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(svm_sc->vm, &vector);
	}

	if (vector < 32 || vector > 255) {
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection "
		    "invalid vector=%d.\n", vector);
		ERR("SVM_ERR:Event injection invalid vector=%d.\n", vector);
		return;
	}

	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Interrupt is disabled\n");
		return;
	}

	vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		/* Update the Local APIC ISR */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(svm_sc->vm, vcpu);
		vatpic_intr_accepted(svm_sc->vm, vector);

		/*
		 * XXX need to recheck extint_pending ala VT-x
		 */
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:event injected,vector=%d.\n", vector);
}

static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}

static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen		Action
	 *	mismatch	mismatch
	 *	   0		   0		(a)
	 *	   0		   1		(b1) or (b2)
	 *	   1		   0		(c)
	 *	   1		   1		(d)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */
	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}

	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		vcpu_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}

/*
 * Start the vcpu at the specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    void *rend_cookie, void *suspended_cookie)
{
	struct svm_regctx *hctx, *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	u_int thiscpu;
	bool loop;	/* Continue vcpu execution loop. */

	loop = true;
	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	/*
	 * Stash 'curcpu' on the stack as 'thiscpu'.
	 *
	 * The per-cpu data area is not accessible until MSR_GSBASE is restored
	 * after the #VMEXIT. Since VMRUN is executed inside a critical section
	 * 'curcpu' and 'thiscpu' are guaranteed to be identical.
	 */
	thiscpu = curcpu;

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	hctx = &host_ctx[thiscpu];
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != thiscpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		vcpu_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = thiscpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	/* Update Guest RIP */
	state->rip = rip;

	do {
		vmexit->inst_length = 0;

		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(suspended_cookie)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(rend_cookie)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to give up the cpu. */
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'thiscpu' */
		CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, thiscpu);

		ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_launch(vmcb_pa, gctx, hctx);

		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);

		/*
		 * Restore MSR_GSBASE to point to the pcpu data area.
		 *
		 * Note that accesses done via PCPU_GET/PCPU_SET will work
		 * only after MSR_GSBASE is restored.
		 *
		 * Also note that we don't bother restoring MSR_KGSBASE
		 * since it is not used in the kernel and will be restored
		 * when the VMRUN ioctl returns to userspace.
		 */
		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
		KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
		    thiscpu, curcpu));

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Handle the #VMEXIT and, if required, return to user space. */
		loop = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (loop);

	return (0);
}

/*
 * Cleanup for virtual machine.
 */
static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *svm_sc;

	svm_sc = arg;

	VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n");

	free(svm_sc, M_SVM);
}

/*
 * Return a pointer to the hypervisor-saved register state.
 */
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->e.g.sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->e.g.sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->e.g.sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		ERR("Unknown register requested, reg=%d.\n", reg);
		break;
	}

	return (NULL);
}

/*
 * Interface to read guest registers.
 * This can be an SVM h/w saved or a hypervisor saved register.
 */
static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	if (vmcb_read(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}

/*
 * Interface to write to guest registers.
 * This can be an SVM h/w saved or a hypervisor saved register.
 */
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);
	if (vmcb_write(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}

/*
 * Interface to set various descriptors.
 */
static int
svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	struct vmcb_segment *seg;
	uint16_t attrib;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);

	seg = vmcb_seg(vmcb, type);
	if (seg == NULL) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map seg_desc access to VMCB attribute format. */
	attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
	VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
	    type, desc->access, desc->limit);
	seg->attrib = attrib;
	seg->base = desc->base;
	seg->limit = desc->limit;

	return (0);
}

/*
 * Interface to get guest descriptor.
 */
static int
svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb_segment *seg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);

	seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
	if (!seg) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map VMCB attribute format to seg_desc access. */
	desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
	desc->base = seg->base;
	desc->limit = seg->limit;

	/*
	 * VT-x uses bit 16 (Unusable) to indicate a segment that has been
	 * loaded with a NULL segment selector. The 'desc->access' field is
	 * interpreted in the VT-x format by the processor-independent code.
	 *
	 * SVM uses the 'P' bit to convey the same information so convert it
	 * into the VT-x format. For more details refer to section
	 * "Segment State in the VMCB" in APMv2.
	 */
	if (type == VM_REG_GUEST_CS || type == VM_REG_GUEST_TR)
		desc->access |= 0x80;	/* CS and TR always present */

	if (!(desc->access & 0x80))
		desc->access |= 0x10000; /* Unusable segment */

	return (0);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *svm_sc;
	struct vmcb_ctrl *ctrl;
	int ret = ENOENT;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (val)
			ctrl->ctrl1 |= VMCB_INTCPT_HLT;
		else
			ctrl->ctrl1 &= ~VMCB_INTCPT_HLT;
		ret = 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Halt exit %s.\n",
		    val ? "enabled" : "disabled");
		break;

	case VM_CAP_PAUSE_EXIT:
		if (val)
			ctrl->ctrl1 |= VMCB_INTCPT_PAUSE;
		else
			ctrl->ctrl1 &= ~VMCB_INTCPT_PAUSE;
		ret = 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Pause exit %s.\n",
		    val ? "enabled" : "disabled");
		break;

	case VM_CAP_MTRAP_EXIT:
		if (val)
			ctrl->exception |= BIT(IDT_MC);
		else
			ctrl->exception &= ~BIT(IDT_MC);
		ret = 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:MC exit %s.\n",
		    val ? "enabled" : "disabled");
		break;

	case VM_CAP_UNRESTRICTED_GUEST:
		/* SVM doesn't need a special capability for SMP. */
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Set_gap:Unrestricted "
		    "always enabled.\n");
		ret = 0;
		break;

	default:
		break;
	}

	return (ret);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *svm_sc;
	struct vmcb_ctrl *ctrl;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = (ctrl->ctrl1 & VMCB_INTCPT_HLT) ? 1 : 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Halt exit %s.\n",
		    *retval ? "enabled" : "disabled");
		break;

	case VM_CAP_PAUSE_EXIT:
		*retval = (ctrl->ctrl1 & VMCB_INTCPT_PAUSE) ? 1 : 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Pause exit %s.\n",
		    *retval ? "enabled" : "disabled");
		break;

	case VM_CAP_MTRAP_EXIT:
		*retval = (ctrl->exception & BIT(IDT_MC)) ? 1 : 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:MC exit %s.\n",
		    *retval ? "enabled" : "disabled");
		break;

	case VM_CAP_UNRESTRICTED_GUEST:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:get_cap:Unrestricted.\n");
		*retval = 1;
		break;
	default:
		break;
	}

	return (0);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

struct vmm_ops vmm_ops_amd = {
	svm_init,
	svm_cleanup,
	svm_restore,
	svm_vminit,
	svm_vmrun,
	svm_vmcleanup,
	svm_getreg,
	svm_setreg,
	svm_getdesc,
	svm_setdesc,
	svm_getcap,
	svm_setcap,
	svm_npt_alloc,
	svm_npt_free,
	svm_vlapic_init,
	svm_vlapic_cleanup
};