svm.c revision 271340
1/*- 2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271340 2014-09-09 23:39:43Z neel $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/smp.h> 33#include <sys/kernel.h> 34#include <sys/malloc.h> 35#include <sys/pcpu.h> 36#include <sys/proc.h> 37 38#include <vm/vm.h> 39#include <vm/pmap.h> 40 41#include <machine/cpufunc.h> 42#include <machine/psl.h> 43#include <machine/pmap.h> 44#include <machine/md_var.h> 45#include <machine/vmparam.h> 46#include <machine/specialreg.h> 47#include <machine/segments.h> 48#include <machine/vmm.h> 49#include <machine/vmm_dev.h> 50#include <machine/vmm_instruction_emul.h> 51 52#include <x86/apicreg.h> 53 54#include "vmm_lapic.h" 55#include "vmm_msr.h" 56#include "vmm_stat.h" 57#include "vmm_ktr.h" 58#include "vmm_ioport.h" 59#include "vatpic.h" 60#include "vlapic.h" 61#include "vlapic_priv.h" 62 63#include "x86.h" 64#include "vmcb.h" 65#include "svm.h" 66#include "svm_softc.h" 67#include "npt.h" 68 69/* 70 * SVM CPUID function 0x8000_000A, edx bit decoding. 71 */ 72#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 73#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 74#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 75#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 76#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 77#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 78#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 79#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 80#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 81#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 82 83#define VMCB_CACHE_DEFAULT \ 84 (VMCB_CACHE_ASID | VMCB_CACHE_IOPM | VMCB_CACHE_NP) 85 86MALLOC_DEFINE(M_SVM, "svm", "svm"); 87MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 88 89/* Per-CPU context area. */ 90extern struct pcpu __pcpu[]; 91 92static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc); 93 94static uint32_t svm_feature; /* AMD SVM features. 
 */

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * S/w saved host context.
 */
static struct svm_regctx host_ctx[MAXCPU];

static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid VMCB EXITINTINFO");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "VMM pending exception injected");

/*
 * Common function to enable or disable SVM for a CPU.
 */
static int
cpu_svm_enable_disable(boolean_t enable)
{
	uint64_t efer_msr;

	efer_msr = rdmsr(MSR_EFER);

	if (enable)
		efer_msr |= EFER_SVM;
	else
		efer_msr &= ~EFER_SVM;

	wrmsr(MSR_EFER, efer_msr);

	return(0);
}

/*
 * Disable SVM on a CPU.
 */
static void
svm_disable(void *arg __unused)
{

	(void)cpu_svm_enable_disable(FALSE);
}

/*
 * Disable SVM for all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Check for the SVM features required by bhyve in a CPU.
 */
static int
svm_cpuid_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature = regs[3];

	printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]);
	nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	printf("SVM Features:0x%b\n", svm_feature,
		"\020"
		"\001NP"		/* Nested paging */
		"\002LbrVirt"		/* LBR virtualization */
		"\003SVML"		/* SVM lock */
		"\004NRIPS"		/* NRIP save */
		"\005TscRateMsr"	/* MSR based TSC rate control */
		"\006VmcbClean"		/* VMCB clean bits */
		"\007FlushByAsid"	/* Flush by ASID */
		"\010DecodeAssist"	/* Decode assist */
		"\011<b20>"
		"\012<b20>"
		"\013PauseFilter"
		"\014<b20>"
		"\015PauseFilterThreshold"
		"\016AVIC"
		);

	/* SVM Lock */
	if (!(svm_feature & AMD_CPUID_SVM_SVML)) {
		printf("SVM is disabled by BIOS, please enable in BIOS.\n");
		return (ENXIO);
	}

	/*
	 * bhyve needs RVI to work.
	 */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("Missing Nested paging or RVI SVM support in processor.\n");
		return (EIO);
	}

	if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE)
		return (0);

	return (EIO);
}

static __inline int
flush_by_asid(void)
{
	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

/*
 * Enable SVM for a CPU.
 */
static void
svm_enable(void *arg __unused)
{
	uint64_t hsave_pa;

	(void)cpu_svm_enable_disable(TRUE);

	hsave_pa = vtophys(hsave[curcpu]);
	wrmsr(MSR_VM_HSAVE_PA, hsave_pa);

	if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) {
		panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu);
	}
}

/*
 * Check if a processor supports SVM.
 */
static int
is_svm_enabled(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM is not supported on this processor.\n");
		return (ENXIO);
	}

	msr = rdmsr(MSR_VM_CR);
	/* Make sure SVM is not disabled by BIOS.
*/ 250 if ((msr & VM_CR_SVMDIS) == 0) { 251 return svm_cpuid_features(); 252 } 253 254 printf("SVM disabled by Key, consult TPM/BIOS manual.\n"); 255 return (ENXIO); 256} 257 258/* 259 * Enable SVM on CPU and initialize nested page table h/w. 260 */ 261static int 262svm_init(int ipinum) 263{ 264 int err, cpu; 265 266 err = is_svm_enabled(); 267 if (err) 268 return (err); 269 270 for (cpu = 0; cpu < MAXCPU; cpu++) { 271 /* 272 * Initialize the host ASIDs to their "highest" valid values. 273 * 274 * The next ASID allocation will rollover both 'gen' and 'num' 275 * and start off the sequence at {1,1}. 276 */ 277 asid[cpu].gen = ~0UL; 278 asid[cpu].num = nasid - 1; 279 } 280 281 svm_npt_init(ipinum); 282 283 /* Start SVM on all CPUs */ 284 smp_rendezvous(NULL, svm_enable, NULL, NULL); 285 286 return (0); 287} 288 289static void 290svm_restore(void) 291{ 292 svm_enable(NULL); 293} 294 295/* 296 * Get index and bit position for a MSR in MSR permission 297 * bitmap. Two bits are used for each MSR, lower bit is 298 * for read and higher bit is for write. 299 */ 300static int 301svm_msr_index(uint64_t msr, int *index, int *bit) 302{ 303 uint32_t base, off; 304 305/* Pentium compatible MSRs */ 306#define MSR_PENTIUM_START 0 307#define MSR_PENTIUM_END 0x1FFF 308/* AMD 6th generation and Intel compatible MSRs */ 309#define MSR_AMD6TH_START 0xC0000000UL 310#define MSR_AMD6TH_END 0xC0001FFFUL 311/* AMD 7th and 8th generation compatible MSRs */ 312#define MSR_AMD7TH_START 0xC0010000UL 313#define MSR_AMD7TH_END 0xC0011FFFUL 314 315 *index = -1; 316 *bit = (msr % 4) * 2; 317 base = 0; 318 319 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 320 *index = msr / 4; 321 return (0); 322 } 323 324 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 325 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 326 off = (msr - MSR_AMD6TH_START); 327 *index = (off + base) / 4; 328 return (0); 329 } 330 331 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 332 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 333 off = (msr - MSR_AMD7TH_START); 334 *index = (off + base) / 4; 335 return (0); 336 } 337 338 return (EIO); 339} 340 341/* 342 * Give virtual cpu the complete access to MSR(read & write). 343 */ 344static int 345svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 346{ 347 int index, bit, err; 348 349 err = svm_msr_index(msr, &index, &bit); 350 if (err) { 351 ERR("MSR 0x%lx is not writeable by guest.\n", msr); 352 return (err); 353 } 354 355 if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) { 356 ERR("MSR 0x%lx index out of range(%d).\n", msr, index); 357 return (EINVAL); 358 } 359 if (bit < 0 || bit > 8) { 360 ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit); 361 return (EINVAL); 362 } 363 364 /* Disable intercept for read and write. */ 365 if (read) 366 perm_bitmap[index] &= ~(1UL << bit); 367 if (write) 368 perm_bitmap[index] &= ~(2UL << bit); 369 CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n", 370 (perm_bitmap[index] >> bit) & 0x3, msr); 371 372 return (0); 373} 374 375static int 376svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 377{ 378 return svm_msr_perm(perm_bitmap, msr, true, true); 379} 380 381static int 382svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 383{ 384 return svm_msr_perm(perm_bitmap, msr, true, false); 385} 386 387static __inline void 388vcpu_set_dirty(struct svm_vcpu *vcpustate, uint32_t dirtybits) 389{ 390 vcpustate->dirty |= dirtybits; 391} 392 393/* 394 * Initialise a virtual machine. 
395 */ 396static void * 397svm_vminit(struct vm *vm, pmap_t pmap) 398{ 399 struct svm_softc *svm_sc; 400 struct svm_vcpu *vcpu; 401 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 402 int i; 403 404 svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc), 405 M_SVM, M_WAITOK | M_ZERO); 406 407 svm_sc->vm = vm; 408 svm_sc->svm_feature = svm_feature; 409 svm_sc->vcpu_cnt = VM_MAXCPU; 410 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 411 412 /* 413 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc. 414 */ 415 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 416 417 /* 418 * Following MSR can be completely controlled by virtual machines 419 * since access to following are translated to access to VMCB. 420 */ 421 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 422 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 423 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 424 425 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 426 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 427 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 428 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 429 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 430 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 431 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 432 433 /* For Nested Paging/RVI only. */ 434 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 435 436 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 437 438 /* Intercept access to all I/O ports. */ 439 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); 440 441 /* Cache physical address for multiple vcpus. */ 442 iopm_pa = vtophys(svm_sc->iopm_bitmap); 443 msrpm_pa = vtophys(svm_sc->msr_bitmap); 444 pml4_pa = svm_sc->nptp; 445 446 for (i = 0; i < svm_sc->vcpu_cnt; i++) { 447 vcpu = svm_get_vcpu(svm_sc, i); 448 vcpu->lastcpu = NOCPU; 449 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 450 svm_init_vmcb(&vcpu->vmcb, iopm_pa, msrpm_pa, pml4_pa); 451 } 452 return (svm_sc); 453} 454 455static int 456svm_cpl(struct vmcb_state *state) 457{ 458 459 /* 460 * From APMv2: 461 * "Retrieve the CPL from the CPL field in the VMCB, not 462 * from any segment DPL" 463 */ 464 return (state->cpl); 465} 466 467static enum vm_cpu_mode 468svm_vcpu_mode(struct vmcb *vmcb) 469{ 470 struct vmcb_segment *seg; 471 struct vmcb_state *state; 472 473 state = &vmcb->state; 474 475 if (state->efer & EFER_LMA) { 476 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); 477 /* 478 * Section 4.8.1 for APM2, check if Code Segment has 479 * Long attribute set in descriptor. 480 */ 481 if (seg->attrib & VMCB_CS_ATTRIB_L) 482 return (CPU_MODE_64BIT); 483 else 484 return (CPU_MODE_COMPATIBILITY); 485 } else if (state->cr0 & CR0_PE) { 486 return (CPU_MODE_PROTECTED); 487 } else { 488 return (CPU_MODE_REAL); 489 } 490} 491 492static enum vm_paging_mode 493svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 494{ 495 496 if ((cr0 & CR0_PG) == 0) 497 return (PAGING_MODE_FLAT); 498 if ((cr4 & CR4_PAE) == 0) 499 return (PAGING_MODE_32); 500 if (efer & EFER_LME) 501 return (PAGING_MODE_64); 502 else 503 return (PAGING_MODE_PAE); 504} 505 506/* 507 * ins/outs utility routines 508 */ 509static uint64_t 510svm_inout_str_index(struct svm_regctx *regs, int in) 511{ 512 uint64_t val; 513 514 val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi; 515 516 return (val); 517} 518 519static uint64_t 520svm_inout_str_count(struct svm_regctx *regs, int rep) 521{ 522 uint64_t val; 523 524 val = rep ? 
regs->sctx_rcx : 1; 525 526 return (val); 527} 528 529static void 530svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 531 int in, struct vm_inout_str *vis) 532{ 533 int error, s; 534 535 if (in) { 536 vis->seg_name = VM_REG_GUEST_ES; 537 } else { 538 /* The segment field has standard encoding */ 539 s = (info1 >> 10) & 0x7; 540 vis->seg_name = vm_segment_name(s); 541 } 542 543 error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 544 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 545} 546 547static int 548svm_inout_str_addrsize(uint64_t info1) 549{ 550 uint32_t size; 551 552 size = (info1 >> 7) & 0x7; 553 switch (size) { 554 case 1: 555 return (2); /* 16 bit */ 556 case 2: 557 return (4); /* 32 bit */ 558 case 4: 559 return (8); /* 64 bit */ 560 default: 561 panic("%s: invalid size encoding %d", __func__, size); 562 } 563} 564 565static void 566svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 567{ 568 struct vmcb_state *state; 569 570 state = &vmcb->state; 571 paging->cr3 = state->cr3; 572 paging->cpl = svm_cpl(state); 573 paging->cpu_mode = svm_vcpu_mode(vmcb); 574 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 575 state->efer); 576} 577 578 579/* 580 * Handle guest I/O intercept. 581 */ 582static bool 583svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 584{ 585 struct vmcb_ctrl *ctrl; 586 struct vmcb_state *state; 587 struct svm_regctx *regs; 588 struct vm_inout_str *vis; 589 uint64_t info1; 590 591 state = svm_get_vmcb_state(svm_sc, vcpu); 592 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 593 regs = svm_get_guest_regctx(svm_sc, vcpu); 594 info1 = ctrl->exitinfo1; 595 596 vmexit->exitcode = VM_EXITCODE_INOUT; 597 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 598 vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0; 599 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 600 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 601 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 602 vmexit->u.inout.eax = (uint32_t)(state->rax); 603 604 if (vmexit->u.inout.string) { 605 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 606 vis = &vmexit->u.inout_str; 607 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 608 vis->rflags = state->rflags; 609 vis->cr0 = state->cr0; 610 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 611 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 612 vis->addrsize = svm_inout_str_addrsize(info1); 613 svm_inout_str_seginfo(svm_sc, vcpu, info1, 614 vmexit->u.inout.in, vis); 615 } 616 617 return (false); 618} 619 620static int 621svm_npf_paging(uint64_t exitinfo1) 622{ 623 624 if (exitinfo1 & VMCB_NPF_INFO1_W) 625 return (VM_PROT_WRITE); 626 627 return (VM_PROT_READ); 628} 629 630static bool 631svm_npf_emul_fault(uint64_t exitinfo1) 632{ 633 634 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 635 return (false); 636 } 637 638 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 639 return (false); 640 } 641 642 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 643 return (false); 644 } 645 646 return (true); 647} 648 649static void 650svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 651{ 652 struct vm_guest_paging *paging; 653 struct vmcb_segment *seg; 654 655 paging = &vmexit->u.inst_emul.paging; 656 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 657 vmexit->u.inst_emul.gpa = gpa; 658 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 659 svm_paging_info(vmcb, paging); 660 661 /* 662 * If DecodeAssist SVM feature doesn't exist, we don't have NPF 663 * instuction length. 
RIP will be calculated based on the length
	 * determined by instruction emulation.
	 */
	vmexit->inst_length = VIE_INST_SIZE;

	seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
	switch(paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
}

/*
 * Special handling of the EFER MSR.
 * An SVM guest must have the SVM bit set in EFER; prohibit the guest from
 * clearing the SVM enable bit in EFER.
 */
static void
svm_efer(struct svm_softc *svm_sc, int vcpu, boolean_t write)
{
	struct svm_regctx *swctx;
	struct vmcb_state *state;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	swctx = svm_get_guest_regctx(svm_sc, vcpu);

	if (write) {
		state->efer = ((swctx->e.g.sctx_rdx & (uint32_t)~0) << 32) |
		    ((uint32_t)state->rax) | EFER_SVM;
	} else {
		state->rax = (uint32_t)state->efer;
		swctx->e.g.sctx_rdx = (uint32_t)(state->efer >> 32);
	}
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

/*
 * Determine the cause of the virtual cpu exit and handle the VMEXIT.
 * Return: false - Break the vcpu execution loop and handle the vmexit
 *		   in kernel or user space.
 *	   true  - Continue running the vcpu.
 */
static bool
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	bool update_rip, loop, retu;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	update_rip = true;
	loop = true;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = 0;

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_MC:	/* Machine Check. */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		loop = false;
		break;

	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->e.g.sctx_rdx;

		if (ecx == MSR_EFER) {
			VCPU_CTR0(svm_sc->vm, vcpu,"VMEXIT EFER\n");
			svm_efer(svm_sc, vcpu, info1);
			break;
		}

		retu = false;
		if (info1) {
			/* VM exited because of write MSR */
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_WRMSR, 1);
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			val = (uint64_t)edx << 32 | eax;
			if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val,
			    &retu)) {
				vmexit->u.msr.wval = val;
				loop = false;
			} else
				loop = retu ? false : true;

			VCPU_CTR3(svm_sc->vm, vcpu,
			    "VMEXIT WRMSR(%s handling) 0x%lx @0x%x",
			    loop ? "kernel" : "user", val, ecx);
		} else {
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_RDMSR, 1);
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
			if (emulate_rdmsr(svm_sc->vm, vcpu, ecx,
			    &retu)) {
				loop = false;
			} else
				loop = retu ? false : true;
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR"
			    " MSB=0x%08x, LSB=%08x @0x%x",
			    ctx->e.g.sctx_rdx, state->rax, ecx);
		}

#define MSR_AMDK8_IPM		0xc0010055
		/*
		 * We can't hide the AMD C1E idle capability since it's
		 * based on CPU generation; for now ignore access to
		 * this MSR by vcpus.
		 * XXX: special handling of AMD C1E - Ignore.
		 */
		if (ecx == MSR_AMDK8_IPM)
			loop = true;
		break;

	case VMCB_EXIT_INTR:
		/*
		 * Exit on an external interrupt.
		 * Let the host interrupt handler run; if it is a guest
		 * interrupt, the local APIC will inject the event into
		 * the guest.
		 */
		update_rip = false;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:VMEXIT ExtInt"
		    " RIP:0x%lx.\n", state->rip);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		break;

	case VMCB_EXIT_IO:
		loop = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;

	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		(void)x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->e.g.sctx_rdx);
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT CPUID\n");
		break;

	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		if (ctrl->v_irq) {
			/* Interrupt is pending, can't halt guest. */
			vmm_stat_incr(svm_sc->vm, vcpu,
			    VMEXIT_HLT_IGNORED, 1);
			VCPU_CTR0(svm_sc->vm, vcpu,
			    "VMEXIT halt ignored.");
		} else {
			VCPU_CTR0(svm_sc->vm, vcpu,
			    "VMEXIT halted CPU.");
			vmexit->exitcode = VM_EXITCODE_HLT;
			vmexit->u.hlt.rflags = state->rflags;
			loop = false;

		}
		break;

	case VMCB_EXIT_PAUSE:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT pause");
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);

		break;

	case VMCB_EXIT_NPF:
		loop = false;
		update_rip = false;

		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT"
			    " reserved bit is set,"
			    "INFO1:0x%lx INFO2:0x%lx .\n",
			    info1, info2);
			break;
		}

		/* EXITINFO2 has the physical fault address (GPA).
*/ 892 if(vm_mem_allocated(svm_sc->vm, info2)) { 893 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF-paging," 894 "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n", 895 state->rip, info1, info2); 896 vmexit->exitcode = VM_EXITCODE_PAGING; 897 vmexit->u.paging.gpa = info2; 898 vmexit->u.paging.fault_type = 899 svm_npf_paging(info1); 900 vmm_stat_incr(svm_sc->vm, vcpu, 901 VMEXIT_NESTED_FAULT, 1); 902 } else if (svm_npf_emul_fault(info1)) { 903 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF inst_emul," 904 "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n", 905 state->rip, info1, info2); 906 svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu), 907 info2, vmexit); 908 vmm_stat_incr(svm_sc->vm, vcpu, 909 VMEXIT_INST_EMUL, 1); 910 } 911 912 break; 913 914 case VMCB_EXIT_SHUTDOWN: 915 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT shutdown."); 916 loop = false; 917 break; 918 919 case VMCB_EXIT_INVALID: 920 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT INVALID."); 921 loop = false; 922 break; 923 924 default: 925 /* Return to user space. */ 926 loop = false; 927 update_rip = false; 928 VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx" 929 " EXITINFO1: 0x%lx EXITINFO2:0x%lx\n", 930 ctrl->exitcode, info1, info2); 931 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx" 932 " Inst decoder len:%d\n", state->rip, 933 ctrl->nrip, ctrl->inst_decode_size); 934 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 935 break; 936 } 937 938 vmexit->rip = state->rip; 939 if (update_rip) { 940 if (ctrl->nrip == 0) { 941 VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set " 942 "for RIP0x%lx.\n", state->rip); 943 vmexit->exitcode = VM_EXITCODE_VMX; 944 } else 945 vmexit->rip = ctrl->nrip; 946 } 947 948 /* If vcpu execution is continued, update RIP. */ 949 if (loop) { 950 state->rip = vmexit->rip; 951 } 952 953 if (state->rip == 0) { 954 VCPU_CTR0(svm_sc->vm, vcpu, "SVM_ERR:RIP is NULL\n"); 955 vmexit->exitcode = VM_EXITCODE_VMX; 956 } 957 958 return (loop); 959} 960 961/* 962 * Inject NMI to virtual cpu. 963 */ 964static int 965svm_inject_nmi(struct svm_softc *svm_sc, int vcpu) 966{ 967 struct vmcb_ctrl *ctrl; 968 969 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 970 971 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 972 /* Can't inject another NMI if last one is pending.*/ 973 if (!vm_nmi_pending(svm_sc->vm, vcpu)) 974 return (0); 975 976 /* Inject NMI, vector number is not used.*/ 977 vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false); 978 979 /* Acknowledge the request is accepted.*/ 980 vm_nmi_clear(svm_sc->vm, vcpu); 981 982 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n"); 983 984 return (1); 985} 986 987static void 988svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 989{ 990 struct vmcb_ctrl *ctrl; 991 uint64_t intinfo; 992 993 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 994 995 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 996 return; 997 998 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 999 "valid: %#lx", __func__, intinfo)); 1000 1001 vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo), 1002 VMCB_EXITINTINFO_VECTOR(intinfo), 1003 VMCB_EXITINTINFO_EC(intinfo), 1004 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1005 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1006 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1007} 1008 1009/* 1010 * Inject event to virtual cpu. 
1011 */ 1012static void 1013svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu, struct vlapic *vlapic) 1014{ 1015 struct vmcb_ctrl *ctrl; 1016 struct vmcb_state *state; 1017 int extint_pending; 1018 int vector; 1019 1020 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1021 1022 state = svm_get_vmcb_state(svm_sc, vcpu); 1023 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1024 1025 svm_inj_intinfo(svm_sc, vcpu); 1026 1027 /* Can't inject multiple events at once. */ 1028 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1029 VCPU_CTR1(svm_sc->vm, vcpu, 1030 "SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj); 1031 return ; 1032 } 1033 1034 /* Wait for guest to come out of interrupt shadow. */ 1035 if (ctrl->intr_shadow) { 1036 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n"); 1037 return; 1038 } 1039 1040 /* NMI event has priority over interrupts.*/ 1041 if (svm_inject_nmi(svm_sc, vcpu)) { 1042 return; 1043 } 1044 1045 extint_pending = vm_extint_pending(svm_sc->vm, vcpu); 1046 1047 if (!extint_pending) { 1048 /* Ask the local apic for a vector to inject */ 1049 if (!vlapic_pending_intr(vlapic, &vector)) 1050 return; 1051 } else { 1052 /* Ask the legacy pic for a vector to inject */ 1053 vatpic_pending_intr(svm_sc->vm, &vector); 1054 } 1055 1056 if (vector < 32 || vector > 255) { 1057 VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection" 1058 "invalid vector=%d.\n", vector); 1059 ERR("SVM_ERR:Event injection invalid vector=%d.\n", vector); 1060 return; 1061 } 1062 1063 if ((state->rflags & PSL_I) == 0) { 1064 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Interrupt is disabled\n"); 1065 return; 1066 } 1067 1068 vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1069 1070 if (!extint_pending) { 1071 /* Update the Local APIC ISR */ 1072 vlapic_intr_accepted(vlapic, vector); 1073 } else { 1074 vm_extint_clear(svm_sc->vm, vcpu); 1075 vatpic_intr_accepted(svm_sc->vm, vector); 1076 1077 /* 1078 * XXX need to recheck exting_pending ala VT-x 1079 */ 1080 } 1081 1082 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:event injected,vector=%d.\n", vector); 1083} 1084 1085static __inline void 1086restore_host_tss(void) 1087{ 1088 struct system_segment_descriptor *tss_sd; 1089 1090 /* 1091 * The TSS descriptor was in use prior to launching the guest so it 1092 * has been marked busy. 1093 * 1094 * 'ltr' requires the descriptor to be marked available so change the 1095 * type to "64-bit available TSS". 1096 */ 1097 tss_sd = PCPU_GET(tss); 1098 tss_sd->sd_type = SDT_SYSTSS; 1099 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1100} 1101 1102static void 1103check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1104{ 1105 struct svm_vcpu *vcpustate; 1106 struct vmcb_ctrl *ctrl; 1107 long eptgen; 1108 bool alloc_asid; 1109 1110 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1111 "active on cpu %u", __func__, thiscpu)); 1112 1113 vcpustate = svm_get_vcpu(sc, vcpuid); 1114 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1115 1116 /* 1117 * The TLB entries associated with the vcpu's ASID are not valid 1118 * if either of the following conditions is true: 1119 * 1120 * 1. The vcpu's ASID generation is different than the host cpu's 1121 * ASID generation. This happens when the vcpu migrates to a new 1122 * host cpu. It can also happen when the number of vcpus executing 1123 * on a host cpu is greater than the number of ASIDs available. 1124 * 1125 * 2. The pmap generation number is different than the value cached in 1126 * the 'vcpustate'. 
This happens when the host invalidates pages 1127 * belonging to the guest. 1128 * 1129 * asidgen eptgen Action 1130 * mismatch mismatch 1131 * 0 0 (a) 1132 * 0 1 (b1) or (b2) 1133 * 1 0 (c) 1134 * 1 1 (d) 1135 * 1136 * (a) There is no mismatch in eptgen or ASID generation and therefore 1137 * no further action is needed. 1138 * 1139 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1140 * retained and the TLB entries associated with this ASID 1141 * are flushed by VMRUN. 1142 * 1143 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1144 * allocated. 1145 * 1146 * (c) A new ASID is allocated. 1147 * 1148 * (d) A new ASID is allocated. 1149 */ 1150 1151 alloc_asid = false; 1152 eptgen = pmap->pm_eptgen; 1153 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1154 1155 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1156 alloc_asid = true; /* (c) and (d) */ 1157 } else if (vcpustate->eptgen != eptgen) { 1158 if (flush_by_asid()) 1159 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1160 else 1161 alloc_asid = true; /* (b2) */ 1162 } else { 1163 /* 1164 * This is the common case (a). 1165 */ 1166 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1167 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1168 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1169 } 1170 1171 if (alloc_asid) { 1172 if (++asid[thiscpu].num >= nasid) { 1173 asid[thiscpu].num = 1; 1174 if (++asid[thiscpu].gen == 0) 1175 asid[thiscpu].gen = 1; 1176 /* 1177 * If this cpu does not support "flush-by-asid" 1178 * then flush the entire TLB on a generation 1179 * bump. Subsequent ASID allocation in this 1180 * generation can be done without a TLB flush. 1181 */ 1182 if (!flush_by_asid()) 1183 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1184 } 1185 vcpustate->asid.gen = asid[thiscpu].gen; 1186 vcpustate->asid.num = asid[thiscpu].num; 1187 1188 ctrl->asid = vcpustate->asid.num; 1189 vcpu_set_dirty(vcpustate, VMCB_CACHE_ASID); 1190 /* 1191 * If this cpu supports "flush-by-asid" then the TLB 1192 * was not flushed after the generation bump. The TLB 1193 * is flushed selectively after every new ASID allocation. 1194 */ 1195 if (flush_by_asid()) 1196 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1197 } 1198 vcpustate->eptgen = eptgen; 1199 1200 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1201 KASSERT(ctrl->asid == vcpustate->asid.num, 1202 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1203} 1204 1205/* 1206 * Start vcpu with specified RIP. 1207 */ 1208static int 1209svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1210 void *rend_cookie, void *suspended_cookie) 1211{ 1212 struct svm_regctx *hctx, *gctx; 1213 struct svm_softc *svm_sc; 1214 struct svm_vcpu *vcpustate; 1215 struct vmcb_state *state; 1216 struct vmcb_ctrl *ctrl; 1217 struct vm_exit *vmexit; 1218 struct vlapic *vlapic; 1219 struct vm *vm; 1220 uint64_t vmcb_pa; 1221 u_int thiscpu; 1222 bool loop; /* Continue vcpu execution loop. */ 1223 1224 loop = true; 1225 svm_sc = arg; 1226 vm = svm_sc->vm; 1227 1228 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1229 state = svm_get_vmcb_state(svm_sc, vcpu); 1230 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1231 vmexit = vm_exitinfo(vm, vcpu); 1232 vlapic = vm_lapic(vm, vcpu); 1233 1234 /* 1235 * Stash 'curcpu' on the stack as 'thiscpu'. 1236 * 1237 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1238 * after the #VMEXIT. Since VMRUN is executed inside a critical section 1239 * 'curcpu' and 'thiscpu' are guaranteed to identical. 
1240 */ 1241 thiscpu = curcpu; 1242 1243 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1244 hctx = &host_ctx[thiscpu]; 1245 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1246 1247 if (vcpustate->lastcpu != thiscpu) { 1248 /* 1249 * Force new ASID allocation by invalidating the generation. 1250 */ 1251 vcpustate->asid.gen = 0; 1252 1253 /* 1254 * Invalidate the VMCB state cache by marking all fields dirty. 1255 */ 1256 vcpu_set_dirty(vcpustate, 0xffffffff); 1257 1258 /* 1259 * XXX 1260 * Setting 'vcpustate->lastcpu' here is bit premature because 1261 * we may return from this function without actually executing 1262 * the VMRUN instruction. This could happen if a rendezvous 1263 * or an AST is pending on the first time through the loop. 1264 * 1265 * This works for now but any new side-effects of vcpu 1266 * migration should take this case into account. 1267 */ 1268 vcpustate->lastcpu = thiscpu; 1269 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1270 } 1271 1272 VCPU_CTR3(vm, vcpu, "SVM:Enter vmrun RIP:0x%lx" 1273 " inst len=%d/%d\n", 1274 rip, vmexit->inst_length, 1275 vmexit->u.inst_emul.vie.num_valid); 1276 /* Update Guest RIP */ 1277 state->rip = rip; 1278 1279 do { 1280 vmexit->inst_length = 0; 1281 1282 /* 1283 * Disable global interrupts to guarantee atomicity during 1284 * loading of guest state. This includes not only the state 1285 * loaded by the "vmrun" instruction but also software state 1286 * maintained by the hypervisor: suspended and rendezvous 1287 * state, NPT generation number, vlapic interrupts etc. 1288 */ 1289 disable_gintr(); 1290 1291 if (vcpu_suspended(suspended_cookie)) { 1292 enable_gintr(); 1293 vm_exit_suspended(vm, vcpu, state->rip); 1294 break; 1295 } 1296 1297 if (vcpu_rendezvous_pending(rend_cookie)) { 1298 enable_gintr(); 1299 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; 1300 vmm_stat_incr(vm, vcpu, VMEXIT_RENDEZVOUS, 1); 1301 VCPU_CTR1(vm, vcpu, 1302 "SVM: VCPU rendezvous, RIP:0x%lx\n", 1303 state->rip); 1304 vmexit->rip = state->rip; 1305 break; 1306 } 1307 1308 /* We are asked to give the cpu by scheduler. */ 1309 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1310 enable_gintr(); 1311 vmexit->exitcode = VM_EXITCODE_BOGUS; 1312 vmm_stat_incr(vm, vcpu, VMEXIT_ASTPENDING, 1); 1313 VCPU_CTR1(vm, vcpu, 1314 "SVM: ASTPENDING, RIP:0x%lx\n", state->rip); 1315 vmexit->rip = state->rip; 1316 break; 1317 } 1318 1319 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1320 1321 /* Activate the nested pmap on 'thiscpu' */ 1322 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1323 1324 /* 1325 * Check the pmap generation and the ASID generation to 1326 * ensure that the vcpu does not use stale TLB mappings. 1327 */ 1328 check_asid(svm_sc, vcpu, pmap, thiscpu); 1329 1330 ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty; 1331 vcpustate->dirty = 0; 1332 1333 /* Launch Virtual Machine. */ 1334 svm_launch(vmcb_pa, gctx, hctx); 1335 1336 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1337 1338 /* 1339 * Restore MSR_GSBASE to point to the pcpu data area. 1340 * 1341 * Note that accesses done via PCPU_GET/PCPU_SET will work 1342 * only after MSR_GSBASE is restored. 1343 * 1344 * Also note that we don't bother restoring MSR_KGSBASE 1345 * since it is not used in the kernel and will be restored 1346 * when the VMRUN ioctl returns to userspace. 
1347 */ 1348 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1349 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1350 thiscpu, curcpu)); 1351 1352 /* 1353 * The host GDTR and IDTR is saved by VMRUN and restored 1354 * automatically on #VMEXIT. However, the host TSS needs 1355 * to be restored explicitly. 1356 */ 1357 restore_host_tss(); 1358 1359 /* #VMEXIT disables interrupts so re-enable them here. */ 1360 enable_gintr(); 1361 1362 /* Handle #VMEXIT and if required return to user space. */ 1363 loop = svm_vmexit(svm_sc, vcpu, vmexit); 1364 vcpustate->loop++; 1365 vmm_stat_incr(vm, vcpu, VMEXIT_COUNT, 1); 1366 } while (loop); 1367 1368 return (0); 1369} 1370 1371/* 1372 * Cleanup for virtual machine. 1373 */ 1374static void 1375svm_vmcleanup(void *arg) 1376{ 1377 struct svm_softc *svm_sc; 1378 1379 svm_sc = arg; 1380 1381 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); 1382 1383 free(svm_sc, M_SVM); 1384} 1385 1386/* 1387 * Return pointer to hypervisor saved register state. 1388 */ 1389static register_t * 1390swctx_regptr(struct svm_regctx *regctx, int reg) 1391{ 1392 1393 switch (reg) { 1394 case VM_REG_GUEST_RBX: 1395 return (®ctx->sctx_rbx); 1396 case VM_REG_GUEST_RCX: 1397 return (®ctx->sctx_rcx); 1398 case VM_REG_GUEST_RDX: 1399 return (®ctx->e.g.sctx_rdx); 1400 case VM_REG_GUEST_RDI: 1401 return (®ctx->e.g.sctx_rdi); 1402 case VM_REG_GUEST_RSI: 1403 return (®ctx->e.g.sctx_rsi); 1404 case VM_REG_GUEST_RBP: 1405 return (®ctx->sctx_rbp); 1406 case VM_REG_GUEST_R8: 1407 return (®ctx->sctx_r8); 1408 case VM_REG_GUEST_R9: 1409 return (®ctx->sctx_r9); 1410 case VM_REG_GUEST_R10: 1411 return (®ctx->sctx_r10); 1412 case VM_REG_GUEST_R11: 1413 return (®ctx->sctx_r11); 1414 case VM_REG_GUEST_R12: 1415 return (®ctx->sctx_r12); 1416 case VM_REG_GUEST_R13: 1417 return (®ctx->sctx_r13); 1418 case VM_REG_GUEST_R14: 1419 return (®ctx->sctx_r14); 1420 case VM_REG_GUEST_R15: 1421 return (®ctx->sctx_r15); 1422 default: 1423 ERR("Unknown register requested, reg=%d.\n", reg); 1424 break; 1425 } 1426 1427 return (NULL); 1428} 1429 1430/* 1431 * Interface to read guest registers. 1432 * This can be SVM h/w saved or hypervisor saved register. 1433 */ 1434static int 1435svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 1436{ 1437 struct svm_softc *svm_sc; 1438 struct vmcb *vmcb; 1439 register_t *reg; 1440 1441 svm_sc = arg; 1442 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1443 1444 vmcb = svm_get_vmcb(svm_sc, vcpu); 1445 1446 if (vmcb_read(vmcb, ident, val) == 0) { 1447 return (0); 1448 } 1449 1450 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1451 1452 if (reg != NULL) { 1453 *val = *reg; 1454 return (0); 1455 } 1456 1457 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 1458 return (EINVAL); 1459} 1460 1461/* 1462 * Interface to write to guest registers. 1463 * This can be SVM h/w saved or hypervisor saved register. 1464 */ 1465static int 1466svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 1467{ 1468 struct svm_softc *svm_sc; 1469 struct vmcb *vmcb; 1470 register_t *reg; 1471 1472 svm_sc = arg; 1473 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1474 1475 vmcb = svm_get_vmcb(svm_sc, vcpu); 1476 if (vmcb_write(vmcb, ident, val) == 0) { 1477 return (0); 1478 } 1479 1480 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1481 1482 if (reg != NULL) { 1483 *reg = val; 1484 return (0); 1485 } 1486 1487 /* 1488 * XXX deal with CR3 and invalidate TLB entries tagged with the 1489 * vcpu's ASID. 
This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}


/*
 * Interface to set various descriptors.
 */
static int
svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	struct vmcb_segment *seg;
	uint16_t attrib;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);

	seg = vmcb_seg(vmcb, type);
	if (seg == NULL) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map seg_desc access to VMCB attribute format.*/
	attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
	VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
	    type, desc->access, desc->limit);
	seg->attrib = attrib;
	seg->base = desc->base;
	seg->limit = desc->limit;

	return (0);
}

/*
 * Interface to get guest descriptor.
 */
static int
svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb_segment *seg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);

	seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
	if (!seg) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map seg_desc access to VMCB attribute format.*/
	desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
	desc->base = seg->base;
	desc->limit = seg->limit;

	/*
	 * VT-x uses bit 16 (Unusable) to indicate a segment that has been
	 * loaded with a NULL segment selector. The 'desc->access' field is
	 * interpreted in the VT-x format by the processor-independent code.
	 *
	 * SVM uses the 'P' bit to convey the same information so convert it
	 * into the VT-x format. For more details refer to section
	 * "Segment State in the VMCB" in APMv2.
	 */
	if (type == VM_REG_GUEST_CS || type == VM_REG_GUEST_TR)
		desc->access |= 0x80;	/* CS and TR always present */

	if (!(desc->access & 0x80))
		desc->access |= 0x10000; /* Unusable segment */

	return (0);
}

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *svm_sc;
	struct vmcb_ctrl *ctrl;
	int ret = ENOENT;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	switch (type) {
	case VM_CAP_HALT_EXIT:
		if (val)
			ctrl->ctrl1 |= VMCB_INTCPT_HLT;
		else
			ctrl->ctrl1 &= ~VMCB_INTCPT_HLT;
		ret = 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Halt exit %s.\n",
		    val ? "enabled": "disabled");
		break;

	case VM_CAP_PAUSE_EXIT:
		if (val)
			ctrl->ctrl1 |= VMCB_INTCPT_PAUSE;
		else
			ctrl->ctrl1 &= ~VMCB_INTCPT_PAUSE;
		ret = 0;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Pause exit %s.\n",
		    val ?
"enabled": "disabled"); 1607 break; 1608 1609 case VM_CAP_MTRAP_EXIT: 1610 if (val) 1611 ctrl->exception |= BIT(IDT_MC); 1612 else 1613 ctrl->exception &= ~BIT(IDT_MC); 1614 ret = 0; 1615 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:MC exit %s.\n", 1616 val ? "enabled": "disabled"); 1617 break; 1618 1619 case VM_CAP_UNRESTRICTED_GUEST: 1620 /* SVM doesn't need special capability for SMP.*/ 1621 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Set_gap:Unrestricted " 1622 "always enabled.\n"); 1623 ret = 0; 1624 break; 1625 1626 default: 1627 break; 1628 } 1629 1630 return (ret); 1631} 1632 1633static int 1634svm_getcap(void *arg, int vcpu, int type, int *retval) 1635{ 1636 struct svm_softc *svm_sc; 1637 struct vmcb_ctrl *ctrl; 1638 1639 svm_sc = arg; 1640 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1641 1642 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1643 1644 switch (type) { 1645 case VM_CAP_HALT_EXIT: 1646 *retval = (ctrl->ctrl1 & VMCB_INTCPT_HLT) ? 1 : 0; 1647 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Halt exit %s.\n", 1648 *retval ? "enabled": "disabled"); 1649 break; 1650 1651 case VM_CAP_PAUSE_EXIT: 1652 *retval = (ctrl->ctrl1 & VMCB_INTCPT_PAUSE) ? 1 : 0; 1653 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Pause exit %s.\n", 1654 *retval ? "enabled": "disabled"); 1655 break; 1656 1657 case VM_CAP_MTRAP_EXIT: 1658 *retval = (ctrl->exception & BIT(IDT_MC)) ? 1 : 0; 1659 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:MC exit %s.\n", 1660 *retval ? "enabled": "disabled"); 1661 break; 1662 1663 case VM_CAP_UNRESTRICTED_GUEST: 1664 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:get_cap:Unrestricted.\n"); 1665 *retval = 1; 1666 break; 1667 default: 1668 break; 1669 } 1670 1671 return (0); 1672} 1673 1674static struct vlapic * 1675svm_vlapic_init(void *arg, int vcpuid) 1676{ 1677 struct svm_softc *svm_sc; 1678 struct vlapic *vlapic; 1679 1680 svm_sc = arg; 1681 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 1682 vlapic->vm = svm_sc->vm; 1683 vlapic->vcpuid = vcpuid; 1684 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 1685 1686 vlapic_init(vlapic); 1687 1688 return (vlapic); 1689} 1690 1691static void 1692svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 1693{ 1694 1695 vlapic_cleanup(vlapic); 1696 free(vlapic, M_SVM_VLAPIC); 1697} 1698 1699struct vmm_ops vmm_ops_amd = { 1700 svm_init, 1701 svm_cleanup, 1702 svm_restore, 1703 svm_vminit, 1704 svm_vmrun, 1705 svm_vmcleanup, 1706 svm_getreg, 1707 svm_setreg, 1708 svm_getdesc, 1709 svm_setdesc, 1710 svm_getcap, 1711 svm_setcap, 1712 svm_npt_alloc, 1713 svm_npt_free, 1714 svm_vlapic_init, 1715 svm_vlapic_cleanup 1716}; 1717