svm.c revision 271203
1157016Sdes/*- 2126274Sdes * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 398937Sdes * All rights reserved. 498937Sdes * 598937Sdes * Redistribution and use in source and binary forms, with or without 6124208Sdes * modification, are permitted provided that the following conditions 7124208Sdes * are met: 8124208Sdes * 1. Redistributions of source code must retain the above copyright 998937Sdes * notice unmodified, this list of conditions, and the following 10124208Sdes * disclaimer. 11124208Sdes * 2. Redistributions in binary form must reproduce the above copyright 12124208Sdes * notice, this list of conditions and the following disclaimer in the 13124208Sdes * documentation and/or other materials provided with the distribution. 14124208Sdes * 15124208Sdes * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16124208Sdes * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1798937Sdes * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1898937Sdes * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19157016Sdes * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20157016Sdes * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21106121Sdes * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2298937Sdes * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2398937Sdes * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2498937Sdes * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2598937Sdes */ 2698937Sdes 2798937Sdes#include <sys/cdefs.h> 2898937Sdes__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271203 2014-09-06 19:02:52Z neel $"); 2998937Sdes 3098937Sdes#include <sys/param.h> 3198937Sdes#include <sys/systm.h> 3298937Sdes#include <sys/smp.h> 3398937Sdes#include <sys/kernel.h> 3498937Sdes#include <sys/malloc.h> 35124208Sdes#include <sys/pcpu.h> 3698937Sdes#include <sys/proc.h> 37157016Sdes 38157016Sdes#include <vm/vm.h> 39157016Sdes#include <vm/pmap.h> 4098937Sdes 4198937Sdes#include <machine/cpufunc.h> 4298937Sdes#include <machine/psl.h> 4398937Sdes#include <machine/pmap.h> 4498937Sdes#include <machine/md_var.h> 4598937Sdes#include <machine/vmparam.h> 4698937Sdes#include <machine/specialreg.h> 4798937Sdes#include <machine/segments.h> 4898937Sdes#include <machine/vmm.h> 4998937Sdes#include <machine/vmm_dev.h> 5098937Sdes#include <machine/vmm_instruction_emul.h> 5198937Sdes 5298937Sdes#include <x86/apicreg.h> 5398937Sdes 5498937Sdes#include "vmm_lapic.h" 5598937Sdes#include "vmm_msr.h" 5698937Sdes#include "vmm_stat.h" 5798937Sdes#include "vmm_ktr.h" 5898937Sdes#include "vmm_ioport.h" 5998937Sdes#include "vatpic.h" 6098937Sdes#include "vlapic.h" 6198937Sdes#include "vlapic_priv.h" 6298937Sdes 63#include "x86.h" 64#include "vmcb.h" 65#include "svm.h" 66#include "svm_softc.h" 67#include "npt.h" 68 69/* 70 * SVM CPUID function 0x8000_000A, edx bit decoding. 71 */ 72#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 73#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 74#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 75#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 76#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 77#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 78#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 79#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 80#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 81#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 82 83#define VMCB_CACHE_DEFAULT \ 84 (VMCB_CACHE_ASID | VMCB_CACHE_IOPM | VMCB_CACHE_NP) 85 86MALLOC_DEFINE(M_SVM, "svm", "svm"); 87MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 88 89/* Per-CPU context area. */ 90extern struct pcpu __pcpu[]; 91 92static bool svm_vmexit(struct svm_softc *svm_sc, int vcpu, 93 struct vm_exit *vmexit); 94static int svm_msr_rw_ok(uint8_t *btmap, uint64_t msr); 95static int svm_msr_rd_ok(uint8_t *btmap, uint64_t msr); 96static int svm_msr_index(uint64_t msr, int *index, int *bit); 97static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc); 98 99static uint32_t svm_feature; /* AMD SVM features. */ 100 101/* Maximum ASIDs supported by the processor */ 102static uint32_t nasid; 103 104/* Current ASID generation for each host cpu */ 105static struct asid asid[MAXCPU]; 106 107/* 108 * SVM host state saved area of size 4KB for each core. 109 */ 110static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 111 112/* 113 * S/w saved host context. 114 */ 115static struct svm_regctx host_ctx[MAXCPU]; 116 117static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid VMCB EXITINTINFO"); 118static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "VMM pending exception injected"); 119 120/* 121 * Common function to enable or disabled SVM for a CPU. 122 */ 123static int 124cpu_svm_enable_disable(boolean_t enable) 125{ 126 uint64_t efer_msr; 127 128 efer_msr = rdmsr(MSR_EFER); 129 130 if (enable) 131 efer_msr |= EFER_SVM; 132 else 133 efer_msr &= ~EFER_SVM; 134 135 wrmsr(MSR_EFER, efer_msr); 136 137 return(0); 138} 139 140/* 141 * Disable SVM on a CPU. 142 */ 143static void 144svm_disable(void *arg __unused) 145{ 146 147 (void)cpu_svm_enable_disable(FALSE); 148} 149 150/* 151 * Disable SVM for all CPUs. 152 */ 153static int 154svm_cleanup(void) 155{ 156 157 smp_rendezvous(NULL, svm_disable, NULL, NULL); 158 return (0); 159} 160 161/* 162 * Check for required BHyVe SVM features in a CPU. 163 */ 164static int 165svm_cpuid_features(void) 166{ 167 u_int regs[4]; 168 169 /* CPUID Fn8000_000A is for SVM */ 170 do_cpuid(0x8000000A, regs); 171 svm_feature = regs[3]; 172 173 printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]); 174 nasid = regs[1]; 175 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 176 177 printf("SVM Features:0x%b\n", svm_feature, 178 "\020" 179 "\001NP" /* Nested paging */ 180 "\002LbrVirt" /* LBR virtualization */ 181 "\003SVML" /* SVM lock */ 182 "\004NRIPS" /* NRIP save */ 183 "\005TscRateMsr" /* MSR based TSC rate control */ 184 "\006VmcbClean" /* VMCB clean bits */ 185 "\007FlushByAsid" /* Flush by ASID */ 186 "\010DecodeAssist" /* Decode assist */ 187 "\011<b20>" 188 "\012<b20>" 189 "\013PauseFilter" 190 "\014<b20>" 191 "\015PauseFilterThreshold" 192 "\016AVIC" 193 ); 194 195 /* SVM Lock */ 196 if (!(svm_feature & AMD_CPUID_SVM_SVML)) { 197 printf("SVM is disabled by BIOS, please enable in BIOS.\n"); 198 return (ENXIO); 199 } 200 201 /* 202 * bhyve need RVI to work. 203 */ 204 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 205 printf("Missing Nested paging or RVI SVM support in processor.\n"); 206 return (EIO); 207 } 208 209 if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE) 210 return (0); 211 212 return (EIO); 213} 214 215static __inline int 216flush_by_asid(void) 217{ 218 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 219} 220 221/* 222 * Enable SVM for a CPU. 223 */ 224static void 225svm_enable(void *arg __unused) 226{ 227 uint64_t hsave_pa; 228 229 (void)cpu_svm_enable_disable(TRUE); 230 231 hsave_pa = vtophys(hsave[curcpu]); 232 wrmsr(MSR_VM_HSAVE_PA, hsave_pa); 233 234 if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) { 235 panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu); 236 } 237} 238 239/* 240 * Check if a processor support SVM. 241 */ 242static int 243is_svm_enabled(void) 244{ 245 uint64_t msr; 246 247 /* Section 15.4 Enabling SVM from APM2. */ 248 if ((amd_feature2 & AMDID2_SVM) == 0) { 249 printf("SVM is not supported on this processor.\n"); 250 return (ENXIO); 251 } 252 253 msr = rdmsr(MSR_VM_CR); 254 /* Make sure SVM is not disabled by BIOS. */ 255 if ((msr & VM_CR_SVMDIS) == 0) { 256 return svm_cpuid_features(); 257 } 258 259 printf("SVM disabled by Key, consult TPM/BIOS manual.\n"); 260 return (ENXIO); 261} 262 263/* 264 * Enable SVM on CPU and initialize nested page table h/w. 265 */ 266static int 267svm_init(int ipinum) 268{ 269 int err, cpu; 270 271 err = is_svm_enabled(); 272 if (err) 273 return (err); 274 275 for (cpu = 0; cpu < MAXCPU; cpu++) { 276 /* 277 * Initialize the host ASIDs to their "highest" valid values. 278 * 279 * The next ASID allocation will rollover both 'gen' and 'num' 280 * and start off the sequence at {1,1}. 281 */ 282 asid[cpu].gen = ~0UL; 283 asid[cpu].num = nasid - 1; 284 } 285 286 svm_npt_init(ipinum); 287 288 /* Start SVM on all CPUs */ 289 smp_rendezvous(NULL, svm_enable, NULL, NULL); 290 291 return (0); 292} 293 294static void 295svm_restore(void) 296{ 297 svm_enable(NULL); 298} 299/* 300 * Get index and bit position for a MSR in MSR permission 301 * bitmap. Two bits are used for each MSR, lower bit is 302 * for read and higher bit is for write. 303 */ 304static int 305svm_msr_index(uint64_t msr, int *index, int *bit) 306{ 307 uint32_t base, off; 308 309/* Pentium compatible MSRs */ 310#define MSR_PENTIUM_START 0 311#define MSR_PENTIUM_END 0x1FFF 312/* AMD 6th generation and Intel compatible MSRs */ 313#define MSR_AMD6TH_START 0xC0000000UL 314#define MSR_AMD6TH_END 0xC0001FFFUL 315/* AMD 7th and 8th generation compatible MSRs */ 316#define MSR_AMD7TH_START 0xC0010000UL 317#define MSR_AMD7TH_END 0xC0011FFFUL 318 319 *index = -1; 320 *bit = (msr % 4) * 2; 321 base = 0; 322 323 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 324 *index = msr / 4; 325 return (0); 326 } 327 328 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 329 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 330 off = (msr - MSR_AMD6TH_START); 331 *index = (off + base) / 4; 332 return (0); 333 } 334 335 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 336 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 337 off = (msr - MSR_AMD7TH_START); 338 *index = (off + base) / 4; 339 return (0); 340 } 341 342 return (EIO); 343} 344 345/* 346 * Give virtual cpu the complete access to MSR(read & write). 347 */ 348static int 349svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 350{ 351 int index, bit, err; 352 353 err = svm_msr_index(msr, &index, &bit); 354 if (err) { 355 ERR("MSR 0x%lx is not writeable by guest.\n", msr); 356 return (err); 357 } 358 359 if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) { 360 ERR("MSR 0x%lx index out of range(%d).\n", msr, index); 361 return (EINVAL); 362 } 363 if (bit < 0 || bit > 8) { 364 ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit); 365 return (EINVAL); 366 } 367 368 /* Disable intercept for read and write. */ 369 if (read) 370 perm_bitmap[index] &= ~(1UL << bit); 371 if (write) 372 perm_bitmap[index] &= ~(2UL << bit); 373 CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n", 374 (perm_bitmap[index] >> bit) & 0x3, msr); 375 376 return (0); 377} 378 379static int 380svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 381{ 382 return svm_msr_perm(perm_bitmap, msr, true, true); 383} 384 385static int 386svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 387{ 388 return svm_msr_perm(perm_bitmap, msr, true, false); 389} 390 391static __inline void 392vcpu_set_dirty(struct svm_vcpu *vcpustate, uint32_t dirtybits) 393{ 394 vcpustate->dirty |= dirtybits; 395} 396 397/* 398 * Initialise a virtual machine. 399 */ 400static void * 401svm_vminit(struct vm *vm, pmap_t pmap) 402{ 403 struct svm_softc *svm_sc; 404 struct svm_vcpu *vcpu; 405 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 406 int i; 407 408 svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc), 409 M_SVM, M_WAITOK | M_ZERO); 410 411 svm_sc->vm = vm; 412 svm_sc->svm_feature = svm_feature; 413 svm_sc->vcpu_cnt = VM_MAXCPU; 414 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); 415 416 /* 417 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc. 418 */ 419 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); 420 421 /* 422 * Following MSR can be completely controlled by virtual machines 423 * since access to following are translated to access to VMCB. 424 */ 425 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 426 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 427 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 428 429 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 430 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 431 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 432 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 433 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 434 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 435 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 436 437 /* For Nested Paging/RVI only. */ 438 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 439 440 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 441 442 /* Intercept access to all I/O ports. */ 443 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); 444 445 /* Cache physical address for multiple vcpus. */ 446 iopm_pa = vtophys(svm_sc->iopm_bitmap); 447 msrpm_pa = vtophys(svm_sc->msr_bitmap); 448 pml4_pa = svm_sc->nptp; 449 450 for (i = 0; i < svm_sc->vcpu_cnt; i++) { 451 vcpu = svm_get_vcpu(svm_sc, i); 452 vcpu->lastcpu = NOCPU; 453 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 454 svm_init_vmcb(&vcpu->vmcb, iopm_pa, msrpm_pa, pml4_pa); 455 } 456 return (svm_sc); 457} 458 459static int 460svm_cpl(struct vmcb_state *state) 461{ 462 463 /* 464 * From APMv2: 465 * "Retrieve the CPL from the CPL field in the VMCB, not 466 * from any segment DPL" 467 */ 468 return (state->cpl); 469} 470 471static enum vm_cpu_mode 472svm_vcpu_mode(struct vmcb *vmcb) 473{ 474 struct vmcb_segment *seg; 475 struct vmcb_state *state; 476 477 state = &vmcb->state; 478 479 if (state->efer & EFER_LMA) { 480 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); 481 /* 482 * Section 4.8.1 for APM2, check if Code Segment has 483 * Long attribute set in descriptor. 484 */ 485 if (seg->attrib & VMCB_CS_ATTRIB_L) 486 return (CPU_MODE_64BIT); 487 else 488 return (CPU_MODE_COMPATIBILITY); 489 } else if (state->cr0 & CR0_PE) { 490 return (CPU_MODE_PROTECTED); 491 } else { 492 return (CPU_MODE_REAL); 493 } 494} 495 496static enum vm_paging_mode 497svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 498{ 499 500 if ((cr0 & CR0_PG) == 0) 501 return (PAGING_MODE_FLAT); 502 if ((cr4 & CR4_PAE) == 0) 503 return (PAGING_MODE_32); 504 if (efer & EFER_LME) 505 return (PAGING_MODE_64); 506 else 507 return (PAGING_MODE_PAE); 508} 509 510/* 511 * ins/outs utility routines 512 */ 513static uint64_t 514svm_inout_str_index(struct svm_regctx *regs, int in) 515{ 516 uint64_t val; 517 518 val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi; 519 520 return (val); 521} 522 523static uint64_t 524svm_inout_str_count(struct svm_regctx *regs, int rep) 525{ 526 uint64_t val; 527 528 val = rep ? regs->sctx_rcx : 1; 529 530 return (val); 531} 532 533static void 534svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, 535 int in, struct vm_inout_str *vis) 536{ 537 int error, s; 538 539 if (in) { 540 vis->seg_name = VM_REG_GUEST_ES; 541 } else { 542 /* The segment field has standard encoding */ 543 s = (info1 >> 10) & 0x7; 544 vis->seg_name = vm_segment_name(s); 545 } 546 547 error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); 548 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 549} 550 551static int 552svm_inout_str_addrsize(uint64_t info1) 553{ 554 uint32_t size; 555 556 size = (info1 >> 7) & 0x7; 557 switch (size) { 558 case 1: 559 return (2); /* 16 bit */ 560 case 2: 561 return (4); /* 32 bit */ 562 case 4: 563 return (8); /* 64 bit */ 564 default: 565 panic("%s: invalid size encoding %d", __func__, size); 566 } 567} 568 569static void 570svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 571{ 572 struct vmcb_state *state; 573 574 state = &vmcb->state; 575 paging->cr3 = state->cr3; 576 paging->cpl = svm_cpl(state); 577 paging->cpu_mode = svm_vcpu_mode(vmcb); 578 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 579 state->efer); 580} 581 582 583/* 584 * Handle guest I/O intercept. 585 */ 586static bool 587svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 588{ 589 struct vmcb_ctrl *ctrl; 590 struct vmcb_state *state; 591 struct svm_regctx *regs; 592 struct vm_inout_str *vis; 593 uint64_t info1; 594 595 state = svm_get_vmcb_state(svm_sc, vcpu); 596 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 597 regs = svm_get_guest_regctx(svm_sc, vcpu); 598 info1 = ctrl->exitinfo1; 599 600 vmexit->exitcode = VM_EXITCODE_INOUT; 601 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 602 vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0; 603 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 604 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 605 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 606 vmexit->u.inout.eax = (uint32_t)(state->rax); 607 608 if (vmexit->u.inout.string) { 609 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 610 vis = &vmexit->u.inout_str; 611 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); 612 vis->rflags = state->rflags; 613 vis->cr0 = state->cr0; 614 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 615 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 616 vis->addrsize = svm_inout_str_addrsize(info1); 617 svm_inout_str_seginfo(svm_sc, vcpu, info1, 618 vmexit->u.inout.in, vis); 619 } 620 621 return (false); 622} 623 624static int 625svm_npf_paging(uint64_t exitinfo1) 626{ 627 628 if (exitinfo1 & VMCB_NPF_INFO1_W) 629 return (VM_PROT_WRITE); 630 631 return (VM_PROT_READ); 632} 633 634static bool 635svm_npf_emul_fault(uint64_t exitinfo1) 636{ 637 638 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 639 return (false); 640 } 641 642 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 643 return (false); 644 } 645 646 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 647 return (false); 648 } 649 650 return (true); 651} 652 653static void 654svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 655{ 656 struct vm_guest_paging *paging; 657 struct vmcb_segment *seg; 658 659 paging = &vmexit->u.inst_emul.paging; 660 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 661 vmexit->u.inst_emul.gpa = gpa; 662 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 663 svm_paging_info(vmcb, paging); 664 665 /* 666 * If DecodeAssist SVM feature doesn't exist, we don't have NPF 667 * instuction length. RIP will be calculated based on the length 668 * determined by instruction emulation. 669 */ 670 vmexit->inst_length = VIE_INST_SIZE; 671 672 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); 673 switch(paging->cpu_mode) { 674 case CPU_MODE_PROTECTED: 675 case CPU_MODE_COMPATIBILITY: 676 /* 677 * Section 4.8.1 of APM2, Default Operand Size or D bit. 678 */ 679 vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ? 680 1 : 0; 681 break; 682 default: 683 vmexit->u.inst_emul.cs_d = 0; 684 break; 685 } 686} 687 688/* 689 * Special handling of EFER MSR. 690 * SVM guest must have SVM EFER bit set, prohibit guest from cleareing SVM 691 * enable bit in EFER. 692 */ 693static void 694svm_efer(struct svm_softc *svm_sc, int vcpu, boolean_t write) 695{ 696 struct svm_regctx *swctx; 697 struct vmcb_state *state; 698 699 state = svm_get_vmcb_state(svm_sc, vcpu); 700 swctx = svm_get_guest_regctx(svm_sc, vcpu); 701 702 if (write) { 703 state->efer = ((swctx->e.g.sctx_rdx & (uint32_t)~0) << 32) | 704 ((uint32_t)state->rax) | EFER_SVM; 705 } else { 706 state->rax = (uint32_t)state->efer; 707 swctx->e.g.sctx_rdx = (uint32_t)(state->efer >> 32); 708 } 709} 710 711static void 712svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) 713{ 714 struct vmcb_ctrl *ctrl; 715 uint64_t intinfo; 716 717 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 718 intinfo = ctrl->exitintinfo; 719 if (!VMCB_EXITINTINFO_VALID(intinfo)) 720 return; 721 722 /* 723 * From APMv2, Section "Intercepts during IDT interrupt delivery" 724 * 725 * If a #VMEXIT happened during event delivery then record the event 726 * that was being delivered. 727 */ 728 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 729 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 730 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 731 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 732} 733 734/* 735 * Determine the cause of virtual cpu exit and handle VMEXIT. 736 * Return: false - Break vcpu execution loop and handle vmexit 737 * in kernel or user space. 738 * true - Continue vcpu run. 739 */ 740static bool 741svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 742{ 743 struct vmcb_state *state; 744 struct vmcb_ctrl *ctrl; 745 struct svm_regctx *ctx; 746 uint64_t code, info1, info2, val; 747 uint32_t eax, ecx, edx; 748 bool update_rip, loop, retu; 749 750 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 751 752 state = svm_get_vmcb_state(svm_sc, vcpu); 753 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 754 ctx = svm_get_guest_regctx(svm_sc, vcpu); 755 code = ctrl->exitcode; 756 info1 = ctrl->exitinfo1; 757 info2 = ctrl->exitinfo2; 758 759 update_rip = true; 760 loop = true; 761 vmexit->exitcode = VM_EXITCODE_VMX; 762 vmexit->u.vmx.status = 0; 763 764 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 765 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 766 767 svm_save_intinfo(svm_sc, vcpu); 768 769 switch (code) { 770 case VMCB_EXIT_MC: /* Machine Check. */ 771 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1); 772 vmexit->exitcode = VM_EXITCODE_MTRAP; 773 loop = false; 774 break; 775 776 case VMCB_EXIT_MSR: /* MSR access. */ 777 eax = state->rax; 778 ecx = ctx->sctx_rcx; 779 edx = ctx->e.g.sctx_rdx; 780 781 if (ecx == MSR_EFER) { 782 VCPU_CTR0(svm_sc->vm, vcpu,"VMEXIT EFER\n"); 783 svm_efer(svm_sc, vcpu, info1); 784 break; 785 } 786 787 retu = false; 788 if (info1) { 789 /* VM exited because of write MSR */ 790 vmm_stat_incr(svm_sc->vm, vcpu, 791 VMEXIT_WRMSR, 1); 792 vmexit->exitcode = VM_EXITCODE_WRMSR; 793 vmexit->u.msr.code = ecx; 794 val = (uint64_t)edx << 32 | eax; 795 if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val, 796 &retu)) { 797 vmexit->u.msr.wval = val; 798 loop = false; 799 } else 800 loop = retu ? false : true; 801 802 VCPU_CTR3(svm_sc->vm, vcpu, 803 "VMEXIT WRMSR(%s handling) 0x%lx @0x%x", 804 loop ? "kernel" : "user", val, ecx); 805 } else { 806 vmm_stat_incr(svm_sc->vm, vcpu, 807 VMEXIT_RDMSR, 1); 808 vmexit->exitcode = VM_EXITCODE_RDMSR; 809 vmexit->u.msr.code = ecx; 810 if (emulate_rdmsr(svm_sc->vm, vcpu, ecx, 811 &retu)) { 812 loop = false; 813 } else 814 loop = retu ? false : true; 815 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR" 816 " MSB=0x%08x, LSB=%08x @0x%x", 817 ctx->e.g.sctx_rdx, state->rax, ecx); 818 } 819 820#define MSR_AMDK8_IPM 0xc0010055 821 /* 822 * We can't hide AMD C1E idle capability since its 823 * based on CPU generation, for now ignore access to 824 * this MSR by vcpus 825 * XXX: special handling of AMD C1E - Ignore. 826 */ 827 if (ecx == MSR_AMDK8_IPM) 828 loop = true; 829 break; 830 831 case VMCB_EXIT_INTR: 832 /* 833 * Exit on External Interrupt. 834 * Give host interrupt handler to run and if its guest 835 * interrupt, local APIC will inject event in guest. 836 */ 837 update_rip = false; 838 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:VMEXIT ExtInt" 839 " RIP:0x%lx.\n", state->rip); 840 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 841 break; 842 843 case VMCB_EXIT_IO: 844 loop = svm_handle_io(svm_sc, vcpu, vmexit); 845 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 846 break; 847 848 case VMCB_EXIT_CPUID: 849 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 850 (void)x86_emulate_cpuid(svm_sc->vm, vcpu, 851 (uint32_t *)&state->rax, 852 (uint32_t *)&ctx->sctx_rbx, 853 (uint32_t *)&ctx->sctx_rcx, 854 (uint32_t *)&ctx->e.g.sctx_rdx); 855 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT CPUID\n"); 856 break; 857 858 case VMCB_EXIT_HLT: 859 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 860 if (ctrl->v_irq) { 861 /* Interrupt is pending, can't halt guest. */ 862 vmm_stat_incr(svm_sc->vm, vcpu, 863 VMEXIT_HLT_IGNORED, 1); 864 VCPU_CTR0(svm_sc->vm, vcpu, 865 "VMEXIT halt ignored."); 866 } else { 867 VCPU_CTR0(svm_sc->vm, vcpu, 868 "VMEXIT halted CPU."); 869 vmexit->exitcode = VM_EXITCODE_HLT; 870 vmexit->u.hlt.rflags = state->rflags; 871 loop = false; 872 873 } 874 break; 875 876 case VMCB_EXIT_PAUSE: 877 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT pause"); 878 vmexit->exitcode = VM_EXITCODE_PAUSE; 879 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 880 881 break; 882 883 case VMCB_EXIT_NPF: 884 loop = false; 885 update_rip = false; 886 887 if (info1 & VMCB_NPF_INFO1_RSV) { 888 VCPU_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT" 889 " reserved bit is set," 890 "INFO1:0x%lx INFO2:0x%lx .\n", 891 info1, info2); 892 break; 893 } 894 895 /* EXITINFO2 has the physical fault address (GPA). */ 896 if(vm_mem_allocated(svm_sc->vm, info2)) { 897 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF-paging," 898 "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n", 899 state->rip, info1, info2); 900 vmexit->exitcode = VM_EXITCODE_PAGING; 901 vmexit->u.paging.gpa = info2; 902 vmexit->u.paging.fault_type = 903 svm_npf_paging(info1); 904 vmm_stat_incr(svm_sc->vm, vcpu, 905 VMEXIT_NESTED_FAULT, 1); 906 } else if (svm_npf_emul_fault(info1)) { 907 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF inst_emul," 908 "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n", 909 state->rip, info1, info2); 910 svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu), 911 info2, vmexit); 912 vmm_stat_incr(svm_sc->vm, vcpu, 913 VMEXIT_INST_EMUL, 1); 914 } 915 916 break; 917 918 case VMCB_EXIT_SHUTDOWN: 919 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT shutdown."); 920 loop = false; 921 break; 922 923 case VMCB_EXIT_INVALID: 924 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT INVALID."); 925 loop = false; 926 break; 927 928 default: 929 /* Return to user space. */ 930 loop = false; 931 update_rip = false; 932 VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx" 933 " EXITINFO1: 0x%lx EXITINFO2:0x%lx\n", 934 ctrl->exitcode, info1, info2); 935 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx" 936 " Inst decoder len:%d\n", state->rip, 937 ctrl->nrip, ctrl->inst_decode_size); 938 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 939 break; 940 } 941 942 vmexit->rip = state->rip; 943 if (update_rip) { 944 if (ctrl->nrip == 0) { 945 VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set " 946 "for RIP0x%lx.\n", state->rip); 947 vmexit->exitcode = VM_EXITCODE_VMX; 948 } else 949 vmexit->rip = ctrl->nrip; 950 } 951 952 /* If vcpu execution is continued, update RIP. */ 953 if (loop) { 954 state->rip = vmexit->rip; 955 } 956 957 if (state->rip == 0) { 958 VCPU_CTR0(svm_sc->vm, vcpu, "SVM_ERR:RIP is NULL\n"); 959 vmexit->exitcode = VM_EXITCODE_VMX; 960 } 961 962 return (loop); 963} 964 965/* 966 * Inject NMI to virtual cpu. 967 */ 968static int 969svm_inject_nmi(struct svm_softc *svm_sc, int vcpu) 970{ 971 struct vmcb_ctrl *ctrl; 972 973 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 974 975 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 976 /* Can't inject another NMI if last one is pending.*/ 977 if (!vm_nmi_pending(svm_sc->vm, vcpu)) 978 return (0); 979 980 /* Inject NMI, vector number is not used.*/ 981 vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false); 982 983 /* Acknowledge the request is accepted.*/ 984 vm_nmi_clear(svm_sc->vm, vcpu); 985 986 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n"); 987 988 return (1); 989} 990 991static void 992svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 993{ 994 struct vmcb_ctrl *ctrl; 995 uint64_t intinfo; 996 997 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 998 999 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1000 return; 1001 1002 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1003 "valid: %#lx", __func__, intinfo)); 1004 1005 vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo), 1006 VMCB_EXITINTINFO_VECTOR(intinfo), 1007 VMCB_EXITINTINFO_EC(intinfo), 1008 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1009 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1010 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1011} 1012 1013/* 1014 * Inject event to virtual cpu. 1015 */ 1016static void 1017svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu, struct vlapic *vlapic) 1018{ 1019 struct vmcb_ctrl *ctrl; 1020 struct vmcb_state *state; 1021 int extint_pending; 1022 int vector; 1023 1024 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1025 1026 state = svm_get_vmcb_state(svm_sc, vcpu); 1027 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1028 1029 svm_inj_intinfo(svm_sc, vcpu); 1030 1031 /* Can't inject multiple events at once. */ 1032 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1033 VCPU_CTR1(svm_sc->vm, vcpu, 1034 "SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj); 1035 return ; 1036 } 1037 1038 /* Wait for guest to come out of interrupt shadow. */ 1039 if (ctrl->intr_shadow) { 1040 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n"); 1041 return; 1042 } 1043 1044 /* NMI event has priority over interrupts.*/ 1045 if (svm_inject_nmi(svm_sc, vcpu)) { 1046 return; 1047 } 1048 1049 extint_pending = vm_extint_pending(svm_sc->vm, vcpu); 1050 1051 if (!extint_pending) { 1052 /* Ask the local apic for a vector to inject */ 1053 if (!vlapic_pending_intr(vlapic, &vector)) 1054 return; 1055 } else { 1056 /* Ask the legacy pic for a vector to inject */ 1057 vatpic_pending_intr(svm_sc->vm, &vector); 1058 } 1059 1060 if (vector < 32 || vector > 255) { 1061 VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection" 1062 "invalid vector=%d.\n", vector); 1063 ERR("SVM_ERR:Event injection invalid vector=%d.\n", vector); 1064 return; 1065 } 1066 1067 if ((state->rflags & PSL_I) == 0) { 1068 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Interrupt is disabled\n"); 1069 return; 1070 } 1071 1072 vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1073 1074 if (!extint_pending) { 1075 /* Update the Local APIC ISR */ 1076 vlapic_intr_accepted(vlapic, vector); 1077 } else { 1078 vm_extint_clear(svm_sc->vm, vcpu); 1079 vatpic_intr_accepted(svm_sc->vm, vector); 1080 1081 /* 1082 * XXX need to recheck exting_pending ala VT-x 1083 */ 1084 } 1085 1086 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:event injected,vector=%d.\n", vector); 1087} 1088 1089static __inline void 1090restore_host_tss(void) 1091{ 1092 struct system_segment_descriptor *tss_sd; 1093 1094 /* 1095 * The TSS descriptor was in use prior to launching the guest so it 1096 * has been marked busy. 1097 * 1098 * 'ltr' requires the descriptor to be marked available so change the 1099 * type to "64-bit available TSS". 1100 */ 1101 tss_sd = PCPU_GET(tss); 1102 tss_sd->sd_type = SDT_SYSTSS; 1103 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1104} 1105 1106static void 1107check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1108{ 1109 struct svm_vcpu *vcpustate; 1110 struct vmcb_ctrl *ctrl; 1111 long eptgen; 1112 bool alloc_asid; 1113 1114 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1115 "active on cpu %u", __func__, thiscpu)); 1116 1117 vcpustate = svm_get_vcpu(sc, vcpuid); 1118 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1119 1120 /* 1121 * The TLB entries associated with the vcpu's ASID are not valid 1122 * if either of the following conditions is true: 1123 * 1124 * 1. The vcpu's ASID generation is different than the host cpu's 1125 * ASID generation. This happens when the vcpu migrates to a new 1126 * host cpu. It can also happen when the number of vcpus executing 1127 * on a host cpu is greater than the number of ASIDs available. 1128 * 1129 * 2. The pmap generation number is different than the value cached in 1130 * the 'vcpustate'. This happens when the host invalidates pages 1131 * belonging to the guest. 1132 * 1133 * asidgen eptgen Action 1134 * mismatch mismatch 1135 * 0 0 (a) 1136 * 0 1 (b1) or (b2) 1137 * 1 0 (c) 1138 * 1 1 (d) 1139 * 1140 * (a) There is no mismatch in eptgen or ASID generation and therefore 1141 * no further action is needed. 1142 * 1143 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1144 * retained and the TLB entries associated with this ASID 1145 * are flushed by VMRUN. 1146 * 1147 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1148 * allocated. 1149 * 1150 * (c) A new ASID is allocated. 1151 * 1152 * (d) A new ASID is allocated. 1153 */ 1154 1155 alloc_asid = false; 1156 eptgen = pmap->pm_eptgen; 1157 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1158 1159 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1160 alloc_asid = true; /* (c) and (d) */ 1161 } else if (vcpustate->eptgen != eptgen) { 1162 if (flush_by_asid()) 1163 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1164 else 1165 alloc_asid = true; /* (b2) */ 1166 } else { 1167 /* 1168 * This is the common case (a). 1169 */ 1170 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1171 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1172 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1173 } 1174 1175 if (alloc_asid) { 1176 if (++asid[thiscpu].num >= nasid) { 1177 asid[thiscpu].num = 1; 1178 if (++asid[thiscpu].gen == 0) 1179 asid[thiscpu].gen = 1; 1180 /* 1181 * If this cpu does not support "flush-by-asid" 1182 * then flush the entire TLB on a generation 1183 * bump. Subsequent ASID allocation in this 1184 * generation can be done without a TLB flush. 1185 */ 1186 if (!flush_by_asid()) 1187 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1188 } 1189 vcpustate->asid.gen = asid[thiscpu].gen; 1190 vcpustate->asid.num = asid[thiscpu].num; 1191 1192 ctrl->asid = vcpustate->asid.num; 1193 vcpu_set_dirty(vcpustate, VMCB_CACHE_ASID); 1194 /* 1195 * If this cpu supports "flush-by-asid" then the TLB 1196 * was not flushed after the generation bump. The TLB 1197 * is flushed selectively after every new ASID allocation. 1198 */ 1199 if (flush_by_asid()) 1200 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1201 } 1202 vcpustate->eptgen = eptgen; 1203 1204 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1205 KASSERT(ctrl->asid == vcpustate->asid.num, 1206 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1207} 1208 1209/* 1210 * Start vcpu with specified RIP. 1211 */ 1212static int 1213svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1214 void *rend_cookie, void *suspended_cookie) 1215{ 1216 struct svm_regctx *hctx, *gctx; 1217 struct svm_softc *svm_sc; 1218 struct svm_vcpu *vcpustate; 1219 struct vmcb_state *state; 1220 struct vmcb_ctrl *ctrl; 1221 struct vm_exit *vmexit; 1222 struct vlapic *vlapic; 1223 struct vm *vm; 1224 uint64_t vmcb_pa; 1225 u_int thiscpu; 1226 bool loop; /* Continue vcpu execution loop. */ 1227 1228 loop = true; 1229 svm_sc = arg; 1230 vm = svm_sc->vm; 1231 1232 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1233 state = svm_get_vmcb_state(svm_sc, vcpu); 1234 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1235 vmexit = vm_exitinfo(vm, vcpu); 1236 vlapic = vm_lapic(vm, vcpu); 1237 1238 /* 1239 * Stash 'curcpu' on the stack as 'thiscpu'. 1240 * 1241 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1242 * after the #VMEXIT. Since VMRUN is executed inside a critical section 1243 * 'curcpu' and 'thiscpu' are guaranteed to identical. 1244 */ 1245 thiscpu = curcpu; 1246 1247 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1248 hctx = &host_ctx[thiscpu]; 1249 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1250 1251 if (vcpustate->lastcpu != thiscpu) { 1252 /* 1253 * Force new ASID allocation by invalidating the generation. 1254 */ 1255 vcpustate->asid.gen = 0; 1256 1257 /* 1258 * Invalidate the VMCB state cache by marking all fields dirty. 1259 */ 1260 vcpu_set_dirty(vcpustate, 0xffffffff); 1261 1262 /* 1263 * XXX 1264 * Setting 'vcpustate->lastcpu' here is bit premature because 1265 * we may return from this function without actually executing 1266 * the VMRUN instruction. This could happen if a rendezvous 1267 * or an AST is pending on the first time through the loop. 1268 * 1269 * This works for now but any new side-effects of vcpu 1270 * migration should take this case into account. 1271 */ 1272 vcpustate->lastcpu = thiscpu; 1273 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1274 } 1275 1276 VCPU_CTR3(vm, vcpu, "SVM:Enter vmrun RIP:0x%lx" 1277 " inst len=%d/%d\n", 1278 rip, vmexit->inst_length, 1279 vmexit->u.inst_emul.vie.num_valid); 1280 /* Update Guest RIP */ 1281 state->rip = rip; 1282 1283 do { 1284 vmexit->inst_length = 0; 1285 1286 /* 1287 * Disable global interrupts to guarantee atomicity during 1288 * loading of guest state. This includes not only the state 1289 * loaded by the "vmrun" instruction but also software state 1290 * maintained by the hypervisor: suspended and rendezvous 1291 * state, NPT generation number, vlapic interrupts etc. 1292 */ 1293 disable_gintr(); 1294 1295 if (vcpu_suspended(suspended_cookie)) { 1296 enable_gintr(); 1297 vm_exit_suspended(vm, vcpu, state->rip); 1298 break; 1299 } 1300 1301 if (vcpu_rendezvous_pending(rend_cookie)) { 1302 enable_gintr(); 1303 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; 1304 vmm_stat_incr(vm, vcpu, VMEXIT_RENDEZVOUS, 1); 1305 VCPU_CTR1(vm, vcpu, 1306 "SVM: VCPU rendezvous, RIP:0x%lx\n", 1307 state->rip); 1308 vmexit->rip = state->rip; 1309 break; 1310 } 1311 1312 /* We are asked to give the cpu by scheduler. */ 1313 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1314 enable_gintr(); 1315 vmexit->exitcode = VM_EXITCODE_BOGUS; 1316 vmm_stat_incr(vm, vcpu, VMEXIT_ASTPENDING, 1); 1317 VCPU_CTR1(vm, vcpu, 1318 "SVM: ASTPENDING, RIP:0x%lx\n", state->rip); 1319 vmexit->rip = state->rip; 1320 break; 1321 } 1322 1323 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1324 1325 /* Activate the nested pmap on 'thiscpu' */ 1326 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1327 1328 /* 1329 * Check the pmap generation and the ASID generation to 1330 * ensure that the vcpu does not use stale TLB mappings. 1331 */ 1332 check_asid(svm_sc, vcpu, pmap, thiscpu); 1333 1334 ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty; 1335 vcpustate->dirty = 0; 1336 1337 /* Launch Virtual Machine. */ 1338 svm_launch(vmcb_pa, gctx, hctx); 1339 1340 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1341 1342 /* 1343 * Restore MSR_GSBASE to point to the pcpu data area. 1344 * 1345 * Note that accesses done via PCPU_GET/PCPU_SET will work 1346 * only after MSR_GSBASE is restored. 1347 * 1348 * Also note that we don't bother restoring MSR_KGSBASE 1349 * since it is not used in the kernel and will be restored 1350 * when the VMRUN ioctl returns to userspace. 1351 */ 1352 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1353 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1354 thiscpu, curcpu)); 1355 1356 /* 1357 * The host GDTR and IDTR is saved by VMRUN and restored 1358 * automatically on #VMEXIT. However, the host TSS needs 1359 * to be restored explicitly. 1360 */ 1361 restore_host_tss(); 1362 1363 /* #VMEXIT disables interrupts so re-enable them here. */ 1364 enable_gintr(); 1365 1366 /* Handle #VMEXIT and if required return to user space. */ 1367 loop = svm_vmexit(svm_sc, vcpu, vmexit); 1368 vcpustate->loop++; 1369 vmm_stat_incr(vm, vcpu, VMEXIT_COUNT, 1); 1370 } while (loop); 1371 1372 return (0); 1373} 1374 1375/* 1376 * Cleanup for virtual machine. 1377 */ 1378static void 1379svm_vmcleanup(void *arg) 1380{ 1381 struct svm_softc *svm_sc; 1382 1383 svm_sc = arg; 1384 1385 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); 1386 1387 free(svm_sc, M_SVM); 1388} 1389 1390/* 1391 * Return pointer to hypervisor saved register state. 1392 */ 1393static register_t * 1394swctx_regptr(struct svm_regctx *regctx, int reg) 1395{ 1396 1397 switch (reg) { 1398 case VM_REG_GUEST_RBX: 1399 return (®ctx->sctx_rbx); 1400 case VM_REG_GUEST_RCX: 1401 return (®ctx->sctx_rcx); 1402 case VM_REG_GUEST_RDX: 1403 return (®ctx->e.g.sctx_rdx); 1404 case VM_REG_GUEST_RDI: 1405 return (®ctx->e.g.sctx_rdi); 1406 case VM_REG_GUEST_RSI: 1407 return (®ctx->e.g.sctx_rsi); 1408 case VM_REG_GUEST_RBP: 1409 return (®ctx->sctx_rbp); 1410 case VM_REG_GUEST_R8: 1411 return (®ctx->sctx_r8); 1412 case VM_REG_GUEST_R9: 1413 return (®ctx->sctx_r9); 1414 case VM_REG_GUEST_R10: 1415 return (®ctx->sctx_r10); 1416 case VM_REG_GUEST_R11: 1417 return (®ctx->sctx_r11); 1418 case VM_REG_GUEST_R12: 1419 return (®ctx->sctx_r12); 1420 case VM_REG_GUEST_R13: 1421 return (®ctx->sctx_r13); 1422 case VM_REG_GUEST_R14: 1423 return (®ctx->sctx_r14); 1424 case VM_REG_GUEST_R15: 1425 return (®ctx->sctx_r15); 1426 default: 1427 ERR("Unknown register requested, reg=%d.\n", reg); 1428 break; 1429 } 1430 1431 return (NULL); 1432} 1433 1434/* 1435 * Interface to read guest registers. 1436 * This can be SVM h/w saved or hypervisor saved register. 1437 */ 1438static int 1439svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 1440{ 1441 struct svm_softc *svm_sc; 1442 struct vmcb *vmcb; 1443 register_t *reg; 1444 1445 svm_sc = arg; 1446 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1447 1448 vmcb = svm_get_vmcb(svm_sc, vcpu); 1449 1450 if (vmcb_read(vmcb, ident, val) == 0) { 1451 return (0); 1452 } 1453 1454 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1455 1456 if (reg != NULL) { 1457 *val = *reg; 1458 return (0); 1459 } 1460 1461 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 1462 return (EINVAL); 1463} 1464 1465/* 1466 * Interface to write to guest registers. 1467 * This can be SVM h/w saved or hypervisor saved register. 1468 */ 1469static int 1470svm_setreg(void *arg, int vcpu, int ident, uint64_t val) 1471{ 1472 struct svm_softc *svm_sc; 1473 struct vmcb *vmcb; 1474 register_t *reg; 1475 1476 svm_sc = arg; 1477 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1478 1479 vmcb = svm_get_vmcb(svm_sc, vcpu); 1480 if (vmcb_write(vmcb, ident, val) == 0) { 1481 return (0); 1482 } 1483 1484 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); 1485 1486 if (reg != NULL) { 1487 *reg = val; 1488 return (0); 1489 } 1490 1491 /* 1492 * XXX deal with CR3 and invalidate TLB entries tagged with the 1493 * vcpu's ASID. This needs to be treated differently depending on 1494 * whether 'running' is true/false. 1495 */ 1496 1497 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident); 1498 return (EINVAL); 1499} 1500 1501 1502/* 1503 * Inteface to set various descriptors. 1504 */ 1505static int 1506svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc) 1507{ 1508 struct svm_softc *svm_sc; 1509 struct vmcb *vmcb; 1510 struct vmcb_segment *seg; 1511 uint16_t attrib; 1512 1513 svm_sc = arg; 1514 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1515 1516 vmcb = svm_get_vmcb(svm_sc, vcpu); 1517 1518 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type); 1519 1520 seg = vmcb_seg(vmcb, type); 1521 if (seg == NULL) { 1522 ERR("SVM_ERR:Unsupported segment type%d\n", type); 1523 return (EINVAL); 1524 } 1525 1526 /* Map seg_desc access to VMCB attribute format.*/ 1527 attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF); 1528 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n", 1529 type, desc->access, desc->limit); 1530 seg->attrib = attrib; 1531 seg->base = desc->base; 1532 seg->limit = desc->limit; 1533 1534 return (0); 1535} 1536 1537/* 1538 * Interface to get guest descriptor. 1539 */ 1540static int 1541svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc) 1542{ 1543 struct svm_softc *svm_sc; 1544 struct vmcb_segment *seg; 1545 1546 svm_sc = arg; 1547 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1548 1549 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type); 1550 1551 seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type); 1552 if (!seg) { 1553 ERR("SVM_ERR:Unsupported segment type%d\n", type); 1554 return (EINVAL); 1555 } 1556 1557 /* Map seg_desc access to VMCB attribute format.*/ 1558 desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF); 1559 desc->base = seg->base; 1560 desc->limit = seg->limit; 1561 1562 /* 1563 * VT-x uses bit 16 (Unusable) to indicate a segment that has been 1564 * loaded with a NULL segment selector. The 'desc->access' field is 1565 * interpreted in the VT-x format by the processor-independent code. 1566 * 1567 * SVM uses the 'P' bit to convey the same information so convert it 1568 * into the VT-x format. For more details refer to section 1569 * "Segment State in the VMCB" in APMv2. 1570 */ 1571 if (type == VM_REG_GUEST_CS && type == VM_REG_GUEST_TR) 1572 desc->access |= 0x80; /* CS and TS always present */ 1573 1574 if (!(desc->access & 0x80)) 1575 desc->access |= 0x10000; /* Unusable segment */ 1576 1577 return (0); 1578} 1579 1580static int 1581svm_setcap(void *arg, int vcpu, int type, int val) 1582{ 1583 struct svm_softc *svm_sc; 1584 struct vmcb_ctrl *ctrl; 1585 int ret = ENOENT; 1586 1587 svm_sc = arg; 1588 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1589 1590 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1591 1592 switch (type) { 1593 case VM_CAP_HALT_EXIT: 1594 if (val) 1595 ctrl->ctrl1 |= VMCB_INTCPT_HLT; 1596 else 1597 ctrl->ctrl1 &= ~VMCB_INTCPT_HLT; 1598 ret = 0; 1599 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Halt exit %s.\n", 1600 val ? "enabled": "disabled"); 1601 break; 1602 1603 case VM_CAP_PAUSE_EXIT: 1604 if (val) 1605 ctrl->ctrl1 |= VMCB_INTCPT_PAUSE; 1606 else 1607 ctrl->ctrl1 &= ~VMCB_INTCPT_PAUSE; 1608 ret = 0; 1609 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Pause exit %s.\n", 1610 val ? "enabled": "disabled"); 1611 break; 1612 1613 case VM_CAP_MTRAP_EXIT: 1614 if (val) 1615 ctrl->exception |= BIT(IDT_MC); 1616 else 1617 ctrl->exception &= ~BIT(IDT_MC); 1618 ret = 0; 1619 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:MC exit %s.\n", 1620 val ? "enabled": "disabled"); 1621 break; 1622 1623 case VM_CAP_UNRESTRICTED_GUEST: 1624 /* SVM doesn't need special capability for SMP.*/ 1625 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Set_gap:Unrestricted " 1626 "always enabled.\n"); 1627 ret = 0; 1628 break; 1629 1630 default: 1631 break; 1632 } 1633 1634 return (ret); 1635} 1636 1637static int 1638svm_getcap(void *arg, int vcpu, int type, int *retval) 1639{ 1640 struct svm_softc *svm_sc; 1641 struct vmcb_ctrl *ctrl; 1642 1643 svm_sc = arg; 1644 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1645 1646 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1647 1648 switch (type) { 1649 case VM_CAP_HALT_EXIT: 1650 *retval = (ctrl->ctrl1 & VMCB_INTCPT_HLT) ? 1 : 0; 1651 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Halt exit %s.\n", 1652 *retval ? "enabled": "disabled"); 1653 break; 1654 1655 case VM_CAP_PAUSE_EXIT: 1656 *retval = (ctrl->ctrl1 & VMCB_INTCPT_PAUSE) ? 1 : 0; 1657 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Pause exit %s.\n", 1658 *retval ? "enabled": "disabled"); 1659 break; 1660 1661 case VM_CAP_MTRAP_EXIT: 1662 *retval = (ctrl->exception & BIT(IDT_MC)) ? 1 : 0; 1663 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:MC exit %s.\n", 1664 *retval ? "enabled": "disabled"); 1665 break; 1666 1667 case VM_CAP_UNRESTRICTED_GUEST: 1668 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:get_cap:Unrestricted.\n"); 1669 *retval = 1; 1670 break; 1671 default: 1672 break; 1673 } 1674 1675 return (0); 1676} 1677 1678static struct vlapic * 1679svm_vlapic_init(void *arg, int vcpuid) 1680{ 1681 struct svm_softc *svm_sc; 1682 struct vlapic *vlapic; 1683 1684 svm_sc = arg; 1685 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 1686 vlapic->vm = svm_sc->vm; 1687 vlapic->vcpuid = vcpuid; 1688 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 1689 1690 vlapic_init(vlapic); 1691 1692 return (vlapic); 1693} 1694 1695static void 1696svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 1697{ 1698 1699 vlapic_cleanup(vlapic); 1700 free(vlapic, M_SVM_VLAPIC); 1701} 1702 1703struct vmm_ops vmm_ops_amd = { 1704 svm_init, 1705 svm_cleanup, 1706 svm_restore, 1707 svm_vminit, 1708 svm_vmrun, 1709 svm_vmcleanup, 1710 svm_getreg, 1711 svm_setreg, 1712 svm_getdesc, 1713 svm_setdesc, 1714 svm_getcap, 1715 svm_setcap, 1716 svm_npt_alloc, 1717 svm_npt_free, 1718 svm_vlapic_init, 1719 svm_vlapic_cleanup 1720}; 1721