svm.c revision 270962
/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 270962 2014-09-02 04:22:42Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include <x86/apicreg.h>

#include "vmm_lapic.h"
#include "vmm_msr.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "npt.h"

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define AMD_CPUID_SVM_NP		BIT(0)	/* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)	/* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)	/* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)	/* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control. */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define AMD_CPUID_SVM_ASID_FLUSH	BIT(6)	/* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */
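
/*
 * Illustrative note (not part of the driver): svm_feature, filled in by
 * svm_cpuid_features() below, holds the EDX output of CPUID Fn8000_000A,
 * so testing for a feature is a simple mask, e.g.:
 *
 *	if (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST)
 *		... hardware-assisted instruction decode is available ...
 *
 * BIT(n) is assumed to expand to (1 << n), as is conventional.
 */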

MALLOC_DEFINE(M_SVM, "svm", "svm");
MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static bool svm_vmexit(struct svm_softc *svm_sc, int vcpu,
			struct vm_exit *vmexit);
static int svm_msr_rw_ok(uint8_t *btmap, uint64_t msr);
static int svm_msr_rd_ok(uint8_t *btmap, uint64_t msr);
static int svm_msr_index(uint64_t msr, int *index, int *bit);
static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc);

static uint32_t svm_feature;	/* AMD SVM features. */

/*
 * Starting guest ASID, 0 is reserved for the host.
 * Each guest will have its own unique ASID.
 */
static uint32_t guest_asid = 1;

/*
 * Maximum ASID the processor can support.
 * This limits the maximum number of virtual machines that can be created.
 */
static int max_asid;

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * S/w saved host context.
 */
static struct svm_regctx host_ctx[MAXCPU];

static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid VMCB EXITINTINFO");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "VMM pending exception injected");

/*
 * Common function to enable or disable SVM for a CPU.
 */
static int
cpu_svm_enable_disable(boolean_t enable)
{
	uint64_t efer_msr;

	efer_msr = rdmsr(MSR_EFER);

	if (enable)
		efer_msr |= EFER_SVM;
	else
		efer_msr &= ~EFER_SVM;

	wrmsr(MSR_EFER, efer_msr);

	return (0);
}

/*
 * Disable SVM on a CPU.
 */
static void
svm_disable(void *arg __unused)
{

	(void)cpu_svm_enable_disable(FALSE);
}

/*
 * Disable SVM for all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Check for required bhyve SVM features in a CPU.
 */
static int
svm_cpuid_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature = regs[3];

	printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]);
	max_asid = regs[1];

	printf("SVM Features:0x%b\n", svm_feature,
		"\020"
		"\001NP"		/* Nested paging */
		"\002LbrVirt"		/* LBR virtualization */
		"\003SVML"		/* SVM lock */
		"\004NRIPS"		/* NRIP save */
		"\005TscRateMsr"	/* MSR based TSC rate control */
		"\006VmcbClean"		/* VMCB clean bits */
		"\007FlushByAsid"	/* Flush by ASID */
		"\010DecodeAssist"	/* Decode assist */
		"\011<b8>"
		"\012<b9>"
		"\013PauseFilter"
		"\014<b11>"
		"\015PauseFilterThreshold"
		"\016AVIC"
		);

	/* SVM Lock */
	if (!(svm_feature & AMD_CPUID_SVM_SVML)) {
		printf("SVM is disabled by BIOS, please enable in BIOS.\n");
		return (ENXIO);
	}

	/*
	 * bhyve needs RVI to work.
	 */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("Missing Nested paging or RVI SVM support in processor.\n");
		return (EIO);
	}

	if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE)
		return (0);

	return (EIO);
}

/*
 * Enable SVM for a CPU.
 */
static void
svm_enable(void *arg __unused)
{
	uint64_t hsave_pa;

	(void)cpu_svm_enable_disable(TRUE);

	hsave_pa = vtophys(hsave[curcpu]);
	wrmsr(MSR_VM_HSAVE_PA, hsave_pa);

	if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) {
		panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu);
	}
}

/*
 * Check if the processor supports SVM.
 */
static int
is_svm_enabled(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM is not supported on this processor.\n");
		return (ENXIO);
	}

	msr = rdmsr(MSR_VM_CR);
	/* Make sure SVM is not disabled by BIOS. */
	if ((msr & VM_CR_SVMDIS) == 0) {
		return svm_cpuid_features();
	}

	printf("SVM is disabled with a key; consult the TPM/BIOS manual.\n");
	return (ENXIO);
}

/*
 * Enable SVM on all CPUs and initialize the nested page table h/w.
 */
static int
svm_init(int ipinum)
{
	int err;

	err = is_svm_enabled();
	if (err)
		return (err);

	svm_npt_init(ipinum);

	/* Start SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_restore(void)
{
	svm_enable(NULL);
}

/*
 * Get the index and bit position for an MSR in the MSR permission
 * bitmap. Two bits are used for each MSR: the lower bit is for read
 * and the higher bit is for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

/* Pentium compatible MSRs */
#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EIO);
}

/*
 * Give the virtual cpu complete access to an MSR (read and/or write),
 * i.e. disable the corresponding intercepts.
 */
static int
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, err;

	err = svm_msr_index(msr, &index, &bit);
	if (err) {
		ERR("MSR 0x%lx is not writeable by guest.\n", msr);
		return (err);
	}

	if (index < 0 || index >= (SVM_MSR_BITMAP_SIZE)) {
		ERR("MSR 0x%lx index out of range(%d).\n", msr, index);
		return (EINVAL);
	}
	if (bit < 0 || bit > 8) {
		ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit);
		return (EINVAL);
	}

	/* Disable intercept for read and write. */
	if (read)
		perm_bitmap[index] &= ~(1UL << bit);
	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
	CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n",
		(perm_bitmap[index] >> bit) & 0x3, msr);

	return (0);
}

static int
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	return svm_msr_perm(perm_bitmap, msr, true, true);
}

static int
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	return svm_msr_perm(perm_bitmap, msr, true, false);
}
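
/*
 * Worked example (illustrative only, based on the layout handled above):
 * MSR_LSTAR (0xC0000082) falls in the AMD 6th generation range, so
 * base = 0x2000 (the number of MSRs in the Pentium range) and off = 0x82,
 * giving
 *
 *	index = (0x82 + 0x2000) / 4 = 0x820
 *	bit   = (0xC0000082 % 4) * 2 = 4
 *
 * i.e. bit 4 of perm_bitmap[0x820] controls read intercepts for LSTAR
 * and bit 5 controls write intercepts.
 */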

/*
 * Initialise VCPU.
 */
static int
svm_init_vcpu(struct svm_vcpu *vcpu, vm_paddr_t iopm_pa, vm_paddr_t msrpm_pa,
		vm_paddr_t pml4_pa, uint8_t asid)
{

	vcpu->lastcpu = NOCPU;
	vcpu->vmcb_pa = vtophys(&vcpu->vmcb);

	/*
	 * Initialise the VMCB persistent area of the vcpu:
	 * 1. Permission bitmap for MSR and IO space.
	 * 2. Nested paging.
	 * 3. ASID of the virtual machine.
	 */
	if (svm_init_vmcb(&vcpu->vmcb, iopm_pa, msrpm_pa, pml4_pa)) {
		return (EIO);
	}

	return (0);
}

/*
 * Initialise a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	if (guest_asid >= max_asid) {
		ERR("Host supports max ASID:%d, can't create more guests.\n",
			max_asid);
		return (NULL);
	}

	svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc),
			M_SVM, M_WAITOK | M_ZERO);

	svm_sc->vm = vm;
	svm_sc->svm_feature = svm_feature;
	svm_sc->vcpu_cnt = VM_MAXCPU;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
	/*
	 * Each guest has its own unique ASID.
	 * The ASID (Address Space Identifier) is used to tag TLB entries.
	 */
	svm_sc->asid = guest_asid++;

	/*
	 * Intercept access to all MSRs except GSBASE, FSBASE, etc.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));

	/*
	 * The following MSRs can be completely controlled by the virtual
	 * machines since accesses to them are translated to accesses to
	 * the VMCB.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);

	/* For Nested Paging/RVI only. */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

	/* Cache physical addresses for multiple vcpus. */
	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;

	for (i = 0; i < svm_sc->vcpu_cnt; i++) {
		if (svm_init_vcpu(svm_get_vcpu(svm_sc, i), iopm_pa, msrpm_pa,
				pml4_pa, svm_sc->asid)) {
			ERR("SVM couldn't initialise VCPU%d\n", i);
			goto cleanup;
		}
	}

	return (svm_sc);

cleanup:
	free(svm_sc, M_SVM);
	return (NULL);
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment *seg;
	struct vmcb_state *state;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
		/*
		 * Section 4.8.1 of APM2: check if the Code Segment has
		 * the Long attribute set in its descriptor.
		 */
		if (seg->attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}
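
/*
 * Summary of the mode decision above (illustrative only):
 *
 *	EFER.LMA	CR0.PE	CS.L	Mode
 *	1		-	1	CPU_MODE_64BIT
 *	1		-	0	CPU_MODE_COMPATIBILITY
 *	0		1	-	CPU_MODE_PROTECTED
 *	0		0	-	CPU_MODE_REAL
 */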

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

/*
 * Handle guest I/O intercept.
 */
static bool
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);
	info1 = ctrl->exitinfo1;

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (vmexit->u.inout.string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (false);
}
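
/*
 * Worked example of the EXITINFO1 decoding above (illustrative only):
 * for a "rep outsb" to port 0x71 with a 16-bit address size, hardware
 * would report EXITINFO1 = 0x00710C9C:
 *
 *	bit 0 (in)        = 0		OUT
 *	bit 2 (string)    = 1		string instruction
 *	bit 3 (rep)       = 1		repeated
 *	bits 6:4 (bytes)  = 1		one byte per transfer
 *	bits 9:7 (size)   = 1		16-bit address size
 *	bits 12:10 (seg)  = 3		DS, assuming the standard segment
 *					encoding used by vm_segment_name()
 *	bits 31:16 (port) = 0x0071
 */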

static int
svm_npf_paging(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);

	return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment *seg;

	paging = &vmexit->u.inst_emul.paging;
	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	/*
	 * If the DecodeAssist SVM feature doesn't exist, we don't have the
	 * NPF instruction length. RIP will be calculated based on the
	 * length determined by instruction emulation.
	 */
	vmexit->inst_length = VIE_INST_SIZE;

	seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
	switch (paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
}

/*
 * Special handling of the EFER MSR.
 * An SVM guest must have the SVM enable bit set in EFER; prohibit the
 * guest from clearing it.
 */
static void
svm_efer(struct svm_softc *svm_sc, int vcpu, boolean_t write)
{
	struct svm_regctx *swctx;
	struct vmcb_state *state;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	swctx = svm_get_guest_regctx(svm_sc, vcpu);

	if (write) {
		state->efer = ((swctx->e.g.sctx_rdx & (uint32_t)~0) << 32) |
		    ((uint32_t)state->rax) | EFER_SVM;
	} else {
		state->rax = (uint32_t)state->efer;
		swctx->e.g.sctx_rdx = (uint32_t)(state->efer >> 32);
	}
}
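
/*
 * Illustrative note (not part of the driver): RDMSR/WRMSR move the 64-bit
 * MSR value through EDX:EAX, which is why the code above splits EFER that
 * way. For example, a guest WRMSR with EDX=0 and EAX=0x500 (LME|LMA)
 * results in state->efer = 0x1500, because EFER_SVM (bit 12) is forced
 * back on regardless of what the guest wrote.
 */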

static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery":
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
		intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}

/*
 * Determine the cause of the virtual cpu exit and handle the #VMEXIT.
 * Return: false - Break the vcpu execution loop and handle the vmexit
 *		   in kernel or user space.
 *	   true  - Continue running the vcpu.
 */
static bool
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	bool update_rip, loop, retu;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	update_rip = true;
	loop = true;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.status = 0;

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_MC:	/* Machine Check. */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		loop = false;
		break;

	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->e.g.sctx_rdx;

		if (ecx == MSR_EFER) {
			VCPU_CTR0(svm_sc->vm, vcpu, "VMEXIT EFER\n");
			svm_efer(svm_sc, vcpu, info1);
			break;
		}

		retu = false;
		if (info1) {
			/* VM exited because of a write to an MSR. */
			vmm_stat_incr(svm_sc->vm, vcpu,
				VMEXIT_WRMSR, 1);
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			val = (uint64_t)edx << 32 | eax;
			if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val,
				&retu)) {
				vmexit->u.msr.wval = val;
				loop = false;
			} else
				loop = retu ? false : true;

			VCPU_CTR3(svm_sc->vm, vcpu,
				"VMEXIT WRMSR(%s handling) 0x%lx @0x%x",
				loop ? "kernel" : "user", val, ecx);
		} else {
			vmm_stat_incr(svm_sc->vm, vcpu,
				VMEXIT_RDMSR, 1);
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
			if (emulate_rdmsr(svm_sc->vm, vcpu, ecx,
				&retu)) {
				loop = false;
			} else
				loop = retu ? false : true;
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR"
				" MSB=0x%08x, LSB=%08x @0x%x",
				ctx->e.g.sctx_rdx, state->rax, ecx);
		}

#define MSR_AMDK8_IPM	0xc0010055
		/*
		 * We can't hide the AMD C1E idle capability since it's
		 * based on CPU generation; for now ignore accesses to
		 * this MSR by vcpus.
		 * XXX: special handling of AMD C1E - Ignore.
		 */
		if (ecx == MSR_AMDK8_IPM)
			loop = true;
		break;

	case VMCB_EXIT_INTR:
		/*
		 * Exit on an external interrupt. Let the host interrupt
		 * handler run; if the interrupt is for the guest, the
		 * local APIC will inject the event into the guest.
		 */
		update_rip = false;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:VMEXIT ExtInt"
			" RIP:0x%lx.\n", state->rip);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		break;

	case VMCB_EXIT_IO:
		loop = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;

	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		(void)x86_emulate_cpuid(svm_sc->vm, vcpu,
			(uint32_t *)&state->rax,
			(uint32_t *)&ctx->sctx_rbx,
			(uint32_t *)&ctx->sctx_rcx,
			(uint32_t *)&ctx->e.g.sctx_rdx);
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT CPUID\n");
		break;

	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		if (ctrl->v_irq) {
			/* An interrupt is pending, can't halt the guest. */
			vmm_stat_incr(svm_sc->vm, vcpu,
				VMEXIT_HLT_IGNORED, 1);
			VCPU_CTR0(svm_sc->vm, vcpu,
				"VMEXIT halt ignored.");
		} else {
			VCPU_CTR0(svm_sc->vm, vcpu,
				"VMEXIT halted CPU.");
			vmexit->exitcode = VM_EXITCODE_HLT;
			vmexit->u.hlt.rflags = state->rflags;
			loop = false;
		}
		break;

	case VMCB_EXIT_PAUSE:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT pause");
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;

	case VMCB_EXIT_NPF:
		loop = false;
		update_rip = false;

		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT"
				" reserved bit is set,"
				" INFO1:0x%lx INFO2:0x%lx.\n",
				info1, info2);
			break;
		}

		/* EXITINFO2 has the physical fault address (GPA). */
		if (vm_mem_allocated(svm_sc->vm, info2)) {
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF-paging,"
				" RIP:0x%lx INFO1:0x%lx INFO2:0x%lx.\n",
				state->rip, info1, info2);
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type =
				svm_npf_paging(info1);
			vmm_stat_incr(svm_sc->vm, vcpu,
				VMEXIT_NESTED_FAULT, 1);
		} else if (svm_npf_emul_fault(info1)) {
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF inst_emul,"
				" RIP:0x%lx INFO1:0x%lx INFO2:0x%lx.\n",
				state->rip, info1, info2);
			svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu),
				info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu,
				VMEXIT_INST_EMUL, 1);
		}
		break;

	case VMCB_EXIT_SHUTDOWN:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT shutdown.");
		loop = false;
		break;

	case VMCB_EXIT_INVALID:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT INVALID.");
		loop = false;
		break;

	default:
		/* Return to user space. */
		loop = false;
		update_rip = false;
		VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx"
			" EXITINFO1: 0x%lx EXITINFO2:0x%lx\n",
			ctrl->exitcode, info1, info2);
		VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx"
			" Inst decoder len:%d\n", state->rip,
			ctrl->nrip, ctrl->inst_decode_size);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	vmexit->rip = state->rip;
	if (update_rip) {
		if (ctrl->nrip == 0) {
			VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set "
				"for RIP 0x%lx.\n", state->rip);
			vmexit->exitcode = VM_EXITCODE_VMX;
		} else
			vmexit->rip = ctrl->nrip;
	}

	/* If vcpu execution is continued, update the RIP. */
	if (loop) {
		state->rip = vmexit->rip;
	}

	if (state->rip == 0) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM_ERR:RIP is NULL\n");
		vmexit->exitcode = VM_EXITCODE_VMX;
	}

	return (loop);
}

/*
 * Inject an NMI into the virtual cpu.
 */
static int
svm_inject_nmi(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	/* Nothing to do if no NMI is pending. */
	if (!vm_nmi_pending(svm_sc->vm, vcpu))
		return (0);

	/* Inject the NMI; the vector number is not used. */
	vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false);

	/* Acknowledge that the request has been accepted. */
	vm_nmi_clear(svm_sc->vm, vcpu);

	VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n");

	return (1);
}

static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo),
		VMCB_EXITINTINFO_VECTOR(intinfo),
		VMCB_EXITINTINFO_EC(intinfo),
		VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}
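
/*
 * For reference (per APMv2, "Event Injection"; illustrative only): the
 * EVENTINJ field encodes the vector in bits 7:0, the event type in bits
 * 10:8, the error-code-valid flag in bit 11, the valid flag in bit 31
 * and the error code in bits 63:32. vmcb_eventinject() is assumed to
 * pack its arguments into that layout.
 */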

/*
 * Inject an event into the virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	int extint_pending;
	int vector;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	svm_inj_intinfo(svm_sc, vcpu);

	/* Can't inject multiple events at once. */
	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR1(svm_sc->vm, vcpu,
			"SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj);
		return;
	}

	/* Wait for the guest to come out of the interrupt shadow. */
	if (ctrl->intr_shadow) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n");
		return;
	}

	/* NMI events have priority over interrupts. */
	if (svm_inject_nmi(svm_sc, vcpu)) {
		return;
	}

	extint_pending = vm_extint_pending(svm_sc->vm, vcpu);

	if (!extint_pending) {
		/* Ask the local apic for a vector to inject */
		if (!vlapic_pending_intr(vlapic, &vector))
			return;
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(svm_sc->vm, &vector);
	}

	if (vector < 32 || vector > 255) {
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection,"
			" invalid vector=%d.\n", vector);
		ERR("SVM_ERR:Event injection, invalid vector=%d.\n", vector);
		return;
	}

	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Interrupts are disabled\n");
		return;
	}

	vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		/* Update the Local APIC ISR */
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(svm_sc->vm, vcpu);
		vatpic_intr_accepted(svm_sc->vm, vector);

		/*
		 * XXX need to recheck extint_pending ala VT-x
		 */
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:event injected,vector=%d.\n", vector);
}

/*
 * Restore the host Task Register selector type after every vcpu exit.
 */
static void
setup_tss_type(void)
{
	struct system_segment_descriptor *desc;

	desc = (struct system_segment_descriptor *)&gdt[curcpu * NGDT +
		GPROC0_SEL];
	/*
	 * The task selector that should be restored on the host is
	 * 64-bit available (9), not what was read (0xb); see the
	 * APMvol2 Rev3.21 4.8.3 System Descriptors table.
	 */
	desc->sd_type = 9;
}
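
/*
 * Background note (illustrative only): loading TR with "ltr" marks the
 * TSS descriptor in the GDT as busy (type 0xB), and loading a busy TSS
 * descriptor raises #GP. Since svm_vmrun() below reloads TR after every
 * #VMEXIT, the descriptor type must first be reset to available (0x9),
 * which is what setup_tss_type() does:
 *
 *	setup_tss_type();
 *	svm_launch(vmcb_pa, gctx, hctx);
 *	ltr(GSEL(GPROC0_SEL, SEL_KPL));
 */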

/*
 * Start the vcpu with the specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
	void *rend_cookie, void *suspended_cookie)
{
	struct svm_regctx *hctx, *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	bool loop;	/* Continue vcpu execution loop. */

	loop = true;
	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	hctx = &host_ctx[curcpu];
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/* The virtual CPU is now running on a different CPU. */
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);

		/*
		 * Flush all TLB mappings for this guest on this CPU;
		 * it might have stale entries since the vcpu has migrated
		 * or the vmm has been restarted.
		 */
		ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;

		/* Can't use any VMCB state cached by this cpu. */
		ctrl->vmcb_clean = VMCB_CACHE_NONE;
	} else {
		/*
		 * XXX: Using the same ASID for all vcpus of a VM will cause
		 * TLB corruption. This can easily be reproduced by muxing
		 * two vcpus on the same core.
		 * For now, flush the guest TLB on every vmrun.
		 */
		ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;

		/*
		 * This is the same cpu on which the vcpu last ran so we
		 * don't need to reload all VMCB state.
		 * ASID is unique for a guest.
		 * IOPM is unchanged.
		 * RVI/EPT is unchanged.
		 */
		ctrl->vmcb_clean = VMCB_CACHE_ASID |
			VMCB_CACHE_IOPM |
			VMCB_CACHE_NP;
	}

	vcpustate->lastcpu = curcpu;
	VCPU_CTR3(vm, vcpu, "SVM:Enter vmrun RIP:0x%lx"
		" inst len=%d/%d\n",
		rip, vmexit->inst_length,
		vmexit->u.inst_emul.vie.num_valid);
	/* Update Guest RIP */
	state->rip = rip;

	do {
		vmexit->inst_length = 0;

		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(suspended_cookie)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(rend_cookie)) {
			enable_gintr();
			vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
			vmm_stat_incr(vm, vcpu, VMEXIT_RENDEZVOUS, 1);
			VCPU_CTR1(vm, vcpu,
				"SVM: VCPU rendezvous, RIP:0x%lx\n",
				state->rip);
			vmexit->rip = state->rip;
			break;
		}

		/* The scheduler has asked us to give up the cpu. */
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_gintr();
			vmexit->exitcode = VM_EXITCODE_BOGUS;
			vmm_stat_incr(vm, vcpu, VMEXIT_ASTPENDING, 1);
			VCPU_CTR1(vm, vcpu,
				"SVM: ASTPENDING, RIP:0x%lx\n", state->rip);
			vmexit->rip = state->rip;
			break;
		}

		(void)svm_set_vmcb(svm_get_vmcb(svm_sc, vcpu), svm_sc->asid);

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Change the TSS type to available. */
		setup_tss_type();

		/* Launch Virtual Machine. */
		svm_launch(vmcb_pa, gctx, hctx);

		/*
		 * Only the host's GDTR and IDTR are saved and restored by
		 * SVM; LDTR and TR need to be restored by the VMM.
		 * XXX: the kernel doesn't use the LDT, only user space does.
		 */
		ltr(GSEL(GPROC0_SEL, SEL_KPL));

		/*
		 * The guest FS and GS selectors are stashed by vmload and
		 * vmsave. The host FS and GS selectors are stashed by
		 * svm_launch(). The host GS base, which holds the per-cpu
		 * data, needs to be restored before enabling global
		 * interrupts. FS is not used by the FreeBSD kernel, and
		 * the kernel restores the user's FS selector and base
		 * before returning to userland.
		 *
		 * Note: 'curcpu' can't be used here since it itself relies
		 * on the per-cpu data reached through the GS base.
		 */
		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);
		wrmsr(MSR_KGSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Handle the #VMEXIT and, if required, return to user space. */
		loop = svm_vmexit(svm_sc, vcpu, vmexit);
		vcpustate->loop++;
		vmm_stat_incr(vm, vcpu, VMEXIT_COUNT, 1);
	} while (loop);

	return (0);
}

/*
 * Cleanup for the virtual machine.
 */
static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *svm_sc;

	svm_sc = arg;

	VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n");

	free(svm_sc, M_SVM);
}

/*
 * Return a pointer to the hypervisor-saved register state.
 */
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->e.g.sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->e.g.sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->e.g.sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		ERR("Unknown register requested, reg=%d.\n", reg);
		break;
	}

	return (NULL);
}

/*
 * Interface to read guest registers.
 * This can be an SVM h/w saved or a hypervisor saved register.
 */
static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	if (vmcb_read(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}

/*
 * Interface to write to guest registers.
 * This can be an SVM h/w saved or a hypervisor saved register.
 */
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);
	if (vmcb_write(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}
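
/*
 * Usage sketch (illustrative only): registers such as RAX, RSP and RIP
 * live in the hardware-saved VMCB state and are reached via
 * vmcb_read()/vmcb_write(), while the general purpose registers above
 * are saved in software by svm_launch(). A caller doesn't need to care
 * which is which:
 *
 *	uint64_t rbx;
 *	if (svm_getreg(svm_sc, vcpu, VM_REG_GUEST_RBX, &rbx) == 0)
 *		... rbx now holds the guest's %rbx ...
 */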

/*
 * Interface to set various descriptors.
 */
static int
svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	struct vmcb_segment *seg;
	uint16_t attrib;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);

	seg = vmcb_seg(vmcb, type);
	if (seg == NULL) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map the seg_desc access field to the VMCB attribute format. */
	attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
	VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
		type, desc->access, desc->limit);
	seg->attrib = attrib;
	seg->base = desc->base;
	seg->limit = desc->limit;

	return (0);
}

/*
 * Interface to get a guest descriptor.
 */
static int
svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb_segment *seg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);

	seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
	if (!seg) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map the VMCB attribute format to the seg_desc access field. */
	desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
	desc->base = seg->base;
	desc->limit = seg->limit;

	/*
	 * VT-x uses bit 16 (Unusable) to indicate a segment that has been
	 * loaded with a NULL segment selector. The 'desc->access' field is
	 * interpreted in the VT-x format by the processor-independent code.
	 *
	 * SVM uses the 'P' bit to convey the same information so convert it
	 * into the VT-x format. For more details refer to section
	 * "Segment State in the VMCB" in APMv2.
	 */
	if (type == VM_REG_GUEST_CS || type == VM_REG_GUEST_TR)
		desc->access |= 0x80;		/* CS and TR are always present */

	if (!(desc->access & 0x80))
		desc->access |= 0x10000;	/* Unusable segment */

	return (0);
}
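
/*
 * Worked example of the conversion above (illustrative only): a 64-bit
 * kernel code segment has a VT-x style access word of 0xA09B
 * (G=1, L=1, P=1, DPL=0, type=0xB). Dropping the unused byte in the
 * middle yields the 12-bit VMCB attribute:
 *
 *	((0xA09B & 0xF000) >> 4) | (0xA09B & 0xFF) = 0xA00 | 0x9B = 0xA9B
 *
 * svm_getdesc() performs the inverse mapping.
 */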
"enabled": "disabled"); 1523 break; 1524 1525 case VM_CAP_UNRESTRICTED_GUEST: 1526 /* SVM doesn't need special capability for SMP.*/ 1527 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Set_gap:Unrestricted " 1528 "always enabled.\n"); 1529 ret = 0; 1530 break; 1531 1532 default: 1533 break; 1534 } 1535 1536 return (ret); 1537} 1538 1539static int 1540svm_getcap(void *arg, int vcpu, int type, int *retval) 1541{ 1542 struct svm_softc *svm_sc; 1543 struct vmcb_ctrl *ctrl; 1544 1545 svm_sc = arg; 1546 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1547 1548 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1549 1550 switch (type) { 1551 case VM_CAP_HALT_EXIT: 1552 *retval = (ctrl->ctrl1 & VMCB_INTCPT_HLT) ? 1 : 0; 1553 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Halt exit %s.\n", 1554 *retval ? "enabled": "disabled"); 1555 break; 1556 1557 case VM_CAP_PAUSE_EXIT: 1558 *retval = (ctrl->ctrl1 & VMCB_INTCPT_PAUSE) ? 1 : 0; 1559 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Pause exit %s.\n", 1560 *retval ? "enabled": "disabled"); 1561 break; 1562 1563 case VM_CAP_MTRAP_EXIT: 1564 *retval = (ctrl->exception & BIT(IDT_MC)) ? 1 : 0; 1565 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:MC exit %s.\n", 1566 *retval ? "enabled": "disabled"); 1567 break; 1568 1569 case VM_CAP_UNRESTRICTED_GUEST: 1570 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:get_cap:Unrestricted.\n"); 1571 *retval = 1; 1572 break; 1573 default: 1574 break; 1575 } 1576 1577 return (0); 1578} 1579 1580static struct vlapic * 1581svm_vlapic_init(void *arg, int vcpuid) 1582{ 1583 struct svm_softc *svm_sc; 1584 struct vlapic *vlapic; 1585 1586 svm_sc = arg; 1587 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 1588 vlapic->vm = svm_sc->vm; 1589 vlapic->vcpuid = vcpuid; 1590 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; 1591 1592 vlapic_init(vlapic); 1593 1594 return (vlapic); 1595} 1596 1597static void 1598svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) 1599{ 1600 1601 vlapic_cleanup(vlapic); 1602 free(vlapic, M_SVM_VLAPIC); 1603} 1604 1605struct vmm_ops vmm_ops_amd = { 1606 svm_init, 1607 svm_cleanup, 1608 svm_restore, 1609 svm_vminit, 1610 svm_vmrun, 1611 svm_vmcleanup, 1612 svm_getreg, 1613 svm_setreg, 1614 svm_getdesc, 1615 svm_setdesc, 1616 svm_getcap, 1617 svm_setcap, 1618 svm_npt_alloc, 1619 svm_npt_free, 1620 svm_vlapic_init, 1621 svm_vlapic_cleanup 1622}; 1623