svm.c revision 261462
/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 261462 2014-02-04 07:13:56Z grehan $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/vmm.h>

#include <x86/apicreg.h>

#include "vmm_lapic.h"
#include "vmm_msr.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "npt.h"

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define AMD_CPUID_SVM_ASID_FLUSH	BIT(6)  /* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */

MALLOC_DEFINE(M_SVM, "svm", "svm");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static bool svm_vmexit(struct svm_softc *svm_sc, int vcpu,
			struct vm_exit *vmexit);
static int svm_msr_rw_ok(uint8_t *btmap, uint64_t msr);
static int svm_msr_index(uint64_t msr, int *index, int *bit);

static uint32_t svm_feature; /* AMD SVM features. */

/*
 * Starting guest ASID, 0 is reserved for host.
 * Each guest will have its own unique ASID.
 */
static uint32_t guest_asid = 1;
/*
 * Maximum ASID the processor can support.
 * This limits the maximum number of virtual machines that can be created.
 */
static int max_asid;

/*
 * SVM host state saved area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * S/w saved host context.
 */
static struct svm_regctx host_ctx[MAXCPU];

static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid EXITINTINFO");

/*
 * Common function to enable or disable SVM for a CPU.
 */
static int
cpu_svm_enable_disable(boolean_t enable)
{
	uint64_t efer_msr;

	efer_msr = rdmsr(MSR_EFER);

	if (enable)
		efer_msr |= EFER_SVM;
	else
		efer_msr &= ~EFER_SVM;

	wrmsr(MSR_EFER, efer_msr);

	return (0);
}

/*
 * Disable SVM on a CPU.
 */
static void
svm_disable(void *arg __unused)
{

	(void)cpu_svm_enable_disable(FALSE);
}

/*
 * Disable SVM for all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}

/*
 * Check for required bhyve SVM features in a CPU.
 */
static int
svm_cpuid_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature = regs[3];

	printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]);
	max_asid = regs[1];

	printf("SVM Features:0x%b\n", svm_feature,
		"\020"
		"\001NP"		/* Nested paging */
		"\002LbrVirt"		/* LBR virtualization */
		"\003SVML"		/* SVM lock */
		"\004NRIPS"		/* NRIP save */
		"\005TscRateMsr"	/* MSR based TSC rate control */
		"\006VmcbClean"		/* VMCB clean bits */
		"\007FlushByAsid"	/* Flush by ASID */
		"\010DecodeAssist"	/* Decode assist */
		"\011<b20>"
		"\012<b20>"
		"\013PauseFilter"
		"\014<b20>"
		"\015PauseFilterThreshold"
	);

	/* SVM Lock */
	if (!(svm_feature & AMD_CPUID_SVM_SVML)) {
		printf("SVM is disabled by BIOS, please enable in BIOS.\n");
		return (ENXIO);
	}

	/*
	 * bhyve needs RVI to work.
	 */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("Missing Nested paging or RVI SVM support in processor.\n");
		return (EIO);
	}

	if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE)
		return (0);

	return (EIO);
}

/*
 * Enable SVM for a CPU.
 */
static void
svm_enable(void *arg __unused)
{
	uint64_t hsave_pa;

	(void)cpu_svm_enable_disable(TRUE);

	hsave_pa = vtophys(hsave[curcpu]);
	wrmsr(MSR_VM_HSAVE_PA, hsave_pa);

	if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) {
		panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu);
	}
}

/*
 * Check if a processor supports SVM.
 */
static int
is_svm_enabled(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM is not supported on this processor.\n");
		return (ENXIO);
	}

	msr = rdmsr(MSR_VM_CR);
	/* Make sure SVM is not disabled by BIOS. */
	if ((msr & VM_CR_SVMDIS) == 0) {
		return (svm_cpuid_features());
	}

	printf("SVM disabled by key, consult TPM/BIOS manual.\n");
	return (ENXIO);
}
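
/*
 * For reference, the overall host-side enable sequence implemented by the
 * functions above (a summary following APM2 section 15.4, not an extra
 * code path):
 *
 *	1. CPUID Fn8000_0001 ECX[2] (AMDID2_SVM) - processor implements SVM.
 *	2. MSR VM_CR.SVMDIS clear - SVM not locked out by BIOS/TPM key.
 *	3. CPUID Fn8000_000A - SVM revision, NASID and optional features.
 *	4. Set EFER.SVM on every CPU (cpu_svm_enable_disable(TRUE)).
 *	5. Program MSR_VM_HSAVE_PA with a 4KB per-CPU host save area.
 */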

/*
 * Enable SVM on CPU and initialize nested page table h/w.
 */
static int
svm_init(void)
{
	int err;

	err = is_svm_enabled();
	if (err)
		return (err);

	svm_npt_init();

	/* Start SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

/*
 * Get index and bit position for an MSR in the MSR permission
 * bitmap. Two bits are used for each MSR: the lower bit is
 * for read and the higher bit is for write.
 */
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

/* Pentium compatible MSRs */
#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EIO);
}

/*
 * Give the virtual CPU complete access to an MSR (read & write).
 */
static int
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	int index, bit, err;

	err = svm_msr_index(msr, &index, &bit);
	if (err) {
		ERR("MSR 0x%lx is not writeable by guest.\n", msr);
		return (err);
	}

	if (index < 0 || index >= SVM_MSR_BITMAP_SIZE) {
		ERR("MSR 0x%lx index out of range(%d).\n", msr, index);
		return (EINVAL);
	}
	if (bit < 0 || bit > 6) {
		ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit);
		return (EINVAL);
	}

	/* Disable intercept for read and write. */
	perm_bitmap[index] &= ~(3 << bit);
	CTR1(KTR_VMM, "Guest has full control on SVM:MSR(0x%lx).\n", msr);

	return (0);
}
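
/*
 * Worked example of the mapping above (a sketch, assuming the bitmap
 * layout described in the APM): MSR_LSTAR (0xC0000082) falls in the AMD
 * 6th generation range, so
 *
 *	off   = 0xC0000082 - MSR_AMD6TH_START = 0x82
 *	base  = 0x2000 (the 8192 Pentium-compatible MSRs come first)
 *	index = (0x82 + 0x2000) / 4 = 0x820
 *	bit   = (0xC0000082 % 4) * 2 = 4
 *
 * and svm_msr_rw_ok() clears the two intercept bits with
 * perm_bitmap[0x820] &= ~(3 << 4).
 */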

/*
 * Initialise VCPU.
 */
static int
svm_init_vcpu(struct svm_vcpu *vcpu, vm_paddr_t iopm_pa, vm_paddr_t msrpm_pa,
		vm_paddr_t pml4_pa, uint8_t asid)
{

	vcpu->lastcpu = NOCPU;
	vcpu->vmcb_pa = vtophys(&vcpu->vmcb);

	/*
	 * Initialise VMCB persistent area of vcpu.
	 * 1. Permission bitmap for MSR and IO space.
	 * 2. Nested paging.
	 * 3. ASID of virtual machine.
	 */
	if (svm_init_vmcb(&vcpu->vmcb, iopm_pa, msrpm_pa, pml4_pa)) {
		return (EIO);
	}

	return (0);
}

/*
 * Initialise a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	if (guest_asid >= max_asid) {
		ERR("Host supports max ASID:%d, can't create more guests.\n",
			max_asid);
		return (NULL);
	}

	svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc),
			M_SVM, M_WAITOK | M_ZERO);

	svm_sc->vm = vm;
	svm_sc->svm_feature = svm_feature;
	svm_sc->vcpu_cnt = VM_MAXCPU;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
	/*
	 * Each guest has its own unique ASID.
	 * The ASID (Address Space Identifier) is used by TLB entries.
	 */
	svm_sc->asid = guest_asid++;

	/*
	 * Intercept access to all MSRs; exceptions (GSBASE, FSBASE, etc.)
	 * are opened up below.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));

	/*
	 * The following MSRs can be completely controlled by the virtual
	 * machine, since accesses to them are serviced from the VMCB.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);

	/* For Nested Paging/RVI only. */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

	/* Cache physical addresses shared by all vcpus. */
	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;

	for (i = 0; i < svm_sc->vcpu_cnt; i++) {
		if (svm_init_vcpu(svm_get_vcpu(svm_sc, i), iopm_pa, msrpm_pa,
				pml4_pa, svm_sc->asid)) {
			ERR("SVM couldn't initialise VCPU%d\n", i);
			goto cleanup;
		}
	}

	return (svm_sc);

cleanup:
	free(svm_sc, M_SVM);
	return (NULL);
}

/*
 * Handle guest I/O intercept.
 */
static bool
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint64_t info1;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	info1 = ctrl->exitinfo1;

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	return (false);
}

static int
svm_npf_paging(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);

	return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

/*
 * Special handling of EFER MSR.
 * An SVM guest must have the EFER SVM-enable bit set; prohibit the guest
 * from clearing it.
 */
static void
svm_efer(struct svm_softc *svm_sc, int vcpu, boolean_t write)
{
	struct svm_regctx *swctx;
	struct vmcb_state *state;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	swctx = svm_get_guest_regctx(svm_sc, vcpu);

	if (write) {
		state->efer = ((swctx->e.g.sctx_rdx & (uint32_t)~0) << 32) |
				((uint32_t)state->rax) | EFER_SVM;
	} else {
		state->rax = (uint32_t)state->efer;
		swctx->e.g.sctx_rdx = (uint32_t)(state->efer >> 32);
	}
}
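
/*
 * Sketch of the register convention handled above (assuming the usual
 * WRMSR EDX:EAX split): a guest executing WRMSR on MSR_EFER with EDX=0
 * and EAX=0x500 (LME|LMA) ends up with state->efer = 0x500 | EFER_SVM,
 * i.e. the SVM-enable bit (bit 12) can never be cleared from inside the
 * guest. The read path splits state->efer back into EDX:EAX the same way.
 */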

/*
 * Determine the cause of virtual cpu exit and handle VMEXIT.
 * Return: false - Break vcpu execution loop and handle vmexit
 *		   in kernel or user space.
 *	   true  - Continue vcpu run.
 */
static bool
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	bool update_rip, loop, retu;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	update_rip = true;
	loop = true;
	vmexit->exitcode = VM_EXITCODE_VMX;
	vmexit->u.vmx.error = 0;

	switch (code) {
	case VMCB_EXIT_MC: /* Machine Check. */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1);
		vmexit->exitcode = VM_EXITCODE_MTRAP;
		loop = false;
		break;

	case VMCB_EXIT_MSR: /* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->e.g.sctx_rdx;

		if (ecx == MSR_EFER) {
			VCPU_CTR0(svm_sc->vm, vcpu, "VMEXIT EFER\n");
			svm_efer(svm_sc, vcpu, info1);
			break;
		}

		retu = false;
		if (info1) {
			/* VM exited because of write MSR */
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			vmexit->exitcode = VM_EXITCODE_WRMSR;
			vmexit->u.msr.code = ecx;
			val = (uint64_t)edx << 32 | eax;
			if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val, &retu)) {
				vmexit->u.msr.wval = val;
				loop = false;
			} else
				loop = retu ? false : true;

			VCPU_CTR3(svm_sc->vm, vcpu,
				"VMEXIT WRMSR(%s handling) 0x%lx @0x%x",
				loop ? "kernel" : "user", val, ecx);
		} else {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			vmexit->exitcode = VM_EXITCODE_RDMSR;
			vmexit->u.msr.code = ecx;
			if (emulate_rdmsr(svm_sc->vm, vcpu, ecx, &retu)) {
				loop = false;
			} else
				loop = retu ? false : true;
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR"
				" MSB=0x%08x, LSB=%08x @0x%x",
				ctx->e.g.sctx_rdx, state->rax, ecx);
		}

#define MSR_AMDK8_IPM	0xc0010055
		/*
		 * We can't hide the AMD C1E idle capability since it's
		 * based on CPU generation; for now ignore access to
		 * this MSR by vcpus.
		 * XXX: special handling of AMD C1E - Ignore.
		 */
		if (ecx == MSR_AMDK8_IPM)
			loop = true;
		break;

	case VMCB_EXIT_INTR:
		/*
		 * Exit on external interrupt.
		 * Let the host interrupt handler run; if it is a guest
		 * interrupt, the local APIC will inject the event into
		 * the guest.
		 */
		update_rip = false;
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:VMEXIT ExtInt"
			" RIP:0x%lx.\n", state->rip);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		break;

	case VMCB_EXIT_IO:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		loop = svm_handle_io(svm_sc, vcpu, vmexit);
		update_rip = true;
		break;

	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		(void)x86_emulate_cpuid(svm_sc->vm, vcpu,
			(uint32_t *)&state->rax,
			(uint32_t *)&ctx->sctx_rbx,
			(uint32_t *)&ctx->sctx_rcx,
			(uint32_t *)&ctx->e.g.sctx_rdx);
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT CPUID\n");
		break;

	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		if (ctrl->v_irq) {
			/* Interrupt is pending, can't halt guest. */
			vmm_stat_incr(svm_sc->vm, vcpu,
				VMEXIT_HLT_IGNORED, 1);
			VCPU_CTR0(svm_sc->vm, vcpu,
				"VMEXIT halt ignored.");
		} else {
			VCPU_CTR0(svm_sc->vm, vcpu,
				"VMEXIT halted CPU.");
			vmexit->exitcode = VM_EXITCODE_HLT;
			vmexit->u.hlt.rflags = state->rflags;
			loop = false;
		}
		break;

	case VMCB_EXIT_PAUSE:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT pause");
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;

	case VMCB_EXIT_NPF:
		loop = false;
		update_rip = false;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EPT_FAULT, 1);

		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT"
				" reserved bit is set,"
				"INFO1:0x%lx INFO2:0x%lx .\n",
				info1, info2);
			break;
		}

		/* EXITINFO2 has the physical fault address (GPA). */
		if (vm_mem_allocated(svm_sc->vm, info2)) {
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF-paging,"
				"RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n",
				state->rip, info1, info2);
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = svm_npf_paging(info1);
		} else if (svm_npf_emul_fault(info1)) {
			VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF-inst_emul,"
				"RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n",
				state->rip, info1, info2);
			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
			vmexit->u.inst_emul.gpa = info2;
			vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
			vmexit->u.inst_emul.cr3 = state->cr3;
			vmexit->inst_length = VIE_INST_SIZE;
		}
		break;

	case VMCB_EXIT_SHUTDOWN:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT shutdown.");
		loop = false;
		break;

	case VMCB_EXIT_INVALID:
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT INVALID.");
		loop = false;
		break;

	default:
		/* Return to user space. */
		loop = false;
		update_rip = false;
		VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx"
			" EXITINFO1: 0x%lx EXITINFO2:0x%lx\n",
			ctrl->exitcode, info1, info2);
		VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx"
			" Inst decoder len:%d\n", state->rip,
			ctrl->nrip, ctrl->inst_decode_size);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	vmexit->rip = state->rip;
	if (update_rip) {
		if (ctrl->nrip == 0) {
			VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set"
				" for RIP 0x%lx.\n", state->rip);
			vmexit->exitcode = VM_EXITCODE_VMX;
		} else
			vmexit->rip = ctrl->nrip;
	}

	/* If vcpu execution is continued, update RIP. */
	if (loop) {
		state->rip = vmexit->rip;
	}

	if (state->rip == 0) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM_ERR:RIP is NULL\n");
		vmexit->exitcode = VM_EXITCODE_VMX;
	}

	return (loop);
}
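
/*
 * Worked example of the RIP update above (a sketch, relying on the
 * NRIP-save feature required by svm_cpuid_features()): for a CPUID
 * intercept at guest RIP 0x1000, the hardware stores 0x1002 (the address
 * of the next instruction, CPUID being two bytes) in ctrl->nrip, so
 * continuing with state->rip = vmexit->rip = ctrl->nrip resumes the
 * guest just past the emulated instruction.
 */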

/*
 * Inject NMI to virtual cpu.
 */
static int
svm_inject_nmi(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	/* Nothing to do if no NMI is pending for this vcpu. */
	if (!vm_nmi_pending(svm_sc->vm, vcpu))
		return (0);

	/* Inject NMI, vector number is not used. */
	if (vmcb_eventinject(ctrl, VM_NMI, IDT_NMI, 0, false)) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:NMI injection failed.\n");
		return (EIO);
	}

	/* Acknowledge that the request has been accepted. */
	vm_nmi_clear(svm_sc->vm, vcpu);

	VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n");

	return (1);
}

/*
 * Inject event to virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	int vector;

	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	/* Can't inject multiple events at once. */
	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR1(svm_sc->vm, vcpu,
			"SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj);
		return;
	}

	/* Wait for guest to come out of interrupt shadow. */
	if (ctrl->intr_shadow) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n");
		return;
	}

	/* NMI event has priority over interrupts. */
	if (svm_inject_nmi(svm_sc, vcpu)) {
		return;
	}

	vector = lapic_pending_intr(svm_sc->vm, vcpu);

	/* No interrupt is pending. */
	if (vector < 0)
		return;

	if (vector < 32 || vector > 255) {
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection,"
			" invalid vector=%d.\n", vector);
		ERR("SVM_ERR:Event injection invalid vector=%d.\n", vector);
		return;
	}

	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Interrupts are disabled\n");
		return;
	}

	if (vmcb_eventinject(ctrl, VM_HW_INTR, vector, 0, false)) {
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Event injection failed for"
			" vector=%d.\n", vector);
		return;
	}

	/* Acknowledge that the event has been accepted. */
	lapic_intr_accepted(svm_sc->vm, vcpu, vector);
	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:event injected, vector=%d.\n", vector);
}

/*
 * Restore host Task Register selector type after every vcpu exit.
 */
static void
setup_tss_type(void)
{
	struct system_segment_descriptor *desc;

	desc = (struct system_segment_descriptor *)&gdt[curcpu * NGDT +
		GPROC0_SEL];
	/*
	 * The task descriptor type that should be restored in the host is
	 * 64-bit available (9), not what is read back (0xb, busy); see
	 * APMvol2 Rev3.21 4.8.3 System Descriptors table.
	 */
	desc->sd_type = 9;
}
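
/*
 * Why the type rewrite above is needed (a sketch based on the APM2
 * descriptor rules): ltr requires an "available" 64-bit TSS descriptor
 * (type 9) and marks it "busy" (type 0xb) once loaded. VMRUN/#VMEXIT
 * restore the host TR selector but leave the GDT descriptor busy, so
 * without resetting sd_type to 9 the ltr() performed after svm_launch()
 * in svm_vmrun() would fault on the busy descriptor.
 */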

static void
svm_handle_exitintinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);

	/*
	 * VMEXIT while delivering an exception or interrupt.
	 * Re-inject it as a virtual interrupt.
	 * Section 15.7.2 Intercepts during IDT interrupt delivery.
	 */
	intinfo = ctrl->exitintinfo;

	if (intinfo & VMCB_EXITINTINFO_VALID) {
		vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
		VCPU_CTR1(svm_sc->vm, vcpu, "SVM:EXITINTINFO:0x%lx is valid\n",
			intinfo);
		if (vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo),
			VMCB_EXITINTINFO_VECTOR(intinfo),
			VMCB_EXITINTINFO_EC(intinfo),
			VMCB_EXITINTINFO_EC_VALID & intinfo)) {
			VCPU_CTR1(svm_sc->vm, vcpu, "SVM:couldn't inject pending"
				" interrupt, exitintinfo:0x%lx\n", intinfo);
		}
	}
}
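
/*
 * Example decode (a sketch of the EXITINTINFO layout from APM2 15.7.2):
 * if delivery of keyboard vector 0x21 is cut short by a nested page
 * fault, EXITINTINFO carries VALID | TYPE=external interrupt |
 * VECTOR=0x21, and the vmcb_eventinject() call above re-queues exactly
 * that event so the guest still receives the interrupt once the fault
 * has been serviced.
 */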

/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap)
{
	struct svm_regctx *hctx, *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	uint64_t vmcb_pa;
	static uint64_t host_cr2;
	bool loop;	/* Continue vcpu execution loop. */

	loop = true;
	svm_sc = arg;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(svm_sc->vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	hctx = &host_ctx[curcpu];
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/* The virtual CPU is now running on a different CPU. */
		vmm_stat_incr(svm_sc->vm, vcpu, VCPU_MIGRATIONS, 1);

		/*
		 * Flush all TLB mappings for this guest on this CPU,
		 * it might have stale entries.
		 */
		ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;

		/* Can't use any VMCB state cached by the CPU. */
		ctrl->vmcb_clean = VMCB_CACHE_NONE;
	} else {
		/*
		 * XXX: Using the same ASID for all vcpus of a VM will cause
		 * TLB corruption. This can easily be reproduced by muxing
		 * two vcpus on the same core.
		 * For now, flush the guest TLB on every vmrun.
		 */
		ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;

		/*
		 * This is the same CPU on which the vcpu last ran, so we
		 * don't need to reload all VMCB state.
		 * ASID is unique for a guest.
		 * IOPM is unchanged.
		 * RVI/EPT is unchanged.
		 */
		ctrl->vmcb_clean = VMCB_CACHE_ASID |
			VMCB_CACHE_IOPM |
			VMCB_CACHE_NP;
	}

	vcpustate->lastcpu = curcpu;
	VCPU_CTR3(svm_sc->vm, vcpu, "SVM:Enter vmrun old RIP:0x%lx"
		" new RIP:0x%lx inst len=%d\n",
		state->rip, rip, vmexit->inst_length);
	/* Update Guest RIP */
	state->rip = rip;

	do {
		vmexit->inst_length = 0;
		/* The scheduler has asked us to give up the CPU. */
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			vmexit->exitcode = VM_EXITCODE_BOGUS;
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_ASTPENDING, 1);
			VCPU_CTR1(svm_sc->vm, vcpu,
				"SVM: gave up CPU, RIP:0x%lx\n", state->rip);
			vmexit->rip = state->rip;
			break;
		}

		(void)svm_set_vmcb(svm_get_vmcb(svm_sc, vcpu), svm_sc->asid);

		svm_handle_exitintinfo(svm_sc, vcpu);

		(void)svm_inj_interrupts(svm_sc, vcpu);

		/* Change TSS type to available. */
		setup_tss_type();

		/*
		 * Disable global interrupts to guarantee atomicity
		 * during loading of guest state.
		 * See 15.5.1 "Loading guest state" APM2.
		 */
		disable_gintr();

		save_cr2(&host_cr2);
		load_cr2(&state->cr2);

		/* Launch Virtual Machine. */
		svm_launch(vmcb_pa, gctx, hctx);

		save_cr2(&state->cr2);
		load_cr2(&host_cr2);

		/*
		 * Only the host's GDTR and IDTR are saved and restored by
		 * SVM; LDTR and TR need to be restored by the VMM.
		 * XXX: the kernel doesn't use the LDT, only user space does.
		 */
		ltr(GSEL(GPROC0_SEL, SEL_KPL));

		/*
		 * Guest FS and GS selectors are stashed by vmload and vmsave.
		 * Host FS and GS selectors are stashed by svm_launch().
		 * The host GS base, which holds the per-CPU area, needs to
		 * be restored before enabling global interrupts.
		 * FS is not used by the FreeBSD kernel, which restores the
		 * user FS selector and base before returning to userland.
		 *
		 * Note: 'curcpu' can't be used here because it relies on
		 * the per-CPU data that GS base points to.
		 */
		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);
		wrmsr(MSR_KGSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);

		/* #VMEXIT returns with global interrupts disabled. */
		enable_gintr();

		/* Handle #VMEXIT and if required return to user space. */
		loop = svm_vmexit(svm_sc, vcpu, vmexit);
		vcpustate->loop++;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	} while (loop);

	return (0);
}

/*
 * Cleanup for virtual machine.
 */
static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *svm_sc;

	svm_sc = arg;

	VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n");

	free(svm_sc, M_SVM);
}

/*
 * Return pointer to hypervisor saved register state.
 */
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->e.g.sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->e.g.sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->e.g.sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		ERR("Unknown register requested, reg=%d.\n", reg);
		break;
	}

	return (NULL);
}

/*
 * Interface to read guest registers.
 * These can be SVM hardware-saved or hypervisor-saved registers.
 */
static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	if (vmcb_read(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}
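
/*
 * Usage sketch (hypothetical caller, following the vmm_ops interface):
 *
 *	uint64_t rbx;
 *	error = svm_getreg(svm_sc, vcpu, VM_REG_GUEST_RBX, &rbx);
 *
 * RBX lives in the software-saved svm_regctx, so the vmcb_read() probe
 * fails and the value comes via swctx_regptr(); a hardware-saved
 * register such as RAX or CR3 would instead be satisfied directly from
 * the VMCB.
 */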

/*
 * Interface to write to guest registers.
 * These can be SVM hardware-saved or hypervisor-saved registers.
 */
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);
	if (vmcb_write(vmcb, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}

/*
 * Interface to set various descriptors.
 */
static int
svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb *vmcb;
	struct vmcb_segment *seg;
	uint16_t attrib;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	vmcb = svm_get_vmcb(svm_sc, vcpu);

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);

	seg = vmcb_seg(vmcb, type);
	if (seg == NULL) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map seg_desc access to VMCB attribute format. */
	attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
	VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
		type, desc->access, desc->limit);
	seg->attrib = attrib;
	seg->base = desc->base;
	seg->limit = desc->limit;

	return (0);
}

/*
 * Interface to get guest descriptor.
 */
static int
svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
	struct svm_softc *svm_sc;
	struct vmcb_segment *seg;

	svm_sc = arg;
	KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));

	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);

	seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
	if (!seg) {
		ERR("SVM_ERR:Unsupported segment type%d\n", type);
		return (EINVAL);
	}

	/* Map VMCB attribute format back to seg_desc access. */
	desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
	desc->base = seg->base;
	desc->limit = seg->limit;

	return (0);
}
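
/*
 * Worked example of the attribute packing above (a sketch): a typical
 * 64-bit code segment has seg_desc access 0xA09B (G=1, L=1 in the top
 * nibble; P=1, S=1, type=0xb in the low byte). svm_setdesc() folds the
 * flag nibble from bits 12-15 down to bits 8-11 of the 12-bit VMCB
 * attribute:
 *
 *	attrib = ((0xA09B & 0xF000) >> 4) | (0xA09B & 0xFF) = 0xA9B
 *
 * and svm_getdesc() reverses it: ((0xA9B & 0xF00) << 4) | 0x9B = 0xA09B.
 */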
"enabled": "disabled"); 1250 break; 1251 1252 case VM_CAP_PAUSE_EXIT: 1253 if (val) 1254 ctrl->ctrl1 |= VMCB_INTCPT_PAUSE; 1255 else 1256 ctrl->ctrl1 &= ~VMCB_INTCPT_PAUSE; 1257 ret = 0; 1258 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Pause exit %s.\n", 1259 val ? "enabled": "disabled"); 1260 break; 1261 1262 case VM_CAP_MTRAP_EXIT: 1263 if (val) 1264 ctrl->exception |= BIT(IDT_MC); 1265 else 1266 ctrl->exception &= ~BIT(IDT_MC); 1267 ret = 0; 1268 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:MC exit %s.\n", 1269 val ? "enabled": "disabled"); 1270 break; 1271 1272 case VM_CAP_UNRESTRICTED_GUEST: 1273 /* SVM doesn't need special capability for SMP.*/ 1274 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Set_gap:Unrestricted " 1275 "always enabled.\n"); 1276 ret = 0; 1277 break; 1278 1279 default: 1280 break; 1281 } 1282 1283 return (ret); 1284} 1285 1286static int 1287svm_getcap(void *arg, int vcpu, int type, int *retval) 1288{ 1289 struct svm_softc *svm_sc; 1290 struct vmcb_ctrl *ctrl; 1291 1292 svm_sc = arg; 1293 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu)); 1294 1295 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1296 1297 switch (type) { 1298 case VM_CAP_HALT_EXIT: 1299 *retval = (ctrl->ctrl1 & VMCB_INTCPT_HLT) ? 1 : 0; 1300 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Halt exit %s.\n", 1301 *retval ? "enabled": "disabled"); 1302 break; 1303 1304 case VM_CAP_PAUSE_EXIT: 1305 *retval = (ctrl->ctrl1 & VMCB_INTCPT_PAUSE) ? 1 : 0; 1306 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Pause exit %s.\n", 1307 *retval ? "enabled": "disabled"); 1308 break; 1309 1310 case VM_CAP_MTRAP_EXIT: 1311 *retval = (ctrl->exception & BIT(IDT_MC)) ? 1 : 0; 1312 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:MC exit %s.\n", 1313 *retval ? "enabled": "disabled"); 1314 break; 1315 1316 case VM_CAP_UNRESTRICTED_GUEST: 1317 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:get_cap:Unrestricted.\n"); 1318 *retval = 1; 1319 break; 1320 default: 1321 break; 1322 } 1323 1324 return (0); 1325} 1326 1327struct vmm_ops vmm_ops_amd = { 1328 svm_init, 1329 svm_cleanup, 1330 svm_vminit, 1331 svm_vmrun, 1332 svm_vmcleanup, 1333 svm_getreg, 1334 svm_setreg, 1335 svm_getdesc, 1336 svm_setdesc, 1337 svm_inject_event, 1338 svm_getcap, 1339 svm_setcap, 1340 svm_npt_alloc, 1341 svm_npt_free 1342}; 1343