/*	$NetBSD: nvmm_x86_svm.c,v 1.85 2023/02/23 02:54:02 riastradh Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.85 2023/02/23 02:54:02 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/dbregs.h>
#include <x86/cpu_counter.h>

#include <machine/cpuvar.h>
#include <machine/pmap_private.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int svm_vmrun(paddr_t, uint64_t *);

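/*
 * CLGI and STGI clear and set the Global Interrupt Flag (GIF). While
 * GIF=0 the host CPU takes no interrupts or NMIs; svm_vcpu_run() relies
 * on this to keep the VMRUN sequence atomic with respect to host
 * interrupts.
 */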
static inline void
svm_clgi(void)
{
	asm volatile ("clgi" ::: "memory");
}

static inline void
svm_stgi(void)
{
	asm volatile ("stgi" ::: "memory");
}

#define MSR_VM_HSAVE_PA	0xC0010117

/* -------------------------------------------------------------------------- */

#define VMCB_EXITCODE_CR0_READ		0x0000
#define VMCB_EXITCODE_CR1_READ		0x0001
#define VMCB_EXITCODE_CR2_READ		0x0002
#define VMCB_EXITCODE_CR3_READ		0x0003
#define VMCB_EXITCODE_CR4_READ		0x0004
#define VMCB_EXITCODE_CR5_READ		0x0005
#define VMCB_EXITCODE_CR6_READ		0x0006
#define VMCB_EXITCODE_CR7_READ		0x0007
#define VMCB_EXITCODE_CR8_READ		0x0008
#define VMCB_EXITCODE_CR9_READ		0x0009
#define VMCB_EXITCODE_CR10_READ		0x000A
#define VMCB_EXITCODE_CR11_READ		0x000B
#define VMCB_EXITCODE_CR12_READ		0x000C
#define VMCB_EXITCODE_CR13_READ		0x000D
#define VMCB_EXITCODE_CR14_READ		0x000E
#define VMCB_EXITCODE_CR15_READ		0x000F
#define VMCB_EXITCODE_CR0_WRITE		0x0010
#define VMCB_EXITCODE_CR1_WRITE		0x0011
#define VMCB_EXITCODE_CR2_WRITE		0x0012
#define VMCB_EXITCODE_CR3_WRITE		0x0013
#define VMCB_EXITCODE_CR4_WRITE		0x0014
#define VMCB_EXITCODE_CR5_WRITE		0x0015
#define VMCB_EXITCODE_CR6_WRITE		0x0016
#define VMCB_EXITCODE_CR7_WRITE		0x0017
#define VMCB_EXITCODE_CR8_WRITE		0x0018
#define VMCB_EXITCODE_CR9_WRITE		0x0019
#define VMCB_EXITCODE_CR10_WRITE	0x001A
#define VMCB_EXITCODE_CR11_WRITE	0x001B
#define VMCB_EXITCODE_CR12_WRITE	0x001C
#define VMCB_EXITCODE_CR13_WRITE	0x001D
#define VMCB_EXITCODE_CR14_WRITE	0x001E
#define VMCB_EXITCODE_CR15_WRITE	0x001F
#define VMCB_EXITCODE_DR0_READ		0x0020
#define VMCB_EXITCODE_DR1_READ		0x0021
#define VMCB_EXITCODE_DR2_READ		0x0022
#define VMCB_EXITCODE_DR3_READ		0x0023
#define VMCB_EXITCODE_DR4_READ		0x0024
#define VMCB_EXITCODE_DR5_READ		0x0025
#define VMCB_EXITCODE_DR6_READ		0x0026
#define VMCB_EXITCODE_DR7_READ		0x0027
#define VMCB_EXITCODE_DR8_READ		0x0028
#define VMCB_EXITCODE_DR9_READ		0x0029
#define VMCB_EXITCODE_DR10_READ		0x002A
#define VMCB_EXITCODE_DR11_READ		0x002B
#define VMCB_EXITCODE_DR12_READ		0x002C
#define VMCB_EXITCODE_DR13_READ		0x002D
#define VMCB_EXITCODE_DR14_READ		0x002E
#define VMCB_EXITCODE_DR15_READ		0x002F
#define VMCB_EXITCODE_DR0_WRITE		0x0030
#define VMCB_EXITCODE_DR1_WRITE		0x0031
#define VMCB_EXITCODE_DR2_WRITE		0x0032
#define VMCB_EXITCODE_DR3_WRITE		0x0033
#define VMCB_EXITCODE_DR4_WRITE		0x0034
#define VMCB_EXITCODE_DR5_WRITE		0x0035
#define VMCB_EXITCODE_DR6_WRITE		0x0036
#define VMCB_EXITCODE_DR7_WRITE		0x0037
#define VMCB_EXITCODE_DR8_WRITE		0x0038
#define VMCB_EXITCODE_DR9_WRITE		0x0039
#define VMCB_EXITCODE_DR10_WRITE	0x003A
#define VMCB_EXITCODE_DR11_WRITE	0x003B
#define VMCB_EXITCODE_DR12_WRITE	0x003C
#define VMCB_EXITCODE_DR13_WRITE	0x003D
#define VMCB_EXITCODE_DR14_WRITE	0x003E
#define VMCB_EXITCODE_DR15_WRITE	0x003F
#define VMCB_EXITCODE_EXCP0		0x0040
#define VMCB_EXITCODE_EXCP1		0x0041
#define VMCB_EXITCODE_EXCP2		0x0042
#define VMCB_EXITCODE_EXCP3		0x0043
#define VMCB_EXITCODE_EXCP4		0x0044
#define VMCB_EXITCODE_EXCP5		0x0045
#define VMCB_EXITCODE_EXCP6		0x0046
#define VMCB_EXITCODE_EXCP7		0x0047
#define VMCB_EXITCODE_EXCP8		0x0048
#define VMCB_EXITCODE_EXCP9		0x0049
#define VMCB_EXITCODE_EXCP10		0x004A
#define VMCB_EXITCODE_EXCP11		0x004B
#define VMCB_EXITCODE_EXCP12		0x004C
#define VMCB_EXITCODE_EXCP13		0x004D
#define VMCB_EXITCODE_EXCP14		0x004E
#define VMCB_EXITCODE_EXCP15		0x004F
#define VMCB_EXITCODE_EXCP16		0x0050
#define VMCB_EXITCODE_EXCP17		0x0051
#define VMCB_EXITCODE_EXCP18		0x0052
#define VMCB_EXITCODE_EXCP19		0x0053
#define VMCB_EXITCODE_EXCP20		0x0054
#define VMCB_EXITCODE_EXCP21		0x0055
#define VMCB_EXITCODE_EXCP22		0x0056
#define VMCB_EXITCODE_EXCP23		0x0057
#define VMCB_EXITCODE_EXCP24		0x0058
#define VMCB_EXITCODE_EXCP25		0x0059
#define VMCB_EXITCODE_EXCP26		0x005A
#define VMCB_EXITCODE_EXCP27		0x005B
#define VMCB_EXITCODE_EXCP28		0x005C
#define VMCB_EXITCODE_EXCP29		0x005D
#define VMCB_EXITCODE_EXCP30		0x005E
#define VMCB_EXITCODE_EXCP31		0x005F
#define VMCB_EXITCODE_INTR		0x0060
#define VMCB_EXITCODE_NMI		0x0061
#define VMCB_EXITCODE_SMI		0x0062
#define VMCB_EXITCODE_INIT		0x0063
#define VMCB_EXITCODE_VINTR		0x0064
#define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
#define VMCB_EXITCODE_IDTR_READ		0x0066
#define VMCB_EXITCODE_GDTR_READ		0x0067
#define VMCB_EXITCODE_LDTR_READ		0x0068
#define VMCB_EXITCODE_TR_READ		0x0069
#define VMCB_EXITCODE_IDTR_WRITE	0x006A
#define VMCB_EXITCODE_GDTR_WRITE	0x006B
#define VMCB_EXITCODE_LDTR_WRITE	0x006C
#define VMCB_EXITCODE_TR_WRITE		0x006D
#define VMCB_EXITCODE_RDTSC		0x006E
#define VMCB_EXITCODE_RDPMC		0x006F
#define VMCB_EXITCODE_PUSHF		0x0070
#define VMCB_EXITCODE_POPF		0x0071
#define VMCB_EXITCODE_CPUID		0x0072
#define VMCB_EXITCODE_RSM		0x0073
#define VMCB_EXITCODE_IRET		0x0074
#define VMCB_EXITCODE_SWINT		0x0075
#define VMCB_EXITCODE_INVD		0x0076
#define VMCB_EXITCODE_PAUSE		0x0077
#define VMCB_EXITCODE_HLT		0x0078
#define VMCB_EXITCODE_INVLPG		0x0079
#define VMCB_EXITCODE_INVLPGA		0x007A
#define VMCB_EXITCODE_IOIO		0x007B
#define VMCB_EXITCODE_MSR		0x007C
#define VMCB_EXITCODE_TASK_SWITCH	0x007D
#define VMCB_EXITCODE_FERR_FREEZE	0x007E
#define VMCB_EXITCODE_SHUTDOWN		0x007F
#define VMCB_EXITCODE_VMRUN		0x0080
#define VMCB_EXITCODE_VMMCALL		0x0081
#define VMCB_EXITCODE_VMLOAD		0x0082
#define VMCB_EXITCODE_VMSAVE		0x0083
#define VMCB_EXITCODE_STGI		0x0084
#define VMCB_EXITCODE_CLGI		0x0085
#define VMCB_EXITCODE_SKINIT		0x0086
#define VMCB_EXITCODE_RDTSCP		0x0087
#define VMCB_EXITCODE_ICEBP		0x0088
#define VMCB_EXITCODE_WBINVD		0x0089
#define VMCB_EXITCODE_MONITOR		0x008A
#define VMCB_EXITCODE_MWAIT		0x008B
#define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
#define VMCB_EXITCODE_XSETBV		0x008D
#define VMCB_EXITCODE_RDPRU		0x008E
#define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
#define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
#define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
#define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
#define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
#define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
#define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
#define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
#define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
#define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
#define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
#define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
#define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
#define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
#define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
#define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
#define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
#define VMCB_EXITCODE_INVLPGB		0x00A0
#define VMCB_EXITCODE_INVLPGB_ILLEGAL	0x00A1
#define VMCB_EXITCODE_INVPCID		0x00A2
#define VMCB_EXITCODE_MCOMMIT		0x00A3
#define VMCB_EXITCODE_TLBSYNC		0x00A4
#define VMCB_EXITCODE_NPF		0x0400
#define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
#define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
#define VMCB_EXITCODE_VMGEXIT		0x0403
#define VMCB_EXITCODE_BUSY		-2ULL
#define VMCB_EXITCODE_INVALID		-1ULL

/* -------------------------------------------------------------------------- */

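/*
 * Layout of the VMCB control area. The field offsets and bit
 * definitions follow AMD's APM Vol. 2 description of the VMCB; the
 * trailing pad keeps the structure exactly 1024 bytes, as asserted
 * below.
 */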
struct vmcb_ctrl {
	uint32_t intercept_cr;
#define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)

	uint32_t intercept_dr;
#define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)

	uint32_t intercept_vec;
#define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)

	uint32_t intercept_misc1;
#define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
#define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
#define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
#define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
#define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
#define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
#define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
#define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
#define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
#define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
#define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
#define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
#define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
#define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
#define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
#define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
#define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
#define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
#define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
#define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
#define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
#define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
#define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
#define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
#define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
#define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)

	uint32_t intercept_misc2;
#define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
#define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
#define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
#define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
#define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
#define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
#define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
#define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(11)
#define VMCB_CTRL_INTERCEPT_MWAIT_ARMED	__BIT(12)
#define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDPRU	__BIT(14)
#define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)

	uint32_t intercept_misc3;
#define VMCB_CTRL_INTERCEPT_INVLPGB_ALL	__BIT(0)
#define VMCB_CTRL_INTERCEPT_INVLPGB_ILL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_PCID	__BIT(2)
#define VMCB_CTRL_INTERCEPT_MCOMMIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_TLBSYNC	__BIT(4)

	uint8_t rsvd1[36];
	uint16_t pause_filt_thresh;
	uint16_t pause_filt_cnt;
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t guest_asid;

	uint32_t tlb_ctrl;
#define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07

	uint64_t v;
#define VMCB_CTRL_V_TPR			__BITS(3,0)
#define VMCB_CTRL_V_IRQ			__BIT(8)
#define VMCB_CTRL_V_VGIF		__BIT(9)
#define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
#define VMCB_CTRL_V_IGN_TPR		__BIT(20)
#define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
#define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
#define VMCB_CTRL_V_AVIC_EN		__BIT(31)
#define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)

	uint64_t intr;
#define VMCB_CTRL_INTR_SHADOW		__BIT(0)
#define VMCB_CTRL_INTR_MASK		__BIT(1)

	uint64_t exitcode;
	uint64_t exitinfo1;
	uint64_t exitinfo2;

	uint64_t exitintinfo;
#define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
#define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
#define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
#define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)

	uint64_t enable1;
#define VMCB_CTRL_ENABLE_NP		__BIT(0)
#define VMCB_CTRL_ENABLE_SEV		__BIT(1)
#define VMCB_CTRL_ENABLE_ES_SEV		__BIT(2)
#define VMCB_CTRL_ENABLE_GMET		__BIT(3)
#define VMCB_CTRL_ENABLE_VTE		__BIT(5)

	uint64_t avic;
#define VMCB_CTRL_AVIC_APIC_BAR		__BITS(51,0)

	uint64_t ghcb;

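	/*
	 * Event injection: the host writes a pending exception or
	 * interrupt descriptor here and the CPU delivers it to the
	 * guest on the next VMRUN. Conversely, exitintinfo above
	 * reports an event that was in flight when a #VMEXIT occurred.
	 */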
	uint64_t eventinj;
#define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
#define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
#define VMCB_CTRL_EVENTINJ_V		__BIT(31)
#define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)

	uint64_t n_cr3;

	uint64_t enable2;
#define VMCB_CTRL_ENABLE_LBR		__BIT(0)
#define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)

	uint32_t vmcb_clean;
#define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
#define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
#define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
#define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
#define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
#define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
#define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
#define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
#define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
#define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
#define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
#define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)

	uint32_t rsvd2;
	uint64_t nrip;
	uint8_t inst_len;
	uint8_t inst_bytes[15];
	uint64_t avic_abpp;
	uint64_t rsvd3;
	uint64_t avic_ltp;

	uint64_t avic_phys;
#define VMCB_CTRL_AVIC_PHYS_TABLE_PTR	__BITS(51,12)
#define VMCB_CTRL_AVIC_PHYS_MAX_INDEX	__BITS(7,0)

	uint64_t rsvd4;
	uint64_t vmsa_ptr;

	uint8_t pad[752];
} __packed;

CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_segment {
	uint16_t selector;
	uint16_t attrib;	/* hidden */
	uint32_t limit;		/* hidden */
	uint64_t base;		/* hidden */
} __packed;

CTASSERT(sizeof(struct vmcb_segment) == 16);

struct vmcb_state {
	struct vmcb_segment es;
	struct vmcb_segment cs;
	struct vmcb_segment ss;
	struct vmcb_segment ds;
	struct vmcb_segment fs;
	struct vmcb_segment gs;
	struct vmcb_segment gdt;
	struct vmcb_segment ldt;
	struct vmcb_segment idt;
	struct vmcb_segment tr;
	uint8_t rsvd1[43];
	uint8_t cpl;
	uint8_t rsvd2[4];
	uint64_t efer;
	uint8_t rsvd3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t rsvd4[88];
	uint64_t rsp;
	uint8_t rsvd5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t rsvd6[32];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t pad[2408];
} __packed;

CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __packed;

CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);

/* -------------------------------------------------------------------------- */

static void svm_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void svm_vcpu_state_commit(struct nvmm_cpu *);

struct svm_hsave {
	paddr_t pa;
};

static struct svm_hsave hsave[MAXCPUS];

static uint8_t *svm_asidmap __read_mostly;
static uint32_t svm_maxasid __read_mostly;
static kmutex_t svm_asidlock __cacheline_aligned;

static bool svm_decode_assist __read_mostly;
static uint32_t svm_ctrl_tlb_flush __read_mostly;

#define SVM_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t svm_xcr0_mask __read_mostly;

#define SVM_NCPUIDS	32

#define VMCB_NPAGES	1

#define MSRBM_NPAGES	2
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define IOBM_NPAGES	3
#define IOBM_SIZE	(IOBM_NPAGES * PAGE_SIZE)

/* Does not include EFER_LMSLE. */
#define EFER_VALID \
	(EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)

#define CR4_VALID \
	(CR4_VME | \
	 CR4_PVI | \
	 CR4_TSD | \
	 CR4_DE | \
	 CR4_PSE | \
	 CR4_PAE | \
	 CR4_MCE | \
	 CR4_PGE | \
	 CR4_PCE | \
	 CR4_OSFXSR | \
	 CR4_OSXMMEXCPT | \
	 CR4_UMIP | \
	 /* CR4_LA57 excluded */ \
	 /* bit 13 reserved on AMD */ \
	 /* bit 14 reserved on AMD */ \
	 /* bit 15 reserved on AMD */ \
	 CR4_FSGSBASE | \
	 CR4_PCIDE | \
	 CR4_OSXSAVE | \
	 /* bit 19 reserved on AMD */ \
	 CR4_SMEP | \
	 CR4_SMAP \
	 /* CR4_PKE excluded */ \
	 /* CR4_CET excluded */ \
	 /* bits 24:63 reserved on AMD */)

/* -------------------------------------------------------------------------- */

struct svm_machdata {
	volatile uint64_t mach_htlb_gen;
};

static const size_t svm_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)
};

struct svm_cpudata {
	/* General */
	bool shared_asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;

	/* VMCB */
	struct vmcb *vmcb;
	paddr_t vmcb_pa;

	/* I/O bitmap */
	uint8_t *iobm;
	paddr_t iobm_pa;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t fsbase;
	uint64_t kernelgsbase;

	/* Intr state */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state */
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc;
	struct xsave_header gfpu __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[SVM_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[SVM_NCPUIDS];
};

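/*
 * VMCB clean bits: a set bit tells the CPU that the corresponding VMCB
 * area has not been modified since the last VMRUN, so its cached copy
 * may be reused. We default to "all clean" after each run and clear
 * bits whenever the host touches the matching fields.
 */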
static void
svm_vmcb_cache_default(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean =
	    VMCB_CTRL_VMCB_CLEAN_I |
	    VMCB_CTRL_VMCB_CLEAN_IOPM |
	    VMCB_CTRL_VMCB_CLEAN_ASID |
	    VMCB_CTRL_VMCB_CLEAN_TPR |
	    VMCB_CTRL_VMCB_CLEAN_NP |
	    VMCB_CTRL_VMCB_CLEAN_CR |
	    VMCB_CTRL_VMCB_CLEAN_DR |
	    VMCB_CTRL_VMCB_CLEAN_DT |
	    VMCB_CTRL_VMCB_CLEAN_SEG |
	    VMCB_CTRL_VMCB_CLEAN_CR2 |
	    VMCB_CTRL_VMCB_CLEAN_LBR |
	    VMCB_CTRL_VMCB_CLEAN_AVIC;
}

static void
svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
{
	if (flags & NVMM_X64_STATE_SEGS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
	}
	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2 |
		      VMCB_CTRL_VMCB_CLEAN_TPR);
	}
	if (flags & NVMM_X64_STATE_DRS) {
		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
	}
	if (flags & NVMM_X64_STATE_MSRS) {
		/* CR for EFER, NP for PAT. */
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
	}
}

static inline void
svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
{
	vmcb->ctrl.vmcb_clean &= ~flags;
}

static inline void
svm_vmcb_cache_flush_all(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean = 0;
}

#define SVM_EVENT_TYPE_HW_INT	0
#define SVM_EVENT_TYPE_NMI	2
#define SVM_EVENT_TYPE_EXC	3
#define SVM_EVENT_TYPE_SW_INT	4

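/*
 * Interrupt/NMI window exiting. SVM has no dedicated "interrupt
 * window" control: for interrupts we inject a dummy virtual interrupt
 * (V_IRQ with TPR ignored) and intercept VINTR, which fires as soon as
 * the guest can take interrupts; for NMIs we intercept IRET, which
 * marks the end of the NMI handler currently blocking NMI delivery.
 */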
static void
svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = true;
	} else {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = true;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

static void
svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = false;
	} else {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = false;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

static inline bool
svm_excp_has_rf(uint8_t vector)
{
	switch (vector) {
	case 1:		/* #DB */
	case 4:		/* #OF */
	case 8:		/* #DF */
	case 18:	/* #MC */
		return false;
	default:
		return true;
	}
}

static inline int
svm_excp_has_error(uint8_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

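/*
 * Fetch the event described in the comm page and program it into the
 * VMCB eventinj field for delivery at the next VMRUN. For exceptions,
 * vectors that cannot be injected this way (#DE, NMI, #BP, and
 * anything >= 32) are rejected with EINVAL.
 */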
static int
svm_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	u_int evtype;
	uint8_t vector;
	uint64_t error;
	int type = 0, err = 0;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;
	__insn_barrier();

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		type = SVM_EVENT_TYPE_EXC;
		if (vector == 2 || vector >= 32)
			return EINVAL;
		if (vector == 3 || vector == 0)
			return EINVAL;
		if (svm_excp_has_rf(vector)) {
			vmcb->state.rflags |= PSL_RF;
		}
		err = svm_excp_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = SVM_EVENT_TYPE_HW_INT;
		if (vector == 2) {
			type = SVM_EVENT_TYPE_NMI;
			svm_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	default:
		return EINVAL;
	}

	vmcb->ctrl.eventinj =
	    __SHIFTIN((uint64_t)vector, VMCB_CTRL_EVENTINJ_VECTOR) |
	    __SHIFTIN((uint64_t)type, VMCB_CTRL_EVENTINJ_TYPE) |
	    __SHIFTIN((uint64_t)err, VMCB_CTRL_EVENTINJ_EV) |
	    __SHIFTIN((uint64_t)1, VMCB_CTRL_EVENTINJ_V) |
	    __SHIFTIN((uint64_t)error, VMCB_CTRL_EVENTINJ_ERRORCODE);

	cpudata->evt_pending = true;

	return 0;
}

static void
svm_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = svm_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static void
svm_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = svm_vcpu_inject(vcpu);
	KASSERT(ret == 0);
}

static inline int
svm_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return svm_vcpu_inject(vcpu);
}

static inline void
svm_inkernel_advance(struct vmcb *vmcb)
{
	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */
	vmcb->state.rip = vmcb->ctrl.nrip;
	vmcb->state.rflags &= ~PSL_RF;
	vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
}

#define SVM_CPUID_MAX_BASIC		0xD
#define SVM_CPUID_MAX_HYPERVISOR	0x40000000
#define SVM_CPUID_MAX_EXTENDED		0x8000001F
static uint32_t svm_cpuid_max_basic __read_mostly;
static uint32_t svm_cpuid_max_extended __read_mostly;

static void
svm_inkernel_exec_cpuid(struct svm_cpudata *cpudata, uint64_t eax, uint64_t ecx)
{
	u_int descs[4];

	x86_cpuid2(eax, ecx, descs);
	cpudata->vmcb->state.rax = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
}

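/*
 * In-kernel CPUID policy: clamp out-of-range leaves to the highest
 * leaf supported in each range (basic, hypervisor, extended), then
 * filter the feature bits so the guest only sees what NVMM is willing
 * to emulate.
 */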
static void
svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t cr4;

	if (eax < 0x40000000) {
		if (__predict_false(eax > svm_cpuid_max_basic)) {
			eax = svm_cpuid_max_basic;
			svm_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {
		if (__predict_false(eax > SVM_CPUID_MAX_HYPERVISOR)) {
			eax = svm_cpuid_max_basic;
			svm_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {
		if (__predict_false(eax > svm_cpuid_max_extended)) {
			eax = svm_cpuid_max_basic;
			svm_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {
	case 0x00000000:
		cpudata->vmcb->state.rax = svm_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->vmcb->state.rax &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID2_OSXSAVE depends on CR4. */
		cr4 = cpudata->vmcb->state.cr4;
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID2_OSXSAVE;
		}
		break;
	case 0x00000002: /* Empty */
	case 0x00000003: /* Empty */
	case 0x00000004: /* Empty */
	case 0x00000005: /* Monitor/MWait */
	case 0x00000006: /* Power Management Related Features */
		cpudata->vmcb->state.rax = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007: /* Structured Extended Features */
		switch (ecx) {
		case 0:
			cpudata->vmcb->state.rax = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
			cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
			cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
			break;
		default:
			cpudata->vmcb->state.rax = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x00000008: /* Empty */
	case 0x00000009: /* Empty */
	case 0x0000000A: /* Empty */
	case 0x0000000B: /* Empty */
	case 0x0000000C: /* Empty */
		cpudata->vmcb->state.rax = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000D: /* Processor Extended State Enumeration */
		if (svm_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF;
			if (cpudata->gxcr0 & XCR0_SSE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
			} else {
				cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
			}
			cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
			cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave) + 64;
			cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
			break;
		case 1:
			cpudata->vmcb->state.rax &=
			    (CPUID_PES1_XSAVEOPT | CPUID_PES1_XSAVEC |
			     CPUID_PES1_XGETBV);
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		default:
			cpudata->vmcb->state.rax = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;

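	/*
	 * Hypervisor leaf, in the conventional 0x40000000 range: EBX,
	 * ECX and EDX carry the "___ NVMM ___" vendor signature.
	 */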
	case 0x40000000: /* Hypervisor Information */
		cpudata->vmcb->state.rax = SVM_CPUID_MAX_HYPERVISOR;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;

	case 0x80000000:
		cpudata->vmcb->state.rax = svm_cpuid_max_extended;
		break;
	case 0x80000001:
		cpudata->vmcb->state.rax &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	case 0x80000002: /* Extended Processor Name String */
	case 0x80000003: /* Extended Processor Name String */
	case 0x80000004: /* Extended Processor Name String */
	case 0x80000005: /* L1 Cache and TLB Information */
	case 0x80000006: /* L2 Cache and TLB and L3 Cache Information */
		break;
	case 0x80000007: /* Processor Power Management and RAS Capabilities */
		cpudata->vmcb->state.rax &= nvmm_cpuid_80000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
		break;
	case 0x80000008: /* Processor Capacity Parameters and Ext Feat Ident */
		cpudata->vmcb->state.rax &= nvmm_cpuid_80000008.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
		break;
	case 0x80000009: /* Empty */
	case 0x8000000A: /* SVM Features */
	case 0x8000000B: /* Empty */
	case 0x8000000C: /* Empty */
	case 0x8000000D: /* Empty */
	case 0x8000000E: /* Empty */
	case 0x8000000F: /* Empty */
	case 0x80000010: /* Empty */
	case 0x80000011: /* Empty */
	case 0x80000012: /* Empty */
	case 0x80000013: /* Empty */
	case 0x80000014: /* Empty */
	case 0x80000015: /* Empty */
	case 0x80000016: /* Empty */
	case 0x80000017: /* Empty */
	case 0x80000018: /* Empty */
		cpudata->vmcb->state.rax = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x80000019: /* TLB Characteristics for 1GB pages */
	case 0x8000001A: /* Instruction Optimizations */
		break;
	case 0x8000001B: /* Instruction-Based Sampling Capabilities */
	case 0x8000001C: /* Lightweight Profiling Capabilities */
		cpudata->vmcb->state.rax = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x8000001D: /* Cache Topology Information */
	case 0x8000001E: /* Processor Topology Information */
		break; /* TODO? */
	case 0x8000001F: /* Encrypted Memory Capabilities */
		cpudata->vmcb->state.rax = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;

	default:
		break;
	}
}

static void
svm_exit_insn(struct vmcb *vmcb, struct nvmm_vcpu_exit *exit, uint64_t reason)
{
	exit->u.insn.npc = vmcb->ctrl.nrip;
	exit->reason = reason;
}

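/*
 * CPUID exit handler: run the real CPUID, apply the in-kernel filter
 * above, then apply any user-installed leaf configuration: either a
 * set/del mask, or a forward of the exit to userland.
 */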
static void
svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_vcpu_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	size_t i;

	eax = cpudata->vmcb->state.rax;
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	svm_inkernel_exec_cpuid(cpudata, eax, ecx);
	svm_inkernel_handle_cpuid(vcpu, eax, ecx);

	for (i = 0; i < SVM_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		cpuid = &cpudata->cpuid[i];
		if (cpuid->leaf != eax) {
			continue;
		}

		if (cpuid->exit) {
			svm_exit_insn(cpudata->vmcb, exit, NVMM_VCPU_EXIT_CPUID);
			return;
		}
		KASSERT(cpuid->mask);

		/* del */
		cpudata->vmcb->state.rax &= ~cpuid->u.mask.del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

		/* set */
		cpudata->vmcb->state.rax |= cpuid->u.mask.set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

		break;
	}

	svm_inkernel_advance(cpudata->vmcb);
	exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (cpudata->int_window_exit && (vmcb->state.rflags & PSL_I)) {
		svm_event_waitexit_disable(vcpu, false);
	}

	svm_inkernel_advance(cpudata->vmcb);
	exit->reason = NVMM_VCPU_EXIT_HALTED;
}

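/*
 * Decoding of EXITINFO1 for the IOIO intercept: port number, segment
 * (only available with decode assist), address size, operand size,
 * and the REP/string attributes of the instruction.
 */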
#define SVM_EXIT_IO_PORT	__BITS(31,16)
#define SVM_EXIT_IO_SEG		__BITS(12,10)
#define SVM_EXIT_IO_A64		__BIT(9)
#define SVM_EXIT_IO_A32		__BIT(8)
#define SVM_EXIT_IO_A16		__BIT(7)
#define SVM_EXIT_IO_SZ32	__BIT(6)
#define SVM_EXIT_IO_SZ16	__BIT(5)
#define SVM_EXIT_IO_SZ8		__BIT(4)
#define SVM_EXIT_IO_REP		__BIT(3)
#define SVM_EXIT_IO_STR		__BIT(2)
#define SVM_EXIT_IO_IN		__BIT(0)

static void
svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_VCPU_EXIT_IO;

	exit->u.io.in = (info & SVM_EXIT_IO_IN) != 0;
	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);

	if (svm_decode_assist) {
		KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6);
		exit->u.io.seg = __SHIFTOUT(info, SVM_EXIT_IO_SEG);
	} else {
		exit->u.io.seg = -1;
	}

	if (info & SVM_EXIT_IO_A64) {
		exit->u.io.address_size = 8;
	} else if (info & SVM_EXIT_IO_A32) {
		exit->u.io.address_size = 4;
	} else if (info & SVM_EXIT_IO_A16) {
		exit->u.io.address_size = 2;
	}

	if (info & SVM_EXIT_IO_SZ32) {
		exit->u.io.operand_size = 4;
	} else if (info & SVM_EXIT_IO_SZ16) {
		exit->u.io.operand_size = 2;
	} else if (info & SVM_EXIT_IO_SZ8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0;
	exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0;
	exit->u.io.npc = nextpc;

	svm_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

static const uint64_t msr_ignore_list[] = {
	0xc0010055, /* MSR_CMPHALT */
	MSR_DE_CFG,
	MSR_IC_CFG,
	MSR_UCODE_AMD_PATCHLEVEL
};

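/*
 * In-kernel MSR emulation: reads of EFER hide SVME, and reads of
 * NB_CFG and of the MSRs in msr_ignore_list return fixed values;
 * writes to EFER are validated (with a guest TLB flush when
 * paging-related bits change), writes to TSC update the virtual TSC,
 * and writes to the ignore list are dropped. Everything else is
 * forwarded to userland.
 */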
static bool
svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t val;
	size_t i;

	if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
		if (exit->u.rdmsr.msr == MSR_EFER) {
			val = vmcb->state.efer & ~EFER_SVME;
			vmcb->state.rax = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		if (exit->u.rdmsr.msr == MSR_NB_CFG) {
			val = NB_CFG_INITAPICCPUIDLO;
			vmcb->state.rax = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.rdmsr.msr)
				continue;
			val = 0;
			vmcb->state.rax = (val & 0xFFFFFFFF);
			cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
			goto handled;
		}
	} else {
		if (exit->u.wrmsr.msr == MSR_EFER) {
			if (__predict_false(exit->u.wrmsr.val & ~EFER_VALID)) {
				goto error;
			}
			if ((vmcb->state.efer ^ exit->u.wrmsr.val) &
			    EFER_TLB_FLUSH) {
				cpudata->gtlb_want_flush = true;
			}
			vmcb->state.efer = exit->u.wrmsr.val | EFER_SVME;
			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_CR);
			goto handled;
		}
		if (exit->u.wrmsr.msr == MSR_TSC) {
			cpudata->gtsc = exit->u.wrmsr.val;
			cpudata->gtsc_want_update = true;
			goto handled;
		}
		for (i = 0; i < __arraycount(msr_ignore_list); i++) {
			if (msr_ignore_list[i] != exit->u.wrmsr.msr)
				continue;
			goto handled;
		}
	}

	return false;

handled:
	svm_inkernel_advance(cpudata->vmcb);
	return true;

error:
	svm_inject_gp(vcpu);
	return true;
}

static inline void
svm_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	exit->reason = NVMM_VCPU_EXIT_RDMSR;
	exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
	exit->u.rdmsr.npc = cpudata->vmcb->ctrl.nrip;

	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return;
	}

	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}

static inline void
svm_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t rdx, rax;

	rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
	rax = cpudata->vmcb->state.rax;

	exit->reason = NVMM_VCPU_EXIT_WRMSR;
	exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
	exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
	exit->u.wrmsr.npc = cpudata->vmcb->ctrl.nrip;

	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return;
	}

	svm_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
}

static void
svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;

	if (info == 0) {
		svm_exit_rdmsr(mach, vcpu, exit);
	} else {
		svm_exit_wrmsr(mach, vcpu, exit);
	}
}

static void
svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_VCPU_EXIT_MEMORY;
	if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
		exit->u.mem.prot = PROT_WRITE;
	else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_I)
		exit->u.mem.prot = PROT_EXEC;
	else
		exit->u.mem.prot = PROT_READ;
	exit->u.mem.gpa = gpa;
	exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
	memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,
	    sizeof(exit->u.mem.inst_bytes));

	svm_vcpu_state_provide(vcpu,
	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
}

static void
svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t val;

	exit->reason = NVMM_VCPU_EXIT_NONE;

	val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
	    (vmcb->state.rax & 0xFFFFFFFF);

	if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
		goto error;
	} else if (__predict_false(vmcb->state.cpl != 0)) {
		goto error;
	} else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
		goto error;
	} else if (__predict_false((val & XCR0_X87) == 0)) {
		goto error;
	}

	cpudata->gxcr0 = val;

	svm_inkernel_advance(cpudata->vmcb);
	return;

error:
	svm_inject_gp(vcpu);
}

static void
svm_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_VCPU_EXIT_INVALID;
}

/* -------------------------------------------------------------------------- */

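/*
 * Guest FPU context switch. Enter saves the host FPU state via
 * fpu_kern_enter() and installs the guest XSAVE area and guest %xcr0;
 * leave stores the guest state back and restores the host %xcr0.
 */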
static void
svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	fpu_kern_enter();
	/* TODO: should we use *XSAVE64 here? */
	fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask, false);

	if (svm_xcr0_mask != 0) {
		cpudata->hxcr0 = rdxcr(0);
		wrxcr(0, cpudata->gxcr0);
	}
}

static void
svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (svm_xcr0_mask != 0) {
		cpudata->gxcr0 = rdxcr(0);
		wrxcr(0, cpudata->hxcr0);
	}

	/* TODO: should we use *XSAVE64 here? */
	fpu_area_save(&cpudata->gfpu, svm_xcr0_mask, false);
	fpu_kern_leave();
}

static void
svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	x86_dbregs_save(curlwp);

	ldr7(0);

	ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
	ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
	ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
	ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
}

static void
svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
	cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
	cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
	cpudata->drs[NVMM_X64_DR_DR3] = rdr3();

	x86_dbregs_restore(curlwp);
}

static void
svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	cpudata->fsbase = rdmsr(MSR_FSBASE);
	cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
}

static void
svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	wrmsr(MSR_STAR, cpudata->star);
	wrmsr(MSR_LSTAR, cpudata->lstar);
	wrmsr(MSR_CSTAR, cpudata->cstar);
	wrmsr(MSR_SFMASK, cpudata->sfmask);
	wrmsr(MSR_FSBASE, cpudata->fsbase);
	wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
}

/* -------------------------------------------------------------------------- */

static inline void
svm_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
		cpudata->gtlb_want_flush = true;
	}
}

static inline void
svm_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
{
	/*
	 * Nothing to do. If an hTLB flush was needed, either the VCPU was
	 * executing on this hCPU and the hTLB already got flushed, or it
	 * was executing on another hCPU in which case the catchup is done
	 * in svm_gtlb_catchup().
	 */
}

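/*
 * Host TLB generation tracking: the machine-wide mach_htlb_gen counter
 * is bumped when host mappings change. A VCPU whose vcpu_htlb_gen lags
 * behind requests a TLB flush for the next VMRUN, and acknowledges the
 * new generation only if the run actually took place (exitcode not
 * VMCB_EXITCODE_INVALID).
 */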
static inline uint64_t
svm_htlb_flush(struct svm_machdata *machdata, struct svm_cpudata *cpudata)
{
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;

	machgen = machdata->mach_htlb_gen;
	if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
		return machgen;
	}

	vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
	return machgen;
}

static inline void
svm_htlb_flush_ack(struct svm_cpudata *cpudata, uint64_t machgen)
{
	struct vmcb *vmcb = cpudata->vmcb;

	if (__predict_true(vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID)) {
		cpudata->vcpu_htlb_gen = machgen;
	}
}

static inline void
svm_exit_evt(struct svm_cpudata *cpudata, struct vmcb *vmcb)
{
	cpudata->evt_pending = false;

	if (__predict_false(vmcb->ctrl.exitintinfo & VMCB_CTRL_EXITINTINFO_V)) {
		vmcb->ctrl.eventinj = vmcb->ctrl.exitintinfo;
		cpudata->evt_pending = true;
	}
}

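/*
 * Main entry loop for a VCPU: commit pending state and events, pin to
 * the current host CPU, then run the guest until an exit needs to be
 * handled in userland. In-kernel-handled exits (CPUID, some MSRs,
 * XSETBV, ...) set NVMM_VCPU_EXIT_NONE and loop back into the guest.
 */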
static int
svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t machgen;
	int hcpu;

	svm_vcpu_state_commit(vcpu);
	comm->state_cached = 0;

	if (__predict_false(svm_vcpu_event_commit(vcpu) != 0)) {
		return EINVAL;
	}

	kpreempt_disable();
	hcpu = cpu_number();

	svm_gtlb_catchup(vcpu, hcpu);
	svm_htlb_catchup(vcpu, hcpu);

	if (vcpu->hcpu_last != hcpu) {
		svm_vmcb_cache_flush_all(vmcb);
		cpudata->gtsc_want_update = true;
	}

	svm_vcpu_guest_dbregs_enter(vcpu);
	svm_vcpu_guest_misc_enter(vcpu);

	while (1) {
		if (cpudata->gtlb_want_flush) {
			vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
		} else {
			vmcb->ctrl.tlb_ctrl = 0;
		}

		if (__predict_false(cpudata->gtsc_want_update)) {
			vmcb->ctrl.tsc_offset = cpudata->gtsc - rdtsc();
			svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
		}

		svm_vcpu_guest_fpu_enter(vcpu);
		svm_clgi();
		machgen = svm_htlb_flush(machdata, cpudata);
		svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
		svm_htlb_flush_ack(cpudata, machgen);
		svm_stgi();
		svm_vcpu_guest_fpu_leave(vcpu);

		svm_vmcb_cache_default(vmcb);

		if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
			cpudata->gtlb_want_flush = false;
			cpudata->gtsc_want_update = false;
			vcpu->hcpu_last = hcpu;
		}
		svm_exit_evt(cpudata, vmcb);

		switch (vmcb->ctrl.exitcode) {
		case VMCB_EXITCODE_INTR:
		case VMCB_EXITCODE_NMI:
			exit->reason = NVMM_VCPU_EXIT_NONE;
			break;
		case VMCB_EXITCODE_VINTR:
			svm_event_waitexit_disable(vcpu, false);
			exit->reason = NVMM_VCPU_EXIT_INT_READY;
			break;
		case VMCB_EXITCODE_IRET:
			svm_event_waitexit_disable(vcpu, true);
			exit->reason = NVMM_VCPU_EXIT_NMI_READY;
			break;
		case VMCB_EXITCODE_CPUID:
			svm_exit_cpuid(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_HLT:
			svm_exit_hlt(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_IOIO:
			svm_exit_io(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_MSR:
			svm_exit_msr(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_SHUTDOWN:
			exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
			break;
		case VMCB_EXITCODE_RDPMC:
		case VMCB_EXITCODE_RSM:
		case VMCB_EXITCODE_INVLPGA:
		case VMCB_EXITCODE_VMRUN:
		case VMCB_EXITCODE_VMMCALL:
		case VMCB_EXITCODE_VMLOAD:
		case VMCB_EXITCODE_VMSAVE:
		case VMCB_EXITCODE_STGI:
		case VMCB_EXITCODE_CLGI:
		case VMCB_EXITCODE_SKINIT:
		case VMCB_EXITCODE_RDTSCP:
		case VMCB_EXITCODE_RDPRU:
		case VMCB_EXITCODE_INVLPGB:
		case VMCB_EXITCODE_INVPCID:
		case VMCB_EXITCODE_MCOMMIT:
		case VMCB_EXITCODE_TLBSYNC:
			svm_inject_ud(vcpu);
			exit->reason = NVMM_VCPU_EXIT_NONE;
			break;
		case VMCB_EXITCODE_MONITOR:
			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MONITOR);
			break;
		case VMCB_EXITCODE_MWAIT:
		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
			svm_exit_insn(vmcb, exit, NVMM_VCPU_EXIT_MWAIT);
			break;
		case VMCB_EXITCODE_XSETBV:
			svm_exit_xsetbv(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_NPF:
			svm_exit_npf(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_FERR_FREEZE: /* ? */
		default:
			svm_exit_invalid(exit, vmcb->ctrl.exitcode);
			break;
		}

		/* If no reason to return to userland, keep rolling. */
		if (nvmm_return_needed(vcpu, exit)) {
			break;
		}
		if (exit->reason != NVMM_VCPU_EXIT_NONE) {
			break;
		}
	}

	cpudata->gtsc = rdtsc() + vmcb->ctrl.tsc_offset;

	svm_vcpu_guest_misc_leave(vcpu);
	svm_vcpu_guest_dbregs_leave(vcpu);

	kpreempt_enable();

	exit->exitstate.rflags = vmcb->state.rflags;
	exit->exitstate.cr8 = __SHIFTOUT(vmcb->ctrl.v, VMCB_CTRL_V_TPR);
	exit->exitstate.int_shadow =
	    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
	exit->exitstate.int_window_exiting = cpudata->int_window_exit;
	exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
	exit->exitstate.evt_pending = cpudata->evt_pending;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
{
	struct pglist pglist;
	paddr_t _pa;
	vaddr_t _va;
	size_t i;
	int ret;

	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return ENOMEM;
	_pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (_va == 0)
		goto error;

	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
	}
	pmap_update(pmap_kernel());

	memset((void *)_va, 0, npages * PAGE_SIZE);

	*pa = _pa;
	*va = _va;
	return 0;

error:
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
	}
	return ENOMEM;
}

static void
svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
{
	size_t i;

	pmap_kremove(va, npages * PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
	}
}

/* -------------------------------------------------------------------------- */

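/*
 * MSR permission bitmap. The MSRPM covers three ranges of 8192 MSRs
 * each (0..0x1FFF, 0xC0000000..0xC0001FFF, 0xC0010000..0xC0011FFF),
 * each mapped onto a 2KB region, with two bits per MSR: bit 0
 * intercepts reads, bit 1 intercepts writes. Clearing a bit allows
 * direct guest access to that MSR.
 */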
#define SVM_MSRBM_READ	__BIT(0)
#define SVM_MSRBM_WRITE	__BIT(1)

static void
svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
{
	uint64_t byte;
	uint8_t bitoff;

	if (msr < 0x00002000) {
		/* Range 1 */
		byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
		/* Range 2 */
		byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
	} else if (msr >= 0xC0010000 && msr < 0xC0012000) {
		/* Range 3 */
		byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
	} else {
		panic("%s: wrong range", __func__);
	}

	bitoff = (msr & 0x3) << 1;

	if (read) {
		bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
	}
	if (write) {
		bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
	}
}

#define SVM_SEG_ATTRIB_TYPE	__BITS(3,0)
#define SVM_SEG_ATTRIB_S	__BIT(4)
#define SVM_SEG_ATTRIB_DPL	__BITS(6,5)
#define SVM_SEG_ATTRIB_P	__BIT(7)
#define SVM_SEG_ATTRIB_AVL	__BIT(8)
#define SVM_SEG_ATTRIB_L	__BIT(9)
#define SVM_SEG_ATTRIB_DEF	__BIT(10)
#define SVM_SEG_ATTRIB_G	__BIT(11)

static void
svm_vcpu_setstate_seg(const struct nvmm_x64_state_seg *seg,
    struct vmcb_segment *vseg)
{
	vseg->selector = seg->selector;
	vseg->attrib =
	    __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
	    __SHIFTIN(seg->attrib.s, SVM_SEG_ATTRIB_S) |
	    __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
	    __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
	    __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
	    __SHIFTIN(seg->attrib.l, SVM_SEG_ATTRIB_L) |
	    __SHIFTIN(seg->attrib.def, SVM_SEG_ATTRIB_DEF) |
	    __SHIFTIN(seg->attrib.g, SVM_SEG_ATTRIB_G);
	vseg->limit = seg->limit;
	vseg->base = seg->base;
}

static void
svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
{
	seg->selector = vseg->selector;
	seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
	seg->attrib.s = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_S);
	seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
	seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
	seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
	seg->attrib.l = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_L);
	seg->attrib.def = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF);
	seg->attrib.g = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_G);
	seg->limit = vseg->limit;
	seg->base = vseg->base;
}

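/*
 * Decide whether a state update from userland requires a guest TLB
 * flush: any change to the paging-related bits of CR0/CR4/EFER, or a
 * new CR3 value, invalidates cached translations for this ASID.
 */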
static inline bool
svm_state_tlb_flush(const struct vmcb *vmcb, const struct nvmm_x64_state *state,
    uint64_t flags)
{
	if (flags & NVMM_X64_STATE_CRS) {
		if ((vmcb->state.cr0 ^
		     state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
			return true;
		}
		if (vmcb->state.cr3 != state->crs[NVMM_X64_CR_CR3]) {
			return true;
		}
		if ((vmcb->state.cr4 ^
		     state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
			return true;
		}
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		if ((vmcb->state.efer ^
		     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
			return true;
		}
	}

	return false;
}

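/*
 * Copy guest state between the comm page and the VMCB. The comm page
 * carries a state_wanted bitmask of NVMM_X64_STATE_* flags: setstate
 * pushes the selected components into the VMCB (updating the VMCB
 * clean bits accordingly), getstate pulls them out.
 */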

static void
svm_vcpu_setstate(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	const struct nvmm_x64_state *state = &comm->state;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	struct fxsave *fpustate;
	uint64_t flags;

	flags = comm->state_wanted;

	if (svm_state_tlb_flush(vmcb, state, flags)) {
		cpudata->gtlb_want_flush = true;
	}

	if (flags & NVMM_X64_STATE_SEGS) {
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GDT],
		    &vmcb->state.gdt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_IDT],
		    &vmcb->state.idt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_LDT],
		    &vmcb->state.ldt);
		svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_TR],
		    &vmcb->state.tr);

		vmcb->state.cpl = state->segs[NVMM_X64_SEG_SS].attrib.dpl;
	}

	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
	if (flags & NVMM_X64_STATE_GPRS) {
		memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));

		vmcb->state.rip = state->gprs[NVMM_X64_GPR_RIP];
		vmcb->state.rsp = state->gprs[NVMM_X64_GPR_RSP];
		vmcb->state.rax = state->gprs[NVMM_X64_GPR_RAX];
		vmcb->state.rflags = state->gprs[NVMM_X64_GPR_RFLAGS];
	}

	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->state.cr0 = state->crs[NVMM_X64_CR_CR0];
		vmcb->state.cr2 = state->crs[NVMM_X64_CR_CR2];
		vmcb->state.cr3 = state->crs[NVMM_X64_CR_CR3];
		vmcb->state.cr4 = state->crs[NVMM_X64_CR_CR4];
		vmcb->state.cr4 &= CR4_VALID;

		vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
		vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
		    VMCB_CTRL_V_TPR);

		if (svm_xcr0_mask != 0) {
			/* Clear illegal XCR0 bits, set mandatory X87 bit. */
			cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
			cpudata->gxcr0 &= svm_xcr0_mask;
			cpudata->gxcr0 |= XCR0_X87;
		}
	}

	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
	if (flags & NVMM_X64_STATE_DRS) {
		memcpy(cpudata->drs, state->drs, sizeof(state->drs));

		vmcb->state.dr6 = state->drs[NVMM_X64_DR_DR6];
		vmcb->state.dr7 = state->drs[NVMM_X64_DR_DR7];
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		/*
		 * EFER_SVME is mandatory: VMRUN fails if it is not set
		 * in the guest's EFER.
		 */
		vmcb->state.efer = state->msrs[NVMM_X64_MSR_EFER] | EFER_SVME;
		vmcb->state.star = state->msrs[NVMM_X64_MSR_STAR];
		vmcb->state.lstar = state->msrs[NVMM_X64_MSR_LSTAR];
		vmcb->state.cstar = state->msrs[NVMM_X64_MSR_CSTAR];
		vmcb->state.sfmask = state->msrs[NVMM_X64_MSR_SFMASK];
		vmcb->state.kernelgsbase =
		    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
		vmcb->state.sysenter_cs =
		    state->msrs[NVMM_X64_MSR_SYSENTER_CS];
		vmcb->state.sysenter_esp =
		    state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
		vmcb->state.sysenter_eip =
		    state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
		vmcb->state.g_pat = state->msrs[NVMM_X64_MSR_PAT];

		cpudata->gtsc = state->msrs[NVMM_X64_MSR_TSC];
		cpudata->gtsc_want_update = true;
	}

	if (flags & NVMM_X64_STATE_INTR) {
		if (state->intr.int_shadow) {
			vmcb->ctrl.intr |= VMCB_CTRL_INTR_SHADOW;
		} else {
			vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
		}

		if (state->intr.int_window_exiting) {
			svm_event_waitexit_enable(vcpu, false);
		} else {
			svm_event_waitexit_disable(vcpu, false);
		}

		if (state->intr.nmi_window_exiting) {
			svm_event_waitexit_enable(vcpu, true);
		} else {
			svm_event_waitexit_disable(vcpu, true);
		}
	}

	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
	if (flags & NVMM_X64_STATE_FPU) {
		memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
		    sizeof(state->fpu));

		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;

		if (svm_xcr0_mask != 0) {
			/* Reset XSTATE_BV, to force a reload. */
			cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
		}
	}

	svm_vmcb_cache_update(vmcb, flags);

	comm->state_wanted = 0;
	comm->state_cached |= flags;
}
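
/*
 * The mirror of svm_vcpu_setstate: fetch the fields requested in
 * comm->state_wanted out of the VMCB (or cpudata) into the comm page,
 * where userland can read them.
 */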

static void
svm_vcpu_getstate(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct nvmm_x64_state *state = &comm->state;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t flags;

	flags = comm->state_wanted;

	if (flags & NVMM_X64_STATE_SEGS) {
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GDT],
		    &vmcb->state.gdt);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_IDT],
		    &vmcb->state.idt);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_LDT],
		    &vmcb->state.ldt);
		svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_TR],
		    &vmcb->state.tr);

		state->segs[NVMM_X64_SEG_SS].attrib.dpl = vmcb->state.cpl;
	}

	CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
	if (flags & NVMM_X64_STATE_GPRS) {
		memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));

		state->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
		state->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
		state->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
		state->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;
	}

	if (flags & NVMM_X64_STATE_CRS) {
		state->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
		state->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
		state->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
		state->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
		state->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
		    VMCB_CTRL_V_TPR);
		state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
	}

	CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
	if (flags & NVMM_X64_STATE_DRS) {
		memcpy(state->drs, cpudata->drs, sizeof(state->drs));

		state->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
		state->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		state->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
		state->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
		state->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
		state->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
		state->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
		state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
		    vmcb->state.kernelgsbase;
		state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
		    vmcb->state.sysenter_cs;
		state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
		    vmcb->state.sysenter_esp;
		state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
		    vmcb->state.sysenter_eip;
		state->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;
		state->msrs[NVMM_X64_MSR_TSC] = cpudata->gtsc;

		/* Hide SVME. */
		state->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
	}

	if (flags & NVMM_X64_STATE_INTR) {
		state->intr.int_shadow =
		    (vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0;
		state->intr.int_window_exiting = cpudata->int_window_exit;
		state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
		state->intr.evt_pending = cpudata->evt_pending;
	}

	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
	if (flags & NVMM_X64_STATE_FPU) {
		memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
		    sizeof(state->fpu));
	}

	comm->state_wanted = 0;
	comm->state_cached |= flags;
}
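
/*
 * Exit-path helpers: "provide" fetches the given fields into the comm
 * page, "commit" writes back the fields that were marked in
 * comm->state_commit.
 */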
2115 */ 2116 cpudata->shared_asid = true; 2117 vmcb->ctrl.guest_asid = svm_maxasid - 1; 2118 mutex_exit(&svm_asidlock); 2119} 2120 2121static void 2122svm_asid_free(struct nvmm_cpu *vcpu) 2123{ 2124 struct svm_cpudata *cpudata = vcpu->cpudata; 2125 struct vmcb *vmcb = cpudata->vmcb; 2126 size_t oct, bit; 2127 2128 if (cpudata->shared_asid) { 2129 return; 2130 } 2131 2132 oct = vmcb->ctrl.guest_asid / 8; 2133 bit = vmcb->ctrl.guest_asid % 8; 2134 2135 mutex_enter(&svm_asidlock); 2136 svm_asidmap[oct] &= ~__BIT(bit); 2137 mutex_exit(&svm_asidlock); 2138} 2139 2140static void 2141svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu) 2142{ 2143 struct svm_cpudata *cpudata = vcpu->cpudata; 2144 struct vmcb *vmcb = cpudata->vmcb; 2145 2146 /* Allow reads/writes of Control Registers. */ 2147 vmcb->ctrl.intercept_cr = 0; 2148 2149 /* Allow reads/writes of Debug Registers. */ 2150 vmcb->ctrl.intercept_dr = 0; 2151 2152 /* Allow exceptions 0 to 31. */ 2153 vmcb->ctrl.intercept_vec = 0; 2154 2155 /* 2156 * Allow: 2157 * - SMI [smm interrupts] 2158 * - VINTR [virtual interrupts] 2159 * - CR0_SPEC [CR0 writes changing other fields than CR0.TS or CR0.MP] 2160 * - RIDTR [reads of IDTR] 2161 * - RGDTR [reads of GDTR] 2162 * - RLDTR [reads of LDTR] 2163 * - RTR [reads of TR] 2164 * - WIDTR [writes of IDTR] 2165 * - WGDTR [writes of GDTR] 2166 * - WLDTR [writes of LDTR] 2167 * - WTR [writes of TR] 2168 * - RDTSC [rdtsc instruction] 2169 * - PUSHF [pushf instruction] 2170 * - POPF [popf instruction] 2171 * - IRET [iret instruction] 2172 * - INTN [int $n instructions] 2173 * - PAUSE [pause instruction] 2174 * - INVLPG [invplg instruction] 2175 * - TASKSW [task switches] 2176 * 2177 * Intercept the rest below. 2178 */ 2179 vmcb->ctrl.intercept_misc1 = 2180 VMCB_CTRL_INTERCEPT_INTR | 2181 VMCB_CTRL_INTERCEPT_NMI | 2182 VMCB_CTRL_INTERCEPT_INIT | 2183 VMCB_CTRL_INTERCEPT_RDPMC | 2184 VMCB_CTRL_INTERCEPT_CPUID | 2185 VMCB_CTRL_INTERCEPT_RSM | 2186 VMCB_CTRL_INTERCEPT_INVD | 2187 VMCB_CTRL_INTERCEPT_HLT | 2188 VMCB_CTRL_INTERCEPT_INVLPGA | 2189 VMCB_CTRL_INTERCEPT_IOIO_PROT | 2190 VMCB_CTRL_INTERCEPT_MSR_PROT | 2191 VMCB_CTRL_INTERCEPT_FERR_FREEZE | 2192 VMCB_CTRL_INTERCEPT_SHUTDOWN; 2193 2194 /* 2195 * Allow: 2196 * - ICEBP [icebp instruction] 2197 * - WBINVD [wbinvd instruction] 2198 * - WCR_SPEC(0..15) [writes of CR0-15, received after instruction] 2199 * 2200 * Intercept the rest below. 2201 */ 2202 vmcb->ctrl.intercept_misc2 = 2203 VMCB_CTRL_INTERCEPT_VMRUN | 2204 VMCB_CTRL_INTERCEPT_VMMCALL | 2205 VMCB_CTRL_INTERCEPT_VMLOAD | 2206 VMCB_CTRL_INTERCEPT_VMSAVE | 2207 VMCB_CTRL_INTERCEPT_STGI | 2208 VMCB_CTRL_INTERCEPT_CLGI | 2209 VMCB_CTRL_INTERCEPT_SKINIT | 2210 VMCB_CTRL_INTERCEPT_RDTSCP | 2211 VMCB_CTRL_INTERCEPT_MONITOR | 2212 VMCB_CTRL_INTERCEPT_MWAIT | 2213 VMCB_CTRL_INTERCEPT_XSETBV | 2214 VMCB_CTRL_INTERCEPT_RDPRU; 2215 2216 /* 2217 * Intercept everything. 2218 */ 2219 vmcb->ctrl.intercept_misc3 = 2220 VMCB_CTRL_INTERCEPT_INVLPGB_ALL | 2221 VMCB_CTRL_INTERCEPT_PCID | 2222 VMCB_CTRL_INTERCEPT_MCOMMIT | 2223 VMCB_CTRL_INTERCEPT_TLBSYNC; 2224 2225 /* Intercept all I/O accesses. */ 2226 memset(cpudata->iobm, 0xFF, IOBM_SIZE); 2227 vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa; 2228 2229 /* Allow direct access to certain MSRs. 

static void
svm_asid_alloc(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	size_t i, oct, bit;

	mutex_enter(&svm_asidlock);

	for (i = 0; i < svm_maxasid; i++) {
		oct = i / 8;
		bit = i % 8;

		if (svm_asidmap[oct] & __BIT(bit)) {
			continue;
		}

		svm_asidmap[oct] |= __BIT(bit);
		vmcb->ctrl.guest_asid = i;
		mutex_exit(&svm_asidlock);
		return;
	}

	/*
	 * No free ASID. Use the last one, which is shared and requires
	 * special TLB handling.
	 */
	cpudata->shared_asid = true;
	vmcb->ctrl.guest_asid = svm_maxasid - 1;
	mutex_exit(&svm_asidlock);
}

static void
svm_asid_free(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	size_t oct, bit;

	if (cpudata->shared_asid) {
		return;
	}

	oct = vmcb->ctrl.guest_asid / 8;
	bit = vmcb->ctrl.guest_asid % 8;

	mutex_enter(&svm_asidlock);
	svm_asidmap[oct] &= ~__BIT(bit);
	mutex_exit(&svm_asidlock);
}
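
/*
 * Initial VMCB setup.  The intercept configuration below decides which
 * guest operations trap to the host; the I/O bitmap is left fully set
 * (all port accesses intercepted), while a handful of hot MSRs are
 * opened for direct guest access.
 */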

static void
svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	/* Allow reads/writes of Control Registers. */
	vmcb->ctrl.intercept_cr = 0;

	/* Allow reads/writes of Debug Registers. */
	vmcb->ctrl.intercept_dr = 0;

	/* Allow exceptions 0 to 31. */
	vmcb->ctrl.intercept_vec = 0;

	/*
	 * Allow:
	 *  - SMI [smm interrupts]
	 *  - VINTR [virtual interrupts]
	 *  - CR0_SPEC [CR0 writes changing other fields than CR0.TS or CR0.MP]
	 *  - RIDTR [reads of IDTR]
	 *  - RGDTR [reads of GDTR]
	 *  - RLDTR [reads of LDTR]
	 *  - RTR [reads of TR]
	 *  - WIDTR [writes of IDTR]
	 *  - WGDTR [writes of GDTR]
	 *  - WLDTR [writes of LDTR]
	 *  - WTR [writes of TR]
	 *  - RDTSC [rdtsc instruction]
	 *  - PUSHF [pushf instruction]
	 *  - POPF [popf instruction]
	 *  - IRET [iret instruction]
	 *  - INTN [int $n instructions]
	 *  - PAUSE [pause instruction]
	 *  - INVLPG [invlpg instruction]
	 *  - TASKSW [task switches]
	 *
	 * Intercept the rest below.
	 */
	vmcb->ctrl.intercept_misc1 =
	    VMCB_CTRL_INTERCEPT_INTR |
	    VMCB_CTRL_INTERCEPT_NMI |
	    VMCB_CTRL_INTERCEPT_INIT |
	    VMCB_CTRL_INTERCEPT_RDPMC |
	    VMCB_CTRL_INTERCEPT_CPUID |
	    VMCB_CTRL_INTERCEPT_RSM |
	    VMCB_CTRL_INTERCEPT_INVD |
	    VMCB_CTRL_INTERCEPT_HLT |
	    VMCB_CTRL_INTERCEPT_INVLPGA |
	    VMCB_CTRL_INTERCEPT_IOIO_PROT |
	    VMCB_CTRL_INTERCEPT_MSR_PROT |
	    VMCB_CTRL_INTERCEPT_FERR_FREEZE |
	    VMCB_CTRL_INTERCEPT_SHUTDOWN;

	/*
	 * Allow:
	 *  - ICEBP [icebp instruction]
	 *  - WBINVD [wbinvd instruction]
	 *  - WCR_SPEC(0..15) [writes of CR0-15, received after instruction]
	 *
	 * Intercept the rest below.
	 */
	vmcb->ctrl.intercept_misc2 =
	    VMCB_CTRL_INTERCEPT_VMRUN |
	    VMCB_CTRL_INTERCEPT_VMMCALL |
	    VMCB_CTRL_INTERCEPT_VMLOAD |
	    VMCB_CTRL_INTERCEPT_VMSAVE |
	    VMCB_CTRL_INTERCEPT_STGI |
	    VMCB_CTRL_INTERCEPT_CLGI |
	    VMCB_CTRL_INTERCEPT_SKINIT |
	    VMCB_CTRL_INTERCEPT_RDTSCP |
	    VMCB_CTRL_INTERCEPT_MONITOR |
	    VMCB_CTRL_INTERCEPT_MWAIT |
	    VMCB_CTRL_INTERCEPT_XSETBV |
	    VMCB_CTRL_INTERCEPT_RDPRU;

	/*
	 * Intercept everything.
	 */
	vmcb->ctrl.intercept_misc3 =
	    VMCB_CTRL_INTERCEPT_INVLPGB_ALL |
	    VMCB_CTRL_INTERCEPT_PCID |
	    VMCB_CTRL_INTERCEPT_MCOMMIT |
	    VMCB_CTRL_INTERCEPT_TLBSYNC;

	/* Intercept all I/O accesses. */
	memset(cpudata->iobm, 0xFF, IOBM_SIZE);
	vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;

	/* Allow direct access to certain MSRs. */
	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CR_PAT, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
	vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;

	/* Generate ASID. */
	svm_asid_alloc(vcpu);

	/* Virtual TPR. */
	vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;

	/* Enable Nested Paging. */
	vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
	vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];

	/* Init XSAVE header. */
	cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
	cpudata->gfpu.xsh_xcomp_bv = 0;

	/* These MSRs are static. */
	cpudata->star = rdmsr(MSR_STAR);
	cpudata->lstar = rdmsr(MSR_LSTAR);
	cpudata->cstar = rdmsr(MSR_CSTAR);
	cpudata->sfmask = rdmsr(MSR_SFMASK);

	/* Install the RESET state. */
	memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
	    sizeof(nvmm_x86_reset_state));
	vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
	vcpu->comm->state_cached = 0;
	svm_vcpu_setstate(vcpu);
}
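
/*
 * Per-VCPU allocation: the cpudata, plus the VMCB, I/O bitmap and MSR
 * bitmap, which the hardware reads by physical address.  On failure,
 * whatever was already allocated is freed.
 */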

static int
svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata;
	int error;

	/* Allocate the SVM cpudata. */
	cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	vcpu->cpudata = cpudata;

	/* VMCB */
	error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
	    VMCB_NPAGES);
	if (error)
		goto error;

	/* I/O Bitmap */
	error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
	    IOBM_NPAGES);
	if (error)
		goto error;

	/* MSR Bitmap */
	error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
	    MSRBM_NPAGES);
	if (error)
		goto error;

	/* Init the VCPU info. */
	svm_vcpu_init(mach, vcpu);

	return 0;

error:
	if (cpudata->vmcb_pa) {
		svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
		    VMCB_NPAGES);
	}
	if (cpudata->iobm_pa) {
		svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
		    IOBM_NPAGES);
	}
	if (cpudata->msrbm_pa) {
		svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
		    MSRBM_NPAGES);
	}
	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
	return error;
}

static void
svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	svm_asid_free(vcpu);

	svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
	svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
	svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);

	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
}

/* -------------------------------------------------------------------------- */

static int
svm_vcpu_configure_cpuid(struct svm_cpudata *cpudata, void *data)
{
	struct nvmm_vcpu_conf_cpuid *cpuid = data;
	size_t i;

	if (__predict_false(cpuid->mask && cpuid->exit)) {
		return EINVAL;
	}
	if (__predict_false(cpuid->mask &&
	    ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
	     (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
	     (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
	     (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
		return EINVAL;
	}

	/* If unset, delete, to restore the default behavior. */
	if (!cpuid->mask && !cpuid->exit) {
		for (i = 0; i < SVM_NCPUIDS; i++) {
			if (!cpudata->cpuidpresent[i]) {
				continue;
			}
			if (cpudata->cpuid[i].leaf == cpuid->leaf) {
				cpudata->cpuidpresent[i] = false;
			}
		}
		return 0;
	}

	/* If already here, replace. */
	for (i = 0; i < SVM_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		if (cpudata->cpuid[i].leaf == cpuid->leaf) {
			memcpy(&cpudata->cpuid[i], cpuid,
			    sizeof(struct nvmm_vcpu_conf_cpuid));
			return 0;
		}
	}

	/* Not here, insert. */
	for (i = 0; i < SVM_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			cpudata->cpuidpresent[i] = true;
			memcpy(&cpudata->cpuid[i], cpuid,
			    sizeof(struct nvmm_vcpu_conf_cpuid));
			return 0;
		}
	}

	return ENOBUFS;
}

static int
svm_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	switch (op) {
	case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
		return svm_vcpu_configure_cpuid(cpudata, data);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

static void
svm_tlb_flush(struct pmap *pm)
{
	struct nvmm_machine *mach = pm->pm_data;
	struct svm_machdata *machdata = mach->machdata;

	atomic_inc_64(&machdata->mach_htlb_gen);

	/* Generates IPIs, which cause #VMEXITs. */
	pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
}

static void
svm_machine_create(struct nvmm_machine *mach)
{
	struct svm_machdata *machdata;

	/* Fill in pmap info. */
	mach->vm->vm_map.pmap->pm_data = (void *)mach;
	mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;

	machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
	mach->machdata = machdata;

	/* Start with an hTLB flush everywhere. */
	machdata->mach_htlb_gen = 1;
}

static void
svm_machine_destroy(struct nvmm_machine *mach)
{
	kmem_free(mach->machdata, sizeof(struct svm_machdata));
}

static int
svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
{
	panic("%s: impossible", __func__);
}

/* -------------------------------------------------------------------------- */
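
/*
 * Probe for usable SVM: an AMD CPU advertising SVM, CPUID leaf
 * 0x8000000A present at revision 1, Nested Paging and nRIP save
 * supported, and SVM not locked off by the BIOS.
 */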

static bool
svm_ident(void)
{
	u_int descs[4];
	uint64_t msr;

	if (cpu_vendor != CPUVENDOR_AMD) {
		return false;
	}
	if (!(cpu_feature[3] & CPUID_SVM)) {
		printf("NVMM: SVM not supported\n");
		return false;
	}

	if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
		printf("NVMM: CPUID leaf not available\n");
		return false;
	}
	x86_cpuid(0x8000000a, descs);

	/* Expect revision 1. */
	if (__SHIFTOUT(descs[0], CPUID_AMD_SVM_REV) != 1) {
		printf("NVMM: SVM revision not supported\n");
		return false;
	}

	/* Want Nested Paging. */
	if (!(descs[3] & CPUID_AMD_SVM_NP)) {
		printf("NVMM: SVM-NP not supported\n");
		return false;
	}

	/* Want nRIP. */
	if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) {
		printf("NVMM: SVM-NRIPS not supported\n");
		return false;
	}

	svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0;

	msr = rdmsr(MSR_VMCR);
	if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) {
		printf("NVMM: SVM disabled in BIOS\n");
		return false;
	}

	return true;
}

static void
svm_init_asid(uint32_t maxasid)
{
	size_t i, j, allocsz;

	mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE);

	/* Arbitrarily limit to 8192 ASIDs. */
	maxasid = uimin(maxasid, 8192);

	svm_maxasid = maxasid;
	allocsz = roundup(maxasid, 8) / 8;
	svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP);

	/* ASID 0 is reserved for the host. */
	svm_asidmap[0] |= __BIT(0);

	/* ASID n-1 is special, we share it. */
	i = (maxasid - 1) / 8;
	j = (maxasid - 1) % 8;
	svm_asidmap[i] |= __BIT(j);
}

static void
svm_change_cpu(void *arg1, void *arg2)
{
	bool enable = arg1 != NULL;
	uint64_t msr;

	msr = rdmsr(MSR_VMCR);
	if (msr & VMCR_SVMED) {
		wrmsr(MSR_VMCR, msr & ~VMCR_SVMED);
	}

	if (!enable) {
		wrmsr(MSR_VM_HSAVE_PA, 0);
	}

	msr = rdmsr(MSR_EFER);
	if (enable) {
		msr |= EFER_SVME;
	} else {
		msr &= ~EFER_SVME;
	}
	wrmsr(MSR_EFER, msr);

	if (enable) {
		wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
	}
}

static void
svm_init(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct vm_page *pg;
	u_int descs[4];
	uint64_t xc;

	x86_cpuid(0x8000000a, descs);

	/* The guest TLB flush command. */
	if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
	} else {
		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
	}

	/* Init the ASID. */
	svm_init_asid(descs[1]);

	/* Init the XCR0 mask. */
	svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;

	/* Init the max basic CPUID leaf. */
	svm_cpuid_max_basic = uimin(cpuid_level, SVM_CPUID_MAX_BASIC);

	/* Init the max extended CPUID leaf. */
	x86_cpuid(0x80000000, descs);
	svm_cpuid_max_extended = uimin(descs[0], SVM_CPUID_MAX_EXTENDED);

	memset(hsave, 0, sizeof(hsave));
	for (CPU_INFO_FOREACH(cii, ci)) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
	}

	xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
	xc_wait(xc);
}

static void
svm_fini_asid(void)
{
	size_t allocsz;

	allocsz = roundup(svm_maxasid, 8) / 8;
	kmem_free(svm_asidmap, allocsz);

	mutex_destroy(&svm_asidlock);
}

static void
svm_fini(void)
{
	uint64_t xc;
	size_t i;

	xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL);
	xc_wait(xc);

	for (i = 0; i < MAXCPUS; i++) {
		if (hsave[i].pa != 0)
			uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa));
	}

	svm_fini_asid();
}

static void
svm_capability(struct nvmm_capability *cap)
{
	cap->arch.mach_conf_support = 0;
	cap->arch.vcpu_conf_support =
	    NVMM_CAP_ARCH_VCPU_CONF_CPUID;
	cap->arch.xcr0_mask = svm_xcr0_mask;
	cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
	cap->arch.conf_cpuid_maxops = SVM_NCPUIDS;
}

const struct nvmm_impl nvmm_x86_svm = {
	.name = "x86-svm",
	.ident = svm_ident,
	.init = svm_init,
	.fini = svm_fini,
	.capability = svm_capability,
	.mach_conf_max = NVMM_X86_MACH_NCONF,
	.mach_conf_sizes = NULL,
	.vcpu_conf_max = NVMM_X86_VCPU_NCONF,
	.vcpu_conf_sizes = svm_vcpu_conf_sizes,
	.state_size = sizeof(struct nvmm_x64_state),
	.machine_create = svm_machine_create,
	.machine_destroy = svm_machine_destroy,
	.machine_configure = svm_machine_configure,
	.vcpu_create = svm_vcpu_create,
	.vcpu_destroy = svm_vcpu_destroy,
	.vcpu_configure = svm_vcpu_configure,
	.vcpu_setstate = svm_vcpu_setstate,
	.vcpu_getstate = svm_vcpu_getstate,
	.vcpu_inject = svm_vcpu_inject,
	.vcpu_run = svm_vcpu_run
};