/*	$NetBSD: nvmm_x86_svm.c,v 1.7 2018/12/13 16:28:10 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.7 2018/12/13 16:28:10 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

int svm_vmrun(paddr_t, uint64_t *);

#define MSR_VM_HSAVE_PA	0xC0010117

/* -------------------------------------------------------------------------- */

#define VMCB_EXITCODE_CR0_READ	0x0000
#define VMCB_EXITCODE_CR1_READ	0x0001
#define VMCB_EXITCODE_CR2_READ	0x0002
#define VMCB_EXITCODE_CR3_READ	0x0003
#define VMCB_EXITCODE_CR4_READ	0x0004
#define VMCB_EXITCODE_CR5_READ	0x0005
#define VMCB_EXITCODE_CR6_READ	0x0006
#define VMCB_EXITCODE_CR7_READ	0x0007
#define VMCB_EXITCODE_CR8_READ	0x0008
#define VMCB_EXITCODE_CR9_READ	0x0009
#define VMCB_EXITCODE_CR10_READ	0x000A
#define VMCB_EXITCODE_CR11_READ	0x000B
#define VMCB_EXITCODE_CR12_READ	0x000C
#define VMCB_EXITCODE_CR13_READ	0x000D
#define VMCB_EXITCODE_CR14_READ	0x000E
#define VMCB_EXITCODE_CR15_READ	0x000F
#define VMCB_EXITCODE_CR0_WRITE	0x0010
#define VMCB_EXITCODE_CR1_WRITE	0x0011
#define VMCB_EXITCODE_CR2_WRITE	0x0012
#define VMCB_EXITCODE_CR3_WRITE	0x0013
#define VMCB_EXITCODE_CR4_WRITE	0x0014
#define VMCB_EXITCODE_CR5_WRITE	0x0015
#define VMCB_EXITCODE_CR6_WRITE	0x0016
#define VMCB_EXITCODE_CR7_WRITE	0x0017
#define VMCB_EXITCODE_CR8_WRITE	0x0018
#define VMCB_EXITCODE_CR9_WRITE	0x0019
#define VMCB_EXITCODE_CR10_WRITE	0x001A
#define VMCB_EXITCODE_CR11_WRITE	0x001B
#define VMCB_EXITCODE_CR12_WRITE	0x001C
#define VMCB_EXITCODE_CR13_WRITE	0x001D
#define VMCB_EXITCODE_CR14_WRITE	0x001E
#define VMCB_EXITCODE_CR15_WRITE	0x001F
#define VMCB_EXITCODE_DR0_READ	0x0020
#define VMCB_EXITCODE_DR1_READ	0x0021
#define VMCB_EXITCODE_DR2_READ	0x0022
#define VMCB_EXITCODE_DR3_READ	0x0023
#define VMCB_EXITCODE_DR4_READ	0x0024
#define VMCB_EXITCODE_DR5_READ	0x0025
#define VMCB_EXITCODE_DR6_READ	0x0026
#define VMCB_EXITCODE_DR7_READ	0x0027
#define VMCB_EXITCODE_DR8_READ	0x0028
#define VMCB_EXITCODE_DR9_READ	0x0029
#define VMCB_EXITCODE_DR10_READ	0x002A
#define VMCB_EXITCODE_DR11_READ	0x002B
#define VMCB_EXITCODE_DR12_READ	0x002C
#define VMCB_EXITCODE_DR13_READ	0x002D
#define VMCB_EXITCODE_DR14_READ	0x002E
#define VMCB_EXITCODE_DR15_READ	0x002F
#define VMCB_EXITCODE_DR0_WRITE	0x0030
#define VMCB_EXITCODE_DR1_WRITE	0x0031
#define VMCB_EXITCODE_DR2_WRITE	0x0032
#define VMCB_EXITCODE_DR3_WRITE	0x0033
#define VMCB_EXITCODE_DR4_WRITE	0x0034
#define VMCB_EXITCODE_DR5_WRITE	0x0035
#define VMCB_EXITCODE_DR6_WRITE	0x0036
#define VMCB_EXITCODE_DR7_WRITE	0x0037
#define VMCB_EXITCODE_DR8_WRITE	0x0038
#define VMCB_EXITCODE_DR9_WRITE	0x0039
#define VMCB_EXITCODE_DR10_WRITE	0x003A
#define VMCB_EXITCODE_DR11_WRITE	0x003B
#define VMCB_EXITCODE_DR12_WRITE	0x003C
#define VMCB_EXITCODE_DR13_WRITE	0x003D
#define VMCB_EXITCODE_DR14_WRITE	0x003E
#define VMCB_EXITCODE_DR15_WRITE	0x003F
#define VMCB_EXITCODE_EXCP0	0x0040
#define VMCB_EXITCODE_EXCP1	0x0041
#define VMCB_EXITCODE_EXCP2	0x0042
#define VMCB_EXITCODE_EXCP3	0x0043
#define VMCB_EXITCODE_EXCP4	0x0044
#define VMCB_EXITCODE_EXCP5	0x0045
#define VMCB_EXITCODE_EXCP6	0x0046
#define VMCB_EXITCODE_EXCP7	0x0047
#define VMCB_EXITCODE_EXCP8	0x0048
#define VMCB_EXITCODE_EXCP9	0x0049
#define VMCB_EXITCODE_EXCP10	0x004A
#define VMCB_EXITCODE_EXCP11	0x004B
#define VMCB_EXITCODE_EXCP12	0x004C
#define VMCB_EXITCODE_EXCP13	0x004D
#define VMCB_EXITCODE_EXCP14	0x004E
#define VMCB_EXITCODE_EXCP15	0x004F
#define VMCB_EXITCODE_EXCP16	0x0050
#define VMCB_EXITCODE_EXCP17	0x0051
#define VMCB_EXITCODE_EXCP18	0x0052
#define VMCB_EXITCODE_EXCP19	0x0053
#define VMCB_EXITCODE_EXCP20	0x0054
#define VMCB_EXITCODE_EXCP21	0x0055
#define VMCB_EXITCODE_EXCP22	0x0056
#define VMCB_EXITCODE_EXCP23	0x0057
#define VMCB_EXITCODE_EXCP24	0x0058
#define VMCB_EXITCODE_EXCP25	0x0059
#define VMCB_EXITCODE_EXCP26	0x005A
#define VMCB_EXITCODE_EXCP27	0x005B
#define VMCB_EXITCODE_EXCP28	0x005C
#define VMCB_EXITCODE_EXCP29	0x005D
#define VMCB_EXITCODE_EXCP30	0x005E
#define VMCB_EXITCODE_EXCP31	0x005F
#define VMCB_EXITCODE_INTR	0x0060
#define VMCB_EXITCODE_NMI	0x0061
#define VMCB_EXITCODE_SMI	0x0062
#define VMCB_EXITCODE_INIT	0x0063
#define VMCB_EXITCODE_VINTR	0x0064
#define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
#define VMCB_EXITCODE_IDTR_READ	0x0066
#define VMCB_EXITCODE_GDTR_READ	0x0067
#define VMCB_EXITCODE_LDTR_READ	0x0068
#define VMCB_EXITCODE_TR_READ	0x0069
#define VMCB_EXITCODE_IDTR_WRITE	0x006A
#define VMCB_EXITCODE_GDTR_WRITE	0x006B
#define VMCB_EXITCODE_LDTR_WRITE	0x006C
#define VMCB_EXITCODE_TR_WRITE	0x006D
#define VMCB_EXITCODE_RDTSC	0x006E
#define VMCB_EXITCODE_RDPMC	0x006F
#define VMCB_EXITCODE_PUSHF	0x0070
#define VMCB_EXITCODE_POPF	0x0071
#define VMCB_EXITCODE_CPUID	0x0072
#define VMCB_EXITCODE_RSM	0x0073
#define VMCB_EXITCODE_IRET	0x0074
#define VMCB_EXITCODE_SWINT	0x0075
#define VMCB_EXITCODE_INVD	0x0076
#define VMCB_EXITCODE_PAUSE	0x0077
#define VMCB_EXITCODE_HLT	0x0078
#define VMCB_EXITCODE_INVLPG	0x0079
#define VMCB_EXITCODE_INVLPGA	0x007A
#define VMCB_EXITCODE_IOIO	0x007B
#define VMCB_EXITCODE_MSR	0x007C
#define VMCB_EXITCODE_TASK_SWITCH	0x007D
#define VMCB_EXITCODE_FERR_FREEZE	0x007E
#define VMCB_EXITCODE_SHUTDOWN	0x007F
#define VMCB_EXITCODE_VMRUN	0x0080
#define VMCB_EXITCODE_VMMCALL	0x0081
#define VMCB_EXITCODE_VMLOAD	0x0082
#define VMCB_EXITCODE_VMSAVE	0x0083
#define VMCB_EXITCODE_STGI	0x0084
#define VMCB_EXITCODE_CLGI	0x0085
#define VMCB_EXITCODE_SKINIT	0x0086
#define VMCB_EXITCODE_RDTSCP	0x0087
#define VMCB_EXITCODE_ICEBP	0x0088
#define VMCB_EXITCODE_WBINVD	0x0089
#define VMCB_EXITCODE_MONITOR	0x008A
#define VMCB_EXITCODE_MWAIT	0x008B
#define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
#define VMCB_EXITCODE_XSETBV	0x008D
#define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
#define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
#define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
#define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
#define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
#define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
#define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
#define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
#define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
#define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
#define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
#define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
#define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
#define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
#define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
#define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
#define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
#define VMCB_EXITCODE_NPF	0x0400
#define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
#define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
#define VMCB_EXITCODE_VMGEXIT	0x0403
#define VMCB_EXITCODE_INVALID	-1

/* -------------------------------------------------------------------------- */

struct vmcb_ctrl {
	uint32_t intercept_cr;
#define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)

	uint32_t intercept_dr;
#define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)

	uint32_t intercept_vec;
#define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)

	uint32_t intercept_misc1;
#define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
#define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
#define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
#define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
#define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
#define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
#define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
#define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
#define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
#define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
#define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
#define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
#define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
#define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
#define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
#define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
#define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
#define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
#define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
#define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
#define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
#define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
#define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
#define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
#define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
#define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)

	uint32_t intercept_misc2;
#define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
#define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
#define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
#define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
#define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
#define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
#define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
#define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(12)
#define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
#define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)

	uint8_t rsvd1[40];
	uint16_t pause_filt_thresh;
	uint16_t pause_filt_cnt;
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t guest_asid;

	uint32_t tlb_ctrl;
#define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07

	uint64_t v;
#define VMCB_CTRL_V_TPR			__BITS(7,0)
#define VMCB_CTRL_V_IRQ			__BIT(8)
#define VMCB_CTRL_V_VGIF		__BIT(9)
#define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
#define VMCB_CTRL_V_IGN_TPR		__BIT(20)
#define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
#define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
#define VMCB_CTRL_V_AVIC_EN		__BIT(31)
#define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)

	uint64_t intr;
#define VMCB_CTRL_INTR_SHADOW		__BIT(0)
#define VMCB_CTRL_GUEST_INTR_MASK	__BIT(1)

	uint64_t exitcode;
	uint64_t exitinfo1;
	uint64_t exitinfo2;

	uint64_t exitintinfo;
#define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
#define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
#define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
#define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)

	uint64_t enable1;
#define VMCB_CTRL_ENABLE_NP	__BIT(0)
#define VMCB_CTRL_ENABLE_SEV	__BIT(1)
#define VMCB_CTRL_ENABLE_ES_SEV	__BIT(2)

	uint64_t avic;
#define VMCB_CTRL_AVIC_APIC_BAR	__BITS(51,0)

	uint64_t ghcb;

	uint64_t eventinj;
#define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
#define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
#define VMCB_CTRL_EVENTINJ_V		__BIT(31)
#define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)

	uint64_t n_cr3;

	uint64_t enable2;
#define VMCB_CTRL_ENABLE_LBR		__BIT(0)
#define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)

	uint32_t vmcb_clean;
#define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
#define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
#define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
#define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
#define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
#define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
#define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
#define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
#define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
#define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
#define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
#define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)

	uint32_t rsvd2;
	uint64_t nrip;
	uint8_t inst_len;
	uint8_t inst_bytes[15];
	uint8_t pad[800];
} __packed;

CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_segment {
	uint16_t selector;
	uint16_t attrib;	/* hidden */
	uint32_t limit;		/* hidden */
	uint64_t base;		/* hidden */
} __packed;

CTASSERT(sizeof(struct vmcb_segment) == 16);

struct vmcb_state {
	struct vmcb_segment es;
	struct vmcb_segment cs;
	struct vmcb_segment ss;
	struct vmcb_segment ds;
	struct vmcb_segment fs;
	struct vmcb_segment gs;
	struct vmcb_segment gdt;
	struct vmcb_segment ldt;
	struct vmcb_segment idt;
	struct vmcb_segment tr;
	uint8_t rsvd1[43];
	uint8_t cpl;
	uint8_t rsvd2[4];
	uint64_t efer;
	uint8_t rsvd3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t rsvd4[88];
	uint64_t rsp;
	uint8_t rsvd5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t rsvd6[32];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t pad[2408];
} __packed;

CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __packed;

CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);

/* -------------------------------------------------------------------------- */

struct svm_hsave {
	paddr_t pa;
};

static struct svm_hsave hsave[MAXCPUS];

static uint8_t *svm_asidmap __read_mostly;
static uint32_t svm_maxasid __read_mostly;
static kmutex_t svm_asidlock __cacheline_aligned;

static bool svm_decode_assist __read_mostly;
static uint32_t svm_ctrl_tlb_flush __read_mostly;

#define SVM_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t svm_xcr0_mask __read_mostly;

#define SVM_NCPUIDS	32

#define VMCB_NPAGES	1
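/*
 * Sizes below follow AMD's SVM specification: the MSR permission map is
 * 8KB (two bits per MSR, three 2KB ranges), and the I/O permission map
 * is 12KB (one bit per port for 64K ports, plus a trailing bit for
 * instruction wrap-around).
 */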
#define MSRBM_NPAGES	2
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define IOBM_NPAGES	3
#define IOBM_SIZE	(IOBM_NPAGES * PAGE_SIZE)

/* Does not include EFER_LMSLE. */
#define EFER_VALID \
	(EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PGE|CR4_PAE|CR4_PSE)

/* -------------------------------------------------------------------------- */

struct svm_machdata {
	bool cpuidpresent[SVM_NCPUIDS];
	struct nvmm_x86_conf_cpuid cpuid[SVM_NCPUIDS];
};

static const size_t svm_conf_sizes[NVMM_X86_NCONF] = {
	[NVMM_X86_CONF_CPUID] = sizeof(struct nvmm_x86_conf_cpuid)
};

struct svm_cpudata {
	/* x64-specific */
	struct nvmm_x64_state state;

	/* General */
	bool shared_asid;
	bool tlb_want_flush;

	/* VMCB */
	struct vmcb *vmcb;
	paddr_t vmcb_pa;

	/* I/O bitmap */
	uint8_t *iobm;
	paddr_t iobm_pa;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t xcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t cr2;
	bool ts_set;
	struct xsave_header hfpu __aligned(16);

	/* Guest state */
	bool in_nmi;
	uint64_t tsc_offset;
	struct xsave_header gfpu __aligned(16);
};

#define SVM_EVENT_TYPE_HW_INT	0
#define SVM_EVENT_TYPE_NMI	2
#define SVM_EVENT_TYPE_EXC	3
#define SVM_EVENT_TYPE_SW_INT	4

static void
svm_event_waitexit_enable(struct vmcb *vmcb, bool nmi)
{
	if (nmi) {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
	} else {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ |
		    __SHIFTIN(0, VMCB_CTRL_V_INTR_VECTOR));
	}
}

static void
svm_event_waitexit_disable(struct vmcb *vmcb, bool nmi)
{
	if (nmi) {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
	} else {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ |
		    __SHIFTIN(0, VMCB_CTRL_V_INTR_VECTOR));
	}
}

static inline int
svm_event_has_error(uint64_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}
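/*
 * Inject an event (hardware interrupt, software interrupt or exception)
 * into the guest, via the VMCB EVENTINJ field. If the guest cannot take
 * the event right now (an NMI while already handling one, or an IRQ
 * while interrupts are masked or the priority does not exceed the TPR),
 * enable the matching window intercept (IRET or VINTR) and return
 * EAGAIN, so the caller can retry once the guest becomes interruptible.
 */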
static int
svm_vcpu_inject(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_event *event)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t rflags = vmcb->state.rflags;
	int type = 0, err = 0;
	uint64_t tpr;

	if (event->vector >= 256) {
		return EINVAL;
	}

	switch (event->type) {
	case NVMM_EVENT_INTERRUPT_HW:
		type = SVM_EVENT_TYPE_HW_INT;
		if (event->vector == 2) {
			type = SVM_EVENT_TYPE_NMI;
		}
		if (type == SVM_EVENT_TYPE_NMI) {
			if (cpudata->in_nmi) {
				svm_event_waitexit_enable(vmcb, true);
				return EAGAIN;
			}
			cpudata->in_nmi = true;
		} else {
			tpr = __SHIFTOUT(vmcb->ctrl.v, VMCB_CTRL_V_TPR);
			if ((rflags & PSL_I) == 0 || event->u.prio <= tpr) {
				svm_event_waitexit_enable(vmcb, false);
				return EAGAIN;
			}
		}
		err = 0;
		break;
	case NVMM_EVENT_INTERRUPT_SW:
		type = SVM_EVENT_TYPE_SW_INT;
		err = 0;
		break;
	case NVMM_EVENT_EXCEPTION:
		type = SVM_EVENT_TYPE_EXC;
		if (event->vector == 2 || event->vector >= 32)
			return EINVAL;
		err = svm_event_has_error(event->vector);
		break;
	default:
		return EINVAL;
	}

	vmcb->ctrl.eventinj =
	    __SHIFTIN(event->vector, VMCB_CTRL_EVENTINJ_VECTOR) |
	    __SHIFTIN(type, VMCB_CTRL_EVENTINJ_TYPE) |
	    __SHIFTIN(err, VMCB_CTRL_EVENTINJ_EV) |
	    __SHIFTIN(1, VMCB_CTRL_EVENTINJ_V) |
	    __SHIFTIN(event->u.error, VMCB_CTRL_EVENTINJ_ERRORCODE);

	return 0;
}

static void
svm_inject_ud(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 6;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static void
svm_inject_db(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 1;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static void
svm_inject_gp(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 13;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}
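/*
 * Fix up the CPUID leaves the kernel must keep control of: leaf 0x01
 * (the local APIC ID must match the VCPU number) and leaf 0x0D (the
 * XSAVE description must reflect svm_xcr0_mask, not the host features).
 */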
static void
svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *state = &cpudata->state;

	switch (eax) {
	case 0x00000001: /* APIC number in RBX. The rest is tunable. */
		state->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		state->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);
		break;
	case 0x0000000D: /* FPU description. Not tunable. */
		if (ecx != 0 || svm_xcr0_mask == 0) {
			break;
		}
		cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF;
		if (state->crs[NVMM_X64_CR_XCR0] & XCR0_SSE) {
			state->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
		} else {
			state->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
		}
		state->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
		state->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave);
		state->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
		break;
	default:
		break;
	}
}
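/*
 * Handle a CPUID intercept: run the host CPUID instruction, apply the
 * set/del masks installed via NVMM_X86_CONF_CPUID for this leaf, then
 * overwrite the leaves that are not tunable by userland.
 */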
static void
svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *state = &cpudata->state;
	struct nvmm_x86_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	u_int descs[4];
	size_t i;

	eax = cpudata->vmcb->state.rax;
	ecx = state->gprs[NVMM_X64_GPR_RCX];
	x86_cpuid2(eax, ecx, descs);

	cpudata->vmcb->state.rax = descs[0];
	state->gprs[NVMM_X64_GPR_RBX] = descs[1];
	state->gprs[NVMM_X64_GPR_RCX] = descs[2];
	state->gprs[NVMM_X64_GPR_RDX] = descs[3];

	for (i = 0; i < SVM_NCPUIDS; i++) {
		cpuid = &machdata->cpuid[i];
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (cpuid->leaf != eax) {
			continue;
		}

		/* del */
		cpudata->vmcb->state.rax &= ~cpuid->del.eax;
		state->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
		state->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
		state->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;

		/* set */
		cpudata->vmcb->state.rax |= cpuid->set.eax;
		state->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
		state->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
		state->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;

		break;
	}

	/* Overwrite non-tunable leaves. */
	svm_inkernel_handle_cpuid(vcpu, eax, ecx);

	/* For now we omit DBREGS. */
	if (__predict_false(cpudata->vmcb->state.rflags & PSL_T)) {
		svm_inject_db(mach, vcpu);
	}

	cpudata->vmcb->state.rip = cpudata->vmcb->ctrl.nrip;
	exit->reason = NVMM_EXIT_NONE;
}

#define SVM_EXIT_IO_PORT	__BITS(31,16)
#define SVM_EXIT_IO_SEG		__BITS(12,10)
#define SVM_EXIT_IO_A64		__BIT(9)
#define SVM_EXIT_IO_A32		__BIT(8)
#define SVM_EXIT_IO_A16		__BIT(7)
#define SVM_EXIT_IO_SZ32	__BIT(6)
#define SVM_EXIT_IO_SZ16	__BIT(5)
#define SVM_EXIT_IO_SZ8		__BIT(4)
#define SVM_EXIT_IO_REP		__BIT(3)
#define SVM_EXIT_IO_STR		__BIT(2)
#define SVM_EXIT_IO_IN		__BIT(0)

static const int seg_to_nvmm[] = {
	[0] = NVMM_X64_SEG_ES,
	[1] = NVMM_X64_SEG_CS,
	[2] = NVMM_X64_SEG_SS,
	[3] = NVMM_X64_SEG_DS,
	[4] = NVMM_X64_SEG_FS,
	[5] = NVMM_X64_SEG_GS
};

static void
svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_EXIT_IO;

	if (info & SVM_EXIT_IO_IN) {
		exit->u.io.type = NVMM_EXIT_IO_IN;
	} else {
		exit->u.io.type = NVMM_EXIT_IO_OUT;
	}

	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);

	if (svm_decode_assist) {
		KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6);
		exit->u.io.seg = seg_to_nvmm[__SHIFTOUT(info, SVM_EXIT_IO_SEG)];
	} else {
		if (exit->u.io.type == NVMM_EXIT_IO_IN) {
			exit->u.io.seg = NVMM_X64_SEG_ES;
		} else {
			exit->u.io.seg = NVMM_X64_SEG_DS;
		}
	}

	if (info & SVM_EXIT_IO_A64) {
		exit->u.io.address_size = 8;
	} else if (info & SVM_EXIT_IO_A32) {
		exit->u.io.address_size = 4;
	} else if (info & SVM_EXIT_IO_A16) {
		exit->u.io.address_size = 2;
	}

	if (info & SVM_EXIT_IO_SZ32) {
		exit->u.io.operand_size = 4;
	} else if (info & SVM_EXIT_IO_SZ16) {
		exit->u.io.operand_size = 2;
	} else if (info & SVM_EXIT_IO_SZ8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0;
	exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0;
	exit->u.io.npc = nextpc;
}

static bool
svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *state = &cpudata->state;
	uint64_t pat;

	switch (exit->u.msr.type) {
	case NVMM_EXIT_MSR_RDMSR:
		if (exit->u.msr.msr == MSR_CR_PAT) {
			pat = cpudata->vmcb->state.g_pat;
			cpudata->vmcb->state.rax = (pat & 0xFFFFFFFF);
			state->gprs[NVMM_X64_GPR_RDX] = (pat >> 32);
			goto handled;
		}
		break;
	case NVMM_EXIT_MSR_WRMSR:
		if (exit->u.msr.msr == MSR_EFER) {
			if (__predict_false(exit->u.msr.val & ~EFER_VALID)) {
				svm_inject_gp(mach, vcpu);
				goto handled;
			}
			if ((cpudata->vmcb->state.efer ^ exit->u.msr.val) &
			    EFER_TLB_FLUSH) {
				cpudata->tlb_want_flush = true;
			}
			cpudata->vmcb->state.efer = exit->u.msr.val | EFER_SVME;
			goto handled;
		}
		if (exit->u.msr.msr == MSR_CR_PAT) {
			cpudata->vmcb->state.g_pat = exit->u.msr.val;
			goto handled;
		}
		break;
	}

	return false;

handled:
	cpudata->vmcb->state.rip = cpudata->vmcb->ctrl.nrip;
	return true;
}
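/*
 * Decode an MSR intercept. EXITINFO1 is zero for RDMSR, one for WRMSR;
 * the MSR number is in RCX, and for WRMSR the value is in EDX:EAX. PAT
 * accesses and EFER writes are handled in the kernel, everything else
 * is forwarded to userland.
 */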
static void
svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *state = &cpudata->state;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;

	if (info == 0) {
		exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
	} else {
		exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
	}

	exit->u.msr.msr = state->gprs[NVMM_X64_GPR_RCX];

	if (info == 1) {
		uint64_t rdx, rax;
		rdx = state->gprs[NVMM_X64_GPR_RDX];
		rax = cpudata->vmcb->state.rax;
		exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
	} else {
		exit->u.msr.val = 0;
	}

	if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
		exit->reason = NVMM_EXIT_NONE;
		return;
	}

	exit->reason = NVMM_EXIT_MSR;
	exit->u.msr.npc = cpudata->vmcb->ctrl.nrip;
}

static void
svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;
	int error;

	error = uvm_fault(&mach->vm->vm_map, gpa, VM_PROT_ALL);

	if (error) {
		exit->reason = NVMM_EXIT_MEMORY;
		if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
			exit->u.mem.perm = NVMM_EXIT_MEMORY_WRITE;
		else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
			exit->u.mem.perm = NVMM_EXIT_MEMORY_EXEC;
		else
			exit->u.mem.perm = NVMM_EXIT_MEMORY_READ;
		exit->u.mem.gpa = gpa;
		exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
		memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,
		    sizeof(exit->u.mem.inst_bytes));
		exit->u.mem.npc = cpudata->vmcb->ctrl.nrip;
	} else {
		exit->reason = NVMM_EXIT_NONE;
	}
}

static void
svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *state = &cpudata->state;
	struct vmcb *vmcb = cpudata->vmcb;
	uint64_t val;

	exit->reason = NVMM_EXIT_NONE;

	val = (state->gprs[NVMM_X64_GPR_RDX] << 32) |
	    (vmcb->state.rax & 0xFFFFFFFF);

	if (__predict_false(state->gprs[NVMM_X64_GPR_RCX] != 0)) {
		goto error;
	} else if (__predict_false(vmcb->state.cpl != 0)) {
		goto error;
	} else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
		goto error;
	} else if (__predict_false((val & XCR0_X87) == 0)) {
		goto error;
	}

	state->crs[NVMM_X64_CR_XCR0] = val;

	cpudata->vmcb->state.rip = cpudata->vmcb->ctrl.nrip;
	return;

error:
	svm_inject_gp(mach, vcpu);
}

static void
svm_vmcb_cache_default(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean =
	    VMCB_CTRL_VMCB_CLEAN_I |
	    VMCB_CTRL_VMCB_CLEAN_IOPM |
	    VMCB_CTRL_VMCB_CLEAN_ASID |
	    VMCB_CTRL_VMCB_CLEAN_LBR |
	    VMCB_CTRL_VMCB_CLEAN_AVIC;
}

static void
svm_vmcb_cache_flush(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean = 0;
}

static void
svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	if (x86_xsave_features != 0) {
		cpudata->xcr0 = rdxcr(0);
		wrxcr(0, cpudata->state.crs[NVMM_X64_CR_XCR0]);
	}

	cpudata->ts_set = (rcr0() & CR0_TS) != 0;

	fpu_area_save(&cpudata->hfpu);
	fpu_area_restore(&cpudata->gfpu);
}
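/*
 * Switch the FPU state back from guest to host: save the guest area,
 * restore the host area, re-raise CR0.TS if the host had it set, and
 * capture the guest XCR0 before restoring the host XCR0.
 */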
static void
svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	fpu_area_save(&cpudata->gfpu);
	fpu_area_restore(&cpudata->hfpu);

	if (cpudata->ts_set) {
		stts();
	}

	if (x86_xsave_features != 0) {
		cpudata->state.crs[NVMM_X64_CR_XCR0] = rdxcr(0);
		wrxcr(0, cpudata->xcr0);
	}
}

static void
svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *state = &cpudata->state;

	x86_dbregs_save(curlwp);

	ldr0(state->drs[NVMM_X64_DR_DR0]);
	ldr1(state->drs[NVMM_X64_DR_DR1]);
	ldr2(state->drs[NVMM_X64_DR_DR2]);
	ldr3(state->drs[NVMM_X64_DR_DR3]);
}

static void
svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *state = &cpudata->state;

	state->drs[NVMM_X64_DR_DR0] = rdr0();
	state->drs[NVMM_X64_DR_DR1] = rdr1();
	state->drs[NVMM_X64_DR_DR2] = rdr2();
	state->drs[NVMM_X64_DR_DR3] = rdr3();

	x86_dbregs_restore(curlwp);
}

static void
svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	/* Save the fixed Host MSRs. */
	cpudata->star = rdmsr(MSR_STAR);
	cpudata->lstar = rdmsr(MSR_LSTAR);
	cpudata->cstar = rdmsr(MSR_CSTAR);
	cpudata->sfmask = rdmsr(MSR_SFMASK);

	/* Save the Host CR2. */
	cpudata->cr2 = rcr2();
}

static void
svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	/* Restore the fixed Host MSRs. */
	wrmsr(MSR_STAR, cpudata->star);
	wrmsr(MSR_LSTAR, cpudata->lstar);
	wrmsr(MSR_CSTAR, cpudata->cstar);
	wrmsr(MSR_SFMASK, cpudata->sfmask);

	/* Restore the Host CR2. */
	lcr2(cpudata->cr2);
}

static int
svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	bool tlb_need_flush = false;
	int hcpu, s;

	kpreempt_disable();
	hcpu = cpu_number();

	if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
		tlb_need_flush = true;
	}

	if (cpudata->tlb_want_flush || tlb_need_flush) {
		vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
	} else {
		vmcb->ctrl.tlb_ctrl = 0;
	}

	if (vcpu->hcpu_last != hcpu) {
		vmcb->ctrl.tsc_offset = cpudata->tsc_offset +
		    curcpu()->ci_data.cpu_cc_skew;
		svm_vmcb_cache_flush(vmcb);
	}

	svm_vcpu_guest_dbregs_enter(vcpu);
	svm_vcpu_guest_misc_enter(vcpu);

	while (1) {
		s = splhigh();
		svm_vcpu_guest_fpu_enter(vcpu);
		svm_vmrun(cpudata->vmcb_pa, cpudata->state.gprs);
		svm_vcpu_guest_fpu_leave(vcpu);
		splx(s);

		svm_vmcb_cache_default(vmcb);

		if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
			if (cpudata->tlb_want_flush) {
				cpudata->tlb_want_flush = false;
			}
			vcpu->hcpu_last = hcpu;
		}

		switch (vmcb->ctrl.exitcode) {
		case VMCB_EXITCODE_INTR:
		case VMCB_EXITCODE_NMI:
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCB_EXITCODE_VINTR:
			svm_event_waitexit_disable(vmcb, false);
			exit->reason = NVMM_EXIT_INT_READY;
			break;
		case VMCB_EXITCODE_IRET:
			svm_event_waitexit_disable(vmcb, true);
			cpudata->in_nmi = false;
			exit->reason = NVMM_EXIT_NMI_READY;
			break;
		case VMCB_EXITCODE_CPUID:
			svm_exit_cpuid(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_HLT:
			exit->reason = NVMM_EXIT_HLT;
			break;
		case VMCB_EXITCODE_IOIO:
			svm_exit_io(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_MSR:
			svm_exit_msr(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_SHUTDOWN:
			exit->reason = NVMM_EXIT_SHUTDOWN;
			break;
		case VMCB_EXITCODE_RDPMC:
		case VMCB_EXITCODE_RSM:
		case VMCB_EXITCODE_INVLPGA:
		case VMCB_EXITCODE_VMRUN:
		case VMCB_EXITCODE_VMMCALL:
		case VMCB_EXITCODE_VMLOAD:
		case VMCB_EXITCODE_VMSAVE:
		case VMCB_EXITCODE_STGI:
		case VMCB_EXITCODE_CLGI:
		case VMCB_EXITCODE_SKINIT:
		case VMCB_EXITCODE_RDTSCP:
			svm_inject_ud(mach, vcpu);
			exit->reason = NVMM_EXIT_NONE;
			break;
		case VMCB_EXITCODE_MONITOR:
			exit->reason = NVMM_EXIT_MONITOR;
			break;
		case VMCB_EXITCODE_MWAIT:
			exit->reason = NVMM_EXIT_MWAIT;
			break;
		case VMCB_EXITCODE_MWAIT_CONDITIONAL:
			exit->reason = NVMM_EXIT_MWAIT_COND;
			break;
		case VMCB_EXITCODE_XSETBV:
			svm_exit_xsetbv(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_NPF:
			svm_exit_npf(mach, vcpu, exit);
			break;
		case VMCB_EXITCODE_FERR_FREEZE: /* ? */
		default:
			exit->reason = NVMM_EXIT_INVALID;
			break;
		}

		/* If no reason to return to userland, keep rolling. */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
			break;
		}
		if (exit->reason != NVMM_EXIT_NONE) {
			break;
		}
	}

	svm_vcpu_guest_misc_leave(vcpu);
	svm_vcpu_guest_dbregs_leave(vcpu);

	kpreempt_enable();

	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = __SHIFTOUT(vmcb->ctrl.v,
	    VMCB_CTRL_V_TPR);
	exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] = vmcb->state.rflags;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
{
	struct pglist pglist;
	paddr_t _pa;
	vaddr_t _va;
	size_t i;
	int ret;

	ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return ENOMEM;
	_pa = TAILQ_FIRST(&pglist)->phys_addr;
	_va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (_va == 0)
		goto error;

	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
	}
	pmap_update(pmap_kernel());

	memset((void *)_va, 0, npages * PAGE_SIZE);

	*pa = _pa;
	*va = _va;
	return 0;

error:
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
	}
	return ENOMEM;
}

static void
svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
{
	size_t i;

	pmap_kremove(va, npages * PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
	for (i = 0; i < npages; i++) {
		uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
	}
}

/* -------------------------------------------------------------------------- */
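/*
 * The MSR permission map holds two bits per MSR: bit 0 intercepts reads
 * and bit 1 intercepts writes. It covers three 8K-MSR ranges, at byte
 * offsets 0x0000, 0x0800 and 0x1000. Since the map is initialized to
 * all-ones, allowing an access means clearing the relevant bit.
 */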
#define SVM_MSRBM_READ	__BIT(0)
#define SVM_MSRBM_WRITE	__BIT(1)

static void
svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
{
	uint64_t byte;
	uint8_t bitoff;

	if (msr < 0x00002000) {
		/* Range 1 */
		byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
	} else if (msr >= 0xC0000000 && msr < 0xC0002000) {
		/* Range 2 */
		byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
	} else if (msr >= 0xC0010000 && msr < 0xC0012000) {
		/* Range 3 */
		byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
	} else {
		panic("%s: wrong range", __func__);
	}

	bitoff = (msr & 0x3) << 1;

	if (read) {
		bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
	}
	if (write) {
		bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
	}
}

static void
svm_asid_alloc(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	size_t i, oct, bit;

	mutex_enter(&svm_asidlock);

	for (i = 0; i < svm_maxasid; i++) {
		oct = i / 8;
		bit = i % 8;

		if (svm_asidmap[oct] & __BIT(bit)) {
			continue;
		}

		svm_asidmap[oct] |= __BIT(bit);
		vmcb->ctrl.guest_asid = i;
		mutex_exit(&svm_asidlock);
		return;
	}

	/*
	 * No free ASID. Use the last one, which is shared and requires
	 * special TLB handling.
	 */
	cpudata->shared_asid = true;
	vmcb->ctrl.guest_asid = svm_maxasid - 1;
	mutex_exit(&svm_asidlock);
}

static void
svm_asid_free(struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	size_t oct, bit;

	if (cpudata->shared_asid) {
		return;
	}

	oct = vmcb->ctrl.guest_asid / 8;
	bit = vmcb->ctrl.guest_asid % 8;

	mutex_enter(&svm_asidlock);
	svm_asidmap[oct] &= ~__BIT(bit);
	mutex_exit(&svm_asidlock);
}

static void
svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	/* Allow reads/writes of Control Registers. */
	vmcb->ctrl.intercept_cr = 0;

	/* Allow reads/writes of Debug Registers. */
	vmcb->ctrl.intercept_dr = 0;

	/* Allow exceptions 0 to 31. */
	vmcb->ctrl.intercept_vec = 0;

	/*
	 * Allow:
	 *  - SMI [smm interrupts]
	 *  - VINTR [virtual interrupts]
	 *  - CR0_SPEC [CR0 writes changing other fields than CR0.TS or CR0.MP]
	 *  - RIDTR [reads of IDTR]
	 *  - RGDTR [reads of GDTR]
	 *  - RLDTR [reads of LDTR]
	 *  - RTR [reads of TR]
	 *  - WIDTR [writes of IDTR]
	 *  - WGDTR [writes of GDTR]
	 *  - WLDTR [writes of LDTR]
	 *  - WTR [writes of TR]
	 *  - RDTSC [rdtsc instruction]
	 *  - PUSHF [pushf instruction]
	 *  - POPF [popf instruction]
	 *  - IRET [iret instruction]
	 *  - INTN [int $n instructions]
	 *  - INVD [invd instruction]
	 *  - PAUSE [pause instruction]
	 *  - INVLPG [invlpg instruction]
	 *  - TASKSW [task switches]
	 *
	 * Intercept the rest below.
	 */
	vmcb->ctrl.intercept_misc1 =
	    VMCB_CTRL_INTERCEPT_INTR |
	    VMCB_CTRL_INTERCEPT_NMI |
	    VMCB_CTRL_INTERCEPT_INIT |
	    VMCB_CTRL_INTERCEPT_RDPMC |
	    VMCB_CTRL_INTERCEPT_CPUID |
	    VMCB_CTRL_INTERCEPT_RSM |
	    VMCB_CTRL_INTERCEPT_HLT |
	    VMCB_CTRL_INTERCEPT_INVLPGA |
	    VMCB_CTRL_INTERCEPT_IOIO_PROT |
	    VMCB_CTRL_INTERCEPT_MSR_PROT |
	    VMCB_CTRL_INTERCEPT_FERR_FREEZE |
	    VMCB_CTRL_INTERCEPT_SHUTDOWN;

	/*
	 * Allow:
	 *  - ICEBP [icebp instruction]
	 *  - WBINVD [wbinvd instruction]
	 *  - WCR_SPEC(0..15) [writes of CR0-15, received after instruction]
	 *
	 * Intercept the rest below.
	 */
	vmcb->ctrl.intercept_misc2 =
	    VMCB_CTRL_INTERCEPT_VMRUN |
	    VMCB_CTRL_INTERCEPT_VMMCALL |
	    VMCB_CTRL_INTERCEPT_VMLOAD |
	    VMCB_CTRL_INTERCEPT_VMSAVE |
	    VMCB_CTRL_INTERCEPT_STGI |
	    VMCB_CTRL_INTERCEPT_CLGI |
	    VMCB_CTRL_INTERCEPT_SKINIT |
	    VMCB_CTRL_INTERCEPT_RDTSCP |
	    VMCB_CTRL_INTERCEPT_MONITOR |
	    VMCB_CTRL_INTERCEPT_MWAIT |
	    VMCB_CTRL_INTERCEPT_XSETBV;

	/* Intercept all I/O accesses. */
	memset(cpudata->iobm, 0xFF, IOBM_SIZE);
	vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;

	/*
	 * Allow:
	 *  - EFER [read]
	 *  - STAR [read, write]
	 *  - LSTAR [read, write]
	 *  - CSTAR [read, write]
	 *  - SFMASK [read, write]
	 *  - KERNELGSBASE [read, write]
	 *  - SYSENTER_CS [read, write]
	 *  - SYSENTER_ESP [read, write]
	 *  - SYSENTER_EIP [read, write]
	 *  - FSBASE [read, write]
	 *  - GSBASE [read, write]
	 *
	 * Intercept the rest.
	 */
	memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, false);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
	svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
	vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;

	/* Generate ASID. */
	svm_asid_alloc(vcpu);

	/* Virtual TPR. */
	vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;

	/* Enable Nested Paging. */
	vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
	vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];

	/* Must always be set. */
	vmcb->state.efer = EFER_SVME;

	/* Init XSAVE header. */
	cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
	cpudata->gfpu.xsh_xcomp_bv = 0;

	/* Bluntly hide the host TSC. */
	cpudata->tsc_offset = rdtsc();
}
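/*
 * Create a VCPU: allocate the wired cpudata structure, then the VMCB,
 * the I/O permission map and the MSR permission map, and finally put
 * the VMCB in its default state.
 */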
static int
svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata;
	int error;

	/* Allocate the SVM cpudata. */
	cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
	    roundup(sizeof(*cpudata), PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	vcpu->cpudata = cpudata;

	/* VMCB */
	error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
	    VMCB_NPAGES);
	if (error)
		goto error;

	/* I/O Bitmap */
	error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
	    IOBM_NPAGES);
	if (error)
		goto error;

	/* MSR Bitmap */
	error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
	    MSRBM_NPAGES);
	if (error)
		goto error;

	/* Init the VCPU info. */
	svm_vcpu_init(mach, vcpu);

	return 0;

error:
	if (cpudata->vmcb_pa) {
		svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
		    VMCB_NPAGES);
	}
	if (cpudata->iobm_pa) {
		svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
		    IOBM_NPAGES);
	}
	if (cpudata->msrbm_pa) {
		svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
		    MSRBM_NPAGES);
	}
	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
	return error;
}

static void
svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	svm_asid_free(vcpu);

	svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
	svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
	svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);

	uvm_km_free(kernel_map, (vaddr_t)cpudata,
	    roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
}

#define SVM_SEG_ATTRIB_TYPE	__BITS(4,0)
#define SVM_SEG_ATTRIB_DPL	__BITS(6,5)
#define SVM_SEG_ATTRIB_P	__BIT(7)
#define SVM_SEG_ATTRIB_AVL	__BIT(8)
#define SVM_SEG_ATTRIB_LONG	__BIT(9)
#define SVM_SEG_ATTRIB_DEF32	__BIT(10)
#define SVM_SEG_ATTRIB_GRAN	__BIT(11)

static void
svm_vcpu_setstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
{
	vseg->selector = seg->selector;
	vseg->attrib =
	    __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
	    __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
	    __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
	    __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
	    __SHIFTIN(seg->attrib.lng, SVM_SEG_ATTRIB_LONG) |
	    __SHIFTIN(seg->attrib.def32, SVM_SEG_ATTRIB_DEF32) |
	    __SHIFTIN(seg->attrib.gran, SVM_SEG_ATTRIB_GRAN);
	vseg->limit = seg->limit;
	vseg->base = seg->base;
}

static void
svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
{
	seg->selector = vseg->selector;
	seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
	seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
	seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
	seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
	seg->attrib.lng = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_LONG);
	seg->attrib.def32 = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF32);
	seg->attrib.gran = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_GRAN);
	seg->limit = vseg->limit;
	seg->base = vseg->base;
}

static bool
svm_state_tlb_flush(struct nvmm_x64_state *cstate,
    struct nvmm_x64_state *nstate, uint64_t flags)
{
	if (flags & NVMM_X64_STATE_CRS) {
		if ((cstate->crs[NVMM_X64_CR_CR0] ^
		     nstate->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
			return true;
		}
		if (cstate->crs[NVMM_X64_CR_CR3] !=
		    nstate->crs[NVMM_X64_CR_CR3]) {
			return true;
		}
		if ((cstate->crs[NVMM_X64_CR_CR4] ^
		     nstate->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
			return true;
		}
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		if ((cstate->msrs[NVMM_X64_MSR_EFER] ^
		     nstate->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
			return true;
		}
	}

	return false;
}
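/*
 * Import the state given by userland into our cached copy and into the
 * VMCB. A guest TLB flush is scheduled beforehand if the new CR0/CR4,
 * CR3 or EFER values require one.
 */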
static void
svm_vcpu_setstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *cstate = &cpudata->state;
	struct nvmm_x64_state *nstate = (struct nvmm_x64_state *)data;
	struct vmcb *vmcb = cpudata->vmcb;
	struct fxsave *fpustate;

	if (svm_state_tlb_flush(cstate, nstate, flags)) {
		cpudata->tlb_want_flush = true;
	}

	if (flags & NVMM_X64_STATE_SEGS) {
		memcpy(cstate->segs, nstate->segs, sizeof(nstate->segs));

		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_GDT],
		    &vmcb->state.gdt);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_IDT],
		    &vmcb->state.idt);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_LDT],
		    &vmcb->state.ldt);
		svm_vcpu_setstate_seg(&cstate->segs[NVMM_X64_SEG_TR],
		    &vmcb->state.tr);
	}

	if (flags & NVMM_X64_STATE_GPRS) {
		memcpy(cstate->gprs, nstate->gprs, sizeof(nstate->gprs));

		vmcb->state.rip = cstate->gprs[NVMM_X64_GPR_RIP];
		vmcb->state.rsp = cstate->gprs[NVMM_X64_GPR_RSP];
		vmcb->state.rax = cstate->gprs[NVMM_X64_GPR_RAX];
		vmcb->state.rflags = cstate->gprs[NVMM_X64_GPR_RFLAGS];
	}

	if (flags & NVMM_X64_STATE_CRS) {
		memcpy(cstate->crs, nstate->crs, sizeof(nstate->crs));

		vmcb->state.cr0 = cstate->crs[NVMM_X64_CR_CR0];
		vmcb->state.cr2 = cstate->crs[NVMM_X64_CR_CR2];
		vmcb->state.cr3 = cstate->crs[NVMM_X64_CR_CR3];
		vmcb->state.cr4 = cstate->crs[NVMM_X64_CR_CR4];

		vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
		vmcb->ctrl.v |= __SHIFTIN(cstate->crs[NVMM_X64_CR_CR8],
		    VMCB_CTRL_V_TPR);

		/* Clear unsupported XCR0 bits, set mandatory X87 bit. */
		if (svm_xcr0_mask != 0) {
			cstate->crs[NVMM_X64_CR_XCR0] &= svm_xcr0_mask;
			cstate->crs[NVMM_X64_CR_XCR0] |= XCR0_X87;
		} else {
			cstate->crs[NVMM_X64_CR_XCR0] = 0;
		}
	}

	if (flags & NVMM_X64_STATE_DRS) {
		memcpy(cstate->drs, nstate->drs, sizeof(nstate->drs));

		vmcb->state.dr6 = cstate->drs[NVMM_X64_DR_DR6];
		vmcb->state.dr7 = cstate->drs[NVMM_X64_DR_DR7];
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		memcpy(cstate->msrs, nstate->msrs, sizeof(nstate->msrs));

		/* Bit EFER_SVME is mandatory. */
		cstate->msrs[NVMM_X64_MSR_EFER] |= EFER_SVME;

		vmcb->state.efer = cstate->msrs[NVMM_X64_MSR_EFER];
		vmcb->state.star = cstate->msrs[NVMM_X64_MSR_STAR];
		vmcb->state.lstar = cstate->msrs[NVMM_X64_MSR_LSTAR];
		vmcb->state.cstar = cstate->msrs[NVMM_X64_MSR_CSTAR];
		vmcb->state.sfmask = cstate->msrs[NVMM_X64_MSR_SFMASK];
		vmcb->state.kernelgsbase =
		    cstate->msrs[NVMM_X64_MSR_KERNELGSBASE];
		vmcb->state.sysenter_cs =
		    cstate->msrs[NVMM_X64_MSR_SYSENTER_CS];
		vmcb->state.sysenter_esp =
		    cstate->msrs[NVMM_X64_MSR_SYSENTER_ESP];
		vmcb->state.sysenter_eip =
		    cstate->msrs[NVMM_X64_MSR_SYSENTER_EIP];
		vmcb->state.g_pat = cstate->msrs[NVMM_X64_MSR_PAT];
	}

	if (flags & NVMM_X64_STATE_MISC) {
		memcpy(cstate->misc, nstate->misc, sizeof(nstate->misc));

		vmcb->state.cpl = cstate->misc[NVMM_X64_MISC_CPL];
	}

	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(cstate->fpu));
	if (flags & NVMM_X64_STATE_FPU) {
		memcpy(&cstate->fpu, &nstate->fpu, sizeof(nstate->fpu));

		memcpy(cpudata->gfpu.xsh_fxsave, &cstate->fpu,
		    sizeof(cstate->fpu));

		fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
		fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
		fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
	}
}

static void
svm_vcpu_getstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x64_state *cstate = &cpudata->state;
	struct nvmm_x64_state *nstate = (struct nvmm_x64_state *)data;
	struct vmcb *vmcb = cpudata->vmcb;

	if (flags & NVMM_X64_STATE_SEGS) {
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_CS],
		    &vmcb->state.cs);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_DS],
		    &vmcb->state.ds);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_ES],
		    &vmcb->state.es);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_FS],
		    &vmcb->state.fs);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_GS],
		    &vmcb->state.gs);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_SS],
		    &vmcb->state.ss);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_GDT],
		    &vmcb->state.gdt);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_IDT],
		    &vmcb->state.idt);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_LDT],
		    &vmcb->state.ldt);
		svm_vcpu_getstate_seg(&cstate->segs[NVMM_X64_SEG_TR],
		    &vmcb->state.tr);

		memcpy(nstate->segs, cstate->segs, sizeof(cstate->segs));
	}

	if (flags & NVMM_X64_STATE_GPRS) {
		cstate->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
		cstate->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
		cstate->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
		cstate->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;

		memcpy(nstate->gprs, cstate->gprs, sizeof(cstate->gprs));
	}

	if (flags & NVMM_X64_STATE_CRS) {
		cstate->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
		cstate->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
		cstate->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
		cstate->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
		cstate->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
		    VMCB_CTRL_V_TPR);

		memcpy(nstate->crs, cstate->crs, sizeof(cstate->crs));
	}

	if (flags & NVMM_X64_STATE_DRS) {
		cstate->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
		cstate->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;

		memcpy(nstate->drs, cstate->drs, sizeof(cstate->drs));
	}

	if (flags & NVMM_X64_STATE_MSRS) {
		cstate->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
		cstate->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
		cstate->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
		cstate->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
		cstate->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
		cstate->msrs[NVMM_X64_MSR_KERNELGSBASE] =
		    vmcb->state.kernelgsbase;
		cstate->msrs[NVMM_X64_MSR_SYSENTER_CS] =
		    vmcb->state.sysenter_cs;
		cstate->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
		    vmcb->state.sysenter_esp;
		cstate->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
		    vmcb->state.sysenter_eip;
		cstate->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;

		memcpy(nstate->msrs, cstate->msrs, sizeof(cstate->msrs));

		/* Hide SVME. */
		nstate->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
	}

	if (flags & NVMM_X64_STATE_MISC) {
		cstate->misc[NVMM_X64_MISC_CPL] = vmcb->state.cpl;

		memcpy(nstate->misc, cstate->misc, sizeof(cstate->misc));
	}

	CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(cstate->fpu));
	if (flags & NVMM_X64_STATE_FPU) {
		memcpy(&cstate->fpu, cpudata->gfpu.xsh_fxsave,
		    sizeof(cstate->fpu));

		/* Copy out to userland, not the other way around. */
		memcpy(&nstate->fpu, &cstate->fpu, sizeof(cstate->fpu));
	}
}

/* -------------------------------------------------------------------------- */

static void
svm_tlb_flush(struct pmap *pm)
{
	struct nvmm_machine *mach = pm->pm_data;
	struct svm_cpudata *cpudata;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	/* Request TLB flushes. */
	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;
		cpudata = vcpu->cpudata;
		cpudata->tlb_want_flush = true;
		nvmm_vcpu_put(vcpu);
	}
}

static void
svm_machine_create(struct nvmm_machine *mach)
{
	/* Fill in pmap info. */
	mach->vm->vm_map.pmap->pm_data = (void *)mach;
	mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;

	mach->machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
}

static void
svm_machine_destroy(struct nvmm_machine *mach)
{
	kmem_free(mach->machdata, sizeof(struct svm_machdata));
}
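/*
 * Install a CPUID leaf override on the machine. The set and del masks
 * must not overlap; an existing entry for the same leaf is replaced,
 * otherwise the override goes into the first free slot.
 */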
static int
svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
{
	struct nvmm_x86_conf_cpuid *cpuid = data;
	struct svm_machdata *machdata = (struct svm_machdata *)mach->machdata;
	size_t i;

	if (__predict_false(op != NVMM_X86_CONF_CPUID)) {
		return EINVAL;
	}

	if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
	    (cpuid->set.ebx & cpuid->del.ebx) ||
	    (cpuid->set.ecx & cpuid->del.ecx) ||
	    (cpuid->set.edx & cpuid->del.edx))) {
		return EINVAL;
	}

	/* If already here, replace. */
	for (i = 0; i < SVM_NCPUIDS; i++) {
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (machdata->cpuid[i].leaf == cpuid->leaf) {
			memcpy(&machdata->cpuid[i], cpuid,
			    sizeof(struct nvmm_x86_conf_cpuid));
			return 0;
		}
	}

	/* Not here, insert. */
	for (i = 0; i < SVM_NCPUIDS; i++) {
		if (!machdata->cpuidpresent[i]) {
			machdata->cpuidpresent[i] = true;
			memcpy(&machdata->cpuid[i], cpuid,
			    sizeof(struct nvmm_x86_conf_cpuid));
			return 0;
		}
	}

	return ENOBUFS;
}

/* -------------------------------------------------------------------------- */

static bool
svm_ident(void)
{
	u_int descs[4];
	uint64_t msr;

	if (cpu_vendor != CPUVENDOR_AMD) {
		return false;
	}
	if (!(cpu_feature[3] & CPUID_SVM)) {
		return false;
	}

	if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
		return false;
	}
	x86_cpuid(0x8000000a, descs);

	/* Want Nested Paging. */
	if (!(descs[3] & CPUID_AMD_SVM_NP)) {
		return false;
	}

	/* Want nRIP. */
	if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) {
		return false;
	}

	svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0;

	msr = rdmsr(MSR_VMCR);
	if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) {
		return false;
	}

	return true;
}

static void
svm_init_asid(uint32_t maxasid)
{
	size_t i, j, allocsz;

	mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE);

	/* Arbitrarily limit. */
	maxasid = uimin(maxasid, 8192);

	svm_maxasid = maxasid;
	allocsz = roundup(maxasid, 8) / 8;
	svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP);

	/* ASID 0 is reserved for the host. */
	svm_asidmap[0] |= __BIT(0);

	/* ASID n-1 is special, we share it. */
	i = (maxasid - 1) / 8;
	j = (maxasid - 1) % 8;
	svm_asidmap[i] |= __BIT(j);
}

static void
svm_change_cpu(void *arg1, void *arg2)
{
	bool enable = (bool)arg1;
	uint64_t msr;

	msr = rdmsr(MSR_VMCR);
	if (msr & VMCR_SVMED) {
		wrmsr(MSR_VMCR, msr & ~VMCR_SVMED);
	}

	if (!enable) {
		wrmsr(MSR_VM_HSAVE_PA, 0);
	}

	msr = rdmsr(MSR_EFER);
	if (enable) {
		msr |= EFER_SVME;
	} else {
		msr &= ~EFER_SVME;
	}
	wrmsr(MSR_EFER, msr);

	if (enable) {
		wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
	}
}
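/*
 * Enable SVM machine-wide: choose the guest TLB flush command, set up
 * the ASID map and the XCR0 mask, allocate one host-save page per CPU,
 * and broadcast an xcall that sets EFER.SVME everywhere.
 */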
static void
svm_init(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct vm_page *pg;
	u_int descs[4];
	uint64_t xc;

	x86_cpuid(0x8000000a, descs);

	/* The guest TLB flush command. */
	if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
	} else {
		svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
	}

	/* Init the ASID. */
	svm_init_asid(descs[1]);

	/* Init the XCR0 mask. */
	svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;

	memset(hsave, 0, sizeof(hsave));
	for (CPU_INFO_FOREACH(cii, ci)) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
	}

	xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
	xc_wait(xc);
}

static void
svm_fini_asid(void)
{
	size_t allocsz;

	allocsz = roundup(svm_maxasid, 8) / 8;
	kmem_free(svm_asidmap, allocsz);

	mutex_destroy(&svm_asidlock);
}

static void
svm_fini(void)
{
	uint64_t xc;
	size_t i;

	xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL);
	xc_wait(xc);

	for (i = 0; i < MAXCPUS; i++) {
		if (hsave[i].pa != 0)
			uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa));
	}

	svm_fini_asid();
}

static void
svm_capability(struct nvmm_capability *cap)
{
	cap->u.x86.xcr0_mask = svm_xcr0_mask;
	cap->u.x86.mxcsr_mask = x86_fpu_mxcsr_mask;
	cap->u.x86.conf_cpuid_maxops = SVM_NCPUIDS;
}

const struct nvmm_impl nvmm_x86_svm = {
	.ident = svm_ident,
	.init = svm_init,
	.fini = svm_fini,
	.capability = svm_capability,
	.conf_max = NVMM_X86_NCONF,
	.conf_sizes = svm_conf_sizes,
	.state_size = sizeof(struct nvmm_x64_state),
	.machine_create = svm_machine_create,
	.machine_destroy = svm_machine_destroy,
	.machine_configure = svm_machine_configure,
	.vcpu_create = svm_vcpu_create,
	.vcpu_destroy = svm_vcpu_destroy,
	.vcpu_setstate = svm_vcpu_setstate,
	.vcpu_getstate = svm_vcpu_getstate,
	.vcpu_inject = svm_vcpu_inject,
	.vcpu_run = svm_vcpu_run
};