 /*-
  * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271912 2014-09-20 21:46:31Z neel $");
+__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271939 2014-09-21 23:42:54Z neel $");

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/smp.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
 #include <sys/sysctl.h>

 #include <vm/vm.h>
 #include <vm/pmap.h>

 #include <machine/cpufunc.h>
 #include <machine/psl.h>
 #include <machine/pmap.h>
 #include <machine/md_var.h>
 #include <machine/vmparam.h>
 #include <machine/specialreg.h>
 #include <machine/segments.h>
 #include <machine/smp.h>
 #include <machine/vmm.h>
 #include <machine/vmm_dev.h>
 #include <machine/vmm_instruction_emul.h>

 #include <x86/apicreg.h>

 #include "vmm_lapic.h"
 #include "vmm_stat.h"
 #include "vmm_ktr.h"
 #include "vmm_ioport.h"
 #include "vatpic.h"
 #include "vlapic.h"
 #include "vlapic_priv.h"

 #include "x86.h"
 #include "vmcb.h"
 #include "svm.h"
 #include "svm_softc.h"
 #include "svm_msr.h"
 #include "npt.h"

 SYSCTL_DECL(_hw_vmm);
 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

 /*
  * SVM CPUID function 0x8000_000A, edx bit decoding.
  */
 #define AMD_CPUID_SVM_NP            BIT(0)  /* Nested paging or RVI */
 #define AMD_CPUID_SVM_LBR           BIT(1)  /* Last branch virtualization */
 #define AMD_CPUID_SVM_SVML          BIT(2)  /* SVM lock */
 #define AMD_CPUID_SVM_NRIP_SAVE     BIT(3)  /* Next RIP is saved */
 #define AMD_CPUID_SVM_TSC_RATE      BIT(4)  /* TSC rate control. */
 #define AMD_CPUID_SVM_VMCB_CLEAN    BIT(5)  /* VMCB state caching */
 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6)  /* Flush by ASID */
 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7)  /* Decode assist */
 #define AMD_CPUID_SVM_PAUSE_INC     BIT(10) /* Pause intercept filter. */
 #define AMD_CPUID_SVM_PAUSE_FTH     BIT(12) /* Pause filter threshold */

 #define VMCB_CACHE_DEFAULT  (VMCB_CACHE_ASID | \
                              VMCB_CACHE_IOPM | \
                              VMCB_CACHE_I    | \
                              VMCB_CACHE_TPR  | \
+                             VMCB_CACHE_CR2  | \
+                             VMCB_CACHE_CR   | \
+                             VMCB_CACHE_DT   | \
+                             VMCB_CACHE_SEG  | \
                              VMCB_CACHE_NP)

+static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
+SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
+    0, NULL);
+
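
Note: the vmcb_clean tunable introduced above selects which VMCB fields the
host allows the processor to cache across VMRUNs. As a minimal sketch of how
such a mask is typically consumed just before VMRUN (illustrative only; it
assumes a per-vcpu 'dirty' mask maintained by svm_set_dirty() and a
'vmcb_clean' field in the VMCB control area):

    /*
     * Mark as "clean" every cacheable field that the host has not
     * modified since this vcpu's last VMRUN; the processor must
     * re-read everything else from memory.
     */
    ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
    vcpustate->dirty = 0;
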
 MALLOC_DEFINE(M_SVM, "svm", "svm");
 MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

 /* Per-CPU context area. */
 extern struct pcpu __pcpu[];

-static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc);
-
 static uint32_t svm_feature;    /* AMD SVM features. */
 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
     "SVM features advertised by CPUID.8000000AH:EDX");

 static int disable_npf_assist;
 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
     &disable_npf_assist, 0, NULL);

 /* Maximum ASIDs supported by the processor */
 static uint32_t nasid;
 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0,
     "Number of ASIDs supported by this processor");

 /* Current ASID generation for each host cpu */
 static struct asid asid[MAXCPU];

 /*
  * SVM host state saved area of size 4KB for each core.
  */
 static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

 /*
  * S/w saved host context.
  */
 static struct svm_regctx host_ctx[MAXCPU];

 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

+static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
+
 /*
  * Common function to enable or disable SVM for a CPU.
  */
 static int
 cpu_svm_enable_disable(boolean_t enable)
 {
     uint64_t efer_msr;

     efer_msr = rdmsr(MSR_EFER);

     if (enable)
         efer_msr |= EFER_SVM;
     else
         efer_msr &= ~EFER_SVM;

     wrmsr(MSR_EFER, efer_msr);

     return (0);
 }

 /*
  * Disable SVM on a CPU.
  */
 static void
 svm_disable(void *arg __unused)
 {

     (void)cpu_svm_enable_disable(FALSE);
 }

 /*
  * Disable SVM for all CPUs.
  */
 static int
 svm_cleanup(void)
 {

     smp_rendezvous(NULL, svm_disable, NULL, NULL);
     return (0);
 }

 /*
  * Check for the SVM features required by bhyve on a CPU.
  */
 static int
 svm_cpuid_features(void)
 {
     u_int regs[4];

     /* CPUID Fn8000_000A is for SVM */
     do_cpuid(0x8000000A, regs);
     svm_feature = regs[3];

     printf("SVM rev: 0x%x NASID:0x%x\n", regs[0] & 0xFF, regs[1]);
     nasid = regs[1];
     KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

     printf("SVM Features:0x%b\n", svm_feature,
         "\020"
         "\001NP"                /* Nested paging */
         "\002LbrVirt"           /* LBR virtualization */
         "\003SVML"              /* SVM lock */
         "\004NRIPS"             /* NRIP save */
         "\005TscRateMsr"        /* MSR based TSC rate control */
         "\006VmcbClean"         /* VMCB clean bits */
         "\007FlushByAsid"       /* Flush by ASID */
         "\010DecodeAssist"      /* Decode assist */
         "\011<b20>"
         "\012<b20>"
         "\013PauseFilter"
         "\014<b20>"
         "\015PauseFilterThreshold"
         "\016AVIC"
         );

     /* SVM Lock */
     if (!(svm_feature & AMD_CPUID_SVM_SVML)) {
         printf("SVM is disabled by BIOS, please enable in BIOS.\n");
         return (ENXIO);
     }

     /*
      * bhyve needs RVI to work.
      */
     if (!(svm_feature & AMD_CPUID_SVM_NP)) {
         printf("Missing Nested paging or RVI SVM support in processor.\n");
         return (EIO);
     }

     if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE)
         return (0);

     return (EIO);
 }

 static __inline int
 flush_by_asid(void)
 {

     return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
 }

 static __inline int
 decode_assist(void)
 {

     return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
 }

 /*
  * Enable SVM for a CPU.
  */
 static void
 svm_enable(void *arg __unused)
 {
     uint64_t hsave_pa;

     (void)cpu_svm_enable_disable(TRUE);

     hsave_pa = vtophys(hsave[curcpu]);
     wrmsr(MSR_VM_HSAVE_PA, hsave_pa);

     if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) {
         panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu);
     }
 }

 /*
  * Check if the processor supports SVM.
  */
 static int
 is_svm_enabled(void)
 {
     uint64_t msr;

     /* Section 15.4 Enabling SVM from APM2. */
     if ((amd_feature2 & AMDID2_SVM) == 0) {
         printf("SVM is not supported on this processor.\n");
         return (ENXIO);
     }

     msr = rdmsr(MSR_VM_CR);
     /* Make sure SVM is not disabled by BIOS. */
     if ((msr & VM_CR_SVMDIS) == 0) {
         return (svm_cpuid_features());
     }

     printf("SVM disabled by Key, consult TPM/BIOS manual.\n");
     return (ENXIO);
 }

 /*
  * Enable SVM on CPU and initialize nested page table h/w.
  */
 static int
 svm_init(int ipinum)
 {
     int err, cpu;

     err = is_svm_enabled();
     if (err)
         return (err);

+    vmcb_clean &= VMCB_CACHE_DEFAULT;
+
     for (cpu = 0; cpu < MAXCPU; cpu++) {
         /*
          * Initialize the host ASIDs to their "highest" valid values.
          *
          * The next ASID allocation will rollover both 'gen' and 'num'
          * and start off the sequence at {1,1}.
          */
         asid[cpu].gen = ~0UL;
         asid[cpu].num = nasid - 1;
     }

     svm_msr_init();
     svm_npt_init(ipinum);

     /* Start SVM on all CPUs */
     smp_rendezvous(NULL, svm_enable, NULL, NULL);

     return (0);
 }

 static void
 svm_restore(void)
 {

     svm_enable(NULL);
 }

 /*
  * Get the index and bit position for an MSR in the MSR permission
  * bitmap. Two bits are used for each MSR: the lower bit is for read
  * and the higher bit is for write.
  */
 static int
 svm_msr_index(uint64_t msr, int *index, int *bit)
 {
     uint32_t base, off;

 /* Pentium compatible MSRs */
 #define MSR_PENTIUM_START   0
 #define MSR_PENTIUM_END     0x1FFF
 /* AMD 6th generation and Intel compatible MSRs */
 #define MSR_AMD6TH_START    0xC0000000UL
 #define MSR_AMD6TH_END      0xC0001FFFUL
 /* AMD 7th and 8th generation compatible MSRs */
 #define MSR_AMD7TH_START    0xC0010000UL
 #define MSR_AMD7TH_END      0xC0011FFFUL

     *index = -1;
     *bit = (msr % 4) * 2;
     base = 0;

     if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
         *index = msr / 4;
         return (0);
     }

     base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
     if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
         off = (msr - MSR_AMD6TH_START);
         *index = (off + base) / 4;
         return (0);
     }

     base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
     if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
         off = (msr - MSR_AMD7TH_START);
         *index = (off + base) / 4;
         return (0);
     }

     return (EIO);
 }

 /*
  * Give the virtual CPU complete access to an MSR (read & write).
  */
 static int
 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
 {
     int index, bit, err;

     err = svm_msr_index(msr, &index, &bit);
     if (err) {
         ERR("MSR 0x%lx is not writeable by guest.\n", msr);
         return (err);
     }

     if (index < 0 || index >= SVM_MSR_BITMAP_SIZE) {
         ERR("MSR 0x%lx index out of range(%d).\n", msr, index);
         return (EINVAL);
     }
     if (bit < 0 || bit > 8) {
         ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit);
         return (EINVAL);
     }

     /* Disable intercept for read and write. */
     if (read)
         perm_bitmap[index] &= ~(1UL << bit);
     if (write)
         perm_bitmap[index] &= ~(2UL << bit);
     CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n",
         (perm_bitmap[index] >> bit) & 0x3, msr);

     return (0);
 }

 static int
 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
 {
     return (svm_msr_perm(perm_bitmap, msr, true, true));
 }

 static int
 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
 {
     return (svm_msr_perm(perm_bitmap, msr, true, false));
 }

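
Note: svm_msr_index() above flattens the three architectural MSR ranges into
a single byte array with two permission bits per MSR. A worked example
(illustrative): MSR_LSTAR (0xC0000082) falls in the AMD 6th-generation range,
so with base = 0x2000 (the size of the Pentium range):

    int index, bit;

    svm_msr_index(MSR_LSTAR, &index, &bit);
    /* index == (0x82 + 0x2000) / 4 == 0x820 */
    /* bit == (0xC0000082 % 4) * 2 == 4 */
    /* Bit 4 of perm_bitmap[0x820] intercepts reads, bit 5 writes. */
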
-static __inline void
-vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
-{
-    struct svm_vcpu *vcpustate;
-
-    vcpustate = svm_get_vcpu(sc, vcpu);
-
-    vcpustate->dirty |= dirtybits;
-}
-
 static __inline int
 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
 {
     struct vmcb_ctrl *ctrl;

     KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

     ctrl = svm_get_vmcb_ctrl(sc, vcpu);
     return (ctrl->intercept[idx] & bitmask ? 1 : 0);
 }

 static __inline void
 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
     int enabled)
 {
     struct vmcb_ctrl *ctrl;
     uint32_t oldval;

     KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

     ctrl = svm_get_vmcb_ctrl(sc, vcpu);
     oldval = ctrl->intercept[idx];

     if (enabled)
         ctrl->intercept[idx] |= bitmask;
     else
         ctrl->intercept[idx] &= ~bitmask;

     if (ctrl->intercept[idx] != oldval) {
-        vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I);
+        svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
         VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
             "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
     }
 }

 static __inline void
 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
 {
     svm_set_intercept(sc, vcpu, off, bitmask, 0);
 }

 static __inline void
 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
 {
     svm_set_intercept(sc, vcpu, off, bitmask, 1);
 }

 static void
 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
     uint64_t msrpm_base_pa, uint64_t np_pml4)
 {
     struct vmcb_ctrl *ctrl;
     struct vmcb_state *state;
     uint32_t mask;
     int n;

     ctrl = svm_get_vmcb_ctrl(sc, vcpu);
     state = svm_get_vmcb_state(sc, vcpu);

     ctrl->iopm_base_pa = iopm_base_pa;
     ctrl->msrpm_base_pa = msrpm_base_pa;

     /* Enable nested paging */
     ctrl->np_enable = 1;
     ctrl->n_cr3 = np_pml4;

     /*
      * Intercept accesses to the control registers that are not shadowed
      * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
      */
     for (n = 0; n < 16; n++) {
         mask = (BIT(n) << 16) | BIT(n);
         if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
             svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
         else
             svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
     }

     /* Intercept Machine Check exceptions. */
     svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));

     /* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
         VMCB_INTCPT_FERR_FREEZE);

     /*
      * From the section "Canonicalization and Consistency Checks" in APMv2,
      * the VMRUN intercept bit must be set to pass the consistency check.
      */
     svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

     /*
      * The ASID will be set to a non-zero value just before VMRUN.
      */
     ctrl->asid = 0;

     /*
      * Section 15.21.1, Interrupt Masking in EFLAGS
      * Section 15.21.2, Virtualizing APIC.TPR
      *
      * This must be set for %rflags and %cr8 isolation of guest and host.
      */
     ctrl->v_intr_masking = 1;

     /* Enable Last Branch Record aka LBR for debugging */
     ctrl->lbr_virt_en = 1;
     state->dbgctl = BIT(0);

     /* EFER_SVM must always be set when the guest is executing */
     state->efer = EFER_SVM;

     /* Set up the PAT to power-on state */
     state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
         PAT_VALUE(1, PAT_WRITE_THROUGH) |
         PAT_VALUE(2, PAT_UNCACHED)      |
         PAT_VALUE(3, PAT_UNCACHEABLE)   |
         PAT_VALUE(4, PAT_WRITE_BACK)    |
         PAT_VALUE(5, PAT_WRITE_THROUGH) |
         PAT_VALUE(6, PAT_UNCACHED)      |
         PAT_VALUE(7, PAT_UNCACHEABLE);
 }

 /*
  * Initialise a virtual machine.
  */
 static void *
 svm_vminit(struct vm *vm, pmap_t pmap)
 {
     struct svm_softc *svm_sc;
     struct svm_vcpu *vcpu;
     vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
     int i;

     svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc),
         M_SVM, M_WAITOK | M_ZERO);

     svm_sc->vm = vm;
     svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

     /*
      * Intercept MSR access to all MSRs except GSBASE, FSBASE, etc.
      */
     memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));

     /*
      * The following MSRs can be controlled completely by the virtual
      * machine, since accesses to them are translated into accesses to
      * the VMCB.
      */
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);

     /* For Nested Paging/RVI only. */
     svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

     svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
+
+    /*
+     * Intercept writes to make sure that the EFER_SVM bit is not cleared.
+     */
     svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

     /* Intercept access to all I/O ports. */
     memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

     /* Cache physical address for multiple vcpus. */
     iopm_pa = vtophys(svm_sc->iopm_bitmap);
     msrpm_pa = vtophys(svm_sc->msr_bitmap);
     pml4_pa = svm_sc->nptp;

     for (i = 0; i < VM_MAXCPU; i++) {
         vcpu = svm_get_vcpu(svm_sc, i);
         vcpu->lastcpu = NOCPU;
         vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
         vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
         svm_msr_guest_init(svm_sc, i);
     }
     return (svm_sc);
 }

 static int
 svm_cpl(struct vmcb_state *state)
 {

     /*
      * From APMv2:
      *   "Retrieve the CPL from the CPL field in the VMCB, not
      *    from any segment DPL"
      */
     return (state->cpl);
 }

 static enum vm_cpu_mode
 svm_vcpu_mode(struct vmcb *vmcb)
 {
-    struct vmcb_segment *seg;
+    struct vmcb_segment seg;
     struct vmcb_state *state;
+    int error;

     state = &vmcb->state;

     if (state->efer & EFER_LMA) {
-        seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
+        error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+        KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
+            error));
+
         /*
          * Section 4.8.1 of APM2: check whether the Code Segment has
          * the Long attribute set in its descriptor.
          */
-        if (seg->attrib & VMCB_CS_ATTRIB_L)
+        if (seg.attrib & VMCB_CS_ATTRIB_L)
             return (CPU_MODE_64BIT);
         else
             return (CPU_MODE_COMPATIBILITY);
     } else if (state->cr0 & CR0_PE) {
         return (CPU_MODE_PROTECTED);
     } else {
         return (CPU_MODE_REAL);
     }
 }

 static enum vm_paging_mode
 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
 {

     if ((cr0 & CR0_PG) == 0)
         return (PAGING_MODE_FLAT);
     if ((cr4 & CR4_PAE) == 0)
         return (PAGING_MODE_32);
     if (efer & EFER_LME)
         return (PAGING_MODE_64);
     else
         return (PAGING_MODE_PAE);
 }

 /*
  * ins/outs utility routines
  */
 static uint64_t
 svm_inout_str_index(struct svm_regctx *regs, int in)
 {
     uint64_t val;

     val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi;

     return (val);
 }

 static uint64_t
 svm_inout_str_count(struct svm_regctx *regs, int rep)
 {
     uint64_t val;

     val = rep ? regs->sctx_rcx : 1;

     return (val);
 }

 static void
 svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
     int in, struct vm_inout_str *vis)
 {
     int error, s;

     if (in) {
         vis->seg_name = VM_REG_GUEST_ES;
     } else {
         /* The segment field has standard encoding */
         s = (info1 >> 10) & 0x7;
         vis->seg_name = vm_segment_name(s);
     }

-    error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
+    error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
     KASSERT(error == 0, ("%s: vmcb_getdesc error %d", __func__, error));
 }

 static int
 svm_inout_str_addrsize(uint64_t info1)
 {
     uint32_t size;

     size = (info1 >> 7) & 0x7;
     switch (size) {
     case 1:
         return (2);     /* 16 bit */
     case 2:
         return (4);     /* 32 bit */
     case 4:
         return (8);     /* 64 bit */
     default:
         panic("%s: invalid size encoding %d", __func__, size);
     }
 }

 static void
 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
 {
     struct vmcb_state *state;

     state = &vmcb->state;
     paging->cr3 = state->cr3;
     paging->cpl = svm_cpl(state);
     paging->cpu_mode = svm_vcpu_mode(vmcb);
     paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
         state->efer);
 }

 #define UNHANDLED 0

 /*
  * Handle guest I/O intercept.
  */
 static int
 svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
 {
     struct vmcb_ctrl *ctrl;
     struct vmcb_state *state;
     struct svm_regctx *regs;
     struct vm_inout_str *vis;
     uint64_t info1;
     int inout_string;

     state = svm_get_vmcb_state(svm_sc, vcpu);
     ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
     regs = svm_get_guest_regctx(svm_sc, vcpu);

     info1 = ctrl->exitinfo1;
     inout_string = info1 & BIT(2) ? 1 : 0;

     /*
      * The effective segment number in EXITINFO1[12:10] is populated
      * only if the processor has the DecodeAssist capability.
      *
      * XXX this is not specified explicitly in APMv2 but can be verified
      * empirically.
      */
     if (inout_string && !decode_assist())
         return (UNHANDLED);

     vmexit->exitcode = VM_EXITCODE_INOUT;
     vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
     vmexit->u.inout.string = inout_string;
     vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
     vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
     vmexit->u.inout.port = (uint16_t)(info1 >> 16);
     vmexit->u.inout.eax = (uint32_t)(state->rax);

     if (inout_string) {
         vmexit->exitcode = VM_EXITCODE_INOUT_STR;
         vis = &vmexit->u.inout_str;
         svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
         vis->rflags = state->rflags;
         vis->cr0 = state->cr0;
         vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
         vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
         vis->addrsize = svm_inout_str_addrsize(info1);
         svm_inout_str_seginfo(svm_sc, vcpu, info1,
             vmexit->u.inout.in, vis);
     }

     return (UNHANDLED);
 }

 static int
 svm_npf_paging(uint64_t exitinfo1)
 {

     if (exitinfo1 & VMCB_NPF_INFO1_W)
         return (VM_PROT_WRITE);

     return (VM_PROT_READ);
 }

 static bool
 svm_npf_emul_fault(uint64_t exitinfo1)
 {

     if (exitinfo1 & VMCB_NPF_INFO1_ID) {
         return (false);
     }

     if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
         return (false);
     }

     if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
         return (false);
     }

     return (true);
 }

 static void
 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
 {
     struct vm_guest_paging *paging;
-    struct vmcb_segment *seg;
+    struct vmcb_segment seg;
     struct vmcb_ctrl *ctrl;
     char *inst_bytes;
-    int inst_len;
+    int error, inst_len;

     ctrl = &vmcb->ctrl;
     paging = &vmexit->u.inst_emul.paging;

     vmexit->exitcode = VM_EXITCODE_INST_EMUL;
     vmexit->u.inst_emul.gpa = gpa;
     vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
     svm_paging_info(vmcb, paging);

-    seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
+    error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+    KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));
+
     switch (paging->cpu_mode) {
     case CPU_MODE_PROTECTED:
     case CPU_MODE_COMPATIBILITY:
         /*
          * Section 4.8.1 of APM2, Default Operand Size or D bit.
          */
-        vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
+        vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
             1 : 0;
         break;
     default:
         vmexit->u.inst_emul.cs_d = 0;
         break;
     }

     /*
      * Copy the instruction bytes into 'vie' if available.
      */
     if (decode_assist() && !disable_npf_assist) {
         inst_len = ctrl->inst_len;
         inst_bytes = ctrl->inst_bytes;
     } else {
         inst_len = 0;
         inst_bytes = NULL;
     }
     vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
 }

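
Note: svm_handle_io() above unpacks EXITINFO1 purely by bit position. A
worked decode for a hypothetical single-byte "inb $0x64" exit (the value
0x00640011 is illustrative, not taken from this diff):

    uint64_t info1 = 0x00640011;

    int in = info1 & BIT(0) ? 1 : 0;         /* 1: IN rather than OUT */
    int string = info1 & BIT(2) ? 1 : 0;     /* 0: not INS/OUTS */
    int rep = info1 & BIT(3) ? 1 : 0;        /* 0: no REP prefix */
    int bytes = (info1 >> 4) & 0x7;          /* 1: one-byte access */
    uint16_t port = (uint16_t)(info1 >> 16); /* 0x64 */
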
-/*
- * Intercept access to MSR_EFER to prevent the guest from clearing the
- * SVM enable bit.
- */
-static int
-svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t val)
-{
-    struct vmcb_state *state;
-    uint64_t oldval;
-
-    state = svm_get_vmcb_state(sc, vcpu);
-
-    oldval = state->efer;
-    state->efer = val | EFER_SVM;
-    if (state->efer != oldval) {
-        VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx",
-            oldval, state->efer);
-        vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR);
-    }
-    return (0);
-}
-
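
Note: svm_eventinject() below packs the vector, event type and optional error
code into the 64-bit EVENTINJ field. An illustrative call (not part of this
diff) injecting #GP(0) into vcpu 0:

    /* EVENTINJ = 13 | (3 << 8) | EC_VALID | VALID, error code in 63:32. */
    svm_eventinject(sc, 0, VMCB_EVENTINJ_TYPE_EXCEPTION, IDT_GP, 0, true);
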
 #ifdef KTR
 static const char *
 intrtype_to_str(int intr_type)
 {
     switch (intr_type) {
     case VMCB_EVENTINJ_TYPE_INTR:
         return ("hwintr");
     case VMCB_EVENTINJ_TYPE_NMI:
         return ("nmi");
     case VMCB_EVENTINJ_TYPE_INTn:
         return ("swintr");
     case VMCB_EVENTINJ_TYPE_EXCEPTION:
         return ("exception");
     default:
         panic("%s: unknown intr_type %d", __func__, intr_type);
     }
 }
 #endif

 /*
  * Inject an event into the vcpu as described in section 15.20, "Event
  * injection".
  */
 static void
 svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
     uint32_t error, bool ec_valid)
 {
     struct vmcb_ctrl *ctrl;

     ctrl = svm_get_vmcb_ctrl(sc, vcpu);

     KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
         ("%s: event already pending %#lx", __func__, ctrl->eventinj));

     KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
         __func__, vector));

     switch (intr_type) {
     case VMCB_EVENTINJ_TYPE_INTR:
     case VMCB_EVENTINJ_TYPE_NMI:
     case VMCB_EVENTINJ_TYPE_INTn:
         break;
     case VMCB_EVENTINJ_TYPE_EXCEPTION:
         if (vector >= 0 && vector <= 31 && vector != 2)
             break;
         /* FALLTHROUGH */
     default:
         panic("%s: invalid intr_type/vector: %d/%d", __func__,
             intr_type, vector);
     }
     ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
     if (ec_valid) {
         ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
         ctrl->eventinj |= (uint64_t)error << 32;
         VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
             intrtype_to_str(intr_type), vector, error);
     } else {
         VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
             intrtype_to_str(intr_type), vector);
     }
 }

 static void
 svm_update_virqinfo(struct svm_softc *sc, int vcpu)
 {
     struct vm *vm;
     struct vlapic *vlapic;
     struct vmcb_ctrl *ctrl;
     int pending;

     vm = sc->vm;
     vlapic = vm_lapic(vm, vcpu);
     ctrl = svm_get_vmcb_ctrl(sc, vcpu);

     /* Update %cr8 in the emulated vlapic */
     vlapic_set_cr8(vlapic, ctrl->v_tpr);

     /*
      * If V_IRQ indicates that the interrupt injection attempted on the
      * last VMRUN was successful then update the vlapic accordingly.
      */
     if (ctrl->v_intr_vector != 0) {
         pending = ctrl->v_irq;
         KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
             "v_intr_vector %d", __func__, ctrl->v_intr_vector));
         KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
         VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
             pending ? "pending" : "accepted");
         if (!pending)
             vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
     }
 }

 static void
 svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
 {
     struct vmcb_ctrl *ctrl;
     uint64_t intinfo;

     ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
     intinfo = ctrl->exitintinfo;
     if (!VMCB_EXITINTINFO_VALID(intinfo))
         return;

     /*
      * From APMv2, Section "Intercepts during IDT interrupt delivery"
      *
      * If a #VMEXIT happened during event delivery then record the event
      * that was being delivered.
      */
     VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
         intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
     vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
     vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
 }

 static __inline int
 vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
 {

     return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
         VMCB_INTCPT_VINTR));
 }

 static __inline void
 enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
 {
     struct vmcb_ctrl *ctrl;

     ctrl = svm_get_vmcb_ctrl(sc, vcpu);

     if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
         KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
         KASSERT(vintr_intercept_enabled(sc, vcpu),
             ("%s: vintr intercept should be enabled", __func__));
         return;
     }

     VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
     ctrl->v_irq = 1;
     ctrl->v_ign_tpr = 1;
     ctrl->v_intr_vector = 0;
-    vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+    svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
     svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
 }

 static __inline void
 disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
 {
     struct vmcb_ctrl *ctrl;

     ctrl = svm_get_vmcb_ctrl(sc, vcpu);

     if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
         KASSERT(!vintr_intercept_enabled(sc, vcpu),
             ("%s: vintr intercept should be disabled", __func__));
         return;
     }

 #ifdef KTR
     if (ctrl->v_intr_vector == 0)
         VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
     else
         VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
 #endif
     ctrl->v_irq = 0;
     ctrl->v_intr_vector = 0;
| 1020 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1021} 1022 1023static __inline void 1024disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 1025{ 1026 struct vmcb_ctrl *ctrl; 1027 1028 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1029 1030 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1031 KASSERT(!vintr_intercept_enabled(sc, vcpu), 1032 ("%s: vintr intercept should be disabled", __func__)); 1033 return; 1034 } 1035 1036#ifdef KTR 1037 if (ctrl->v_intr_vector == 0) 1038 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 1039 else 1040 VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection"); 1041#endif 1042 ctrl->v_irq = 0; 1043 ctrl->v_intr_vector = 0;
|
1056 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
| 1044 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
|
1057 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1058} 1059 1060static int 1061svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val) 1062{ 1063 struct vmcb_ctrl *ctrl; 1064 int oldval, newval; 1065 1066 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1067 oldval = ctrl->intr_shadow; 1068 newval = val ? 1 : 0; 1069 if (newval != oldval) { 1070 ctrl->intr_shadow = newval; 1071 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); 1072 } 1073 return (0); 1074} 1075 1076static int 1077svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val) 1078{ 1079 struct vmcb_ctrl *ctrl; 1080 1081 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1082 *val = ctrl->intr_shadow; 1083 return (0); 1084} 1085 1086/* 1087 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1088 * executes an IRET. The IRET intercept is enabled when an NMI is injected 1089 * to track when the vcpu is done handling the NMI. 1090 */ 1091static int 1092nmi_blocked(struct svm_softc *sc, int vcpu) 1093{ 1094 int blocked; 1095 1096 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1097 VMCB_INTCPT_IRET); 1098 return (blocked); 1099} 1100 1101static void 1102enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1103{ 1104 1105 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1106 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1107 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1108} 1109 1110static void 1111clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1112{ 1113 int error; 1114 1115 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1116 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1117 /* 1118 * When the IRET intercept is cleared the vcpu will attempt to execute 1119 * the "iret" when it runs next. However, it is possible to inject 1120 * another NMI into the vcpu before the "iret" has actually executed. 1121 * 1122 * For example, if the "iret" encounters a #NPF when accessing the stack 1123 * it will trap back into the hypervisor. If an NMI is pending for 1124 * the vcpu it will be injected into the guest. 1125 * 1126 * XXX this needs to be fixed 1127 */ 1128 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1129 1130 /* 1131 * Set 'intr_shadow' to prevent an NMI from being injected on the 1132 * immediate VMRUN. 1133 */ 1134 error = svm_modify_intr_shadow(sc, vcpu, 1); 1135 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1136} 1137 1138static int 1139emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1140 bool *retu) 1141{ 1142 int error; 1143 1144 if (lapic_msr(num)) 1145 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1146 else if (num == MSR_EFER)
| 1045 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1046} 1047 1048static int 1049svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val) 1050{ 1051 struct vmcb_ctrl *ctrl; 1052 int oldval, newval; 1053 1054 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1055 oldval = ctrl->intr_shadow; 1056 newval = val ? 1 : 0; 1057 if (newval != oldval) { 1058 ctrl->intr_shadow = newval; 1059 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); 1060 } 1061 return (0); 1062} 1063 1064static int 1065svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val) 1066{ 1067 struct vmcb_ctrl *ctrl; 1068 1069 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1070 *val = ctrl->intr_shadow; 1071 return (0); 1072} 1073 1074/* 1075 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1076 * executes an IRET. The IRET intercept is enabled when an NMI is injected 1077 * to track when the vcpu is done handling the NMI. 1078 */ 1079static int 1080nmi_blocked(struct svm_softc *sc, int vcpu) 1081{ 1082 int blocked; 1083 1084 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1085 VMCB_INTCPT_IRET); 1086 return (blocked); 1087} 1088 1089static void 1090enable_nmi_blocking(struct svm_softc *sc, int vcpu) 1091{ 1092 1093 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); 1094 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); 1095 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1096} 1097 1098static void 1099clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1100{ 1101 int error; 1102 1103 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1104 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1105 /* 1106 * When the IRET intercept is cleared the vcpu will attempt to execute 1107 * the "iret" when it runs next. However, it is possible to inject 1108 * another NMI into the vcpu before the "iret" has actually executed. 1109 * 1110 * For example, if the "iret" encounters a #NPF when accessing the stack 1111 * it will trap back into the hypervisor. If an NMI is pending for 1112 * the vcpu it will be injected into the guest. 1113 * 1114 * XXX this needs to be fixed 1115 */ 1116 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1117 1118 /* 1119 * Set 'intr_shadow' to prevent an NMI from being injected on the 1120 * immediate VMRUN. 1121 */ 1122 error = svm_modify_intr_shadow(sc, vcpu, 1); 1123 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1124} 1125 1126static int 1127emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, 1128 bool *retu) 1129{ 1130 int error; 1131 1132 if (lapic_msr(num)) 1133 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); 1134 else if (num == MSR_EFER)
|
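The NMI machinery above is easiest to follow as a lifecycle. A condensed summary (not part of the source) of how these helpers cooperate with svm_inj_interrupts() and the VMCB_EXIT_IRET case of svm_vmexit() further down:

	/* 1. An NMI is pending and nothing blocks it: */
	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false);
	enable_nmi_blocking(sc, vcpu);	/* start intercepting IRET */

	/* 2. The guest runs its NMI handler and executes "iret",
	 *    causing #VMEXIT(IRET). */

	/* 3. The exit handler re-arms NMI delivery: */
	clear_nmi_blocking(sc, vcpu);	/* drop the intercept, set intr_shadow */

	/* 4. The vcpu restarts at the "iret"; the interrupt shadow keeps a
	 *    newly pending NMI out until that instruction has retired. */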
1147 error = svm_write_efer(sc, vcpu, val);
| 1135 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val);
|
1148 else 1149 error = svm_wrmsr(sc, vcpu, num, val, retu); 1150 1151 return (error); 1152} 1153 1154static int 1155emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1156{ 1157 struct vmcb_state *state; 1158 struct svm_regctx *ctx; 1159 uint64_t result; 1160 int error; 1161 1162 if (lapic_msr(num)) 1163 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1164 else 1165 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1166 1167 if (error == 0) { 1168 state = svm_get_vmcb_state(sc, vcpu); 1169 ctx = svm_get_guest_regctx(sc, vcpu); 1170 state->rax = result & 0xffffffff; 1171 ctx->e.g.sctx_rdx = result >> 32; 1172 } 1173 1174 return (error); 1175} 1176 1177#ifdef KTR 1178static const char * 1179exit_reason_to_str(uint64_t reason) 1180{ 1181 static char reasonbuf[32]; 1182 1183 switch (reason) { 1184 case VMCB_EXIT_INVALID: 1185 return ("invalvmcb"); 1186 case VMCB_EXIT_SHUTDOWN: 1187 return ("shutdown"); 1188 case VMCB_EXIT_NPF: 1189 return ("nptfault"); 1190 case VMCB_EXIT_PAUSE: 1191 return ("pause"); 1192 case VMCB_EXIT_HLT: 1193 return ("hlt"); 1194 case VMCB_EXIT_CPUID: 1195 return ("cpuid"); 1196 case VMCB_EXIT_IO: 1197 return ("inout"); 1198 case VMCB_EXIT_MC: 1199 return ("mchk"); 1200 case VMCB_EXIT_INTR: 1201 return ("extintr"); 1202 case VMCB_EXIT_NMI: 1203 return ("nmi"); 1204 case VMCB_EXIT_VINTR: 1205 return ("vintr"); 1206 case VMCB_EXIT_MSR: 1207 return ("msr"); 1208 case VMCB_EXIT_IRET: 1209 return ("iret"); 1210 default: 1211 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1212 return (reasonbuf); 1213 } 1214} 1215#endif /* KTR */ 1216 1217/* 1218 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1219 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1220 * and exceptions caused by INT3, INTO and BOUND instructions. 1221 * 1222 * Return 1 if the nRIP is valid and 0 otherwise. 1223 */ 1224static int 1225nrip_valid(uint64_t exitcode) 1226{ 1227 switch (exitcode) { 1228 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1229 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1230 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1231 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1232 case 0x43: /* INT3 */ 1233 case 0x44: /* INTO */ 1234 case 0x45: /* BOUND */ 1235 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1236 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1237 return (1); 1238 default: 1239 return (0); 1240 } 1241} 1242 1243/* 1244 * Collateral for a generic SVM VM-exit. 1245 */ 1246static void 1247vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 1248{ 1249 1250 vme->exitcode = VM_EXITCODE_SVM; 1251 vme->u.svm.exitcode = code; 1252 vme->u.svm.exitinfo1 = info1; 1253 vme->u.svm.exitinfo2 = info2; 1254} 1255 1256static int 1257svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1258{ 1259 struct vmcb *vmcb; 1260 struct vmcb_state *state; 1261 struct vmcb_ctrl *ctrl; 1262 struct svm_regctx *ctx; 1263 uint64_t code, info1, info2, val; 1264 uint32_t eax, ecx, edx; 1265 int handled; 1266 bool retu; 1267 1268 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1269 vmcb = svm_get_vmcb(svm_sc, vcpu); 1270 state = &vmcb->state; 1271 ctrl = &vmcb->ctrl; 1272 1273 handled = 0; 1274 code = ctrl->exitcode; 1275 info1 = ctrl->exitinfo1; 1276 info2 = ctrl->exitinfo2; 1277 1278 vmexit->exitcode = VM_EXITCODE_BOGUS; 1279 vmexit->rip = state->rip; 1280 vmexit->inst_length = nrip_valid(code) ? 
ctrl->nrip - state->rip : 0; 1281 1282 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1283 1284 /* 1285 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1286 * in an inconsistent state and can trigger assertions that would 1287 * never happen otherwise. 1288 */ 1289 if (code == VMCB_EXIT_INVALID) { 1290 vm_exit_svm(vmexit, code, info1, info2); 1291 return (0); 1292 } 1293 1294 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1295 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1296 1297 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1298 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1299 vmexit->inst_length, code, info1, info2)); 1300 1301 svm_update_virqinfo(svm_sc, vcpu); 1302 svm_save_intinfo(svm_sc, vcpu); 1303 1304 switch (code) { 1305 case VMCB_EXIT_IRET: 1306 /* 1307 * Restart execution at "iret" but with the intercept cleared. 1308 */ 1309 vmexit->inst_length = 0; 1310 clear_nmi_blocking(svm_sc, vcpu); 1311 handled = 1; 1312 break; 1313 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1314 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1315 handled = 1; 1316 break; 1317 case VMCB_EXIT_INTR: /* external interrupt */ 1318 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1319 handled = 1; 1320 break; 1321 case VMCB_EXIT_NMI: /* external NMI */ 1322 handled = 1; 1323 break; 1324 case VMCB_EXIT_MC: /* machine check */ 1325 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1326 break; 1327 case VMCB_EXIT_MSR: /* MSR access. */ 1328 eax = state->rax; 1329 ecx = ctx->sctx_rcx; 1330 edx = ctx->e.g.sctx_rdx; 1331 retu = false; 1332 1333 if (info1) { 1334 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1335 val = (uint64_t)edx << 32 | eax; 1336 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1337 ecx, val); 1338 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1339 vmexit->exitcode = VM_EXITCODE_WRMSR; 1340 vmexit->u.msr.code = ecx; 1341 vmexit->u.msr.wval = val; 1342 } else if (!retu) { 1343 handled = 1; 1344 } else { 1345 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1346 ("emulate_wrmsr retu with bogus exitcode")); 1347 } 1348 } else { 1349 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1350 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1351 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1352 vmexit->exitcode = VM_EXITCODE_RDMSR; 1353 vmexit->u.msr.code = ecx; 1354 } else if (!retu) { 1355 handled = 1; 1356 } else { 1357 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1358 ("emulate_rdmsr retu with bogus exitcode")); 1359 } 1360 } 1361 break; 1362 case VMCB_EXIT_IO: 1363 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1364 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1365 break; 1366 case VMCB_EXIT_CPUID: 1367 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1368 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1369 (uint32_t *)&state->rax, 1370 (uint32_t *)&ctx->sctx_rbx, 1371 (uint32_t *)&ctx->sctx_rcx, 1372 (uint32_t *)&ctx->e.g.sctx_rdx); 1373 break; 1374 case VMCB_EXIT_HLT: 1375 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1376 vmexit->exitcode = VM_EXITCODE_HLT; 1377 vmexit->u.hlt.rflags = state->rflags; 1378 break; 1379 case VMCB_EXIT_PAUSE: 1380 vmexit->exitcode = VM_EXITCODE_PAUSE; 1381 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1382 break; 1383 case VMCB_EXIT_NPF: 1384 /* EXITINFO2 contains the faulting guest physical address */ 1385 if (info1 & VMCB_NPF_INFO1_RSV) { 1386 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1387 "reserved bits set: 
info1(%#lx) info2(%#lx)", 1388 info1, info2); 1389 } else if (vm_mem_allocated(svm_sc->vm, info2)) { 1390 vmexit->exitcode = VM_EXITCODE_PAGING; 1391 vmexit->u.paging.gpa = info2; 1392 vmexit->u.paging.fault_type = svm_npf_paging(info1); 1393 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1394 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1395 "on gpa %#lx/%#lx at rip %#lx", 1396 info2, info1, state->rip); 1397 } else if (svm_npf_emul_fault(info1)) { 1398 svm_handle_inst_emul(vmcb, info2, vmexit); 1399 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1400 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1401 "for gpa %#lx/%#lx at rip %#lx", 1402 info2, info1, state->rip); 1403 } 1404 break; 1405 default: 1406 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1407 break; 1408 } 1409 1410 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1411 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1412 vmexit->rip, vmexit->inst_length); 1413 1414 if (handled) { 1415 vmexit->rip += vmexit->inst_length; 1416 vmexit->inst_length = 0; 1417 state->rip = vmexit->rip; 1418 } else { 1419 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1420 /* 1421 * If this VM exit was not claimed by anybody then 1422 * treat it as a generic SVM exit. 1423 */ 1424 vm_exit_svm(vmexit, code, info1, info2); 1425 } else { 1426 /* 1427 * The exitcode and collateral have been populated. 1428 * The VM exit will be processed further in userland. 1429 */ 1430 } 1431 } 1432 return (handled); 1433} 1434 1435static void 1436svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1437{ 1438 uint64_t intinfo; 1439 1440 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1441 return; 1442 1443 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1444 "valid: %#lx", __func__, intinfo)); 1445 1446 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1447 VMCB_EXITINTINFO_VECTOR(intinfo), 1448 VMCB_EXITINTINFO_EC(intinfo), 1449 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1450 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1451 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1452} 1453 1454/* 1455 * Inject event to virtual cpu. 1456 */ 1457static void 1458svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1459{ 1460 struct vmcb_ctrl *ctrl; 1461 struct vmcb_state *state; 1462 uint8_t v_tpr; 1463 int vector, need_intr_window, pending_apic_vector; 1464 1465 state = svm_get_vmcb_state(sc, vcpu); 1466 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1467 1468 need_intr_window = 0; 1469 pending_apic_vector = 0; 1470 1471 /* 1472 * Inject pending events or exceptions for this vcpu. 1473 * 1474 * An event might be pending because the previous #VMEXIT happened 1475 * during event delivery (i.e. ctrl->exitintinfo). 1476 * 1477 * An event might also be pending because an exception was injected 1478 * by the hypervisor (e.g. #PF during instruction emulation). 1479 */ 1480 svm_inj_intinfo(sc, vcpu); 1481 1482 /* NMI event has priority over interrupts. */ 1483 if (vm_nmi_pending(sc->vm, vcpu)) { 1484 if (nmi_blocked(sc, vcpu)) { 1485 /* 1486 * Can't inject another NMI if the guest has not 1487 * yet executed an "iret" after the last NMI. 1488 */ 1489 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1490 "to NMI-blocking"); 1491 } else if (ctrl->intr_shadow) { 1492 /* 1493 * Can't inject an NMI if the vcpu is in an intr_shadow. 
1494 */ 1495 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1496 "interrupt shadow"); 1497 need_intr_window = 1; 1498 goto done; 1499 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1500 /* 1501 * If there is already an exception/interrupt pending 1502 * then defer the NMI until after that. 1503 */ 1504 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1505 "eventinj %#lx", ctrl->eventinj); 1506 1507 /* 1508 * Use self-IPI to trigger a VM-exit as soon as 1509 * possible after the event injection is completed. 1510 * 1511 * This works only if the external interrupt exiting 1512 * is at a lower priority than the event injection. 1513 * 1514 * Although not explicitly specified in APMv2 the 1515 * relative priorities were verified empirically. 1516 */ 1517 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1518 } else { 1519 vm_nmi_clear(sc->vm, vcpu); 1520 1521 /* Inject NMI, vector number is not used */ 1522 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1523 IDT_NMI, 0, false); 1524 1525 /* virtual NMI blocking is now in effect */ 1526 enable_nmi_blocking(sc, vcpu); 1527 1528 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1529 } 1530 } 1531 1532 if (!vm_extint_pending(sc->vm, vcpu)) { 1533 /* 1534 * APIC interrupts are delivered using the V_IRQ offload. 1535 * 1536 * The primary benefit is that the hypervisor doesn't need to 1537 * deal with the various conditions that inhibit interrupts. 1538 * It also means that TPR changes via CR8 will be handled 1539 * without any hypervisor involvement. 1540 * 1541 * Note that the APIC vector must remain pending in the vIRR 1542 * until it is confirmed that it was delivered to the guest. 1543 * This can be confirmed based on the value of V_IRQ at the 1544 * next #VMEXIT (1 = pending, 0 = delivered). 1545 * 1546 * Also note that it is possible that another higher priority 1547 * vector can become pending before this vector is delivered 1548 * to the guest. This is alright because vcpu_notify_event() 1549 * will send an IPI and force the vcpu to trap back into the 1550 * hypervisor. The higher priority vector will be injected on 1551 * the next VMRUN. 1552 */ 1553 if (vlapic_pending_intr(vlapic, &vector)) { 1554 KASSERT(vector >= 16 && vector <= 255, 1555 ("invalid vector %d from local APIC", vector)); 1556 pending_apic_vector = vector; 1557 } 1558 goto done; 1559 } 1560 1561 /* Ask the legacy pic for a vector to inject */ 1562 vatpic_pending_intr(sc->vm, &vector); 1563 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", 1564 vector)); 1565 1566 /* 1567 * If the guest has disabled interrupts or is in an interrupt shadow 1568 * then we cannot inject the pending interrupt. 1569 */ 1570 if ((state->rflags & PSL_I) == 0) { 1571 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1572 "rflags %#lx", vector, state->rflags); 1573 need_intr_window = 1; 1574 goto done; 1575 } 1576 1577 if (ctrl->intr_shadow) { 1578 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1579 "interrupt shadow", vector); 1580 need_intr_window = 1; 1581 goto done; 1582 } 1583 1584 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1585 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1586 "eventinj %#lx", vector, ctrl->eventinj); 1587 need_intr_window = 1; 1588 goto done; 1589 } 1590 1591 /* 1592 * Legacy PIC interrupts are delivered via the event injection 1593 * mechanism. 
1594 */ 1595 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1596 1597 vm_extint_clear(sc->vm, vcpu); 1598 vatpic_intr_accepted(sc->vm, vector); 1599 1600 /* 1601 * Force a VM-exit as soon as the vcpu is ready to accept another 1602 * interrupt. This is done because the PIC might have another vector 1603 * that it wants to inject. Also, if the APIC has a pending interrupt 1604 * that was preempted by the ExtInt then it allows us to inject the 1605 * APIC vector as soon as possible. 1606 */ 1607 need_intr_window = 1; 1608done: 1609 /* 1610 * The guest can modify the TPR by writing to %CR8. In guest mode 1611 * the processor reflects this write to V_TPR without hypervisor 1612 * intervention. 1613 * 1614 * The guest can also modify the TPR by writing to it via the memory 1615 * mapped APIC page. In this case, the write will be emulated by the 1616 * hypervisor. For this reason V_TPR must be updated before every 1617 * VMRUN. 1618 */ 1619 v_tpr = vlapic_get_cr8(vlapic); 1620 KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1621 if (ctrl->v_tpr != v_tpr) { 1622 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1623 ctrl->v_tpr, v_tpr); 1624 ctrl->v_tpr = v_tpr;
| 1136 else 1137 error = svm_wrmsr(sc, vcpu, num, val, retu); 1138 1139 return (error); 1140} 1141 1142static int 1143emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) 1144{ 1145 struct vmcb_state *state; 1146 struct svm_regctx *ctx; 1147 uint64_t result; 1148 int error; 1149 1150 if (lapic_msr(num)) 1151 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); 1152 else 1153 error = svm_rdmsr(sc, vcpu, num, &result, retu); 1154 1155 if (error == 0) { 1156 state = svm_get_vmcb_state(sc, vcpu); 1157 ctx = svm_get_guest_regctx(sc, vcpu); 1158 state->rax = result & 0xffffffff; 1159 ctx->e.g.sctx_rdx = result >> 32; 1160 } 1161 1162 return (error); 1163} 1164 1165#ifdef KTR 1166static const char * 1167exit_reason_to_str(uint64_t reason) 1168{ 1169 static char reasonbuf[32]; 1170 1171 switch (reason) { 1172 case VMCB_EXIT_INVALID: 1173 return ("invalvmcb"); 1174 case VMCB_EXIT_SHUTDOWN: 1175 return ("shutdown"); 1176 case VMCB_EXIT_NPF: 1177 return ("nptfault"); 1178 case VMCB_EXIT_PAUSE: 1179 return ("pause"); 1180 case VMCB_EXIT_HLT: 1181 return ("hlt"); 1182 case VMCB_EXIT_CPUID: 1183 return ("cpuid"); 1184 case VMCB_EXIT_IO: 1185 return ("inout"); 1186 case VMCB_EXIT_MC: 1187 return ("mchk"); 1188 case VMCB_EXIT_INTR: 1189 return ("extintr"); 1190 case VMCB_EXIT_NMI: 1191 return ("nmi"); 1192 case VMCB_EXIT_VINTR: 1193 return ("vintr"); 1194 case VMCB_EXIT_MSR: 1195 return ("msr"); 1196 case VMCB_EXIT_IRET: 1197 return ("iret"); 1198 default: 1199 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1200 return (reasonbuf); 1201 } 1202} 1203#endif /* KTR */ 1204 1205/* 1206 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1207 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1208 * and exceptions caused by INT3, INTO and BOUND instructions. 1209 * 1210 * Return 1 if the nRIP is valid and 0 otherwise. 1211 */ 1212static int 1213nrip_valid(uint64_t exitcode) 1214{ 1215 switch (exitcode) { 1216 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1217 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1218 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1219 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1220 case 0x43: /* INT3 */ 1221 case 0x44: /* INTO */ 1222 case 0x45: /* BOUND */ 1223 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1224 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1225 return (1); 1226 default: 1227 return (0); 1228 } 1229} 1230 1231/* 1232 * Collateral for a generic SVM VM-exit. 1233 */ 1234static void 1235vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 1236{ 1237 1238 vme->exitcode = VM_EXITCODE_SVM; 1239 vme->u.svm.exitcode = code; 1240 vme->u.svm.exitinfo1 = info1; 1241 vme->u.svm.exitinfo2 = info2; 1242} 1243 1244static int 1245svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1246{ 1247 struct vmcb *vmcb; 1248 struct vmcb_state *state; 1249 struct vmcb_ctrl *ctrl; 1250 struct svm_regctx *ctx; 1251 uint64_t code, info1, info2, val; 1252 uint32_t eax, ecx, edx; 1253 int handled; 1254 bool retu; 1255 1256 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1257 vmcb = svm_get_vmcb(svm_sc, vcpu); 1258 state = &vmcb->state; 1259 ctrl = &vmcb->ctrl; 1260 1261 handled = 0; 1262 code = ctrl->exitcode; 1263 info1 = ctrl->exitinfo1; 1264 info2 = ctrl->exitinfo2; 1265 1266 vmexit->exitcode = VM_EXITCODE_BOGUS; 1267 vmexit->rip = state->rip; 1268 vmexit->inst_length = nrip_valid(code) ? 
ctrl->nrip - state->rip : 0; 1269 1270 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1271 1272 /* 1273 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1274 * in an inconsistent state and can trigger assertions that would 1275 * never happen otherwise. 1276 */ 1277 if (code == VMCB_EXIT_INVALID) { 1278 vm_exit_svm(vmexit, code, info1, info2); 1279 return (0); 1280 } 1281 1282 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1283 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1284 1285 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1286 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1287 vmexit->inst_length, code, info1, info2)); 1288 1289 svm_update_virqinfo(svm_sc, vcpu); 1290 svm_save_intinfo(svm_sc, vcpu); 1291 1292 switch (code) { 1293 case VMCB_EXIT_IRET: 1294 /* 1295 * Restart execution at "iret" but with the intercept cleared. 1296 */ 1297 vmexit->inst_length = 0; 1298 clear_nmi_blocking(svm_sc, vcpu); 1299 handled = 1; 1300 break; 1301 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1302 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1303 handled = 1; 1304 break; 1305 case VMCB_EXIT_INTR: /* external interrupt */ 1306 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1307 handled = 1; 1308 break; 1309 case VMCB_EXIT_NMI: /* external NMI */ 1310 handled = 1; 1311 break; 1312 case VMCB_EXIT_MC: /* machine check */ 1313 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1314 break; 1315 case VMCB_EXIT_MSR: /* MSR access. */ 1316 eax = state->rax; 1317 ecx = ctx->sctx_rcx; 1318 edx = ctx->e.g.sctx_rdx; 1319 retu = false; 1320 1321 if (info1) { 1322 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1323 val = (uint64_t)edx << 32 | eax; 1324 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", 1325 ecx, val); 1326 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1327 vmexit->exitcode = VM_EXITCODE_WRMSR; 1328 vmexit->u.msr.code = ecx; 1329 vmexit->u.msr.wval = val; 1330 } else if (!retu) { 1331 handled = 1; 1332 } else { 1333 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1334 ("emulate_wrmsr retu with bogus exitcode")); 1335 } 1336 } else { 1337 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); 1338 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1339 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { 1340 vmexit->exitcode = VM_EXITCODE_RDMSR; 1341 vmexit->u.msr.code = ecx; 1342 } else if (!retu) { 1343 handled = 1; 1344 } else { 1345 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1346 ("emulate_rdmsr retu with bogus exitcode")); 1347 } 1348 } 1349 break; 1350 case VMCB_EXIT_IO: 1351 handled = svm_handle_io(svm_sc, vcpu, vmexit); 1352 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1353 break; 1354 case VMCB_EXIT_CPUID: 1355 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1356 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, 1357 (uint32_t *)&state->rax, 1358 (uint32_t *)&ctx->sctx_rbx, 1359 (uint32_t *)&ctx->sctx_rcx, 1360 (uint32_t *)&ctx->e.g.sctx_rdx); 1361 break; 1362 case VMCB_EXIT_HLT: 1363 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1364 vmexit->exitcode = VM_EXITCODE_HLT; 1365 vmexit->u.hlt.rflags = state->rflags; 1366 break; 1367 case VMCB_EXIT_PAUSE: 1368 vmexit->exitcode = VM_EXITCODE_PAUSE; 1369 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1370 break; 1371 case VMCB_EXIT_NPF: 1372 /* EXITINFO2 contains the faulting guest physical address */ 1373 if (info1 & VMCB_NPF_INFO1_RSV) { 1374 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1375 "reserved bits set: 
info1(%#lx) info2(%#lx)", 1376 info1, info2); 1377 } else if (vm_mem_allocated(svm_sc->vm, info2)) { 1378 vmexit->exitcode = VM_EXITCODE_PAGING; 1379 vmexit->u.paging.gpa = info2; 1380 vmexit->u.paging.fault_type = svm_npf_paging(info1); 1381 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1382 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1383 "on gpa %#lx/%#lx at rip %#lx", 1384 info2, info1, state->rip); 1385 } else if (svm_npf_emul_fault(info1)) { 1386 svm_handle_inst_emul(vmcb, info2, vmexit); 1387 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); 1388 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " 1389 "for gpa %#lx/%#lx at rip %#lx", 1390 info2, info1, state->rip); 1391 } 1392 break; 1393 default: 1394 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1395 break; 1396 } 1397 1398 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", 1399 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1400 vmexit->rip, vmexit->inst_length); 1401 1402 if (handled) { 1403 vmexit->rip += vmexit->inst_length; 1404 vmexit->inst_length = 0; 1405 state->rip = vmexit->rip; 1406 } else { 1407 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1408 /* 1409 * If this VM exit was not claimed by anybody then 1410 * treat it as a generic SVM exit. 1411 */ 1412 vm_exit_svm(vmexit, code, info1, info2); 1413 } else { 1414 /* 1415 * The exitcode and collateral have been populated. 1416 * The VM exit will be processed further in userland. 1417 */ 1418 } 1419 } 1420 return (handled); 1421} 1422 1423static void 1424svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) 1425{ 1426 uint64_t intinfo; 1427 1428 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) 1429 return; 1430 1431 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1432 "valid: %#lx", __func__, intinfo)); 1433 1434 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1435 VMCB_EXITINTINFO_VECTOR(intinfo), 1436 VMCB_EXITINTINFO_EC(intinfo), 1437 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1438 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1439 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); 1440} 1441 1442/* 1443 * Inject event to virtual cpu. 1444 */ 1445static void 1446svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) 1447{ 1448 struct vmcb_ctrl *ctrl; 1449 struct vmcb_state *state; 1450 uint8_t v_tpr; 1451 int vector, need_intr_window, pending_apic_vector; 1452 1453 state = svm_get_vmcb_state(sc, vcpu); 1454 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1455 1456 need_intr_window = 0; 1457 pending_apic_vector = 0; 1458 1459 /* 1460 * Inject pending events or exceptions for this vcpu. 1461 * 1462 * An event might be pending because the previous #VMEXIT happened 1463 * during event delivery (i.e. ctrl->exitintinfo). 1464 * 1465 * An event might also be pending because an exception was injected 1466 * by the hypervisor (e.g. #PF during instruction emulation). 1467 */ 1468 svm_inj_intinfo(sc, vcpu); 1469 1470 /* NMI event has priority over interrupts. */ 1471 if (vm_nmi_pending(sc->vm, vcpu)) { 1472 if (nmi_blocked(sc, vcpu)) { 1473 /* 1474 * Can't inject another NMI if the guest has not 1475 * yet executed an "iret" after the last NMI. 1476 */ 1477 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " 1478 "to NMI-blocking"); 1479 } else if (ctrl->intr_shadow) { 1480 /* 1481 * Can't inject an NMI if the vcpu is in an intr_shadow. 
1482 */ 1483 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " 1484 "interrupt shadow"); 1485 need_intr_window = 1; 1486 goto done; 1487 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1488 /* 1489 * If there is already an exception/interrupt pending 1490 * then defer the NMI until after that. 1491 */ 1492 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " 1493 "eventinj %#lx", ctrl->eventinj); 1494 1495 /* 1496 * Use self-IPI to trigger a VM-exit as soon as 1497 * possible after the event injection is completed. 1498 * 1499 * This works only if the external interrupt exiting 1500 * is at a lower priority than the event injection. 1501 * 1502 * Although not explicitly specified in APMv2 the 1503 * relative priorities were verified empirically. 1504 */ 1505 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1506 } else { 1507 vm_nmi_clear(sc->vm, vcpu); 1508 1509 /* Inject NMI, vector number is not used */ 1510 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, 1511 IDT_NMI, 0, false); 1512 1513 /* virtual NMI blocking is now in effect */ 1514 enable_nmi_blocking(sc, vcpu); 1515 1516 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); 1517 } 1518 } 1519 1520 if (!vm_extint_pending(sc->vm, vcpu)) { 1521 /* 1522 * APIC interrupts are delivered using the V_IRQ offload. 1523 * 1524 * The primary benefit is that the hypervisor doesn't need to 1525 * deal with the various conditions that inhibit interrupts. 1526 * It also means that TPR changes via CR8 will be handled 1527 * without any hypervisor involvement. 1528 * 1529 * Note that the APIC vector must remain pending in the vIRR 1530 * until it is confirmed that it was delivered to the guest. 1531 * This can be confirmed based on the value of V_IRQ at the 1532 * next #VMEXIT (1 = pending, 0 = delivered). 1533 * 1534 * Also note that it is possible that another higher priority 1535 * vector can become pending before this vector is delivered 1536 * to the guest. This is alright because vcpu_notify_event() 1537 * will send an IPI and force the vcpu to trap back into the 1538 * hypervisor. The higher priority vector will be injected on 1539 * the next VMRUN. 1540 */ 1541 if (vlapic_pending_intr(vlapic, &vector)) { 1542 KASSERT(vector >= 16 && vector <= 255, 1543 ("invalid vector %d from local APIC", vector)); 1544 pending_apic_vector = vector; 1545 } 1546 goto done; 1547 } 1548 1549 /* Ask the legacy pic for a vector to inject */ 1550 vatpic_pending_intr(sc->vm, &vector); 1551 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", 1552 vector)); 1553 1554 /* 1555 * If the guest has disabled interrupts or is in an interrupt shadow 1556 * then we cannot inject the pending interrupt. 1557 */ 1558 if ((state->rflags & PSL_I) == 0) { 1559 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1560 "rflags %#lx", vector, state->rflags); 1561 need_intr_window = 1; 1562 goto done; 1563 } 1564 1565 if (ctrl->intr_shadow) { 1566 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " 1567 "interrupt shadow", vector); 1568 need_intr_window = 1; 1569 goto done; 1570 } 1571 1572 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1573 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " 1574 "eventinj %#lx", vector, ctrl->eventinj); 1575 need_intr_window = 1; 1576 goto done; 1577 } 1578 1579 /* 1580 * Legacy PIC interrupts are delivered via the event injection 1581 * mechanism. 
1582 */ 1583 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1584 1585 vm_extint_clear(sc->vm, vcpu); 1586 vatpic_intr_accepted(sc->vm, vector); 1587 1588 /* 1589 * Force a VM-exit as soon as the vcpu is ready to accept another 1590 * interrupt. This is done because the PIC might have another vector 1591 * that it wants to inject. Also, if the APIC has a pending interrupt 1592 * that was preempted by the ExtInt then it allows us to inject the 1593 * APIC vector as soon as possible. 1594 */ 1595 need_intr_window = 1; 1596done: 1597 /* 1598 * The guest can modify the TPR by writing to %CR8. In guest mode 1599 * the processor reflects this write to V_TPR without hypervisor 1600 * intervention. 1601 * 1602 * The guest can also modify the TPR by writing to it via the memory 1603 * mapped APIC page. In this case, the write will be emulated by the 1604 * hypervisor. For this reason V_TPR must be updated before every 1605 * VMRUN. 1606 */ 1607 v_tpr = vlapic_get_cr8(vlapic); 1608 KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1609 if (ctrl->v_tpr != v_tpr) { 1610 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", 1611 ctrl->v_tpr, v_tpr); 1612 ctrl->v_tpr = v_tpr;
|
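nrip_valid() drives the inst_length computation at the top of svm_vmexit(). A worked example using the APMv2 exit code for CPUID (0x72, inside the 0x65 ... 0x7C range): if the guest executed the 2-byte CPUID instruction at rip 0x1000, the processor reports nRIP 0x1002, so:

	vmexit->inst_length = nrip_valid(0x72) ? 0x1002 - 0x1000 : 0;	/* 2 */

	/* CPUID is handled in the kernel, so svm_vmexit() then resumes the
	 * guest past the intercepted instruction: */
	vmexit->rip += vmexit->inst_length;	/* rip becomes 0x1002 */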
1625 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
| 1613 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
|
1626 } 1627 1628 if (pending_apic_vector) { 1629 /* 1630 * If an APIC vector is being injected then interrupt window 1631 * exiting is not possible on this VMRUN. 1632 */ 1633 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1634 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1635 pending_apic_vector); 1636 1637 ctrl->v_irq = 1; 1638 ctrl->v_ign_tpr = 0; 1639 ctrl->v_intr_vector = pending_apic_vector; 1640 ctrl->v_intr_prio = pending_apic_vector >> 4;
| 1614 } 1615 1616 if (pending_apic_vector) { 1617 /* 1618 * If an APIC vector is being injected then interrupt window 1619 * exiting is not possible on this VMRUN. 1620 */ 1621 KASSERT(!need_intr_window, ("intr_window exiting impossible")); 1622 VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", 1623 pending_apic_vector); 1624 1625 ctrl->v_irq = 1; 1626 ctrl->v_ign_tpr = 0; 1627 ctrl->v_intr_vector = pending_apic_vector; 1628 ctrl->v_intr_prio = pending_apic_vector >> 4;
|
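Both V_TPR and V_INTR_PRIO are 4-bit priority classes, i.e. the upper nibble of the corresponding 8-bit quantity, which is why both conversions above are a shift by 4. Two worked examples:

	/* A guest TPR of 0xa0 is exposed through %cr8 (and thus V_TPR) as: */
	v_tpr = 0xa0 >> 4;			/* 0xa, within the asserted 0..15 */

	/* APIC vector 0x51 belongs to priority class 5: */
	ctrl->v_intr_prio = 0x51 >> 4;		/* 5 */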
1641 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
| 1629 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
|
1642 } else if (need_intr_window) { 1643 /* 1644 * We use V_IRQ in conjunction with the VINTR intercept to 1645 * trap into the hypervisor as soon as a virtual interrupt 1646 * can be delivered. 1647 * 1648 * Since injected events are not subject to intercept checks 1649 * we need to ensure that the V_IRQ is not actually going to 1650 * be delivered on VM entry. The KASSERT below enforces this. 1651 */ 1652 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1653 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1654 ("Bogus intr_window_exiting: eventinj (%#lx), " 1655 "intr_shadow (%u), rflags (%#lx)", 1656 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1657 enable_intr_window_exiting(sc, vcpu); 1658 } else { 1659 disable_intr_window_exiting(sc, vcpu); 1660 } 1661} 1662 1663static __inline void 1664restore_host_tss(void) 1665{ 1666 struct system_segment_descriptor *tss_sd; 1667 1668 /* 1669 * The TSS descriptor was in use prior to launching the guest so it 1670 * has been marked busy. 1671 * 1672 * 'ltr' requires the descriptor to be marked available so change the 1673 * type to "64-bit available TSS". 1674 */ 1675 tss_sd = PCPU_GET(tss); 1676 tss_sd->sd_type = SDT_SYSTSS; 1677 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1678} 1679 1680static void 1681check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1682{ 1683 struct svm_vcpu *vcpustate; 1684 struct vmcb_ctrl *ctrl; 1685 long eptgen; 1686 bool alloc_asid; 1687 1688 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1689 "active on cpu %u", __func__, thiscpu)); 1690 1691 vcpustate = svm_get_vcpu(sc, vcpuid); 1692 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1693 1694 /* 1695 * The TLB entries associated with the vcpu's ASID are not valid 1696 * if either of the following conditions is true: 1697 * 1698 * 1. The vcpu's ASID generation is different than the host cpu's 1699 * ASID generation. This happens when the vcpu migrates to a new 1700 * host cpu. It can also happen when the number of vcpus executing 1701 * on a host cpu is greater than the number of ASIDs available. 1702 * 1703 * 2. The pmap generation number is different than the value cached in 1704 * the 'vcpustate'. This happens when the host invalidates pages 1705 * belonging to the guest. 1706 * 1707 * asidgen eptgen Action 1708 * mismatch mismatch 1709 * 0 0 (a) 1710 * 0 1 (b1) or (b2) 1711 * 1 0 (c) 1712 * 1 1 (d) 1713 * 1714 * (a) There is no mismatch in eptgen or ASID generation and therefore 1715 * no further action is needed. 1716 * 1717 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1718 * retained and the TLB entries associated with this ASID 1719 * are flushed by VMRUN. 1720 * 1721 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1722 * allocated. 1723 * 1724 * (c) A new ASID is allocated. 1725 * 1726 * (d) A new ASID is allocated. 1727 */ 1728 1729 alloc_asid = false; 1730 eptgen = pmap->pm_eptgen; 1731 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1732 1733 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1734 alloc_asid = true; /* (c) and (d) */ 1735 } else if (vcpustate->eptgen != eptgen) { 1736 if (flush_by_asid()) 1737 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1738 else 1739 alloc_asid = true; /* (b2) */ 1740 } else { 1741 /* 1742 * This is the common case (a). 
1743 */ 1744 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1745 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1746 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1747 } 1748 1749 if (alloc_asid) { 1750 if (++asid[thiscpu].num >= nasid) { 1751 asid[thiscpu].num = 1; 1752 if (++asid[thiscpu].gen == 0) 1753 asid[thiscpu].gen = 1; 1754 /* 1755 * If this cpu does not support "flush-by-asid" 1756 * then flush the entire TLB on a generation 1757 * bump. Subsequent ASID allocation in this 1758 * generation can be done without a TLB flush. 1759 */ 1760 if (!flush_by_asid()) 1761 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1762 } 1763 vcpustate->asid.gen = asid[thiscpu].gen; 1764 vcpustate->asid.num = asid[thiscpu].num; 1765 1766 ctrl->asid = vcpustate->asid.num;
| 1630 } else if (need_intr_window) { 1631 /* 1632 * We use V_IRQ in conjunction with the VINTR intercept to 1633 * trap into the hypervisor as soon as a virtual interrupt 1634 * can be delivered. 1635 * 1636 * Since injected events are not subject to intercept checks 1637 * we need to ensure that the V_IRQ is not actually going to 1638 * be delivered on VM entry. The KASSERT below enforces this. 1639 */ 1640 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1641 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1642 ("Bogus intr_window_exiting: eventinj (%#lx), " 1643 "intr_shadow (%u), rflags (%#lx)", 1644 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1645 enable_intr_window_exiting(sc, vcpu); 1646 } else { 1647 disable_intr_window_exiting(sc, vcpu); 1648 } 1649} 1650 1651static __inline void 1652restore_host_tss(void) 1653{ 1654 struct system_segment_descriptor *tss_sd; 1655 1656 /* 1657 * The TSS descriptor was in use prior to launching the guest so it 1658 * has been marked busy. 1659 * 1660 * 'ltr' requires the descriptor to be marked available so change the 1661 * type to "64-bit available TSS". 1662 */ 1663 tss_sd = PCPU_GET(tss); 1664 tss_sd->sd_type = SDT_SYSTSS; 1665 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1666} 1667 1668static void 1669check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) 1670{ 1671 struct svm_vcpu *vcpustate; 1672 struct vmcb_ctrl *ctrl; 1673 long eptgen; 1674 bool alloc_asid; 1675 1676 KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " 1677 "active on cpu %u", __func__, thiscpu)); 1678 1679 vcpustate = svm_get_vcpu(sc, vcpuid); 1680 ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1681 1682 /* 1683 * The TLB entries associated with the vcpu's ASID are not valid 1684 * if either of the following conditions is true: 1685 * 1686 * 1. The vcpu's ASID generation is different than the host cpu's 1687 * ASID generation. This happens when the vcpu migrates to a new 1688 * host cpu. It can also happen when the number of vcpus executing 1689 * on a host cpu is greater than the number of ASIDs available. 1690 * 1691 * 2. The pmap generation number is different than the value cached in 1692 * the 'vcpustate'. This happens when the host invalidates pages 1693 * belonging to the guest. 1694 * 1695 * asidgen eptgen Action 1696 * mismatch mismatch 1697 * 0 0 (a) 1698 * 0 1 (b1) or (b2) 1699 * 1 0 (c) 1700 * 1 1 (d) 1701 * 1702 * (a) There is no mismatch in eptgen or ASID generation and therefore 1703 * no further action is needed. 1704 * 1705 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1706 * retained and the TLB entries associated with this ASID 1707 * are flushed by VMRUN. 1708 * 1709 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1710 * allocated. 1711 * 1712 * (c) A new ASID is allocated. 1713 * 1714 * (d) A new ASID is allocated. 1715 */ 1716 1717 alloc_asid = false; 1718 eptgen = pmap->pm_eptgen; 1719 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1720 1721 if (vcpustate->asid.gen != asid[thiscpu].gen) { 1722 alloc_asid = true; /* (c) and (d) */ 1723 } else if (vcpustate->eptgen != eptgen) { 1724 if (flush_by_asid()) 1725 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1726 else 1727 alloc_asid = true; /* (b2) */ 1728 } else { 1729 /* 1730 * This is the common case (a). 
1731 */ 1732 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1733 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1734 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1735 } 1736 1737 if (alloc_asid) { 1738 if (++asid[thiscpu].num >= nasid) { 1739 asid[thiscpu].num = 1; 1740 if (++asid[thiscpu].gen == 0) 1741 asid[thiscpu].gen = 1; 1742 /* 1743 * If this cpu does not support "flush-by-asid" 1744 * then flush the entire TLB on a generation 1745 * bump. Subsequent ASID allocation in this 1746 * generation can be done without a TLB flush. 1747 */ 1748 if (!flush_by_asid()) 1749 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1750 } 1751 vcpustate->asid.gen = asid[thiscpu].gen; 1752 vcpustate->asid.num = asid[thiscpu].num; 1753 1754 ctrl->asid = vcpustate->asid.num;
|
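The allocation path above recycles the per-cpu ASID space by bumping a generation number. A short worked trace, assuming a hypothetical host with nasid == 4 and a cpu currently at num == 3, gen == 7:

	if (++asid[thiscpu].num >= nasid) {	/* num becomes 4 == nasid */
		asid[thiscpu].num = 1;		/* restart at ASID 1 */
		if (++asid[thiscpu].gen == 0)	/* gen becomes 8 */
			asid[thiscpu].gen = 1;	/* 0 is reserved as invalid */
		/* without flush-by-asid the whole TLB is flushed here */
	}
	/* Every vcpu still carrying asid.gen != 8 now fails the generation
	 * comparison above and is forced onto the alloc_asid path. */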
1767 vcpu_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
| 1755 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
|
1768 /* 1769 * If this cpu supports "flush-by-asid" then the TLB 1770 * was not flushed after the generation bump. The TLB 1771 * is flushed selectively after every new ASID allocation. 1772 */ 1773 if (flush_by_asid()) 1774 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1775 } 1776 vcpustate->eptgen = eptgen; 1777 1778 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1779 KASSERT(ctrl->asid == vcpustate->asid.num, 1780 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1781} 1782 1783/* 1784 * Start vcpu with specified RIP. 1785 */ 1786static int 1787svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1788 void *rend_cookie, void *suspended_cookie) 1789{ 1790 struct svm_regctx *hctx, *gctx; 1791 struct svm_softc *svm_sc; 1792 struct svm_vcpu *vcpustate; 1793 struct vmcb_state *state; 1794 struct vmcb_ctrl *ctrl; 1795 struct vm_exit *vmexit; 1796 struct vlapic *vlapic; 1797 struct vm *vm; 1798 uint64_t vmcb_pa; 1799 u_int thiscpu; 1800 int handled; 1801 1802 svm_sc = arg; 1803 vm = svm_sc->vm; 1804 1805 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1806 state = svm_get_vmcb_state(svm_sc, vcpu); 1807 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1808 vmexit = vm_exitinfo(vm, vcpu); 1809 vlapic = vm_lapic(vm, vcpu); 1810 1811 /* 1812 * Stash 'curcpu' on the stack as 'thiscpu'. 1813 * 1814 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1815 * after the #VMEXIT. Since VMRUN is executed inside a critical section, 1816 * 'curcpu' and 'thiscpu' are guaranteed to be identical. 1817 */ 1818 thiscpu = curcpu; 1819 1820 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1821 hctx = &host_ctx[thiscpu]; 1822 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1823 1824 if (vcpustate->lastcpu != thiscpu) { 1825 /* 1826 * Force new ASID allocation by invalidating the generation. 1827 */ 1828 vcpustate->asid.gen = 0; 1829 1830 /* 1831 * Invalidate the VMCB state cache by marking all fields dirty. 1832 */
| 1756 /* 1757 * If this cpu supports "flush-by-asid" then the TLB 1758 * was not flushed after the generation bump. The TLB 1759 * is flushed selectively after every new ASID allocation. 1760 */ 1761 if (flush_by_asid()) 1762 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1763 } 1764 vcpustate->eptgen = eptgen; 1765 1766 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1767 KASSERT(ctrl->asid == vcpustate->asid.num, 1768 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); 1769} 1770 1771/* 1772 * Start vcpu with specified RIP. 1773 */ 1774static int 1775svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, 1776 void *rend_cookie, void *suspended_cookie) 1777{ 1778 struct svm_regctx *hctx, *gctx; 1779 struct svm_softc *svm_sc; 1780 struct svm_vcpu *vcpustate; 1781 struct vmcb_state *state; 1782 struct vmcb_ctrl *ctrl; 1783 struct vm_exit *vmexit; 1784 struct vlapic *vlapic; 1785 struct vm *vm; 1786 uint64_t vmcb_pa; 1787 u_int thiscpu; 1788 int handled; 1789 1790 svm_sc = arg; 1791 vm = svm_sc->vm; 1792 1793 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1794 state = svm_get_vmcb_state(svm_sc, vcpu); 1795 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1796 vmexit = vm_exitinfo(vm, vcpu); 1797 vlapic = vm_lapic(vm, vcpu); 1798 1799 /* 1800 * Stash 'curcpu' on the stack as 'thiscpu'. 1801 * 1802 * The per-cpu data area is not accessible until MSR_GSBASE is restored 1803 * after the #VMEXIT. Since VMRUN is executed inside a critical section, 1804 * 'curcpu' and 'thiscpu' are guaranteed to be identical. 1805 */ 1806 thiscpu = curcpu; 1807 1808 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1809 hctx = &host_ctx[thiscpu]; 1810 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1811 1812 if (vcpustate->lastcpu != thiscpu) { 1813 /* 1814 * Force new ASID allocation by invalidating the generation. 1815 */ 1816 vcpustate->asid.gen = 0; 1817 1818 /* 1819 * Invalidate the VMCB state cache by marking all fields dirty. 1820 */
|
1833 vcpu_set_dirty(svm_sc, vcpu, 0xffffffff);
| 1821 svm_set_dirty(svm_sc, vcpu, 0xffffffff);
|
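Marking every field dirty on a cpu migration forces the next VMRUN to reload the entire control block: the clean-bits computation later in this diff then yields zero, so the processor cannot reuse any cached VMCB state. The effect, sketched with the mask seen below:

	svm_set_dirty(svm_sc, vcpu, 0xffffffff);
	/* ... later, in the VMRUN loop: */
	ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;	/* == 0 */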
1834 1835 /* 1836 * XXX 1837 * Setting 'vcpustate->lastcpu' here is a bit premature because 1838 * we may return from this function without actually executing 1839 * the VMRUN instruction. This could happen if a rendezvous 1840 * or an AST is pending on the first time through the loop. 1841 * 1842 * This works for now but any new side-effects of vcpu 1843 * migration should take this case into account. 1844 */ 1845 vcpustate->lastcpu = thiscpu; 1846 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1847 } 1848 1849 svm_msr_guest_enter(svm_sc, vcpu); 1850 1851 /* Update Guest RIP */ 1852 state->rip = rip; 1853 1854 do { 1855 /* 1856 * Disable global interrupts to guarantee atomicity during 1857 * loading of guest state. This includes not only the state 1858 * loaded by the "vmrun" instruction but also software state 1859 * maintained by the hypervisor: suspended and rendezvous 1860 * state, NPT generation number, vlapic interrupts, etc. 1861 */ 1862 disable_gintr(); 1863 1864 if (vcpu_suspended(suspended_cookie)) { 1865 enable_gintr(); 1866 vm_exit_suspended(vm, vcpu, state->rip); 1867 break; 1868 } 1869 1870 if (vcpu_rendezvous_pending(rend_cookie)) { 1871 enable_gintr(); 1872 vm_exit_rendezvous(vm, vcpu, state->rip); 1873 break; 1874 } 1875 1876 /* The scheduler has asked us to give up the cpu. */ 1877 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1878 enable_gintr(); 1879 vm_exit_astpending(vm, vcpu, state->rip); 1880 break; 1881 } 1882 1883 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1884 1885 /* Activate the nested pmap on 'thiscpu' */ 1886 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1887 1888 /* 1889 * Check the pmap generation and the ASID generation to 1890 * ensure that the vcpu does not use stale TLB mappings. 1891 */ 1892 check_asid(svm_sc, vcpu, pmap, thiscpu); 1893
| 1822 1823 /* 1824 * XXX 1825 * Setting 'vcpustate->lastcpu' here is a bit premature because 1826 * we may return from this function without actually executing 1827 * the VMRUN instruction. This could happen if a rendezvous 1828 * or an AST is pending on the first time through the loop. 1829 * 1830 * This works for now but any new side-effects of vcpu 1831 * migration should take this case into account. 1832 */ 1833 vcpustate->lastcpu = thiscpu; 1834 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1835 } 1836 1837 svm_msr_guest_enter(svm_sc, vcpu); 1838 1839 /* Update Guest RIP */ 1840 state->rip = rip; 1841 1842 do { 1843 /* 1844 * Disable global interrupts to guarantee atomicity during 1845 * loading of guest state. This includes not only the state 1846 * loaded by the "vmrun" instruction but also software state 1847 * maintained by the hypervisor: suspended and rendezvous 1848 * state, NPT generation number, vlapic interrupts, etc. 1849 */ 1850 disable_gintr(); 1851 1852 if (vcpu_suspended(suspended_cookie)) { 1853 enable_gintr(); 1854 vm_exit_suspended(vm, vcpu, state->rip); 1855 break; 1856 } 1857 1858 if (vcpu_rendezvous_pending(rend_cookie)) { 1859 enable_gintr(); 1860 vm_exit_rendezvous(vm, vcpu, state->rip); 1861 break; 1862 } 1863 1864 /* The scheduler has asked us to give up the cpu. */ 1865 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) { 1866 enable_gintr(); 1867 vm_exit_astpending(vm, vcpu, state->rip); 1868 break; 1869 } 1870 1871 svm_inj_interrupts(svm_sc, vcpu, vlapic); 1872 1873 /* Activate the nested pmap on 'thiscpu' */ 1874 CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active); 1875 1876 /* 1877 * Check the pmap generation and the ASID generation to 1878 * ensure that the vcpu does not use stale TLB mappings. 1879 */ 1880 check_asid(svm_sc, vcpu, pmap, thiscpu); 1881
|
1894 ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty;
| 1882 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
|
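This hunk replaces the VMCB_CACHE_DEFAULT constant with a 'vmcb_clean' variable. Its declaration is outside the hunk; a plausible definition (an assumption, not something this diff shows) would initialize it from the old constant so the set of cacheable fields can be tuned at load time:

	/* assumed declaration elsewhere in this file, not shown in the diff */
	static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
	SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN,
	    &vmcb_clean, 0, "VMCB clean bits the hypervisor will honor");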
1895 vcpustate->dirty = 0; 1896 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 1897 1898 /* Launch Virtual Machine. */ 1899 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 1900 svm_launch(vmcb_pa, gctx, hctx); 1901 1902 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1903 1904 /* 1905 * Restore MSR_GSBASE to point to the pcpu data area. 1906 * 1907 * Note that accesses done via PCPU_GET/PCPU_SET will work 1908 * only after MSR_GSBASE is restored. 1909 * 1910 * Also note that we don't bother restoring MSR_KGSBASE 1911 * since it is not used in the kernel and will be restored 1912 * when the VMRUN ioctl returns to userspace. 1913 */ 1914 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1915 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1916 thiscpu, curcpu)); 1917 1918 /* 1919 * The host GDTR and IDTR are saved by VMRUN and restored 1920 * automatically on #VMEXIT. However, the host TSS needs 1921 * to be restored explicitly. 1922 */ 1923 restore_host_tss(); 1924 1925 /* #VMEXIT disables interrupts so re-enable them here. */ 1926 enable_gintr(); 1927 1928 /* Handle #VMEXIT and if required return to user space. */ 1929 handled = svm_vmexit(svm_sc, vcpu, vmexit); 1930 } while (handled); 1931 1932 svm_msr_guest_exit(svm_sc, vcpu); 1933 1934 return (0); 1935} 1936 1937/* 1938 * Cleanup for virtual machine. 1939 */ 1940static void 1941svm_vmcleanup(void *arg) 1942{ 1943 struct svm_softc *svm_sc; 1944 1945 svm_sc = arg; 1946 1947 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); 1948 1949 free(svm_sc, M_SVM); 1950} 1951 1952/* 1953 * Return pointer to hypervisor saved register state. 1954 */ 1955static register_t * 1956swctx_regptr(struct svm_regctx *regctx, int reg) 1957{ 1958 1959 switch (reg) { 1960 case VM_REG_GUEST_RBX: 1961 return (&regctx->sctx_rbx); 1962 case VM_REG_GUEST_RCX: 1963 return (&regctx->sctx_rcx); 1964 case VM_REG_GUEST_RDX: 1965 return (&regctx->e.g.sctx_rdx); 1966 case VM_REG_GUEST_RDI: 1967 return (&regctx->e.g.sctx_rdi); 1968 case VM_REG_GUEST_RSI: 1969 return (&regctx->e.g.sctx_rsi); 1970 case VM_REG_GUEST_RBP: 1971 return (&regctx->sctx_rbp); 1972 case VM_REG_GUEST_R8: 1973 return (&regctx->sctx_r8); 1974 case VM_REG_GUEST_R9: 1975 return (&regctx->sctx_r9); 1976 case VM_REG_GUEST_R10: 1977 return (&regctx->sctx_r10); 1978 case VM_REG_GUEST_R11: 1979 return (&regctx->sctx_r11); 1980 case VM_REG_GUEST_R12: 1981 return (&regctx->sctx_r12); 1982 case VM_REG_GUEST_R13: 1983 return (&regctx->sctx_r13); 1984 case VM_REG_GUEST_R14: 1985 return (&regctx->sctx_r14); 1986 case VM_REG_GUEST_R15: 1987 return (&regctx->sctx_r15); 1988 default: 1989 ERR("Unknown register requested, reg=%d.\n", reg); 1990 break; 1991 } 1992 1993 return (NULL); 1994} 1995 1996/* 1997 * Interface to read guest registers. 1998 * This can be an SVM h/w saved or a hypervisor saved register. 1999 */ 2000static int 2001svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 2002{ 2003 struct svm_softc *svm_sc;
| 1883 vcpustate->dirty = 0; 1884 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 1885 1886 /* Launch Virtual Machine. */ 1887 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); 1888 svm_launch(vmcb_pa, gctx, hctx); 1889 1890 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active); 1891 1892 /* 1893 * Restore MSR_GSBASE to point to the pcpu data area. 1894 * 1895 * Note that accesses done via PCPU_GET/PCPU_SET will work 1896 * only after MSR_GSBASE is restored. 1897 * 1898 * Also note that we don't bother restoring MSR_KGSBASE 1899 * since it is not used in the kernel and will be restored 1900 * when the VMRUN ioctl returns to userspace. 1901 */ 1902 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]); 1903 KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch", 1904 thiscpu, curcpu)); 1905 1906 /* 1907 * The host GDTR and IDTR are saved by VMRUN and restored 1908 * automatically on #VMEXIT. However, the host TSS needs 1909 * to be restored explicitly. 1910 */ 1911 restore_host_tss(); 1912 1913 /* #VMEXIT disables interrupts so re-enable them here. */ 1914 enable_gintr(); 1915 1916 /* Handle #VMEXIT and if required return to user space. */ 1917 handled = svm_vmexit(svm_sc, vcpu, vmexit); 1918 } while (handled); 1919 1920 svm_msr_guest_exit(svm_sc, vcpu); 1921 1922 return (0); 1923} 1924 1925/* 1926 * Cleanup for virtual machine. 1927 */ 1928static void 1929svm_vmcleanup(void *arg) 1930{ 1931 struct svm_softc *svm_sc; 1932 1933 svm_sc = arg; 1934 1935 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n"); 1936 1937 free(svm_sc, M_SVM); 1938} 1939 1940/* 1941 * Return pointer to hypervisor saved register state. 1942 */ 1943static register_t * 1944swctx_regptr(struct svm_regctx *regctx, int reg) 1945{ 1946 1947 switch (reg) { 1948 case VM_REG_GUEST_RBX: 1949 return (&regctx->sctx_rbx); 1950 case VM_REG_GUEST_RCX: 1951 return (&regctx->sctx_rcx); 1952 case VM_REG_GUEST_RDX: 1953 return (&regctx->e.g.sctx_rdx); 1954 case VM_REG_GUEST_RDI: 1955 return (&regctx->e.g.sctx_rdi); 1956 case VM_REG_GUEST_RSI: 1957 return (&regctx->e.g.sctx_rsi); 1958 case VM_REG_GUEST_RBP: 1959 return (&regctx->sctx_rbp); 1960 case VM_REG_GUEST_R8: 1961 return (&regctx->sctx_r8); 1962 case VM_REG_GUEST_R9: 1963 return (&regctx->sctx_r9); 1964 case VM_REG_GUEST_R10: 1965 return (&regctx->sctx_r10); 1966 case VM_REG_GUEST_R11: 1967 return (&regctx->sctx_r11); 1968 case VM_REG_GUEST_R12: 1969 return (&regctx->sctx_r12); 1970 case VM_REG_GUEST_R13: 1971 return (&regctx->sctx_r13); 1972 case VM_REG_GUEST_R14: 1973 return (&regctx->sctx_r14); 1974 case VM_REG_GUEST_R15: 1975 return (&regctx->sctx_r15); 1976 default: 1977 ERR("Unknown register requested, reg=%d.\n", reg); 1978 break; 1979 } 1980 1981 return (NULL); 1982} 1983 1984/* 1985 * Interface to read guest registers. 1986 * This can be an SVM h/w saved or a hypervisor saved register. 1987 */ 1988static int 1989svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) 1990{ 1991 struct svm_softc *svm_sc;
|
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}
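
/*
 * A sketch of how the machine-independent vmm layer reaches
 * svm_getreg() (illustrative only; the authoritative dispatch lives in
 * vmm.c). The call goes through the 'vmm_ops_amd' table exported at
 * the bottom of this file, roughly:
 *
 *	uint64_t rbx;
 *	int error;
 *
 *	error = VMGETREG(vm->cookie, vcpuid, VM_REG_GUEST_RBX, &rbx);
 *
 * where VMGETREG() expands to an indirect call through
 * (*ops->vmgetreg)() with 'ops' pointing at vmm_ops_amd on AMD hosts.
 */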

/*
 * Interface to write to guest registers.
 * The register may live in hardware-saved VMCB state or in the
 * hypervisor-saved software context.
 */
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}
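
/*
 * Guest segment descriptors are not handled here: the get/set entry
 * points for them are vmcb_getdesc() and vmcb_setdesc(), implemented
 * in vmcb.c and wired into the 'vmm_ops_amd' table below.
 */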

static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}
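
/*
 * Userspace reaches the capability handlers above through the
 * VM_SET_CAPABILITY/VM_GET_CAPABILITY ioctls; with libvmmapi the call
 * might look roughly like this (hypothetical usage, for illustration):
 *
 *	if (vm_set_capability(ctx, vcpu, VM_CAP_HALT_EXIT, 1) == 0)
 *		printf("HLT exiting enabled\n");
 *
 * which ends up in svm_setcap() via the vmm_ops_amd table below.
 */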

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}
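
/*
 * Exported ops table consumed by the machine-independent vmm code.
 * Note that this is a positional initializer: the entries below must
 * stay in the same order as the function pointers declared in
 * 'struct vmm_ops' in <machine/vmm.h>.
 */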
struct vmm_ops vmm_ops_amd = {
	svm_init,
	svm_cleanup,
	svm_restore,
	svm_vminit,
	svm_vmrun,
	svm_vmcleanup,
	svm_getreg,
	svm_setreg,
	vmcb_getdesc,
	vmcb_setdesc,
	svm_getcap,
	svm_setcap,
	svm_npt_alloc,
	svm_npt_free,
	svm_vlapic_init,
	svm_vlapic_cleanup
};