/* x86.c — FreeBSD stable/10 sys/amd64/vmm/x86.c, revision 315928 */
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/x86.c 315928 2017-03-25 05:09:03Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/x86.c 315928 2017-03-25 05:09:03Z grehan $");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL);

#define	CPUID_VM_HIGH		0x40000000

/*
 * 12-byte hypervisor signature, returned to the guest in
 * %ebx/%ecx/%edx of CPUID leaf 0x40000000 (see the case below).
 */
static const char bhyve_id[12] = "bhyve bhyve ";

/* Incremented each time an unknown leaf falls through to the default case. */
static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

/*
 * The default CPU topology is a single thread per package.
 *
 * These are boot-time tunables (CTLFLAG_RDTUN): settable from the loader,
 * read-only once the system is up.
 */
static u_int threads_per_core = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
    &threads_per_core, 0, NULL);

static u_int cores_per_package = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
    &cores_per_package, 0, NULL);

/* Non-zero to emulate the extended topology leaf 0xb for the guest. */
static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}

/*
 * Emulate the CPUID instruction on behalf of guest vcpu 'vcpu_id'.
 *
 * On entry *eax and *ecx hold the leaf and sub-leaf requested by the
 * guest; on return *eax/*ebx/*ecx/*edx hold the register values to
 * reflect back to the guest.  Always returns 1.
 */
int
x86_emulate_cpuid(struct vm *vm, int vcpu_id,
		  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width, x2apic_id;
	unsigned int func, regs[4], logical_cpus;
	enum x2apic_state x2apic_state;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
		if (*eax > cpu_exthigh)
			*eax = cpu_exthigh;
	} else if (*eax >= 0x40000000) {
		if (*eax > CPUID_VM_HIGH)
			*eax = CPUID_VM_HIGH;
	} else if (*eax > cpu_high) {
		*eax = cpu_high;
	}

	func = *eax;

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
		case CPUID_0000_0000:
		case CPUID_0000_0002:
		case CPUID_0000_0003:
		case CPUID_8000_0000:
		case CPUID_8000_0002:
		case CPUID_8000_0003:
		case CPUID_8000_0004:
		case CPUID_8000_0006:
			cpuid_count(*eax, *ecx, regs);
			break;
		case CPUID_8000_0008:
			cpuid_count(*eax, *ecx, regs);
			if (vmm_is_amd()) {
				/*
				 * XXX this might appear silly because AMD
				 * cpus don't have threads.
				 *
				 * However this matches the logical cpus as
				 * advertised by leaf 0x1 and will work even
				 * if the 'threads_per_core' tunable is set
				 * incorrectly on an AMD host.
				 */
				logical_cpus = threads_per_core *
				    cores_per_package;
				regs[2] = logical_cpus - 1;
			}
			break;

		case CPUID_8000_0001:
			cpuid_count(*eax, *ecx, regs);

			/*
			 * Hide SVM and Topology Extension features from guest.
			 */
			regs[2] &= ~(AMDID2_SVM | AMDID2_TOPOLOGY);

			/*
			 * Don't advertise extended performance counter MSRs
			 * to the guest.
			 */
			regs[2] &= ~AMDID2_PCXC;
			regs[2] &= ~AMDID2_PNXC;
			regs[2] &= ~AMDID2_PTSCEL2I;

			/*
			 * Don't advertise Instruction Based Sampling feature.
			 */
			regs[2] &= ~AMDID2_IBS;

			/* NodeID MSR not available */
			regs[2] &= ~AMDID2_NODE_ID;

			/* Don't advertise the OS visible workaround feature */
			regs[2] &= ~AMDID2_OSVW;

			/* Hide mwaitx/monitorx capability from the guest */
			regs[2] &= ~AMDID2_MWAITX;

			/*
			 * Hide rdtscp/ia32_tsc_aux until we know how
			 * to deal with them.
			 */
			regs[3] &= ~AMDID_RDTSCP;
			break;

		case CPUID_8000_0007:
			/*
			 * AMD uses this leaf to advertise the processor's
			 * power monitoring and RAS capabilities. These
			 * features are hardware-specific and exposing
			 * them to a guest doesn't make a lot of sense.
			 *
			 * Intel uses this leaf only to advertise the
			 * "Invariant TSC" feature with all other bits
			 * being reserved (set to zero).
			 */
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;

			/*
			 * "Invariant TSC" can be advertised to the guest if:
			 * - host TSC frequency is invariant
			 * - host TSCs are synchronized across physical cpus
			 *
			 * XXX This still falls short because the vcpu
			 * can observe the TSC moving backwards as it
			 * migrates across physical cpus. But at least
			 * it should discourage the guest from using the
			 * TSC to keep track of time.
			 */
			if (tsc_is_invariant && smp_tsc)
				regs[3] |= AMDPM_TSC_INVARIANT;
			break;

		case CPUID_0000_0001:
			do_cpuid(1, regs);

			error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
			if (error) {
				panic("x86_emulate_cpuid: error %d "
				      "fetching x2apic state", error);
			}

			/*
			 * Override the APIC ID only in ebx
			 */
			regs[1] &= ~(CPUID_LOCAL_APIC_ID);
			regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

			/*
			 * Don't expose VMX, SpeedStep, TME or SMX capability.
			 * Advertise x2APIC capability and Hypervisor guest.
			 */
			regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
			regs[2] &= ~(CPUID2_SMX);

			regs[2] |= CPUID2_HV;

			if (x2apic_state != X2APIC_DISABLED)
				regs[2] |= CPUID2_X2APIC;
			else
				regs[2] &= ~CPUID2_X2APIC;

			/*
			 * Only advertise CPUID2_XSAVE in the guest if
			 * the host is using XSAVE.
			 */
			if (!(regs[2] & CPUID2_OSXSAVE))
				regs[2] &= ~CPUID2_XSAVE;

			/*
			 * If CPUID2_XSAVE is being advertised and the
			 * guest has set CR4_XSAVE, set
			 * CPUID2_OSXSAVE.
			 */
			regs[2] &= ~CPUID2_OSXSAVE;
			if (regs[2] & CPUID2_XSAVE) {
				error = vm_get_register(vm, vcpu_id,
				    VM_REG_GUEST_CR4, &cr4);
				if (error)
					panic("x86_emulate_cpuid: error %d "
					      "fetching %%cr4", error);
				if (cr4 & CR4_XSAVE)
					regs[2] |= CPUID2_OSXSAVE;
			}

			/*
			 * Hide monitor/mwait until we know how to deal with
			 * these instructions.
			 */
			regs[2] &= ~CPUID2_MON;

			/*
			 * Hide the performance and debug features.
			 */
			regs[2] &= ~CPUID2_PDCM;

			/*
			 * No TSC deadline support in the APIC yet
			 */
			regs[2] &= ~CPUID2_TSCDLT;

			/*
			 * Hide thermal monitoring
			 */
			regs[3] &= ~(CPUID_ACPI | CPUID_TM);

			/*
			 * Hide the debug store capability.
			 */
			regs[3] &= ~CPUID_DS;

			/*
			 * Advertise the Machine Check and MTRR capability.
			 *
			 * Some guest OSes (e.g. Windows) will not boot if
			 * these features are absent.
			 */
			regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

			/* Advertise the configured flat topology in ebx[23:16]. */
			logical_cpus = threads_per_core * cores_per_package;
			regs[1] &= ~CPUID_HTT_CORES;
			regs[1] |= (logical_cpus & 0xff) << 16;
			regs[3] |= CPUID_HTT;
			break;

		case CPUID_0000_0004:
			cpuid_count(*eax, *ecx, regs);

			/* All-zero regs means the cache sub-leaf is invalid. */
			if (regs[0] || regs[1] || regs[2] || regs[3]) {
				regs[0] &= 0x3ff;
				regs[0] |= (cores_per_package - 1) << 26;
				/*
				 * Cache topology:
				 * - L1 and L2 are shared only by the logical
				 *   processors in a single core.
				 * - L3 and above are shared by all logical
				 *   processors in the package.
				 */
				logical_cpus = threads_per_core;
				level = (regs[0] >> 5) & 0x7;
				if (level >= 3)
					logical_cpus *= cores_per_package;
				regs[0] |= (logical_cpus - 1) << 14;
			}
			break;

		case CPUID_0000_0007:
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;

			/* leaf 0 */
			if (*ecx == 0) {
				cpuid_count(*eax, *ecx, regs);

				/* Only leaf 0 is supported */
				regs[0] = 0;

				/*
				 * Expose known-safe features.
				 */
				regs[1] &= (CPUID_STDEXT_FSGSBASE |
				    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
				    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
				    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
				    CPUID_STDEXT_AVX512F |
				    CPUID_STDEXT_AVX512PF |
				    CPUID_STDEXT_AVX512ER |
				    CPUID_STDEXT_AVX512CD);
				regs[2] = 0;
				regs[3] = 0;

				/* Advertise INVPCID if it is enabled. */
				error = vm_get_capability(vm, vcpu_id,
				    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
				if (error == 0 && enable_invpcid)
					regs[1] |= CPUID_STDEXT_INVPCID;
			}
			break;

		case CPUID_0000_0006:
			/* Advertise only the "always running APIC timer". */
			regs[0] = CPUTPM1_ARAT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;

		case CPUID_0000_000A:
			/*
			 * Handle the access, but report 0 for
			 * all options
			 */
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;

		case CPUID_0000_000B:
			/*
			 * Processor topology enumeration
			 */
			if (*ecx == 0) {
				logical_cpus = threads_per_core;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (*ecx == 1) {
				logical_cpus = threads_per_core *
				    cores_per_package;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			/* Invalid sub-leaf, or leaf 0xb emulation disabled. */
			if (!cpuid_leaf_b || *ecx >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (*ecx & 0xff);
			regs[3] = x2apic_id;
			break;

		case CPUID_0000_000D:
			limits = vmm_get_xsave_limits();
			if (!limits->xsave_enabled) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
				break;
			}

			cpuid_count(*eax, *ecx, regs);
			switch (*ecx) {
			case 0:
				/*
				 * Only permit the guest to use bits
				 * that are active in the host in
				 * %xcr0.  Also, claim that the
				 * maximum save area size is
				 * equivalent to the host's current
				 * save area size.  Since this runs
				 * "inside" of vmrun(), it runs with
				 * the guest's xcr0, so the current
				 * save area size is correct as-is.
				 */
				regs[0] &= limits->xcr0_allowed;
				regs[2] = limits->xsave_max_size;
				regs[3] &= (limits->xcr0_allowed >> 32);
				break;
			case 1:
				/* Only permit XSAVEOPT. */
				regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
				break;
			default:
				/*
				 * If the leaf is for a permitted feature,
				 * pass through as-is, otherwise return
				 * all zeroes.
				 *
				 * NOTE(review): the shift amount *ecx is not
				 * bounded here; a sub-leaf >= 64 would make
				 * '1ul << *ecx' undefined behavior.  Presumably
				 * real guests never request such sub-leaves —
				 * confirm before relying on this.
				 */
				if (!(limits->xcr0_allowed & (1ul << *ecx))) {
					regs[0] = 0;
					regs[1] = 0;
					regs[2] = 0;
					regs[3] = 0;
				}
				break;
			}
			break;

		case 0x40000000:
			/* Hypervisor identification leaf: max leaf + signature. */
			regs[0] = CPUID_VM_HIGH;
			bcopy(bhyve_id, &regs[1], 4);
			bcopy(bhyve_id + 4, &regs[2], 4);
			bcopy(bhyve_id + 8, &regs[3], 4);
			break;

		default:
			/*
			 * The leaf value has already been clamped so
			 * simply pass this through, keeping count of
			 * how many unhandled leaf values have been seen.
			 */
			atomic_add_long(&bhyve_xcpuids, 1);
			cpuid_count(*eax, *ecx, regs);
			break;
	}

	/* Reflect the emulated register values back to the caller. */
	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];

	return (1);
}

/*
 * Report whether the host cpu provides capability 'cap' (NX, FFXSR or TCE)
 * so that it may be exposed to a guest.  Panics on an unknown capability.
 */
bool
vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
{
	bool rv;

	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
	    __func__, cap));

	/*
	 * Simply passthrough the capabilities of the host cpu for now.
	 */
	rv = false;
	switch (cap) {
	case VCC_NO_EXECUTE:
		if (amd_feature & AMDID_NX)
			rv = true;
		break;
	case VCC_FFXSR:
		if (amd_feature & AMDID_FFXSR)
			rv = true;
		break;
	case VCC_TCE:
		if (amd_feature2 & AMDID2_TCE)
			rv = true;
		break;
	default:
		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
	}
	return (rv);
}