x86.c revision 284900
1164640Sflz/*- 298186Sgordon * Copyright (c) 2011 NetApp, Inc. 378344Sobrien * All rights reserved. 4157473Sflz * 578344Sobrien * Redistribution and use in source and binary forms, with or without 678344Sobrien * modification, are permitted provided that the following conditions 778344Sobrien * are met: 878344Sobrien * 1. Redistributions of source code must retain the above copyright 978344Sobrien * notice, this list of conditions and the following disclaimer. 1078344Sobrien * 2. Redistributions in binary form must reproduce the above copyright 1178344Sobrien * notice, this list of conditions and the following disclaimer in the 1278344Sobrien * documentation and/or other materials provided with the distribution. 1378344Sobrien * 1478344Sobrien * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 1578344Sobrien * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1678344Sobrien * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1778344Sobrien * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 1878344Sobrien * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1978344Sobrien * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2078344Sobrien * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2178344Sobrien * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2278344Sobrien * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2378344Sobrien * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2478344Sobrien * SUCH DAMAGE. 
2578344Sobrien * 2678344Sobrien * $FreeBSD: stable/10/sys/amd64/vmm/x86.c 284900 2015-06-28 03:22:26Z neel $ 2778344Sobrien */ 2878344Sobrien 2978344Sobrien#include <sys/cdefs.h> 3078344Sobrien__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/x86.c 284900 2015-06-28 03:22:26Z neel $"); 3178344Sobrien 3278344Sobrien#include <sys/param.h> 3378344Sobrien#include <sys/pcpu.h> 3478344Sobrien#include <sys/systm.h> 3578344Sobrien#include <sys/sysctl.h> 3678344Sobrien 3778344Sobrien#include <machine/clock.h> 3878344Sobrien#include <machine/cpufunc.h> 3978344Sobrien#include <machine/md_var.h> 4078344Sobrien#include <machine/segments.h> 4178344Sobrien#include <machine/specialreg.h> 42157473Sflz 43169668Smtm#include <machine/vmm.h> 44157473Sflz 4578344Sobrien#include "vmm_host.h" 4698186Sgordon#include "vmm_ktr.h" 4798186Sgordon#include "vmm_util.h" 4898186Sgordon#include "x86.h" 49131550Scperciva 50131550ScpercivaSYSCTL_DECL(_hw_vmm); 51131550Scpercivastatic SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL); 52131550Scperciva 5398186Sgordon#define CPUID_VM_HIGH 0x40000000 5498186Sgordon 5598186Sgordonstatic const char bhyve_id[12] = "bhyve bhyve "; 56103018Sgordon 57124832Smtmstatic uint64_t bhyve_xcpuids; 58124832SmtmSYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0, 59161435Syar "Number of times an unknown cpuid leaf was accessed"); 60161435Syar 6198186Sgordon/* 62103018Sgordon * The default CPU topology is a single thread per package. 
 */
/* Tunable: logical threads per guest core (hw.vmm.topology.threads_per_core). */
static u_int threads_per_core = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
    &threads_per_core, 0, NULL);

/* Tunable: guest cores per package (hw.vmm.topology.cores_per_package). */
static u_int cores_per_package = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
    &cores_per_package, 0, NULL);

/* Tunable: expose CPUID leaf 0xB (extended topology) to the guest. */
static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

	/*
	 * powerof2(x) is 1 when x is already a power of two (or zero), so
	 * the shift doubles x only when rounding up is actually needed.
	 */
	return (fls(x << (1 - powerof2(x))) - 1);
}

/*
 * Emulate the CPUID instruction on behalf of vcpu 'vcpu_id' of 'vm'.
 *
 * On entry *eax and *ecx hold the leaf and subleaf requested by the guest;
 * on return all four register pointers hold the values to reflect back to
 * the guest.  Always returns 1; failures of the vmm accessor functions are
 * treated as fatal (panic).
 */
int
x86_emulate_cpuid(struct vm *vm, int vcpu_id,
		  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width, x2apic_id;
	unsigned int func, regs[4], logical_cpus;
	enum x2apic_state x2apic_state;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
		if (*eax > cpu_exthigh)
			*eax = cpu_exthigh;
	} else if (*eax >= 0x40000000) {
		if (*eax > CPUID_VM_HIGH)
			*eax = CPUID_VM_HIGH;
	} else if (*eax > cpu_high) {
		*eax = cpu_high;
	}

	func = *eax;

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(*eax, *ecx, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(*eax, *ecx, regs);
		if (vmm_is_amd()) {
			/*
			 * XXX this might appear silly because AMD
			 * cpus don't have threads.
			 *
			 * However this matches the logical cpus as
			 * advertised by leaf 0x1 and will work even
			 * if the 'threads_per_core' tunable is set
			 * incorrectly on an AMD host.
			 */
			logical_cpus = threads_per_core *
			    cores_per_package;
			/* ECX[7:0] is "number of physical cores - 1". */
			regs[2] = logical_cpus - 1;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(*eax, *ecx, regs);

		/*
		 * Hide SVM and Topology Extension features from guest.
		 */
		regs[2] &= ~(AMDID2_SVM | AMDID2_TOPOLOGY);

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/*
		 * Hide rdtscp/ia32_tsc_aux until we know how
		 * to deal with them.
		 */
		regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities. These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/*
		 * "Invariant TSC" can be advertised to the guest if:
		 * - host TSC frequency is invariant
		 * - host TSCs are synchronized across physical cpus
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus. But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
		if (tsc_is_invariant && smp_tsc)
			regs[3] |= AMDPM_TSC_INVARIANT;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			      "fetching x2apic state", error);
		}

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TME or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set
		 * CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vm, vcpu_id,
			    VM_REG_GUEST_CR4, &cr4);
			if (error)
				panic("x86_emulate_cpuid: error %d "
				      "fetching %%cr4", error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		/* EBX[23:16] holds the logical processor count. */
		logical_cpus = threads_per_core * cores_per_package;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;

	case CPUID_0000_0004:
		cpuid_count(*eax, *ecx, regs);

		/* Rewrite sharing info only for a valid (non-null) cache level. */
		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			regs[0] &= 0x3ff;
			regs[0] |= (cores_per_package - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads_per_core;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores_per_package;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (*ecx == 0) {
			cpuid_count(*eax, *ecx, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= (CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD);
			regs[2] = 0;
			regs[3] = 0;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vm, vcpu_id,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		/* Advertise only the "always running APIC timer" feature. */
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Processor topology enumeration
		 */
		if (*ecx == 0) {
			/* Subleaf 0: SMT level. */
			logical_cpus = threads_per_core;
			width = log2(logical_cpus);
			level = CPUID_TYPE_SMT;
			x2apic_id = vcpu_id;
		}

		if (*ecx == 1) {
			/* Subleaf 1: core level. */
			logical_cpus = threads_per_core *
			    cores_per_package;
			width = log2(logical_cpus);
			level = CPUID_TYPE_CORE;
			x2apic_id = vcpu_id;
		}

		/* Leaf disabled by tunable, or subleaf out of range: all zero. */
		if (!cpuid_leaf_b || *ecx >= 2) {
			width = 0;
			logical_cpus = 0;
			level = 0;
			x2apic_id = 0;
		}

		regs[0] = width & 0x1f;
		regs[1] = logical_cpus & 0xffff;
		regs[2] = (level << 8) | (*ecx & 0xff);
		regs[3] = x2apic_id;
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(*eax, *ecx, regs);
		switch (*ecx) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << *ecx))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case 0x40000000:
		/* Hypervisor leaf: report the top VM leaf and the bhyve signature. */
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;

	default:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through, keeping count of
		 * how many unhandled leaf values have been seen.
		 */
		atomic_add_long(&bhyve_xcpuids, 1);
		cpuid_count(*eax, *ecx, regs);
		break;
	}

	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];

	return (1);
}

/*
 * Report whether the host CPU provides capability 'cap' so callers can
 * decide whether it may be passed through to the guest.  Panics on an
 * unknown capability value.
 */
bool
vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
{
	bool rv;

	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
	    __func__, cap));

	/*
	 * Simply passthrough the capabilities of the host cpu for now.
	 */
	rv = false;
	switch (cap) {
	case VCC_NO_EXECUTE:
		if (amd_feature & AMDID_NX)
			rv = true;
		break;
	case VCC_FFXSR:
		if (amd_feature & AMDID_FFXSR)
			rv = true;
		break;
	case VCC_TCE:
		if (amd_feature2 & AMDID2_TCE)
			rv = true;
		break;
	default:
		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
	}
	return (rv);
}