1221828Sgrehan/*- 2221828Sgrehan * Copyright (c) 2011 NetApp, Inc. 3221828Sgrehan * All rights reserved. 4221828Sgrehan * 5221828Sgrehan * Redistribution and use in source and binary forms, with or without 6221828Sgrehan * modification, are permitted provided that the following conditions 7221828Sgrehan * are met: 8221828Sgrehan * 1. Redistributions of source code must retain the above copyright 9221828Sgrehan * notice, this list of conditions and the following disclaimer. 10221828Sgrehan * 2. Redistributions in binary form must reproduce the above copyright 11221828Sgrehan * notice, this list of conditions and the following disclaimer in the 12221828Sgrehan * documentation and/or other materials provided with the distribution. 13221828Sgrehan * 14221828Sgrehan * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15221828Sgrehan * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16221828Sgrehan * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17221828Sgrehan * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18221828Sgrehan * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19221828Sgrehan * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20221828Sgrehan * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21221828Sgrehan * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22221828Sgrehan * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23221828Sgrehan * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24221828Sgrehan * SUCH DAMAGE. 
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/x86.c 315928 2017-03-25 05:09:03Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/x86.c 315928 2017-03-25 05:09:03Z grehan $");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL);

/*
 * Highest hypervisor CPUID leaf advertised to the guest.  Guest requests
 * in the hypervisor range (0x40000000 and up) are clamped to this value.
 */
#define	CPUID_VM_HIGH		0x40000000

/*
 * Hypervisor vendor signature; copied 4 bytes at a time into ebx/ecx/edx
 * when the guest queries leaf 0x40000000.
 */
static const char bhyve_id[12] = "bhyve bhyve ";

/* Count of CPUID leaves that fell through to the default passthrough case. */
static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

/*
 * The default CPU topology is a single thread per package.
 */
static u_int threads_per_core = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
    &threads_per_core, 0, NULL);

static u_int cores_per_package = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
    &cores_per_package, 0, NULL);

/* Non-zero to emulate CPUID leaf 0xb (extended topology enumeration). */
static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}

/*
 * Emulate the CPUID instruction on behalf of guest vcpu 'vcpu_id'.
 *
 * On entry *eax/*ecx hold the leaf/sub-leaf requested by the guest; on
 * return *eax/*ebx/*ecx/*edx hold the emulated register values.  Most
 * leaves start from the host's CPUID output and then mask off features
 * that bhyve does not (yet) support in a guest.  Always returns 1.
 */
int
x86_emulate_cpuid(struct vm *vm, int vcpu_id,
    uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width, x2apic_id;
	unsigned int func, regs[4], logical_cpus;
	enum x2apic_state x2apic_state;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
		if (*eax > cpu_exthigh)
			*eax = cpu_exthigh;
	} else if (*eax >= 0x40000000) {
		if (*eax > CPUID_VM_HIGH)
			*eax = CPUID_VM_HIGH;
	} else if (*eax > cpu_high) {
		*eax = cpu_high;
	}

	func = *eax;

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(*eax, *ecx, regs);
		break;
	case CPUID_8000_0008:
		/* ecx[7:0] of this leaf is "number of physical cores - 1". */
		cpuid_count(*eax, *ecx, regs);
		if (vmm_is_amd()) {
			/*
			 * XXX this might appear silly because AMD
			 * cpus don't have threads.
			 *
			 * However this matches the logical cpus as
			 * advertised by leaf 0x1 and will work even
			 * if the 'threads_per_core' tunable is set
			 * incorrectly on an AMD host.
			 */
			logical_cpus = threads_per_core *
			    cores_per_package;
			regs[2] = logical_cpus - 1;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(*eax, *ecx, regs);

		/*
		 * Hide SVM and Topology Extension features from guest.
		 */
		regs[2] &= ~(AMDID2_SVM | AMDID2_TOPOLOGY);

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

		/*
		 * Hide rdtscp/ia32_tsc_aux until we know how
		 * to deal with them.
		 */
		regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities. These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/*
		 * "Invariant TSC" can be advertised to the guest if:
		 * - host TSC frequency is invariant
		 * - host TSCs are synchronized across physical cpus
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus. But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
		if (tsc_is_invariant && smp_tsc)
			regs[3] |= AMDPM_TSC_INVARIANT;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			      "fetching x2apic state", error);
		}

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep (EST), TM2 or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set
		 * CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vm, vcpu_id,
			    VM_REG_GUEST_CR4, &cr4);
			if (error)
				panic("x86_emulate_cpuid: error %d "
				      "fetching %%cr4", error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		/* ebx[23:16] is the number of logical cpus per package. */
		logical_cpus = threads_per_core * cores_per_package;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;

	case CPUID_0000_0004:
		cpuid_count(*eax, *ecx, regs);

		/* A sub-leaf of all zeroes means "no more caches". */
		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			/* eax[31:26] is "cores per package - 1". */
			regs[0] &= 0x3ff;
			regs[0] |= (cores_per_package - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads_per_core;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores_per_package;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (*ecx == 0) {
			cpuid_count(*eax, *ecx, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= (CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD);
			regs[2] = 0;
			regs[3] = 0;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vm, vcpu_id,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		/* Advertise only the "always running APIC timer" feature. */
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Processor topology enumeration
		 */
		if (*ecx == 0) {
			/* Sub-leaf 0: SMT (thread) level. */
			logical_cpus = threads_per_core;
			width = log2(logical_cpus);
			level = CPUID_TYPE_SMT;
			x2apic_id = vcpu_id;
		}

		if (*ecx == 1) {
			/* Sub-leaf 1: core level. */
			logical_cpus = threads_per_core *
			    cores_per_package;
			width = log2(logical_cpus);
			level = CPUID_TYPE_CORE;
			x2apic_id = vcpu_id;
		}

		/* Leaf disabled by tunable, or sub-leaf out of range. */
		if (!cpuid_leaf_b || *ecx >= 2) {
			width = 0;
			logical_cpus = 0;
			level = 0;
			x2apic_id = 0;
		}

		regs[0] = width & 0x1f;
		regs[1] = logical_cpus & 0xffff;
		regs[2] = (level << 8) | (*ecx & 0xff);
		regs[3] = x2apic_id;
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(*eax, *ecx, regs);
		switch (*ecx) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << *ecx))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case 0x40000000:
		/* Hypervisor identification leaf: max leaf + vendor id. */
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;

	default:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through, keeping count of
		 * how many unhandled leaf values have been seen.
		 */
		atomic_add_long(&bhyve_xcpuids, 1);
		cpuid_count(*eax, *ecx, regs);
		break;
	}

	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];

	return (1);
}

/*
 * Report whether the host cpu supports the capability 'cap' on behalf
 * of a guest vcpu.
 */
bool
vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
{
	bool rv;

	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
	    __func__, cap));

	/*
	 * Simply passthrough the capabilities of the host cpu for now.
	 */
	rv = false;
	switch (cap) {
	case VCC_NO_EXECUTE:
		/* No-execute page protection (NX bit). */
		if (amd_feature & AMDID_NX)
			rv = true;
		break;
	case VCC_FFXSR:
		/* Fast FXSAVE/FXRSTOR. */
		if (amd_feature & AMDID_FFXSR)
			rv = true;
		break;
	case VCC_TCE:
		/* Translation cache extension. */
		if (amd_feature2 & AMDID2_TCE)
			rv = true;
		break;
	default:
		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
	}
	return (rv);
}