/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.3/sys/amd64/vmm/x86.c 284900 2015-06-28 03:22:26Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/amd64/vmm/x86.c 284900 2015-06-28 03:22:26Z neel $");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

50276349SneelSYSCTL_DECL(_hw_vmm);
51276349Sneelstatic SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL);
52276349Sneel
53222610Sjhb#define	CPUID_VM_HIGH		0x40000000
54222610Sjhb
55252335Sgrehanstatic const char bhyve_id[12] = "bhyve bhyve ";
56222610Sjhb
57252335Sgrehanstatic uint64_t bhyve_xcpuids;
58276403SneelSYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
59276403Sneel    "Number of times an unknown cpuid leaf was accessed");
60252335Sgrehan
61276349Sneel/*
62276349Sneel * The default CPU topology is a single thread per package.
63276349Sneel */
64276349Sneelstatic u_int threads_per_core = 1;
65276349SneelSYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
66276349Sneel    &threads_per_core, 0, NULL);
67276349Sneel
68276349Sneelstatic u_int cores_per_package = 1;
69276349SneelSYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
70276349Sneel    &cores_per_package, 0, NULL);
71276349Sneel
72276349Sneelstatic int cpuid_leaf_b = 1;
73276349SneelSYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
74276349Sneel    &cpuid_leaf_b, 0, NULL);
75276349Sneel
76276349Sneel/*
77276349Sneel * Round up to the next power of two, if necessary, and then take log2.
78276349Sneel * Returns -1 if argument is zero.
79276349Sneel */
80276349Sneelstatic __inline int
81276349Sneellog2(u_int x)
82276349Sneel{
83276349Sneel
84276349Sneel	return (fls(x << (1 - powerof2(x))) - 1);
85276349Sneel}
86276349Sneel
/*
 * Emulate the CPUID instruction on behalf of guest vcpu 'vcpu_id'.
 *
 * On entry *eax and *ecx hold the leaf and sub-leaf requested by the
 * guest; on return all four register pointers hold the values to be
 * reflected back into the guest's registers.  Out-of-range leaves are
 * first clamped to the highest supported leaf in their range (basic,
 * hypervisor, or extended).  Always returns 1: unknown leaves are
 * passed through from the host (and counted in 'bhyve_xcpuids') rather
 * than failing.
 */
int
x86_emulate_cpuid(struct vm *vm, int vcpu_id,
		  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width, x2apic_id;
	unsigned int func, regs[4], logical_cpus;
	enum x2apic_state x2apic_state;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
		if (*eax > cpu_exthigh)
			*eax = cpu_exthigh;
	} else if (*eax >= 0x40000000) {
		if (*eax > CPUID_VM_HIGH)
			*eax = CPUID_VM_HIGH;
	} else if (*eax > cpu_high) {
		*eax = cpu_high;
	}

	func = *eax;

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
		case CPUID_0000_0000:
		case CPUID_0000_0002:
		case CPUID_0000_0003:
		case CPUID_8000_0000:
		case CPUID_8000_0002:
		case CPUID_8000_0003:
		case CPUID_8000_0004:
		case CPUID_8000_0006:
			cpuid_count(*eax, *ecx, regs);
			break;
		case CPUID_8000_0008:
			/* Pass through, but fix up the core count on AMD. */
			cpuid_count(*eax, *ecx, regs);
			if (vmm_is_amd()) {
				/*
				 * XXX this might appear silly because AMD
				 * cpus don't have threads.
				 *
				 * However this matches the logical cpus as
				 * advertised by leaf 0x1 and will work even
				 * if the 'threads_per_core' tunable is set
				 * incorrectly on an AMD host.
				 */
				logical_cpus = threads_per_core *
				    cores_per_package;
				regs[2] = logical_cpus - 1;
			}
			break;

		case CPUID_8000_0001:
			cpuid_count(*eax, *ecx, regs);

			/*
			 * Hide SVM and Topology Extension features from guest.
			 */
			regs[2] &= ~(AMDID2_SVM | AMDID2_TOPOLOGY);

			/*
			 * Don't advertise extended performance counter MSRs
			 * to the guest.
			 */
			regs[2] &= ~AMDID2_PCXC;
			regs[2] &= ~AMDID2_PNXC;
			regs[2] &= ~AMDID2_PTSCEL2I;

			/*
			 * Don't advertise Instruction Based Sampling feature.
			 */
			regs[2] &= ~AMDID2_IBS;

			/* NodeID MSR not available */
			regs[2] &= ~AMDID2_NODE_ID;

			/* Don't advertise the OS visible workaround feature */
			regs[2] &= ~AMDID2_OSVW;

			/*
			 * Hide rdtscp/ia32_tsc_aux until we know how
			 * to deal with them.
			 */
			regs[3] &= ~AMDID_RDTSCP;
			break;

		case CPUID_8000_0007:
			/*
			 * AMD uses this leaf to advertise the processor's
			 * power monitoring and RAS capabilities. These
			 * features are hardware-specific and exposing
			 * them to a guest doesn't make a lot of sense.
			 *
			 * Intel uses this leaf only to advertise the
			 * "Invariant TSC" feature with all other bits
			 * being reserved (set to zero).
			 */
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;

			/*
			 * "Invariant TSC" can be advertised to the guest if:
			 * - host TSC frequency is invariant
			 * - host TSCs are synchronized across physical cpus
			 *
			 * XXX This still falls short because the vcpu
			 * can observe the TSC moving backwards as it
			 * migrates across physical cpus. But at least
			 * it should discourage the guest from using the
			 * TSC to keep track of time.
			 */
			if (tsc_is_invariant && smp_tsc)
				regs[3] |= AMDPM_TSC_INVARIANT;
			break;

		case CPUID_0000_0001:
			do_cpuid(1, regs);

			error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
			if (error) {
				panic("x86_emulate_cpuid: error %d "
				      "fetching x2apic state", error);
			}

			/*
			 * Override the APIC ID only in ebx
			 */
			regs[1] &= ~(CPUID_LOCAL_APIC_ID);
			regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

			/*
			 * Don't expose VMX, SpeedStep, TME or SMX capability.
			 * Advertise x2APIC capability and Hypervisor guest.
			 */
			regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
			regs[2] &= ~(CPUID2_SMX);

			regs[2] |= CPUID2_HV;

			if (x2apic_state != X2APIC_DISABLED)
				regs[2] |= CPUID2_X2APIC;
			else
				regs[2] &= ~CPUID2_X2APIC;

			/*
			 * Only advertise CPUID2_XSAVE in the guest if
			 * the host is using XSAVE.
			 */
			if (!(regs[2] & CPUID2_OSXSAVE))
				regs[2] &= ~CPUID2_XSAVE;

			/*
			 * If CPUID2_XSAVE is being advertised and the
			 * guest has set CR4_XSAVE, set
			 * CPUID2_OSXSAVE.
			 */
			regs[2] &= ~CPUID2_OSXSAVE;
			if (regs[2] & CPUID2_XSAVE) {
				error = vm_get_register(vm, vcpu_id,
				    VM_REG_GUEST_CR4, &cr4);
				if (error)
					panic("x86_emulate_cpuid: error %d "
					      "fetching %%cr4", error);
				if (cr4 & CR4_XSAVE)
					regs[2] |= CPUID2_OSXSAVE;
			}

			/*
			 * Hide monitor/mwait until we know how to deal with
			 * these instructions.
			 */
			regs[2] &= ~CPUID2_MON;

			/*
			 * Hide the performance and debug features.
			 */
			regs[2] &= ~CPUID2_PDCM;

			/*
			 * No TSC deadline support in the APIC yet
			 */
			regs[2] &= ~CPUID2_TSCDLT;

			/*
			 * Hide thermal monitoring
			 */
			regs[3] &= ~(CPUID_ACPI | CPUID_TM);

			/*
			 * Hide the debug store capability.
			 */
			regs[3] &= ~CPUID_DS;

			/*
			 * Advertise the Machine Check and MTRR capability.
			 *
			 * Some guest OSes (e.g. Windows) will not boot if
			 * these features are absent.
			 */
			regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

			/* Logical cpu count goes in ebx bits 23:16. */
			logical_cpus = threads_per_core * cores_per_package;
			regs[1] &= ~CPUID_HTT_CORES;
			regs[1] |= (logical_cpus & 0xff) << 16;
			regs[3] |= CPUID_HTT;
			break;

		case CPUID_0000_0004:
			cpuid_count(*eax, *ecx, regs);

			/* Only rewrite the sub-leaf if it describes a cache. */
			if (regs[0] || regs[1] || regs[2] || regs[3]) {
				regs[0] &= 0x3ff;
				regs[0] |= (cores_per_package - 1) << 26;
				/*
				 * Cache topology:
				 * - L1 and L2 are shared only by the logical
				 *   processors in a single core.
				 * - L3 and above are shared by all logical
				 *   processors in the package.
				 */
				logical_cpus = threads_per_core;
				level = (regs[0] >> 5) & 0x7;
				if (level >= 3)
					logical_cpus *= cores_per_package;
				regs[0] |= (logical_cpus - 1) << 14;
			}
			break;

		case CPUID_0000_0007:
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;

			/* leaf 0 */
			if (*ecx == 0) {
				cpuid_count(*eax, *ecx, regs);

				/* Only leaf 0 is supported */
				regs[0] = 0;

				/*
				 * Expose known-safe features.
				 */
				regs[1] &= (CPUID_STDEXT_FSGSBASE |
				    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
				    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
				    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
				    CPUID_STDEXT_AVX512F |
				    CPUID_STDEXT_AVX512PF |
				    CPUID_STDEXT_AVX512ER |
				    CPUID_STDEXT_AVX512CD);
				regs[2] = 0;
				regs[3] = 0;

				/* Advertise INVPCID if it is enabled. */
				error = vm_get_capability(vm, vcpu_id,
				    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
				if (error == 0 && enable_invpcid)
					regs[1] |= CPUID_STDEXT_INVPCID;
			}
			break;

		case CPUID_0000_0006:
			/* Only advertise the "Always Running APIC Timer". */
			regs[0] = CPUTPM1_ARAT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;

		case CPUID_0000_000A:
			/*
			 * Handle the access, but report 0 for
			 * all options
			 */
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;

		case CPUID_0000_000B:
			/*
			 * Processor topology enumeration
			 *
			 * eax bits 4:0 hold the x2APIC-id shift width for
			 * the level, ebx the number of logical cpus at the
			 * level, ecx the level type/number, and edx the
			 * vcpu's x2APIC id.  Sub-leaves >= 2 (or all of
			 * them when the 'cpuid_leaf_b' tunable is off)
			 * report an invalid (all-zero) level.
			 */
			if (*ecx == 0) {
				logical_cpus = threads_per_core;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (*ecx == 1) {
				logical_cpus = threads_per_core *
				    cores_per_package;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (!cpuid_leaf_b || *ecx >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (*ecx & 0xff);
			regs[3] = x2apic_id;
			break;

		case CPUID_0000_000D:
			limits = vmm_get_xsave_limits();
			if (!limits->xsave_enabled) {
				/* XSAVE disabled on the host: leaf is all zeroes. */
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
				break;
			}

			cpuid_count(*eax, *ecx, regs);
			switch (*ecx) {
			case 0:
				/*
				 * Only permit the guest to use bits
				 * that are active in the host in
				 * %xcr0.  Also, claim that the
				 * maximum save area size is
				 * equivalent to the host's current
				 * save area size.  Since this runs
				 * "inside" of vmrun(), it runs with
				 * the guest's xcr0, so the current
				 * save area size is correct as-is.
				 */
				regs[0] &= limits->xcr0_allowed;
				regs[2] = limits->xsave_max_size;
				regs[3] &= (limits->xcr0_allowed >> 32);
				break;
			case 1:
				/* Only permit XSAVEOPT. */
				regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
				break;
			default:
				/*
				 * If the leaf is for a permitted feature,
				 * pass through as-is, otherwise return
				 * all zeroes.
				 */
				if (!(limits->xcr0_allowed & (1ul << *ecx))) {
					regs[0] = 0;
					regs[1] = 0;
					regs[2] = 0;
					regs[3] = 0;
				}
				break;
			}
			break;

		case 0x40000000:
			/*
			 * Hypervisor identification leaf: highest hypervisor
			 * leaf in eax and the 12-byte vendor signature split
			 * across ebx/ecx/edx.
			 */
			regs[0] = CPUID_VM_HIGH;
			bcopy(bhyve_id, &regs[1], 4);
			bcopy(bhyve_id + 4, &regs[2], 4);
			bcopy(bhyve_id + 8, &regs[3], 4);
			break;

		default:
			/*
			 * The leaf value has already been clamped so
			 * simply pass this through, keeping count of
			 * how many unhandled leaf values have been seen.
			 */
			atomic_add_long(&bhyve_xcpuids, 1);
			cpuid_count(*eax, *ecx, regs);
			break;
	}

	/* Copy the computed leaf back into the guest's registers. */
	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];

	return (1);
}
491284900Sneel
492284900Sneelbool
493284900Sneelvm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
494284900Sneel{
495284900Sneel	bool rv;
496284900Sneel
497284900Sneel	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
498284900Sneel	    __func__, cap));
499284900Sneel
500284900Sneel	/*
501284900Sneel	 * Simply passthrough the capabilities of the host cpu for now.
502284900Sneel	 */
503284900Sneel	rv = false;
504284900Sneel	switch (cap) {
505284900Sneel	case VCC_NO_EXECUTE:
506284900Sneel		if (amd_feature & AMDID_NX)
507284900Sneel			rv = true;
508284900Sneel		break;
509284900Sneel	case VCC_FFXSR:
510284900Sneel		if (amd_feature & AMDID_FFXSR)
511284900Sneel			rv = true;
512284900Sneel		break;
513284900Sneel	case VCC_TCE:
514284900Sneel		if (amd_feature2 & AMDID2_TCE)
515284900Sneel			rv = true;
516284900Sneel		break;
517284900Sneel	default:
518284900Sneel		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
519284900Sneel	}
520284900Sneel	return (rv);
521284900Sneel}
522