/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/amd64/vmm/vmm_host.c 333167 2018-05-02 08:24:59Z kib $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/amd64/vmm/vmm_host.c 333167 2018-05-02 08:24:59Z kib $");

#include <sys/param.h>
#include <sys/pcpu.h>

#include <machine/cpufunc.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include "vmm_host.h"

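/*
 * Host MSR, control register and XCR0 values captured once at
 * initialization.  The hardware-specific backends read them back
 * through the vmm_get_host_*() accessors when they set up per-vCPU
 * host state (e.g. the VMCS host-state area on Intel VT-x).
 */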
static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
	vmm_host_xcr0;
static struct xsave_limits vmm_xsave_limits;

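/*
 * Capture the host state that remains constant across VM exits.  This
 * is intended to run once, during vmm(4) module initialization, before
 * any guest is created.
 */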
void
vmm_host_state_init(void)
{
	int regs[4];

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	/*
	 * On machines without PCID, or with PCID but without INVPCID
	 * support, we flush kernel (i.e. global) TLB entries by
	 * temporarily clearing the CR4.PGE bit; see invltlb_glob().
	 * If preemption occurs at the wrong time, the cached
	 * vmm_host_cr4 might store the value with CR4.PGE cleared.
	 * Since FreeBSD requires support for PG_G on amd64, just set
	 * it unconditionally.
	 */
	vmm_host_cr4 = rcr4() | CR4_PGE;

	/*
	 * Only permit a guest to use XSAVE if the host is using
	 * XSAVE.  Only permit a guest to use XSAVE features supported
	 * by the host.  This ensures that the FPU state used by the
	 * guest is always a subset of the saved guest FPU state.
	 *
	 * In addition, only permit known XSAVE features whose
	 * dependencies on other features are understood, so that
	 * xsetbv can be emulated properly.
	 */
	if (vmm_host_cr4 & CR4_XSAVE) {
		vmm_xsave_limits.xsave_enabled = 1;
		vmm_host_xcr0 = rxcr(0);
		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0 &
		    (XFEATURE_AVX | XFEATURE_MPX | XFEATURE_AVX512);

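		/*
		 * CPUID leaf 0xd, sub-leaf 0: %ebx reports the size of
		 * the XSAVE area needed for the state components
		 * currently enabled in XCR0.
		 */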
		cpuid_count(0xd, 0x0, regs);
		vmm_xsave_limits.xsave_max_size = regs[1];
	}
}

uint64_t
vmm_get_host_pat(void)
{

	return (vmm_host_pat);
}

uint64_t
vmm_get_host_efer(void)
{

	return (vmm_host_efer);
}

uint64_t
vmm_get_host_cr0(void)
{

	return (vmm_host_cr0);
}

uint64_t
vmm_get_host_cr4(void)
{

	return (vmm_host_cr4);
}

uint64_t
vmm_get_host_xcr0(void)
{

	return (vmm_host_xcr0);
}

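/*
 * The selector and base accessors below describe the host's kernel
 * GDT/IDT state; on Intel VT-x these values are programmed into the
 * VMCS host-state area so the processor restores them on VM exit.
 */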
uint64_t
vmm_get_host_datasel(void)
{

	return (GSEL(GDATA_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_codesel(void)
{

	return (GSEL(GCODE_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_tsssel(void)
{

	return (GSEL(GPROC0_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_fsbase(void)
{

	return (0);
}

uint64_t
vmm_get_host_idtrbase(void)
{

	return (r_idt.rd_base);
}

const struct xsave_limits *
vmm_get_xsave_limits(void)
{

	return (&vmm_xsave_limits);
}