/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.3/sys/amd64/vmm/vmm_host.c 267427 2014-06-12 19:58:12Z jhb $
 */

29242275Sneel#include <sys/cdefs.h>
30242275Sneel__FBSDID("$FreeBSD: releng/10.3/sys/amd64/vmm/vmm_host.c 267427 2014-06-12 19:58:12Z jhb $");
31242275Sneel
32242275Sneel#include <sys/param.h>
33242275Sneel#include <sys/pcpu.h>
34242275Sneel
35242275Sneel#include <machine/cpufunc.h>
36242275Sneel#include <machine/segments.h>
37242275Sneel#include <machine/specialreg.h>
38242275Sneel
39242275Sneel#include "vmm_host.h"
40242275Sneel
/*
 * Host register state captured once by vmm_host_state_init() and
 * returned unchanged by the vmm_get_host_*() accessors below.
 */
static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
	vmm_host_xcr0;
/*
 * XSAVE capabilities advertised to guests.  Zero-filled (i.e. XSAVE
 * disabled) unless the host itself has CR4.OSXSAVE set at init time.
 */
static struct xsave_limits vmm_xsave_limits;
44242275Sneel
45242275Sneelvoid
46242275Sneelvmm_host_state_init(void)
47242275Sneel{
48267427Sjhb	int regs[4];
49242275Sneel
50242275Sneel	vmm_host_efer = rdmsr(MSR_EFER);
51242275Sneel	vmm_host_pat = rdmsr(MSR_PAT);
52242275Sneel
53242275Sneel	/*
54242275Sneel	 * We always want CR0.TS to be set when the processor does a VM exit.
55242275Sneel	 *
56242275Sneel	 * With emulation turned on unconditionally after a VM exit, we are
57242275Sneel	 * able to trap inadvertent use of the FPU until the guest FPU state
58242275Sneel	 * has been safely squirreled away.
59242275Sneel	 */
60242275Sneel	vmm_host_cr0 = rcr0() | CR0_TS;
61242275Sneel
62242275Sneel	vmm_host_cr4 = rcr4();
63267427Sjhb
64267427Sjhb	/*
65267427Sjhb	 * Only permit a guest to use XSAVE if the host is using
66267427Sjhb	 * XSAVE.  Only permit a guest to use XSAVE features supported
67267427Sjhb	 * by the host.  This ensures that the FPU state used by the
68267427Sjhb	 * guest is always a subset of the saved guest FPU state.
69267427Sjhb	 *
70267427Sjhb	 * In addition, only permit known XSAVE features where the
71267427Sjhb	 * rules for which features depend on other features is known
72267427Sjhb	 * to properly emulate xsetbv.
73267427Sjhb	 */
74267427Sjhb	if (vmm_host_cr4 & CR4_XSAVE) {
75267427Sjhb		vmm_xsave_limits.xsave_enabled = 1;
76267427Sjhb		vmm_host_xcr0 = rxcr(0);
77267427Sjhb		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0 &
78267427Sjhb		    (XFEATURE_AVX | XFEATURE_MPX | XFEATURE_AVX512);
79267427Sjhb
80267427Sjhb		cpuid_count(0xd, 0x0, regs);
81267427Sjhb		vmm_xsave_limits.xsave_max_size = regs[1];
82267427Sjhb	}
83242275Sneel}
84242275Sneel
/*
 * Return the host's PAT MSR value captured by vmm_host_state_init().
 */
uint64_t
vmm_get_host_pat(void)
{

	return (vmm_host_pat);
}
91242275Sneel
/*
 * Return the host's EFER MSR value captured by vmm_host_state_init().
 */
uint64_t
vmm_get_host_efer(void)
{

	return (vmm_host_efer);
}
98242275Sneel
/*
 * Return the host CR0 value to restore on VM exit.  Note this is the
 * value captured at init time with CR0.TS forced on (see
 * vmm_host_state_init()), not the live register contents.
 */
uint64_t
vmm_get_host_cr0(void)
{

	return (vmm_host_cr0);
}
105242275Sneel
/*
 * Return the host CR4 value captured by vmm_host_state_init().
 */
uint64_t
vmm_get_host_cr4(void)
{

	return (vmm_host_cr4);
}
112242275Sneel
/*
 * Return the host XCR0 value.  Only meaningful when the host has
 * CR4.OSXSAVE set; vmm_host_xcr0 is left zero otherwise (it is only
 * assigned inside the CR4_XSAVE branch of vmm_host_state_init()).
 */
uint64_t
vmm_get_host_xcr0(void)
{

	return (vmm_host_xcr0);
}
119267427Sjhb
120267427Sjhbuint64_t
121242275Sneelvmm_get_host_datasel(void)
122242275Sneel{
123242275Sneel
124242275Sneel	return (GSEL(GDATA_SEL, SEL_KPL));
125242275Sneel
126242275Sneel}
127242275Sneel
/*
 * Selector for the host's kernel code segment (GDT entry GCODE_SEL at
 * kernel privilege level).
 */
uint64_t
vmm_get_host_codesel(void)
{

	return (GSEL(GCODE_SEL, SEL_KPL));
}
134242275Sneel
/*
 * Selector for the host's TSS (GDT entry GPROC0_SEL at kernel
 * privilege level).
 */
uint64_t
vmm_get_host_tsssel(void)
{

	return (GSEL(GPROC0_SEL, SEL_KPL));
}
141242275Sneel
/*
 * Host %fs base.  Constant zero here; presumably the amd64 host kernel
 * runs with a zero %fs base -- NOTE(review): confirm against the VMX/SVM
 * host-state restore path that consumes this.
 */
uint64_t
vmm_get_host_fsbase(void)
{
	const uint64_t host_fsbase = 0;

	return (host_fsbase);
}
148242275Sneel
/*
 * Return the linear base address of the host IDT, read from the
 * kernel's global region descriptor r_idt.
 */
uint64_t
vmm_get_host_idtrbase(void)
{

	return (r_idt.rd_base);
}
155267427Sjhb
/*
 * Return the XSAVE limits computed by vmm_host_state_init().  If the
 * host does not use XSAVE, the struct is all-zero (xsave_enabled == 0).
 * The returned pointer refers to static storage; callers must not
 * modify or free it.
 */
const struct xsave_limits *
vmm_get_xsave_limits(void)
{

	return (&vmm_xsave_limits);
}
162