/* vmm_host.c revision 261638 */
/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/vmm_host.c 261638 2014-02-08 16:37:54Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/vmm_host.c 261638 2014-02-08 16:37:54Z jhb $");

#include <sys/param.h>
#include <sys/pcpu.h>

#include <machine/cpufunc.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include "vmm_host.h"

41static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
42	vmm_host_xcr0;
43static struct xsave_limits vmm_xsave_limits;
44
45void
46vmm_host_state_init(void)
47{
48	int regs[4];
49
50	vmm_host_efer = rdmsr(MSR_EFER);
51	vmm_host_pat = rdmsr(MSR_PAT);
52
53	/*
54	 * We always want CR0.TS to be set when the processor does a VM exit.
55	 *
56	 * With emulation turned on unconditionally after a VM exit, we are
57	 * able to trap inadvertent use of the FPU until the guest FPU state
58	 * has been safely squirreled away.
59	 */
60	vmm_host_cr0 = rcr0() | CR0_TS;
61
62	vmm_host_cr4 = rcr4();
63
64	/*
65	 * Only permit a guest to use XSAVE if the host is using
66	 * XSAVE.  Only permit a guest to use XSAVE features supported
67	 * by the host.  This ensures that the FPU state used by the
68	 * guest is always a subset of the saved guest FPU state.
69	 */
70	if (vmm_host_cr4 & CR4_XSAVE) {
71		vmm_xsave_limits.xsave_enabled = 1;
72		vmm_host_xcr0 = rxcr(0);
73		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0;
74
75		cpuid_count(0xd, 0x0, regs);
76		vmm_xsave_limits.xsave_max_size = regs[1];
77	}
78}
79
80uint64_t
81vmm_get_host_pat(void)
82{
83
84	return (vmm_host_pat);
85}
86
87uint64_t
88vmm_get_host_efer(void)
89{
90
91	return (vmm_host_efer);
92}
93
94uint64_t
95vmm_get_host_cr0(void)
96{
97
98	return (vmm_host_cr0);
99}
100
101uint64_t
102vmm_get_host_cr4(void)
103{
104
105	return (vmm_host_cr4);
106}
107
108uint64_t
109vmm_get_host_xcr0(void)
110{
111
112	return (vmm_host_xcr0);
113}
114
115uint64_t
116vmm_get_host_datasel(void)
117{
118
119	return (GSEL(GDATA_SEL, SEL_KPL));
120
121}
122
123uint64_t
124vmm_get_host_codesel(void)
125{
126
127	return (GSEL(GCODE_SEL, SEL_KPL));
128}
129
130uint64_t
131vmm_get_host_tsssel(void)
132{
133
134	return (GSEL(GPROC0_SEL, SEL_KPL));
135}
136
137uint64_t
138vmm_get_host_fsbase(void)
139{
140
141	return (0);
142}
143
144uint64_t
145vmm_get_host_idtrbase(void)
146{
147
148	return (r_idt.rd_base);
149}
150
151const struct xsave_limits *
152vmm_get_xsave_limits(void)
153{
154
155	return (&vmm_xsave_limits);
156}
157