/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/vmm_host.c 261638 2014-02-08 16:37:54Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/vmm_host.c 261638 2014-02-08 16:37:54Z jhb $");

#include <sys/param.h>
#include <sys/pcpu.h>

#include <machine/cpufunc.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include "vmm_host.h"

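/*
 * Cached host state.  These values are read once, at initialization
 * time, and are assumed to be identical across all host CPUs.
 */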
static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
	vmm_host_xcr0;
static struct xsave_limits vmm_xsave_limits;

void
vmm_host_state_init(void)
{
	u_int regs[4];

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	vmm_host_cr4 = rcr4();

	/*
	 * Only permit a guest to use XSAVE if the host is using
	 * XSAVE.  Only permit a guest to use XSAVE features supported
	 * by the host.  This ensures that the FPU state used by the
	 * guest is always a subset of the saved guest FPU state.
	 */
	if (vmm_host_cr4 & CR4_XSAVE) {
		vmm_xsave_limits.xsave_enabled = 1;
		vmm_host_xcr0 = rxcr(0);
		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0;

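		/*
		 * CPUID leaf 0xD, sub-leaf 0: EBX (regs[1]) reports the size
		 * of the XSAVE area needed to hold the feature set that is
		 * currently enabled in XCR0.
		 */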
		cpuid_count(0xd, 0x0, regs);
		vmm_xsave_limits.xsave_max_size = regs[1];
	}
}
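
/*
 * Usage sketch (illustrative only, not part of this file's interface):
 * a backend emulating the guest's XSETBV instruction could validate the
 * guest-requested XCR0 value ('newval' is a placeholder name) against
 * the limits captured above:
 *
 *	const struct xsave_limits *limits = vmm_get_xsave_limits();
 *
 *	if (!limits->xsave_enabled || (newval & ~limits->xcr0_allowed) != 0)
 *		return (EINVAL);
 */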

uint64_t
vmm_get_host_pat(void)
{

	return (vmm_host_pat);
}

uint64_t
vmm_get_host_efer(void)
{

	return (vmm_host_efer);
}

uint64_t
vmm_get_host_cr0(void)
{

	return (vmm_host_cr0);
}

uint64_t
vmm_get_host_cr4(void)
{

	return (vmm_host_cr4);
}

uint64_t
vmm_get_host_xcr0(void)
{

	return (vmm_host_xcr0);
}

uint64_t
vmm_get_host_datasel(void)
{

	return (GSEL(GDATA_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_codesel(void)
{

	return (GSEL(GCODE_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_tsssel(void)
{

	return (GSEL(GPROC0_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_fsbase(void)
{

	return (0);
}

uint64_t
vmm_get_host_idtrbase(void)
{

	return (r_idt.rd_base);
}
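
/*
 * Usage sketch (illustrative; the VMCS field names and vmcs_write() are
 * placeholders, not definitions from this file): a hardware-assisted
 * backend such as VT-x would typically copy the values returned by the
 * accessors above into its host-state save area when a vCPU context is
 * initialized, along the lines of:
 *
 *	vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
 *	vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4());
 *	vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());
 *	vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());
 *	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
 */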

const struct xsave_limits *
vmm_get_xsave_limits(void)
{

	return (&vmm_xsave_limits);
}