/*-
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm_msr.c 271912 2014-09-20 21:46:31Z neel $");
29
30#include <sys/types.h>
31#include <sys/errno.h>
32
33#include <machine/cpufunc.h>
34#include <machine/specialreg.h>
35
36#include "svm_msr.h"
37
/*
 * MSR_AMDK8_IPM ("Interrupt Pending Message", MSRC001_0055) may not be
 * defined by older <machine/specialreg.h> headers; provide the MSR
 * number as a fallback so the rdmsr/wrmsr emulation below compiles.
 */
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

/*
 * Indices into host_msrs[] for the host MSRs that are saved and
 * restored "by hand" across a guest context switch.
 */
enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

/* Host MSR values cached once by svm_msr_init(). */
static uint64_t host_msrs[HOST_MSR_NUM];
51
52void
53svm_msr_init(void)
54{
55	/*
56	 * It is safe to cache the values of the following MSRs because they
57	 * don't change based on curcpu, curproc or curthread.
58	 */
59	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
60	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
61	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
62	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
63}
64
/*
 * Per-vcpu guest MSR state initialization.  Intentionally a no-op;
 * 'sc' and 'vcpu' are accepted for interface symmetry with the other
 * svm_msr_guest_*() entry points.
 */
void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
}
78
/*
 * Called on the way into guest context.  Currently a no-op because no
 * MSRs are saved/restored "by hand" (see svm_msr_guest_init()); the
 * body is kept as a hook for any future hand-managed guest MSRs.
 */
void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
}
86
87void
88svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
89{
90	/*
91	 * Save guest MSRs (if any) and restore host MSRs.
92	 */
93	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
94	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
95	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
96	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
97
98	/* MSR_KGSBASE will be restored on the way back to userspace */
99}
100
101int
102svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
103    bool *retu)
104{
105	int error = 0;
106
107	switch (num) {
108	case MSR_AMDK8_IPM:
109		*result = 0;
110		break;
111	default:
112		error = EINVAL;
113		break;
114	}
115
116	return (error);
117}
118
119int
120svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
121{
122	int error = 0;
123
124	switch (num) {
125	case MSR_AMDK8_IPM:
126		/*
127		 * Ignore writes to the "Interrupt Pending Message" MSR.
128		 */
129		break;
130	default:
131		error = EINVAL;
132		break;
133	}
134
135	return (error);
136}
137