/*-
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/amd/svm_msr.c 273375 2014-10-21 07:10:43Z neel $");

#include <sys/types.h>
#include <sys/errno.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#include "svm_msr.h"

#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

/*
 * Indices into 'host_msrs[]' for the host MSR values that must be saved
 * across a VMRUN by hand (i.e. they are not handled automatically by the
 * hardware or by VMSAVE/VMLOAD).
 */
enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

/* Host MSR values cached once at initialization time. */
static uint64_t host_msrs[HOST_MSR_NUM];
51311116Sdim
52311116Sdimvoid
53353358Sdimsvm_msr_init(void)
54321369Sdim{
55353358Sdim	/*
56353358Sdim	 * It is safe to cache the values of the following MSRs because they
57321369Sdim	 * don't change based on curcpu, curproc or curthread.
58311116Sdim	 */
59353358Sdim	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
60353358Sdim	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
61353358Sdim	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
62353358Sdim	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
63311116Sdim}
64311116Sdim
/*
 * Per-vcpu guest MSR setup.
 *
 * Every MSR the guest can access is saved and restored either by the
 * processor itself on #VMEXIT/VMRUN (e.g. G_PAT) or via the VMSAVE and
 * VMLOAD instructions (e.g. MSR_GSBASE).  No guest MSR state is kept in
 * software, so there is nothing to initialize here.
 */
void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
}
78311116Sdim
/*
 * Called on the way into the guest: save host MSRs and load guest MSRs.
 *
 * Currently there are no MSRs handled by hand in either direction, so
 * this is a no-op.
 */
void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
}
86353358Sdim
87353358Sdimvoid
88353358Sdimsvm_msr_guest_exit(struct svm_softc *sc, int vcpu)
89311116Sdim{
90311116Sdim	/*
91311116Sdim	 * Save guest MSRs (if any) and restore host MSRs.
92311116Sdim	 */
93311116Sdim	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
94311116Sdim	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
95311116Sdim	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
96311116Sdim	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
97311116Sdim
98311116Sdim	/* MSR_KGSBASE will be restored on the way back to userspace */
99353358Sdim}
100353358Sdim
101353358Sdimint
102353358Sdimsvm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
103353358Sdim    bool *retu)
104353358Sdim{
105353358Sdim	int error = 0;
106353358Sdim
107311116Sdim	switch (num) {
108311116Sdim	case MSR_AMDK8_IPM:
109311116Sdim		*result = 0;
110311116Sdim		break;
111311116Sdim	default:
112311116Sdim		error = EINVAL;
113353358Sdim		break;
114353358Sdim	}
115311116Sdim
116311116Sdim	return (error);
117311116Sdim}
118311116Sdim
119353358Sdimint
120353358Sdimsvm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
121311116Sdim{
122311116Sdim	int error = 0;
123353358Sdim
124311116Sdim	switch (num) {
125353358Sdim	case MSR_AMDK8_IPM:
126311116Sdim		/*
127311116Sdim		 * Ignore writes to the "Interrupt Pending Message" MSR.
128353358Sdim		 */
129311116Sdim		break;
130311116Sdim	default:
131311116Sdim		error = EINVAL;
132353358Sdim		break;
133353358Sdim	}
134353358Sdim
135353358Sdim	return (error);
136353358Sdim}
137311116Sdim