/*-
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm_msr.c 284900 2015-06-28 03:22:26Z neel $");

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

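/*
 * Older <machine/specialreg.h> headers may not define the AMD "Interrupt
 * Pending Message" MSR, so provide a local fallback definition.
 */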
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

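/*
 * Host values of the MSRs that hold guest state while a guest is running;
 * they are captured once at initialization and restored on every guest
 * exit (see svm_msr_guest_exit()).
 */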
static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}

void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}

void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
}

void
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

int
svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
    bool *retu)
{
	int error = 0;

	switch (num) {
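	/*
	 * A zeroed MSR_MCG_CAP reports zero machine-check banks, so the
	 * guest sees no machine-check state to manage.
	 */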
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		*result = 0;
		break;
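	/*
	 * Reading MSR_MTRRcap as 0 advertises no fixed-range, variable-range
	 * or write-combining MTRR support to the guest.
	 */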
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:	/* 8 fixed 4K MTRRs */
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_SYSCFG:
		*result = 0;
		break;
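	/*
	 * The "Interrupt Pending Message" register reads as 0; writes to it
	 * are ignored in svm_wrmsr() below.
	 */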
	case MSR_AMDK8_IPM:
		*result = 0;
		break;
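	/*
	 * Any other MSR is rejected with EINVAL and the caller decides how
	 * to surface the failure to the guest.
	 */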
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

int
svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		break;		/* ignore writes */
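	/*
	 * MSR_MTRRcap is read-only; a write raises #GP on real hardware, so
	 * emulate that by injecting #GP into the guest.
	 */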
	case MSR_MTRRcap:
		vm_inject_gp(sc->vm, vcpu);
		break;
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:	/* 8 fixed 4K MTRRs */
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_SYSCFG:
		break;		/* ignore writes */
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}