/* svm_msr.c -- FreeBSD head, revision 282296 */
/*-
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26271912Sneel
27271912Sneel#include <sys/cdefs.h>
28271912Sneel__FBSDID("$FreeBSD: head/sys/amd64/vmm/amd/svm_msr.c 282296 2015-05-01 05:11:14Z neel $");
29271912Sneel
30282281Sneel#include <sys/param.h>
31271912Sneel#include <sys/errno.h>
32282281Sneel#include <sys/systm.h>
33271912Sneel
34271912Sneel#include <machine/cpufunc.h>
35271912Sneel#include <machine/specialreg.h>
36282281Sneel#include <machine/vmm.h>
37271912Sneel
38282281Sneel#include "svm.h"
39282281Sneel#include "vmcb.h"
40282281Sneel#include "svm_softc.h"
41271912Sneel#include "svm_msr.h"
42271912Sneel
#ifndef MSR_AMDK8_IPM
/* AMD "Interrupt Pending Message" register; provide the number if the
 * system headers predate its definition. */
#define	MSR_AMDK8_IPM	0xc0010055
#endif

/*
 * Indices into host_msrs[] for the syscall-related MSRs whose host values
 * are cached at init time and restored by hand on every guest exit.
 */
enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

/* Host MSR values captured once in svm_msr_init(); see comment there for
 * why a single snapshot is valid for all CPUs. */
static uint64_t host_msrs[HOST_MSR_NUM];
56271912Sneel
57271912Sneelvoid
58271912Sneelsvm_msr_init(void)
59271912Sneel{
60271912Sneel	/*
61271912Sneel	 * It is safe to cache the values of the following MSRs because they
62271912Sneel	 * don't change based on curcpu, curproc or curthread.
63271912Sneel	 */
64271912Sneel	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
65271912Sneel	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
66271912Sneel	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
67271912Sneel	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
68271912Sneel}
69271912Sneel
void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * Every MSR the guest can touch is handled automatically: either
	 * saved/restored by hardware on each #VMEXIT/VMRUN (e.g. G_PAT),
	 * or by the VMSAVE/VMLOAD instructions (e.g. MSR_GSBASE).
	 *
	 * With no guest MSRs managed "by hand", there is no per-vcpu
	 * state to set up here.
	 */
}
83271912Sneel
void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/*
	 * Called on the way into the guest: save any host MSRs and load
	 * any guest MSRs that need manual handling.  Currently there are
	 * none, so this is intentionally empty.
	 */
}
91271912Sneel
92271912Sneelvoid
93271912Sneelsvm_msr_guest_exit(struct svm_softc *sc, int vcpu)
94271912Sneel{
95271912Sneel	/*
96271912Sneel	 * Save guest MSRs (if any) and restore host MSRs.
97271912Sneel	 */
98271912Sneel	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
99271912Sneel	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
100271912Sneel	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
101271912Sneel	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
102271912Sneel
103271912Sneel	/* MSR_KGSBASE will be restored on the way back to userspace */
104271912Sneel}
105271912Sneel
106271912Sneelint
107271912Sneelsvm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
108271912Sneel    bool *retu)
109271912Sneel{
110271912Sneel	int error = 0;
111271912Sneel
112271912Sneel	switch (num) {
113282281Sneel	case MSR_MTRRcap:
114282281Sneel	case MSR_MTRRdefType:
115282281Sneel	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
116282281Sneel	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
117282281Sneel	case MSR_MTRR64kBase:
118282296Sneel	case MSR_SYSCFG:
119282281Sneel		*result = 0;
120282281Sneel		break;
121271912Sneel	case MSR_AMDK8_IPM:
122271912Sneel		*result = 0;
123271912Sneel		break;
124271912Sneel	default:
125271912Sneel		error = EINVAL;
126271912Sneel		break;
127271912Sneel	}
128271912Sneel
129271912Sneel	return (error);
130271912Sneel}
131271912Sneel
132271912Sneelint
133271912Sneelsvm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
134271912Sneel{
135271912Sneel	int error = 0;
136271912Sneel
137271912Sneel	switch (num) {
138282281Sneel	case MSR_MTRRcap:
139282281Sneel		vm_inject_gp(sc->vm, vcpu);
140282281Sneel		break;
141282281Sneel	case MSR_MTRRdefType:
142282281Sneel	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
143282281Sneel	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
144282281Sneel	case MSR_MTRR64kBase:
145282296Sneel	case MSR_SYSCFG:
146282281Sneel		break;		/* Ignore writes */
147271912Sneel	case MSR_AMDK8_IPM:
148271912Sneel		/*
149271912Sneel		 * Ignore writes to the "Interrupt Pending Message" MSR.
150271912Sneel		 */
151271912Sneel		break;
152271912Sneel	default:
153271912Sneel		error = EINVAL;
154271912Sneel		break;
155271912Sneel	}
156271912Sneel
157271912Sneel	return (error);
158271912Sneel}
159