/*-
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/amd64/vmm/amd/svm_msr.c 297806 2016-04-11 05:09:43Z anish $");

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

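/*
 * Define MSR_AMDK8_IPM locally if <machine/specialreg.h> does not
 * already provide it.
 */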
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

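/* Host values of the syscall MSRs above, captured once by svm_msr_init(). */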
static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}

void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}

void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 *
	 * Nothing is needed here: the host's syscall MSRs were cached once
	 * in svm_msr_init() and the guest's values are loaded from the VMCB
	 * by VMLOAD/VMRUN on entry.
	 */
}

void
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

int
svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
    bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		*result = 0;
		break;
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_SYSCFG:
		*result = 0;
		break;
	case MSR_AMDK8_IPM:
		*result = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
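
/*
 * Illustrative sketch (not part of the original file): RDMSR returns its
 * result in EDX:EAX, so a #VMEXIT handler splits the 64-bit value produced
 * by svm_rdmsr() across the two registers.  The helper below is a
 * hypothetical example and is compiled out by default.
 */
#ifdef SVM_MSR_EXAMPLE
static int
emulate_rdmsr_example(struct svm_softc *sc, int vcpu, u_int num,
    uint32_t *eax, uint32_t *edx, bool *retu)
{
	uint64_t result;
	int error;

	error = svm_rdmsr(sc, vcpu, num, &result, retu);
	if (error == 0) {
		/* Low 32 bits go to EAX, high 32 bits to EDX. */
		*eax = result & 0xffffffff;
		*edx = result >> 32;
	}
	return (error);
}
#endif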

int
svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		break;		/* Ignore writes */
	case MSR_MTRRcap:
		vm_inject_gp(sc->vm, vcpu);
		break;
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_SYSCFG:
		break;		/* Ignore writes */
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	case MSR_K8_UCODE_UPDATE:
		/*
		 * Ignore writes to the microcode update register.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
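
/*
 * Illustrative sketch (not part of the original file): WRMSR takes its
 * operand in EDX:EAX, so a #VMEXIT handler reassembles the 64-bit value
 * before calling svm_wrmsr().  The helper below is a hypothetical example
 * and is compiled out by default.
 */
#ifdef SVM_MSR_EXAMPLE
static int
emulate_wrmsr_example(struct svm_softc *sc, int vcpu, u_int num,
    uint32_t eax, uint32_t edx, bool *retu)
{
	uint64_t val;

	/* Combine EDX (high 32 bits) and EAX (low 32 bits). */
	val = (uint64_t)edx << 32 | eax;
	return (svm_wrmsr(sc, vcpu, num, val, retu));
}
#endif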