vmm_msr.c revision 243650
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <machine/specialreg.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vmm_msr.h"

#define	VMM_MSR_F_EMULATE	0x01
#define	VMM_MSR_F_READONLY	0x02
#define	VMM_MSR_F_INVALID	0x04	/* guest_msr_valid() can override this */

struct vmm_msr {
	int		num;
	int		flags;
	uint64_t	hostval;
};

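/*
 * Table of MSRs managed by the hypervisor.  Entries without flags are
 * passed through: the host value is cached at init time and swapped
 * with the guest's value around guest execution.  VMM_MSR_F_EMULATE
 * entries live only in the per-vcpu guest_msrs array and never touch
 * hardware.
 */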
static struct vmm_msr vmm_msr[] = {
	{ MSR_LSTAR,	0 },
	{ MSR_CSTAR,	0 },
	{ MSR_STAR,	0 },
	{ MSR_SF_MASK,	0 },
	{ MSR_PAT,	VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
	{ MSR_BIOS_SIGN, VMM_MSR_F_EMULATE },
	{ MSR_MCG_CAP,	VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
};

#define	vmm_msr_num	(sizeof(vmm_msr) / sizeof(vmm_msr[0]))
CTASSERT(VMM_MSR_NUM >= vmm_msr_num);

#define	readonly_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_READONLY) != 0)

#define	emulated_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)

#define	invalid_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)

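/*
 * Cache the host's value of each pass-through (non-emulated) MSR so
 * that restore_host_msrs() can reinstate it after a guest has run.
 */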
void
vmm_msr_init(void)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		/*
		 * XXX this assumes that the value of the host MSR does not
		 * change after we have cached it.
		 */
		vmm_msr[i].hostval = rdmsr(vmm_msr[i].num);
	}
}

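/*
 * Set the initial guest-visible values for a vcpu's MSRs.  Most are
 * zeroed; the guest PAT is initialized to the architectural power-on
 * layout (WB, WT, UC-, UC repeated across the eight entries).
 */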
void
guest_msrs_init(struct vm *vm, int cpu)
{
	int i;
	uint64_t *guest_msrs;

	guest_msrs = vm_guest_msrs(vm, cpu);

	for (i = 0; i < vmm_msr_num; i++) {
		switch (vmm_msr[i].num) {
		case MSR_LSTAR:
		case MSR_CSTAR:
		case MSR_STAR:
		case MSR_SF_MASK:
		case MSR_BIOS_SIGN:
		case MSR_MCG_CAP:
			guest_msrs[i] = 0;
			break;
		case MSR_PAT:
			guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK)      |
				PAT_VALUE(1, PAT_WRITE_THROUGH)   |
				PAT_VALUE(2, PAT_UNCACHED)        |
				PAT_VALUE(3, PAT_UNCACHEABLE)     |
				PAT_VALUE(4, PAT_WRITE_BACK)      |
				PAT_VALUE(5, PAT_WRITE_THROUGH)   |
				PAT_VALUE(6, PAT_UNCACHED)        |
				PAT_VALUE(7, PAT_UNCACHEABLE);
			break;
		default:
			panic("guest_msrs_init: missing initialization for msr "
			      "0x%x", vmm_msr[i].num);
		}
	}
}

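/* Map an MSR number to its index in vmm_msr[], or -1 if not handled. */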
static int
msr_num_to_idx(u_int num)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++)
		if (vmm_msr[i].num == num)
			return (i);

	return (-1);
}

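/*
 * Handle a WRMSR exit.  Local APIC MSRs are forwarded to the vlapic
 * code; otherwise the value is stashed in the guest's MSR array and,
 * for pass-through MSRs, also written to hardware.  Writes to
 * read-only MSRs are silently ignored.
 */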
int
emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val)
{
	int idx;
	uint64_t *guest_msrs;

	if (lapic_msr(num))
		return (lapic_wrmsr(vm, cpu, num, val));

	idx = msr_num_to_idx(num);
	if (idx < 0 || invalid_msr(idx))
		return (EINVAL);

	if (!readonly_msr(idx)) {
		guest_msrs = vm_guest_msrs(vm, cpu);

		/* Stash the value */
		guest_msrs[idx] = val;

		/* Update processor state for non-emulated MSRs */
		if (!emulated_msr(idx))
			wrmsr(vmm_msr[idx].num, val);
	}

	return (0);
}

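/*
 * Handle a RDMSR exit.  The value is fetched from the vlapic or from
 * the guest's MSR array and returned to the guest split across
 * RAX (low 32 bits) and RDX (high 32 bits), as RDMSR requires.
 */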
int
emulate_rdmsr(struct vm *vm, int cpu, u_int num)
{
	int error, idx;
	uint32_t eax, edx;
	uint64_t result, *guest_msrs;

	if (lapic_msr(num)) {
		error = lapic_rdmsr(vm, cpu, num, &result);
		goto done;
	}

	idx = msr_num_to_idx(num);
	if (idx < 0 || invalid_msr(idx)) {
		error = EINVAL;
		goto done;
	}

	guest_msrs = vm_guest_msrs(vm, cpu);
	result = guest_msrs[idx];

	/*
	 * If this is not an emulated MSR make sure that the processor
	 * state matches our cached state.
	 */
	if (!emulated_msr(idx) && (rdmsr(num) != result)) {
		panic("emulate_rdmsr: msr 0x%x has inconsistent cached "
		      "(0x%016lx) and actual (0x%016lx) values", num,
		      result, rdmsr(num));
	}

	error = 0;

done:
	if (error == 0) {
		eax = result;
		edx = result >> 32;
		error = vm_set_register(vm, cpu, VM_REG_GUEST_RAX, eax);
		if (error)
			panic("vm_set_register(rax) error %d", error);
		error = vm_set_register(vm, cpu, VM_REG_GUEST_RDX, edx);
		if (error)
			panic("vm_set_register(rdx) error %d", error);
	}
	return (error);
}

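/*
 * Load the guest's saved values into the pass-through MSRs so the
 * hardware reflects guest state.  Emulated MSRs are never written to
 * hardware.
 */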
void
restore_guest_msrs(struct vm *vm, int cpu)
{
	int i;
	uint64_t *guest_msrs;

	guest_msrs = vm_guest_msrs(vm, cpu);

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		wrmsr(vmm_msr[i].num, guest_msrs[i]);
	}
}

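/*
 * Undo restore_guest_msrs(): write the host values cached by
 * vmm_msr_init() back into the pass-through MSRs.
 */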
void
restore_host_msrs(struct vm *vm, int cpu)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
	}
}

/*
 * Clear the VMM_MSR_F_INVALID flag for an MSR that the CPU-specific
 * code knows how to handle.  Must be called by the CPU-specific code
 * before any guests are created.
 */
void
guest_msr_valid(int msr)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (vmm_msr[i].num == msr && invalid_msr(i)) {
			vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
		}
	}
}
2551541Srgrimes