/* svm_softc.h revision 259579 */
/*-
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm_softc.h 259579 2013-12-18 23:39:42Z grehan $
 */

#ifndef _SVM_SOFTC_H_
#define _SVM_SOFTC_H_

/*
 * Sizes, in bytes, of the I/O-port and MSR permission bitmaps.
 * NOTE(review): 3 pages (IOPM) and 2 pages (MSRPM) presumably match the
 * sizes mandated by the AMD SVM architecture — confirm against the
 * AMD64 APM Volume 2 (VMRUN intercept bitmap requirements).
 */
#define SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
#define SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)

/*
 * svm_vcpu contains SVM VMCB state and vcpu register state.
 * Page-aligned because the VMCB physical address given to hardware
 * (vmcb_pa) must reference a page-aligned VMCB.
 */
struct svm_vcpu {
	struct vmcb	vmcb;	 /* hardware saved vcpu context */
	struct svm_regctx swctx; /* software saved vcpu context */
	uint64_t	vmcb_pa; /* VMCB physical address */
	uint64_t	loop;	 /* loop count for vcpu */
	int		lastcpu; /* host cpu that the vcpu last ran on */
} __aligned(PAGE_SIZE);

/*
 * SVM softc, one per virtual machine.
 *
 * Field order matters: the two permission bitmaps each occupy a whole
 * number of pages, so fields that follow them (see the CTASSERT on
 * 'nptp' below) retain page alignment within the page-aligned softc.
 */
struct svm_softc {
	/*
	 * IO permission map, VMCB.ctrl.iopm_base_pa should point to this.
	 * If a bit is set, access to I/O port is intercepted.
	 */
	uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE];

	/*
	 * MSR permission bitmap, VMCB.ctrl.msrpm_base_pa should point to this.
	 * Two bits are used for each MSR with the LSB used for read access
	 * and the MSB used for write access. A value of '1' indicates that
	 * the operation is intercepted.
	 */
	uint8_t msr_bitmap[SVM_MSR_BITMAP_SIZE];

	/* Nested Paging */
	vm_offset_t nptp;

	/* Virtual machine pointer. */
	struct vm *vm;

	/* Guest VCPU h/w and s/w context. */
	struct svm_vcpu vcpu[VM_MAXCPU];

	uint32_t svm_feature;	/* SVM features from CPUID.*/

	int asid;		/* Guest Address Space Identifier */
	int vcpu_cnt;		/* number of VCPUs for this guest.*/
} __aligned(PAGE_SIZE);

/*
 * 'nptp' sits after the IOPM (3 pages) and MSRPM (2 pages) bitmaps, so its
 * offset is a multiple of PAGE_SIZE; this assertion catches any future
 * field insertion/reordering that would break that page alignment.
 */
CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);

/* Return the per-vcpu state for 'vcpu'. No bounds checking is performed. */
static __inline struct svm_vcpu *
svm_get_vcpu(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu]));
}

/* Return the VMCB (hardware-saved context) of 'vcpu'. */
static __inline struct vmcb *
svm_get_vmcb(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb));
}

/* Return the state-save area within the VMCB of 'vcpu'. */
static __inline struct vmcb_state *
svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb.state));
}

/* Return the control area within the VMCB of 'vcpu'. */
static __inline struct vmcb_ctrl *
svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb.ctrl));
}

/* Return the software-saved register context of 'vcpu'. */
static __inline struct svm_regctx *
svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].swctx));
}

/* Dump the VMCB of 'vcpu' for debugging (defined in svm.c). */
void svm_dump_vmcb(struct svm_softc *svm_sc, int vcpu);

#endif /* _SVM_SOFTC_H_ */