// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
#define __ARM64_KVM_HYP_SYSREG_SR_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
}

static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}

static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;

	if (!vcpu)
		vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);

	return vcpu;
}

static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);

	return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}

static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu;

	if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
		return false;

	vcpu = ctxt_to_vcpu(ctxt);
	return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
}

static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
	ctxt_sys_reg(ctxt, CPACR_EL1) = read_sysreg_el1(SYS_CPACR);
	ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
	ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
	ctxt_sys_reg(ctxt, TCR_EL1) = read_sysreg_el1(SYS_TCR);
	if (cpus_have_final_cap(ARM64_HAS_TCR2))
		ctxt_sys_reg(ctxt, TCR2_EL1) = read_sysreg_el1(SYS_TCR2);
	ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
	ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
	ctxt_sys_reg(ctxt, AFSR1_EL1) = read_sysreg_el1(SYS_AFSR1);
	ctxt_sys_reg(ctxt, FAR_EL1) = read_sysreg_el1(SYS_FAR);
	ctxt_sys_reg(ctxt, MAIR_EL1) = read_sysreg_el1(SYS_MAIR);
	ctxt_sys_reg(ctxt, VBAR_EL1) = read_sysreg_el1(SYS_VBAR);
	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
	ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
	if (ctxt_has_s1pie(ctxt)) {
		ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
		ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
	}
	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
	ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
		ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
	}

	ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
	ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
	ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
}

static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
	/*
	 * Guest PSTATE gets saved at guest fixup time in all
	 * cases. We still need to handle the nVHE host side here.
	 */
	if (!has_vhe() && ctxt->__hyp_running_vcpu)
		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
}

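/*
 * For reference, a minimal sketch of how the save helpers above compose,
 * modelled on the nVHE world switch (the authoritative callers live in
 * arch/arm64/kvm/hyp/nvhe/sysreg-sr.c and arch/arm64/kvm/hyp/vhe/sysreg-sr.c):
 *
 *	void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
 *	{
 *		__sysreg_save_el1_state(ctxt);
 *		__sysreg_save_common_state(ctxt);
 *		__sysreg_save_user_state(ctxt);
 *		__sysreg_save_el2_return_state(ctxt);
 *	}
 */
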
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
}

static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
}

static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1), vmpidr_el2);

	if (has_vhe() ||
	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	} else if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with nVHE's __activate_traps().
		 */
		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
	if (cpus_have_final_cap(ARM64_HAS_TCR2))
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR2_EL1), SYS_TCR2);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1), SYS_FAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1), SYS_MAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1), SYS_VBAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
	if (ctxt_has_s1pie(ctxt)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
		write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
	}
	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
		write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
	}

	if (!has_vhe() &&
	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with nVHE's __deactivate_traps().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}

	write_sysreg(ctxt_sys_reg(ctxt, SP_EL1), sp_el1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1), SYS_SPSR);
}

/* Read the VCPU state's PSTATE, but translate (v)EL2 to EL1. */
static inline u64 to_hw_pstate(const struct kvm_cpu_context *ctxt)
{
	u64 mode = ctxt->regs.pstate & (PSR_MODE_MASK | PSR_MODE32_BIT);

	switch (mode) {
	case PSR_MODE_EL2t:
		mode = PSR_MODE_EL1t;
		break;
	case PSR_MODE_EL2h:
		mode = PSR_MODE_EL1h;
		break;
	}

	return (ctxt->regs.pstate & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
}

static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = to_hw_pstate(ctxt);
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest. Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
	write_sysreg_el2(pstate, SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
}

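/*
 * Likewise, a sketch of how the restore helpers compose on the nVHE return
 * path (see arch/arm64/kvm/hyp/nvhe/sysreg-sr.c for the authoritative
 * version). __sysreg_restore_el1_state() keys off ctxt->__hyp_running_vcpu,
 * so the same composition serves both the host and guest contexts:
 *
 *	void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
 *	{
 *		__sysreg_restore_el1_state(ctxt);
 *		__sysreg_restore_common_state(ctxt);
 *		__sysreg_restore_user_state(ctxt);
 *		__sysreg_restore_el2_return_state(ctxt);
 *	}
 */
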
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.spsr_abt = read_sysreg(spsr_abt);
	vcpu->arch.ctxt.spsr_und = read_sysreg(spsr_und);
	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}

static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	write_sysreg(vcpu->arch.ctxt.spsr_abt, spsr_abt);
	write_sysreg(vcpu->arch.ctxt.spsr_und, spsr_und);
	write_sysreg(vcpu->arch.ctxt.spsr_irq, spsr_irq);
	write_sysreg(vcpu->arch.ctxt.spsr_fiq, spsr_fiq);

	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}

#endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */