// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>

struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}
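
/*
 * Illustrative sketch only (not part of the switch logic): a caller on
 * the exit path that needs the guest FP/SIMD state written back would
 * key off the ownership flag tested above, e.g.:
 *
 *	if (guest_owns_fp_regs(vcpu))
 *		__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
 *
 * kvm_hyp_handle_fpsimd() below is what performs the matching
 * HOST_OWNED -> GUEST_OWNED transition on the first trapped access.
 */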

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

#define compute_clr_set(vcpu, reg, clr, set)				\
	do {								\
		u64 hfg;						\
		hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0;	\
		set |= hfg & __ ## reg ## _MASK;			\
		clr |= ~hfg & __ ## reg ## _nMASK;			\
	} while(0)

#define reg_to_fgt_group_id(reg)					\
	({								\
		enum fgt_group_id id;					\
		switch(reg) {						\
		case HFGRTR_EL2:					\
		case HFGWTR_EL2:					\
			id = HFGxTR_GROUP;				\
			break;						\
		case HFGITR_EL2:					\
			id = HFGITR_GROUP;				\
			break;						\
		case HDFGRTR_EL2:					\
		case HDFGWTR_EL2:					\
			id = HDFGRTR_GROUP;				\
			break;						\
		case HAFGRTR_EL2:					\
			id = HAFGRTR_GROUP;				\
			break;						\
		default:						\
			BUILD_BUG_ON(1);				\
		}							\
									\
		id;							\
	})

#define compute_undef_clr_set(vcpu, kvm, reg, clr, set)			\
	do {								\
		u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)];	\
		set |= hfg & __ ## reg ## _MASK;			\
		clr |= hfg & __ ## reg ## _nMASK;			\
	} while(0)

#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set)		\
	do {								\
		u64 c = 0, s = 0;					\
									\
		ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);	\
		if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))		\
			compute_clr_set(vcpu, reg, c, s);		\
									\
		compute_undef_clr_set(vcpu, kvm, reg, c, s);		\
									\
		s |= set;						\
		c |= clr;						\
		if (c || s) {						\
			u64 val = __ ## reg ## _nMASK;			\
			val |= s;					\
			val &= ~c;					\
			write_sysreg_s(val, SYS_ ## reg);		\
		}							\
	} while(0)

#define update_fgt_traps(hctxt, vcpu, kvm, reg)		\
	update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)

/*
 * Validate the fine grain trap masks.
 * Check that the masks do not overlap and that all bits are accounted for.
 */
#define CHECK_FGT_MASKS(reg)						\
	do {								\
		BUILD_BUG_ON((__ ## reg ## _MASK) & (__ ## reg ## _nMASK)); \
		BUILD_BUG_ON(~((__ ## reg ## _RES0) ^ (__ ## reg ## _MASK) ^ \
			       (__ ## reg ## _nMASK)));			\
	} while(0)

static inline bool cpu_has_amu(void)
{
	u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_unsigned_field(pfr0,
		ID_AA64PFR0_EL1_AMU_SHIFT);
}
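
/*
 * Worked example of the polarity handling above (a sketch, using the
 * generic __reg_* names): for a positive-polarity trap bit (1 == trap,
 * i.e. one covered by __reg_MASK), a 1 in the guest hypervisor's view
 * ends up in 'set'; for a negative-polarity bit (0 == trap, i.e. one
 * covered by __reg_nMASK), a 0 in the guest's view ends up in 'clr'.
 * The value finally programmed is therefore:
 *
 *	val = __reg_nMASK;	// negative-polarity bits default to "no trap"
 *	val |= s;		// 1 => trap for positive-polarity bits
 *	val &= ~c;		// 0 => trap for negative-polarity bits
 */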

static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	CHECK_FGT_MASKS(HFGRTR_EL2);
	CHECK_FGT_MASKS(HFGWTR_EL2);
	CHECK_FGT_MASKS(HFGITR_EL2);
	CHECK_FGT_MASKS(HDFGRTR_EL2);
	CHECK_FGT_MASKS(HDFGWTR_EL2);
	CHECK_FGT_MASKS(HAFGRTR_EL2);
	CHECK_FGT_MASKS(HCRX_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
	update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
			    cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
			    HFGxTR_EL2_TCR_EL1_MASK : 0);
	update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
	update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
	update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);

	if (cpu_has_amu())
		update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
}

#define __deactivate_fgt(hctxt, vcpu, kvm, reg)			\
	do {								\
		if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) ||	\
		    kvm->arch.fgu[reg_to_fgt_group_id(reg)])		\
			write_sysreg_s(ctxt_sys_reg(hctxt, reg),	\
				       SYS_ ## reg);			\
	} while(0)

static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	__deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
	else
		__deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
	__deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
	__deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);

	if (cpu_has_amu())
		__deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		write_sysreg(0, pmselr_el0);

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		u64 hcrx = vcpu->arch.hcrx_el2;

		if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
			u64 clr = 0, set = 0;

			compute_clr_set(vcpu, HCRX_EL2, clr, set);

			hcrx |= set;
			hcrx &= ~clr;
		}

		write_sysreg_s(hcrx, SYS_HCRX_EL2);
	}

	__activate_traps_hfgxtr(vcpu);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);

	__deactivate_traps_hfgxtr(vcpu);
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}
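
/*
 * Expected call pairing, as a sketch (the real callers live in the
 * VHE/nVHE specific switch code, which may interleave other work):
 *
 *	___activate_traps(vcpu);
 *	__activate_traps_common(vcpu);
 *	// ... run the guest ...
 *	__deactivate_traps_common(vcpu);
 *	___deactivate_traps(vcpu);
 *
 * i.e. every trap configuration installed on guest entry is unwound, in
 * reverse order, on the way back to the host.
 */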

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);

	/*
	 * Finish potential single step before executing the prologue
	 * instruction.
	 */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	return true;
}

static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here: */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		break;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap. Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	if (has_vhe() || has_hvhe()) {
		reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;

		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;

		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	/* Write out the host state if it's in the registers */
	if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;

	return true;
}
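
/*
 * Summary of the lazy switch above (descriptive only): on the first
 * trapped FP/SVE access the FP/SVE traps are disabled, the host state
 * is written back, the guest state is loaded, and fp_state moves from
 * FP_STATE_HOST_OWNED to FP_STATE_GUEST_OWNED; with the traps now off,
 * the guest uses FP/SIMD natively until the next world switch.
 */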

static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

static inline bool esr_is_ptrauth_trap(u64 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
} while(0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}
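
/*
 * For reference, __ptrauth_save_key(ctxt, APIA) above expands to:
 *
 *	ctxt_sys_reg(ctxt, APIAKEYLO_EL1) = read_sysreg_s(SYS_APIAKEYLO_EL1);
 *	ctxt_sys_reg(ctxt, APIAKEYHI_EL1) = read_sysreg_s(SYS_APIAKEYHI_EL1);
 *
 * and once HCR_EL2.{API,APK} are set, the guest's key accesses stop
 * trapping, so kvm_hyp_handle_ptrauth() will not run again until the
 * traps are re-armed.
 */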

static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/*
	 * We only get here for 64bit guests, 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 */
	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			if (is_hyp_ctxt(vcpu)) {
				ctxt = vcpu_hptimer(vcpu);
				break;
			}

			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	default:
		return false;
	}

	val = arch_timer_read_cntpct_el0();

	if (ctxt->offset.vm_offset)
		val -= *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		val -= *kern_hyp_va(ctxt->offset.vcpu_offset);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
	 * control bits are still functional. The architecture requires these be
	 * RES0 on systems that do not implement FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest writes
	 * to TCR_EL1.{HA,HD} here.
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return kvm_hyp_handle_ptrauth(vcpu, exit_code);

	if (kvm_hyp_handle_cntpct(vcpu))
		return true;

	return false;
}

static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
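
/*
 * Note on the inverted logic in kvm_hyp_handle_memory_fault() above:
 * __populate_fault_info() failing means the fault syndrome (HPFAR/FAR)
 * could not be reliably captured, presumably because the guest's
 * stage-1 tables changed under our feet; returning true re-enters the
 * guest so that the access faults again and can be reported cleanly.
 */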

static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	exit_handler_fn fn;

	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}
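
/*
 * Shape of a table returned by kvm_get_exit_handler_array(), as an
 * illustrative sketch (the real arrays live in the VHE and nVHE switch
 * code and differ between the two):
 *
 *	static const exit_handler_fn hyp_exit_handlers[] = {
 *		[0 ... ESR_ELx_EC_MAX]	= NULL,
 *		[ESR_ELx_EC_SYS64]	= kvm_hyp_handle_sysreg,
 *		[ESR_ELx_EC_FP_ASIMD]	= kvm_hyp_handle_fpsimd,
 *		[ESR_ELx_EC_DABT_LOW]	= kvm_hyp_handle_dabt_low,
 *	};
 *
 * kvm_hyp_handle_exit() simply indexes it by ESR_ELx.EC.
 */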

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Save PSTATE early so that we can evaluate the vcpu mode
	 * early on.
	 */
	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Check whether we want to repaint the state one way or
	 * another.
	 */
	early_exit_filter(vcpu, exit_code);

	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code))
		goto guest;
exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */