Lines Matching defs:vcpu

44 static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
46 if (!vcpu_el1_is_32bit(vcpu))
49 __vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
52 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
63 if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
69 #define compute_clr_set(vcpu, reg, clr, set) \
72 hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \
102 #define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
109 #define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
114 if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
115 compute_clr_set(vcpu, reg, c, s); \
117 compute_undef_clr_set(vcpu, kvm, reg, c, s); \
129 #define update_fgt_traps(hctxt, vcpu, kvm, reg) \
130 update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
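
The compute_clr_set/update_fgt_traps_cs machinery above reduces to a clear-then-set rewrite of each fine-grained trap register: start from a baseline value, clear the bits the guest's shadow register says must be 0, and set the bits it says must be 1 (plus anything the fine-grained UNDEF bookkeeping adds). A minimal sketch of that pattern, assuming the clr/set masks have already been derived; apply_clr_set is a placeholder name, not a kernel helper:

#include <stdint.h>

static uint64_t apply_clr_set(uint64_t baseline, uint64_t clr, uint64_t set)
{
	uint64_t val = baseline;

	val &= ~clr;	/* traps the guest hypervisor does not want */
	val |= set;	/* traps it, or the FGU machinery, requires */
	return val;
}
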
151 static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
154 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
167 update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
168 update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
171 update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
172 update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
173 update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
176 update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
179 #define __deactivate_fgt(hctxt, vcpu, kvm, reg) \
181 if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
187 static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
190 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
195 __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
199 __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
200 __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
201 __deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
202 __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
205 __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
208 static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
227 vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
231 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
234 u64 hcrx = vcpu->arch.hcrx_el2;
235 if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
238 compute_clr_set(vcpu, HCRX_EL2, clr, set);
247 __activate_traps_hfgxtr(vcpu);
250 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
260 vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
266 __deactivate_traps_hfgxtr(vcpu);
269 static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
277 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
280 static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
288 if (vcpu->arch.hcr_el2 & HCR_VSE) {
289 vcpu->arch.hcr_el2 &= ~HCR_VSE;
290 vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
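
The HCR_VSE handling in ___deactivate_traps deals with a write-one, cleared-by-hardware bit: KVM sets VSE to inject a virtual SError, and the CPU clears it once the guest has taken that SError, so on the way out the live value has to be folded back into the vcpu's shadow copy. A sketch of the pattern; SKETCH_HCR_VSE is a stand-in constant, not the kernel's HCR_VSE definition:

#include <stdint.h>

#define SKETCH_HCR_VSE	(UINT64_C(1) << 8)	/* HCR_EL2.VSE: virtual SError pending */

static uint64_t resync_vse(uint64_t shadow_hcr, uint64_t live_hcr)
{
	if (shadow_hcr & SKETCH_HCR_VSE) {
		shadow_hcr &= ~SKETCH_HCR_VSE;			/* drop our injection request ... */
		shadow_hcr |= live_hcr & SKETCH_HCR_VSE;	/* ... unless hardware still has it pending */
	}
	return shadow_hcr;
}
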
294 static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
296 return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
299 static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
301 *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
302 arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
303 write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
309 *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
310 write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
315 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
317 sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
318 __sve_restore_state(vcpu_sve_pffr(vcpu),
319 &vcpu->arch.ctxt.fp_regs.fpsr);
320 write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
329 static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
338 sve_guest = vcpu_has_sve(vcpu);
339 esr_ec = kvm_vcpu_trap_get_class(vcpu);
377 __hyp_sve_restore_guest(vcpu);
379 __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
383 write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
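
kvm_hyp_handle_fpsimd implements a lazy FP/SVE switch: the FP traps stay armed until the guest actually executes an FP instruction, at which point the trap handler loads the guest state, disables the trap, and returns true so the faulting instruction is re-executed. A compressed sketch of that lazy-load shape, with a hypothetical sketch_fp_state in place of the vcpu's real ownership tracking:

#include <stdbool.h>

struct sketch_fp_state {
	bool guest_loaded;	/* has the guest's FP/SVE state been restored yet? */
};

static bool sketch_handle_fp_trap(struct sketch_fp_state *st)
{
	if (!st->guest_loaded) {
		/* 1. re-enable FP/SVE access at EL2 so the registers can be touched,
		 * 2. restore the guest's FPSIMD or SVE register file,
		 * 3. record that the guest now owns the state. */
		st->guest_loaded = true;
	}

	/* Returning true re-runs the instruction that trapped; it now succeeds. */
	return true;
}
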
390 static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
392 u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
393 int rt = kvm_vcpu_sys_get_rt(vcpu);
394 u64 val = vcpu_get_reg(vcpu, rt);
400 if (vcpu->arch.hcr_el2 & HCR_TVM)
441 __kvm_skip_instr(vcpu);
445 static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
456 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
461 if (vcpu_has_nv(vcpu)) {
462 if (is_hyp_ctxt(vcpu)) {
463 ctxt = vcpu_hptimer(vcpu);
468 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
469 if (!vcpu_el2_e2h_is_set(vcpu))
476 ctxt = vcpu_ptimer(vcpu);
489 vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
490 __kvm_skip_instr(vcpu);
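
The value kvm_hyp_handle_cntpct hands back is the hardware counter adjusted by the offset of whichever timer context applies (the EL2 physical timer for a vEL2 guest, the ordinary physical timer otherwise), after which the trapped read is skipped. The arithmetic, with placeholder types rather than KVM's arch_timer_context:

#include <stdint.h>

struct sketch_timer_ctx {
	uint64_t cnt_offset;	/* offset KVM programs for this timer context */
};

static uint64_t sketch_emulated_cntpct(const struct sketch_timer_ctx *ctxt,
				       uint64_t hw_cntpct)
{
	/* Guest-visible time is the physical counter minus the programmed offset. */
	return hw_cntpct - ctxt->cnt_offset;
}
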
494 static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
496 u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
497 int rt = kvm_vcpu_sys_get_rt(vcpu);
498 u64 val = vcpu_get_reg(vcpu, rt);
514 __kvm_skip_instr(vcpu);
518 static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
521 handle_tx2_tvm(vcpu))
525 handle_ampere1_tcr(vcpu))
529 __vgic_v3_perform_cpuif_access(vcpu) == 1)
532 if (kvm_hyp_handle_cntpct(vcpu))
538 static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
541 __vgic_v3_perform_cpuif_access(vcpu) == 1)
547 static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
549 if (!__populate_fault_info(vcpu))
554 static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
556 static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
559 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
561 if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
567 valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
568 kvm_vcpu_dabt_isvalid(vcpu) &&
569 !kvm_vcpu_abt_issea(vcpu) &&
570 !kvm_vcpu_abt_iss1tw(vcpu);
573 int ret = __vgic_v2_perform_cpuif_access(vcpu);
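
The valid predicate built in kvm_hyp_handle_dabt_low gates whether the data abort is even a candidate for direct emulation at EL2 (the vGIC-v2 CPU interface access that follows): it must be a stage-2 translation fault with a decodable syndrome, and neither an external abort nor a fault on a stage-1 page-table walk. Restated as a self-contained helper with descriptive parameter names, not the kernel's accessors:

#include <stdbool.h>

static bool sketch_dabt_is_emulatable(bool is_translation_fault,
				      bool syndrome_valid,
				      bool is_external_abort,
				      bool is_stage1_ptw_fault)
{
	return is_translation_fault && syndrome_valid &&
	       !is_external_abort && !is_stage1_ptw_fault;
}
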
589 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
591 static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
599 static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
601 const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
604 fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
607 return fn(vcpu, exit_code);
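
kvm_hyp_handle_exit is a straight table dispatch: fetch the handler array for the current configuration, index it with the ESR_EL2 exception class, and call the entry if one is installed. A self-contained sketch of that shape; everything prefixed sketch_ is illustrative, not KVM's types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_NUM_EC	64	/* ESR_ELx.EC is a 6-bit field */

struct sketch_vcpu;
typedef bool (*sketch_exit_handler_fn)(struct sketch_vcpu *vcpu, uint64_t *exit_code);

static bool sketch_dispatch_exit(const sketch_exit_handler_fn *handlers,
				 struct sketch_vcpu *vcpu, unsigned int esr_ec,
				 uint64_t *exit_code)
{
	sketch_exit_handler_fn fn = NULL;

	if (esr_ec < SKETCH_NUM_EC)
		fn = handlers[esr_ec];

	/* true: the exit was fixed up at EL2 and the guest can be re-entered. */
	return fn ? fn(vcpu, exit_code) : false;
}

A NULL entry simply means there is nothing the hypervisor can fix up for that class, so the exit is handed back to the host.
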
612 static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
617 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
622 vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
623 *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
625 write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
627 vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
635 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
638 * Save PSTATE early so that we can evaluate the vcpu mode
641 synchronize_vcpu_pstate(vcpu, exit_code);
647 early_exit_filter(vcpu, exit_code);
650 vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
654 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
678 if (kvm_hyp_handle_exit(vcpu, exit_code))
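
Taken together, fixup_guest_exit is the decision point between resolving an exit at EL2 and punting it to the host run loop: snapshot PSTATE first (so vcpu-mode checks see the right value), run the early exit filter, then for trap exits read ESR_EL2 and try the per-class handler. A compressed sketch of that flow using stub helpers, not the kernel functions:

#include <stdbool.h>
#include <stdint.h>

struct sketch_vcpu;

/* Stubs standing in for synchronize_vcpu_pstate(), early_exit_filter() and
 * kvm_hyp_handle_exit(); they carry no logic of their own here. */
static void sketch_sync_pstate(struct sketch_vcpu *vcpu, uint64_t *exit_code) { (void)vcpu; (void)exit_code; }
static void sketch_early_filter(struct sketch_vcpu *vcpu, uint64_t *exit_code) { (void)vcpu; (void)exit_code; }
static bool sketch_try_handlers(struct sketch_vcpu *vcpu, uint64_t *exit_code) { (void)vcpu; (void)exit_code; return false; }

static bool sketch_fixup_guest_exit(struct sketch_vcpu *vcpu, uint64_t *exit_code)
{
	sketch_sync_pstate(vcpu, exit_code);	/* PSTATE before anything evaluates the vcpu mode */
	sketch_early_filter(vcpu, exit_code);	/* may rewrite *exit_code before the handlers run */

	if (sketch_try_handlers(vcpu, exit_code))
		return true;			/* fixed up at EL2: re-enter the guest */

	return false;				/* defer the exit to the host run loop */
}
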