Lines Matching defs:vcpu (each match is prefixed with its line number in the source file)

23 static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
27 if (unlikely(vcpu_has_nv(vcpu)))
28 return vcpu_read_sys_reg(vcpu, reg);
32 return __vcpu_sys_reg(vcpu, reg);
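__vcpu_read_sys_reg() is the hyp-side read accessor shown above: with nested virtualisation active (vcpu_has_nv()) it defers to the full vcpu_read_sys_reg() helper, otherwise it returns the memory-backed copy via __vcpu_sys_reg(). A minimal, hypothetical caller (not part of the matched file) just to show the shape of the API:

/* Hypothetical helper: is the guest's EL1 stage-1 MMU enabled? */
static bool vcpu_el1_mmu_on(const struct kvm_vcpu *vcpu)
{
	return __vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_M;
}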
35 static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
37 if (unlikely(vcpu_has_nv(vcpu)))
38 vcpu_write_sys_reg(vcpu, val, reg);
40 __vcpu_sys_reg(vcpu, reg) = val;
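The write side mirrors it: an NV guest goes through vcpu_write_sys_reg() so the value lands wherever the nested context requires, everything else is stored straight into the in-memory register file. A hypothetical read-modify-write built from the two helpers (a sketch, not code from the file):

static void __vcpu_set_sctlr_el1_bits(struct kvm_vcpu *vcpu, u64 bits)
{
	u64 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);

	__vcpu_write_sys_reg(vcpu, sctlr | bits, SCTLR_EL1);
}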
43 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
46 if (unlikely(vcpu_has_nv(vcpu))) {
48 vcpu_write_sys_reg(vcpu, val, SPSR_EL1);
50 vcpu_write_sys_reg(vcpu, val, SPSR_EL2);
54 __vcpu_sys_reg(vcpu, SPSR_EL1) = val;
58 static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
63 vcpu->arch.ctxt.spsr_abt = val;
66 static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
71 vcpu->arch.ctxt.spsr_und = val;
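__vcpu_write_spsr() stores the outgoing PSTATE: under NV it picks SPSR_EL1 or SPSR_EL2 according to the target mode, otherwise it writes SPSR_EL1 in the saved context; the _abt/_und variants fill the AArch32 banked SPSRs kept in vcpu->arch.ctxt, which enter_exception32() further down relies on. A hypothetical dispatcher illustrating that split (the PSR_AA32_MODE_* values are the ones used later in this listing):

static void __vcpu_write_spsr32(struct kvm_vcpu *vcpu, u32 mode, u64 spsr)
{
	if (mode == PSR_AA32_MODE_ABT)
		__vcpu_write_spsr_abt(vcpu, spsr);
	else if (mode == PSR_AA32_MODE_UND)
		__vcpu_write_spsr_und(vcpu, spsr);
}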
91 static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
97 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
110 vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
111 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
112 __vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
115 vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL2);
116 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL2);
117 __vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL2);
124 *vcpu_pc(vcpu) = vbar + exc_offset + type;
126 old = *vcpu_cpsr(vcpu);
134 if (kvm_has_mte(kern_hyp_va(vcpu->kvm)))
170 *vcpu_cpsr(vcpu) = new;
171 __vcpu_write_spsr(vcpu, target_mode, old);
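enter_exception64() performs the architectural AArch64 exception entry on the guest's behalf: the current PC is saved to ELR_ELx, the old PSTATE goes to the SPSR via __vcpu_write_spsr(), a new PSTATE is built (the kvm_has_mte() check decides whether PSTATE.TCO is set for MTE-enabled guests), and the PC is pointed at VBAR_ELx plus the vector offset. The 'vbar + exc_offset + type' arithmetic follows the ARMv8 vector-table layout; for reference, as illustrative values rather than lines from the file:

enum {
	VECT_SYNC   = 0x000,	/* synchronous      */
	VECT_IRQ    = 0x080,	/* IRQ / vIRQ       */
	VECT_FIQ    = 0x100,	/* FIQ / vFIQ       */
	VECT_SERROR = 0x180,	/* SError / vSError */
};
/* Vector groups: current EL with SP_EL0 at 0x000, current EL with SP_ELx
 * at 0x200, lower EL AArch64 at 0x400, lower EL AArch32 at 0x600. */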
192 static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
194 u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
197 old = *vcpu_cpsr(vcpu);
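get_except32_cpsr() computes the CPSR a 32-bit guest will see on exception entry, seeding it from the current PSTATE and from SCTLR_EL1. Two SCTLR bits matter here; a sketch of how they feed the result, with bit positions taken from the ARM ARM rather than from the file:

/* Hypothetical helper deriving the SCTLR-controlled entry bits. */
static u32 aa32_entry_cpsr_bits(u32 sctlr)
{
	u32 cpsr = 0;

	if (sctlr & BIT(30))	/* SCTLR.TE: take exceptions in Thumb state     */
		cpsr |= PSR_AA32_T_BIT;
	if (sctlr & BIT(25))	/* SCTLR.EE: big-endian data on exception entry */
		cpsr |= PSR_AA32_E_BIT;

	return cpsr;
}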
286 static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
288 unsigned long spsr = *vcpu_cpsr(vcpu);
290 u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
293 *vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
294 return_address = *vcpu_pc(vcpu);
300 __vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
301 vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
305 __vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
306 vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
314 vect_offset += __vcpu_read_sys_reg(vcpu, VBAR_EL1);
316 *vcpu_pc(vcpu) = vect_offset;
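enter_exception32() is the AArch32 counterpart: the old CPSR is saved into the banked SPSR of the target mode, the return address goes into that mode's banked LR (compat_lr_abt / compat_lr_und), the new CPSR comes from get_except32_cpsr(), and the PC is set to VBAR_EL1 plus the fixed AArch32 vector slot. The vect_offset values passed in by the caller below are the architectural ones; named here purely for readability (illustrative defines, not from the file):

#define AA32_VECT_UND	0x04	/* Undefined Instruction */
#define AA32_VECT_PABT	0x0c	/* Prefetch Abort        */
#define AA32_VECT_DABT	0x10	/* Data Abort            */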
319 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
321 if (vcpu_el1_is_32bit(vcpu)) {
322 switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
324 enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
327 enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
330 enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
337 switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
339 enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
343 enter_exception64(vcpu, PSR_MODE_EL2h, except_type_sync);
347 enter_exception64(vcpu, PSR_MODE_EL2h, except_type_irq);
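kvm_inject_exception() turns the pending-exception flag state into an actual entry: a 32-bit EL1 guest gets enter_exception32() with the matching AArch32 mode and vector slot, a 64-bit guest gets enter_exception64() targeting EL1h or, with nested virtualisation, EL2h. The cases above map out as follows (a summary of the matched lines, not additional code):

/*
 * 32-bit EL1:  undefined instruction -> PSR_AA32_MODE_UND, VBAR_EL1 + 0x04
 *              prefetch abort        -> PSR_AA32_MODE_ABT, VBAR_EL1 + 0x0c
 *              data abort            -> PSR_AA32_MODE_ABT, VBAR_EL1 + 0x10
 * 64-bit:      EL1 sync              -> PSR_MODE_EL1h, except_type_sync
 *              EL2 sync              -> PSR_MODE_EL2h, except_type_sync (NV)
 *              EL2 IRQ               -> PSR_MODE_EL2h, except_type_irq  (NV)
 */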
365 void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
367 if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
368 kvm_inject_exception(vcpu);
369 vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
370 vcpu_clear_flag(vcpu, EXCEPT_MASK);
371 } else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
372 kvm_skip_instr(vcpu);
373 vcpu_clear_flag(vcpu, INCREMENT_PC);
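__kvm_adjust_pc() is the single point where pending PC changes are applied before re-entering the guest: PENDING_EXCEPTION triggers the injection above and is then cleared along with the EXCEPT_MASK bits, otherwise INCREMENT_PC makes kvm_skip_instr() step past the emulated instruction. A sketch of how the request side typically looks (the EXCEPT_AA64_EL1_SYNC flag name is an assumption based on the EL1h/sync case matched earlier; the setters run outside hyp):

	/* Ask for a synchronous exception to be injected at the guest's EL1. */
	vcpu_set_flag(vcpu, PENDING_EXCEPTION);
	vcpu_set_flag(vcpu, EXCEPT_AA64_EL1_SYNC);

	/* ... on the next guest entry, __kvm_adjust_pc(vcpu) consumes the
	 * flags and kvm_inject_exception() performs the entry. */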