/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }
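
/*
 * Illustrative note: the offset of an injected exception vector is the sum
 * of one of the base offsets above (selected by the source exception level
 * and register width) and an exception_type offset. For example, an IRQ
 * taken from a lower EL running AArch64 vectors to
 * VBAR_ELx + LOWER_EL_AArch64_VECTOR + except_type_irq, i.e. offset 0x480.
 */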

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * are set in SCTLR_EL1, so that we can detect when the guest
		 * MMU gets turned on and perform the necessary cache
		 * maintenance at that point.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}
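
/*
 * Illustrative example: on a VHE host with RAS, FWB and EVT but without MTE,
 * and for a 64-bit EL1 guest, the value built above amounts to
 *
 *	HCR_GUEST_FLAGS | HCR_E2H | HCR_TEA | HCR_TERR | HCR_FWB | HCR_TID4
 *
 * with HCR_RW left set; the exact set of traps depends on the capabilities
 * detected at boot and on the VM's configuration.
 */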

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
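
/*
 * Illustrative sketch (not part of this header's API): a sysreg trap handler
 * typically extracts the transfer register from ESR_EL2 (see
 * kvm_vcpu_sys_get_rt() below) and then goes through the accessors above so
 * that register 31 behaves as XZR:
 *
 *	int rt = kvm_vcpu_sys_get_rt(vcpu);
 *
 *	if (is_write)				// guest MSR <sysreg>, Xt
 *		val = vcpu_get_reg(vcpu, rt);
 *	else					// guest MRS Xt, <sysreg>
 *		vcpu_set_reg(vcpu, rt, val);
 *
 * where 'is_write' and 'val' are placeholders for the handler's own state.
 */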

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
		(ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or if
	 * both the E2H and TGE bits are set. The latter means we are in
	 * the user space of the VHE kernel, which the Armv8.1 ARM
	 * describes as 'InHost'.
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} combination isn't really
	 * handled in the rest of the KVM code, and will result in a
	 * misbehaving guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
}
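
/*
 * Illustrative summary of __is_hyp_ctxt() for a vcpu with NV enabled:
 *
 *	vcpu mode	HCR_EL2.E2H	HCR_EL2.TGE	hyp context?
 *	EL2h/EL2t	   any		   any		   yes
 *	EL1/EL0		    1		    1		   yes ("InHost")
 *	EL1/EL0		    0		    1		   yes (unsupported combo)
 *	EL1/EL0		   any		    0		   no
 */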

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
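
/*
 * Worked example: the DIT state is sampled from the AArch64 view (bit 24 in
 * the table above), both overlapping bits (24 and 21) are cleared, and DIT
 * is then placed at bit 21, where AArch32 expects it; bit 24 (J) is left as
 * RES0. Every other bit passes through unchanged.
 */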

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
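
/*
 * Note on the arithmetic above: HPFAR_EL2.FIPA holds the faulting IPA's bits
 * above the page offset (IPA[51:12]) in register bits [43:4], so masking off
 * the low bits and shifting left by 8 yields a page-aligned IPA. The offset
 * within the page, when needed, comes from FAR_EL2 via kvm_vcpu_get_hfar().
 */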

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
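
/*
 * Illustrative sketch (not from the mainline sources): an MMIO data abort
 * with valid syndrome information is typically decoded with the helpers
 * above along the following lines:
 *
 *	if (kvm_vcpu_dabt_isvalid(vcpu)) {
 *		bool is_write = kvm_vcpu_dabt_iswrite(vcpu);
 *		int len = kvm_vcpu_dabt_get_as(vcpu);	// 1, 2, 4 or 8 bytes
 *		int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *
 *		if (is_write)
 *			data = vcpu_get_reg(vcpu, rt);
 *		...
 *	}
 *
 * For example, an ESR_EL2 with SAS=0b10 describes a 4-byte access
 * (1 << 2 == 4).
 */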

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
	unsigned long esr = kvm_vcpu_get_esr(vcpu);

	BUG_ON(!esr_fsc_is_permission_fault(esr));
	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end-up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		return kvm_vcpu_trap_is_permission_fault(vcpu);
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
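
/*
 * Worked example (assuming a little-endian host): a big-endian guest stores
 * w0 = 0x11223344 to an emulated device. vcpu_data_guest_to_host(vcpu,
 * 0x11223344, 4) returns be32_to_cpu(0x11223344) == 0x44332211, so that
 * copying this value to the MMIO data buffer in host byte order produces
 * the byte sequence 11 22 33 44 that the guest expects to appear on the
 * bus. vcpu_data_host_to_guest() performs the inverse conversion for reads.
 */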

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
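
/*
 * Note: incrementing the PC and pending an exception are mutually exclusive
 * for a given exit, hence the WARN_ON()s above. An illustrative injection
 * site (flag name as used by the exception injection code) looks like:
 *
 *	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 *
 * with the actual PC/PSTATE rewrite performed later, on the next guest
 * entry.
 */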

static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

		if (!vcpu_has_sve(vcpu) ||
		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) &&
		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	kvm_write_cptr_el2(val);
}
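
/*
 * Note (illustrative): with VHE (and hVHE), HCR_EL2.E2H is set, so the traps
 * normally configured through CPTR_EL2 are programmed via the CPACR_EL1
 * encoding instead; kvm_write_cptr_el2() hides that distinction, which is
 * why kvm_get_reset_cptr_el2() builds CPACR_EL1_* bits for the VHE/hVHE
 * cases and CPTR_EL2_* bits for nVHE.
 */
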
#endif /* __ARM64_KVM_EMULATE_H__ */