/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/loongarch.h>

/* LoongArch KVM register IDs */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
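
/*
 * Illustrative sketch (assumption, not part of this header): user space
 * builds these register IDs with the KVM_IOC_CSRID()/KVM_IOC_CPUCFG()
 * encoders from the uapi header, so the index extracted here round-trips:
 *
 *	u64 id  = KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT);
 *	int idx = KVM_GET_IOC_CSR_IDX(id);	// idx == LOONGARCH_CSR_ESTAT
 */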

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21
/* Memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS		0

#define KVM_HALT_POLL_NS_DEFAULT	500000
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};
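
/*
 * Hedged sketch (assumed call site, not confirmed by this header): the
 * vCPU run loop dispatches through the currently selected world-switch
 * implementation, e.g.:
 *
 *	ret = kvm_loongarch_ops->enter_guest(vcpu->run, vcpu);
 */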

#define MAX_PGTABLE_LEVELS	4

struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int  root_level;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};
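
/*
 * Hedged sketch of how an emulation result is typically folded into the
 * RESUME_* flags above; emulate_insn() is a hypothetical helper:
 *
 *	switch (emulate_insn(vcpu)) {
 *	case EMULATE_DONE:
 *		return RESUME_GUEST;	// re-enter the guest directly
 *	case EMULATE_DO_MMIO:
 *		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 *		return RESUME_HOST;	// userspace completes the access
 *	default:
 *		return RESUME_HOST;
 *	}
 */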

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 3)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 4)
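
/*
 * Hedged sketch: these bits are tracked in vcpu->arch.aux_inuse (see below)
 * to decide which lazily loaded state must be saved on a switch, e.g.:
 *
 *	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU)
 *		kvm_save_fpu(&vcpu->arch.fpu);	// assumed save helper
 */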

struct kvm_vcpu_arch {
	/*
	 * The guest entry points are stored as unsigned long rather than as
	 * pointer-to-function types so the values can be loaded into
	 * registers directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs used when handling exits from the guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* GPR used as I/O source/target */
	u32 io_gpr;

	/* KVM register to control the count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int  esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vCPU's VPID */
	u64 vpid;

	/* Frequency of the stable timer */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}
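
/*
 * Usage sketch: software-shadowed guest CSRs are indexed by CSR number,
 * e.g. for the guest ESTAT shadow:
 *
 *	writel_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, estat);
 *	estat = readl_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
 */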

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}
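
/*
 * Hedged sketch: the helpers above gate feature enablement on the guest's
 * CPUCFG2 image, e.g. before allowing vector state to be turned on:
 *
 *	if (!kvm_guest_has_lsx(&vcpu->arch))
 *		return -EINVAL;		// LSX not exposed to this guest
 */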

/* Debug: dump vCPU state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}
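
/*
 * Hedged sketch: emulation paths call update_pc() once an instruction has
 * been handled in full, so the guest resumes after it; emulate_insn() is
 * hypothetical:
 *
 *	if (emulate_insn(vcpu) == EMULATE_DONE)
 *		update_pc(&vcpu->arch);
 */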

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an instruction fetch fault.
 * @arch:	Architecture-specific vCPU state.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}
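
/*
 * Hedged sketch: a fault path can use this to distinguish an instruction
 * fetch from a data load when resolving a guest TLB load fault:
 *
 *	if (kvm_is_ifetch_fault(&vcpu->arch))
 *		inst_fetch = true;	// BADV holds the faulting PC itself
 */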

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);
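
/*
 * Hedged sketch of the intended split: a CSR flagged HW_GCSR is written to
 * hardware directly, while a SW_GCSR one only updates the shadow array:
 *
 *	if (get_gcsr_flag(id) & HW_GCSR)
 *		set_hw_gcsr(id, val);
 *	else if (get_gcsr_flag(id) & SW_GCSR)
 *		writel_sw_gcsr(vcpu->arch.csr, id, val);
 */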

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */