// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
#include "trace.h"

unsigned long vpid_mask;
struct kvm_world_switch *kvm_loongarch_ops;
static int gcsr_flag[CSR_MAX_NUMS];
static struct kvm_context __percpu *vmcs;

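/*
 * Look up how a guest CSR is handled. Returns the SW_GCSR/HW_GCSR flag
 * recorded by kvm_init_gcsr_flag(), or INVALID_GCSR for an out-of-range
 * CSR number.
 */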
int get_gcsr_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		return gcsr_flag[csr];

	return INVALID_GCSR;
}
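
/*
 * A minimal sketch (not the exact in-tree helpers) of how the CSR
 * emulation paths are expected to dispatch on this flag; sw_csr_copy
 * stands in for whatever software CSR storage the emulator keeps:
 *
 *	if (get_gcsr_flag(csrid) & HW_GCSR)
 *		val = gcsr_read(csrid);		// hardware-backed: gcsrrd
 *	else
 *		val = sw_csr_copy[csrid];	// software-emulated copy
 */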

static inline void set_gcsr_sw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= SW_GCSR;
}

static inline void set_gcsr_hw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= HW_GCSR;
}

/*
 * The default value of gcsr_flag[CSR] is 0, and this function sets the
 * flag to SW_GCSR (1) or HW_GCSR (2) depending on whether the guest CSR
 * is emulated in software or backed by hardware. The flag is consulted
 * by the get/set_gcsr paths: HW_GCSR registers are accessed with
 * gcsrrd/gcsrwr, while SW_GCSR registers are emulated with a software
 * copy of the CSR.
 */
static void kvm_init_gcsr_flag(void)
{
	set_gcsr_hw_flag(LOONGARCH_CSR_CRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_EUEN);
	set_gcsr_hw_flag(LOONGARCH_CSR_MISC);
	set_gcsr_hw_flag(LOONGARCH_CSR_ECFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT);
	set_gcsr_hw_flag(LOONGARCH_CSR_ERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADI);
	set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_ASID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDL);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDH);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1);
	set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE);
	set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_CPUID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS0);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS1);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS2);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS4);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS5);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS6);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS7);
	set_gcsr_hw_flag(LOONGARCH_CSR_TMID);
	set_gcsr_hw_flag(LOONGARCH_CSR_TCFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_TVAL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR);
	set_gcsr_hw_flag(LOONGARCH_CSR_CNTC);
	set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3);

	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE);
	set_gcsr_sw_flag(LOONGARCH_CSR_CTAG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE);

	set_gcsr_sw_flag(LOONGARCH_CSR_FWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_FWPS);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPS);

	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3);
}
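/*
 * Allocate a fresh VPID for @vcpu on @cpu. The bits above vpid_mask in
 * vpid_cache act as a version number: when the VPID number space wraps,
 * old guest TLB entries on this CPU may alias the new allocations, so
 * the whole TLB is flushed before a new cycle starts. VPID 0 stays
 * reserved for the root (host) context.
 */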
static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long vpid;
	struct kvm_context *context;

	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	vpid = context->vpid_cache + 1;
	if (!(vpid & vpid_mask)) {
		/* finish round of vpid loop */
		if (unlikely(!vpid))
			vpid = vpid_mask + 1;

		++vpid; /* vpid 0 reserved for root */

		/* start new vpid cycle */
		kvm_flush_tlb_all();
	}

	context->vpid_cache = vpid;
	vcpu->arch.vpid = vpid;
}
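/*
 * Make sure the vCPU enters the guest with a usable VPID on this CPU,
 * allocating a new one if the cached VPID is stale or from an older
 * version. Runs on the guest-entry path, so preemption is assumed to
 * be disabled here (smp_processor_id() below relies on that).
 */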
void kvm_check_vpid(struct kvm_vcpu *vcpu)
{
	int cpu;
	bool migrated;
	unsigned long ver, old, vpid;
	struct kvm_context *context;

	cpu = smp_processor_id();
	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the vCPU's guest TLB state on this CPU may be stale.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	migrated = (vcpu->cpu != cpu);

	/*
	 * Check if our vpid is of an older version
	 *
	 * We also discard the stored vpid if we've executed on
	 * another CPU, as the guest mappings may have changed without
	 * hypervisor knowledge.
	 */
	ver = vcpu->arch.vpid & ~vpid_mask;
	old = context->vpid_cache & ~vpid_mask;
	if (migrated || (ver != old)) {
		kvm_update_vpid(vcpu, cpu);
		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
		vcpu->cpu = cpu;
	}

	/* Restore GSTAT(0x50).vpid */
	vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT;
	change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
}

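/* Hand each VM a reference to the module-wide per-CPU VPID contexts. */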
void kvm_init_vmcs(struct kvm *kvm)
{
	kvm->arch.vmcs = vmcs;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

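/*
 * Per-CPU virtualization setup, run on each CPU when KVM enables
 * hardware support: reset the guest-mode CSR state, then build a GCFG
 * value based on the capabilities the hardware reports in GCFG.
 */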
int kvm_arch_hardware_enable(void)
{
	unsigned long env, gcfg = 0;

	env = read_csr_gcfg();

	/* First init gcfg, gstat, gintc and gtlbc; all guests use the same config */
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/*
	 * Enable virtualization features, granting the guest direct control
	 * of certain features:
	 * GCI=2:       Trap on init or unimplemented cache instructions.
	 * TORU=0:      Trap on Root Unimplemented.
	 * CACTRL=1:    Root controls the cache.
	 * TOP=0:       Trap on Privilege.
	 * TOE=0:       Trap on Exception.
	 * TIT=0:       Trap on Timer.
	 */
	if (env & CSR_GCFG_GCIP_ALL)
		gcfg |= CSR_GCFG_GCI_SECURE;
	if (env & CSR_GCFG_MATC_ROOT)
		gcfg |= CSR_GCFG_MATC_ROOT;

	write_csr_gcfg(gcfg);

	kvm_flush_tlb_all();

	/* Enable using TGID */
	set_csr_gtlbc(CSR_GTLBC_USETGID);
	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());

	return 0;
}
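/* Per-CPU teardown: undo the guest-mode CSR setup done in kvm_arch_hardware_enable(). */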
void kvm_arch_hardware_disable(void)
{
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/* Flush any remaining guest TLB entries */
	kvm_flush_tlb_all();
}
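/*
 * One-time module setup: allocate the per-CPU VPID contexts, copy the
 * world switch code into DMW-covered pages, and derive vpid_mask from
 * the GID width advertised in GSTAT.
 */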
static int kvm_loongarch_env_init(void)
{
	int cpu, order;
	void *addr;
	struct kvm_context *context;

	vmcs = alloc_percpu(struct kvm_context);
	if (!vmcs) {
		pr_err("kvm: failed to allocate percpu kvm_context\n");
		return -ENOMEM;
	}

	kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL);
	if (!kvm_loongarch_ops) {
		free_percpu(vmcs);
		vmcs = NULL;
		return -ENOMEM;
	}

	/*
	 * The PGD register is shared between the root kernel and the KVM
	 * hypervisor, so the world switch entry should be placed in the
	 * DMW area rather than a TLB-mapped area, to avoid reentering on
	 * a page fault.
	 *
	 * In the future, if hardware page table walking is supported, we
	 * won't need to copy the world switch code to the DMW area.
	 */
	order = get_order(kvm_exception_size + kvm_enter_guest_size);
	addr = (void *)__get_free_pages(GFP_KERNEL, order);
	if (!addr) {
		free_percpu(vmcs);
		vmcs = NULL;
		kfree(kvm_loongarch_ops);
		kvm_loongarch_ops = NULL;
		return -ENOMEM;
	}

	memcpy(addr, kvm_exc_entry, kvm_exception_size);
	memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
	flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
	kvm_loongarch_ops->exc_entry = addr;
	kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
	kvm_loongarch_ops->page_order = order;

	vpid_mask = read_csr_gstat();
	vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
	if (vpid_mask)
		vpid_mask = GENMASK(vpid_mask - 1, 0);

	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vmcs, cpu);
		context->vpid_cache = vpid_mask + 1;
		context->last_vcpu = NULL;
	}

	kvm_init_gcsr_flag();

	return 0;
}
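/*
 * Release everything kvm_loongarch_env_init() allocated; the NULL
 * checks keep this safe even if init only partially succeeded.
 */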
static void kvm_loongarch_env_exit(void)
{
	unsigned long addr;

	if (vmcs)
		free_percpu(vmcs);

	if (kvm_loongarch_ops) {
		if (kvm_loongarch_ops->exc_entry) {
			addr = (unsigned long)kvm_loongarch_ops->exc_entry;
			free_pages(addr, kvm_loongarch_ops->page_order);
		}
		kfree(kvm_loongarch_ops);
	}
}

static int kvm_loongarch_init(void)
{
	int r;

	if (!cpu_has_lvz) {
		kvm_info("Hardware virtualization not available\n");
		return -ENODEV;
	}
	r = kvm_loongarch_env_init();
	if (r)
		return r;

	return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void kvm_loongarch_exit(void)
{
	kvm_exit();
	kvm_loongarch_env_exit();
}

module_init(kvm_loongarch_init);
module_exit(kvm_loongarch_exit);
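/*
 * When built as a module, export a CPU feature table so userspace
 * (udev/modprobe) can autoload KVM on processors that advertise LVZ.
 */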
#ifdef MODULE
static const struct cpu_feature kvm_feature[] = {
	{ .feature = cpu_feature(LOONGARCH_LVZ) },
	{},
};
MODULE_DEVICE_TABLE(cpu, kvm_feature);
#endif