// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

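/*
 * CSR emulation helpers: software-maintained guest CSRs (SW_GCSR) live in
 * vcpu->arch.csr and are read/written here; accesses to any other CSR id
 * are logged once and handled per the architectural rules for undefined
 * CSR ids.
 */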
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Per LoongArch Reference Manual Volume 1 Chapter 4.2.1,
	 * a read of an undefined CSR id returns 0
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

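/*
 * Emulate CSRWR: write @val to a software-maintained guest CSR and return
 * the old value, which the caller places in GPR rd.
 */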
static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else {
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
	}

	return old;
}

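/*
 * Emulate CSRXCHG: replace the bits of the CSR selected by @csr_mask with
 * the corresponding bits of @val.  Note that the return value (what lands
 * in rd) is the old value of the masked bits only; the unmasked bits of
 * the result are zero.
 */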
static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else {
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
	}

	return old;
}

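/*
 * Emulate one trapped CSR instruction.  All three forms write a result
 * back to GPR rd, and emulation always completes in the kernel, so this
 * unconditionally returns EMULATE_DONE.
 */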
static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * The rj field doubles as a sub-opcode:
	 * rj == 0 means csrrd
	 * rj == 1 means csrwr
	 * any other rj means csrxchg, with rj naming the mask register
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

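/*
 * IOCSR accesses are not emulated in the kernel: the access is packaged
 * into run->iocsr_io and EMULATE_DO_IOCSR tells the caller to exit to
 * userspace with KVM_EXIT_LOONGARCH_IOCSR.  For reads, arch.io_gpr records
 * which GPR kvm_complete_iocsr_read() must fill on re-entry.
 */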
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret;
	unsigned long val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access width and direction has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	ret = EMULATE_DO_IOCSR;
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;

	/* LoongArch is little-endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		ret = EMULATE_FAIL;
		break;
	}

	if (ret == EMULATE_DO_IOCSR) {
		if (run->iocsr_io.is_write) {
			val = vcpu->arch.gprs[rd];
			memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
		}
		vcpu->arch.io_gpr = rd;
	}

	return ret;
}

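/*
 * Called on return from userspace after an IOCSR read exit: sign-extend
 * the data buffer into the GPR recorded by kvm_emu_iocsr().
 */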
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

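/*
 * Emulate the IDLE instruction: block the vCPU until it becomes runnable
 * again (e.g. an interrupt is pending) instead of spinning in the guest.
 */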
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}

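/*
 * Dispatch a GSPR exit by decoding the faulting instruction from badi.
 * The PC is advanced optimistically before decoding and rolled back at
 * the end if nothing recognized the instruction (EMULATE_FAIL).
 */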
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	int rd, rj;
	unsigned int index;
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		if (inst.reg2_format.opcode == 0x1B) {
			rd = inst.reg2_format.rd;
			rj = inst.reg2_format.rj;
			++vcpu->stat.cpucfg_exits;
			index = vcpu->arch.gprs[rj];
			er = EMULATE_DONE;
			/*
			 * Per LoongArch Reference Manual 2.2.10.5,
			 * a read of an undefined cpucfg index returns 0
			 */
			if (index < KVM_MAX_CPUCFG_REGS)
				vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
			else
				vcpu->arch.gprs[rd] = 0;
		}
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Roll back the PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * A GSPR exception is triggered when the guest:
 * 1) executes the CPUCFG instruction;
 * 2) executes the CACOP/IDLE instructions;
 * 3) accesses unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

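/*
 * Translate a trapped load into an MMIO read request.  mmio_needed
 * doubles as the sign flag for completion: 1 means zero-extend, 2 means
 * sign-extend the data into rd (see kvm_complete_mmio_read()).
 */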
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			/* Other opcodes in this range are stores, fail cleanly */
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
	} else {
		kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->mmio_needed = 0;
	}

	return ret;
}

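/*
 * Called on return from userspace after an MMIO read exit: extend the
 * data to 64 bits as recorded by kvm_emu_mmio_read() and advance the PC
 * past the emulated load.
 */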
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Advance the PC past the emulated load */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
				run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

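/*
 * Translate a trapped store into an MMIO write request.  Unlike the read
 * path there is no data to copy back on completion, so the PC is advanced
 * here and must be rolled back explicitly on decode failure.
 */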
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update the PC, but hold onto the current PC in case there is
	 * an error and we want to roll back
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
	} else {
		/* Roll back the PC, emulation was unsuccessful */
		vcpu->arch.pc = curr_pc;
		kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
	}

	return ret;
}

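/*
 * Common handler for guest read/write faults: try to resolve the fault
 * as a normal memory access first; only if that fails is the access
 * treated as MMIO and the faulting instruction emulated.
 */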
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}

/**
 * kvm_handle_fpu_disabled() - Guest used FPU while it is disabled at host.
 * @vcpu:	Virtual CPU context.
 *
 * Handle the case where the guest attempts to use the FPU before the root
 * context has granted it access.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If guest FPU not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If FPU already in use, we shouldn't get this at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}


/**
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/**
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/*
 * LoongArch KVM callback for guest exit codes that have no dedicated handler
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

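/*
 * Exception dispatch table, indexed by the exception code from ESTAT.
 * Every slot defaults to kvm_fault_ni(); only the exits KVM can handle
 * are overridden below.
 */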
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}