/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H

#include <linux/stringify.h>
#include <linux/types.h>

/*
 * Hypercall code field
 */
#define HYPERVISOR_KVM			1
#define HYPERVISOR_VENDOR_SHIFT		8
#define HYPERCALL_ENCODE(vendor, code)	(((vendor) << HYPERVISOR_VENDOR_SHIFT) + (code))

#define KVM_HCALL_CODE_SERVICE		0
#define KVM_HCALL_CODE_SWDBG		1

#define KVM_HCALL_SERVICE		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define  KVM_HCALL_FUNC_IPI		1

#define KVM_HCALL_SWDBG			HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
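
/*
 * For reference, the encodings above expand to:
 *   KVM_HCALL_SERVICE = (1 << 8) + 0 = 0x100
 *   KVM_HCALL_SWDBG   = (1 << 8) + 1 = 0x101
 */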

/*
 * LoongArch hypercall return code
 */
#define KVM_HCALL_SUCCESS		0
#define KVM_HCALL_INVALID_CODE		-1UL
#define KVM_HCALL_INVALID_PARAMETER	-2UL
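
/*
 * Callers are expected to check the value returned in a0 against the
 * codes above. An illustrative check (the function id and argument are
 * placeholders, not defined by this header):
 *
 *	long ret = kvm_hypercall1(fid, arg);
 *
 *	if (ret == KVM_HCALL_INVALID_CODE)
 *		pr_warn("hypercall code not recognised by the hypervisor\n");
 */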

/*
 * Hypercall interface for the KVM hypervisor
 *
 *  a0:     function identifier
 *  a1-a6:  up to six arguments
 *  a0:     return value
 *
 * The helpers below bind both the function identifier and the return
 * value to a0, and clobber "memory" because the hypervisor may read or
 * modify guest memory while handling the call.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall3(u64 fid,
	unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;
	register unsigned long a5  asm("a5") = arg4;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
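
/*
 * Illustrative guest-side use of the service hypercall. This is a sketch
 * only: the exact argument layout of KVM_HCALL_FUNC_IPI is defined by the
 * hypervisor implementation, and bitmap_low/bitmap_high/min_cpu are
 * placeholder names:
 *
 *	ret = kvm_hypercall3(KVM_HCALL_FUNC_IPI, bitmap_low, bitmap_high, min_cpu);
 *	if (ret != KVM_HCALL_SUCCESS)
 *		... fall back to a non-paravirtualized IPI path ...
 */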
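
/*
 * Generic kvm_para interface stubs: no paravirtualized features or hints
 * are advertised and host-initiated pause detection is not supported, so
 * each query below reports none.
 */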
static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}

#endif /* _ASM_LOONGARCH_KVM_PARA_H */