// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic/vgic.h"
#include "sys_regs.h"

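/*
 * ICC_CTLR_EL1 mixes guest-writable fields (CBPR, EOImode) with fields
 * that mirror host GIC capabilities (PRI bits, ID bits, SEIS, A3V).
 * Restoring state saved on a more capable host is rejected with -EINVAL.
 */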
static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	/*
	 * Disallow restoring VM state if not supported by this
	 * hardware.
	 */
	host_pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
	if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
		return -EINVAL;

	vgic_v3_cpu->num_pri_bits = host_pri_bits;

	host_id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
	if (host_id_bits > vgic_v3_cpu->num_id_bits)
		return -EINVAL;

	vgic_v3_cpu->num_id_bits = host_id_bits;

	host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
	seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
	if (host_seis != seis)
		return -EINVAL;

	host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
	a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
	if (host_a3v != a3v)
		return -EINVAL;
	/*
	 * Set the VMCR fields using the ICC_CTLR_EL1 layout;
	 * vgic_set_vmcr() converts them to the ICH_VMCR layout.
	 */
	vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
	vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *valp)
{
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);
	val = 0;
	val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
	val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
	val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
			  FIELD_GET(ICH_VTR_SEIS_MASK,
				    kvm_vgic_global_state.ich_vtr_el2));
	val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
			  FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2));
	/*
	 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
	 * Extract it directly using ICC_CTLR_EL1 reg definitions.
	 */
	val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
	val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);

	*valp = val;

	return 0;
}

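/*
 * The PMR, BPR and group-enable accessors below simply round-trip the
 * corresponding field through the shadow VMCR maintained by the vgic.
 */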
static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);

	return 0;
}

static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);

	return 0;
}

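/*
 * When ICC_CTLR_EL1.CBPR is set, BPR1 is an alias of BPR0: writes are
 * ignored, and reads return BPR0 + 1, saturated to 7.
 */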
static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
		vgic_set_vmcr(vcpu, &vmcr);
	}

	return 0;
}

static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr)
		*val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);
	else
		*val = min((vmcr.bpr + 1), 7U);

	return 0;
}

static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);

	return 0;
}

static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);

	return 0;
}

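/*
 * The active priority registers come in two banks: AP0Rn for group 0
 * and AP1Rn for group 1 priorities; @apr selects the bank and @idx the
 * register within it. How many of the four registers are implemented
 * depends on the number of priority bits (see vgic_v3_max_apr_idx()).
 */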
static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		vgicv3->vgic_ap1r[idx] = val;
	else
		vgicv3->vgic_ap0r[idx] = val;
}

static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		return vgicv3->vgic_ap1r[idx];
	else
		return vgicv3->vgic_ap0r[idx];
}

static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 0, idx);
	return 0;
}

static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 0, idx);

	return 0;
}

static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 1, idx);
	return 0;
}

static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 1, idx);

	return 0;
}

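/*
 * ICC_SRE_EL1.SRE is RAO/WI for a guest using the GICv3 system register
 * interface, so a restore that tries to clear it cannot be honoured.
 */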
static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	/* Validate SRE bit */
	if (!(val & ICC_SRE_EL1_SRE))
		return -EINVAL;

	return 0;
}

static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	*val = vgicv3->vgic_sre;

	return 0;
}

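/*
 * Keep this table sorted by register encoding: get_reg_by_id() locates
 * entries with a binary search.
 */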
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1),
	  .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
	{ SYS_DESC(SYS_ICC_BPR0_EL1),
	  .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_BPR1_EL1),
	  .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
	{ SYS_DESC(SYS_ICC_CTLR_EL1),
	  .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
	{ SYS_DESC(SYS_ICC_SRE_EL1),
	  .set_user = set_gic_sre, .get_user = get_gic_sre, },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1),
	  .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
	  .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
};

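/*
 * Convert the sysreg encoding carried in a device attribute into the
 * 64-bit KVM_REG_ARM64 sysreg ID used to index the table above.
 */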
static u64 attr_to_id(u64 attr)
{
	return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr));
}

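/*
 * Returns 0 if the encoded register is handled by this file, -ENXIO
 * otherwise (the KVM_HAS_DEVICE_ATTR contract).
 */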
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
			  ARRAY_SIZE(gic_v3_icc_reg_descs)))
		return 0;

	return -ENXIO;
}

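/*
 * Entry point for userspace save/restore of one ICC_* register via the
 * vgic-v3 device. A rough sketch of the userspace side (illustrative
 * only; vgic_fd and the attr field encoding are assumptions, not taken
 * from this file):
 *
 *	u64 reg_val = ...;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
 *		.attr	= ...MPIDR and Op0/Op1/CRn/CRm/Op2 fields...,
 *		.addr	= (u64)&reg_val,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */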
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr,
				bool is_write)
{
	struct kvm_one_reg reg = {
		.id	= attr_to_id(attr->attr),
		.addr	= attr->addr,
	};

	if (is_write)
		return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
	else
		return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
}