// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

#define KVM_ARM_SMCCC_STD_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES			\
	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)

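/*
 * Back the KVM PTP service (ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID): capture
 * the host wall-clock time together with the matching guest counter value
 * and return them in val[0..3], so a guest PTP driver can cross-timestamp
 * against the host clock. On any failure the array is left untouched and
 * the caller's SMCCC_RET_NOT_SUPPORTED default is returned to the guest.
 */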
static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
	struct system_time_snapshot systime_snapshot;
	u64 cycles = ~0UL;
	u32 feature;

	/*
	 * The system time and counter value must be captured at the
	 * same time to maintain consistency and precision.
	 */
	ktime_get_snapshot(&systime_snapshot);

	/*
	 * This is only valid if the current clocksource is the
	 * architected counter, as this is the only one the guest
	 * can see.
	 */
	if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
		return;

	/*
	 * The guest selects one of the two reference counters
	 * (virtual or physical) with the first argument of the SMCCC
	 * call. In case the identifier is not supported, error out.
	 */
	feature = smccc_get_arg1(vcpu);
	switch (feature) {
	case KVM_PTP_VIRT_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
		break;
	case KVM_PTP_PHYS_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
		break;
	default:
		return;
	}

	/*
	 * This relies on the top bit of val[0] never being set for
	 * valid values of system time, because that is *really* far
	 * in the future (about 292 years from 1970, and at that stage
	 * nobody will give a damn about it).
	 */
	val[0] = upper_32_bits(systime_snapshot.real);
	val[1] = lower_32_bits(systime_snapshot.real);
	val[2] = upper_32_bits(cycles);
	val[3] = lower_32_bits(cycles);
}

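/*
 * SMCCC/PSCI calls that KVM always services, independently of the bitmapped
 * feature firmware registers. Everything else must be enabled through a
 * bitmap bit or an explicit SMCCC filter entry.
 */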
static bool kvm_smccc_default_allowed(u32 func_id)
{
	switch (func_id) {
	/*
	 * List of function IDs that are not gated by the bitmapped
	 * feature firmware registers and are serviced by default.
	 */
	case ARM_SMCCC_VERSION_FUNC_ID:
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		return true;
	default:
		/* PSCI 0.2 and up is in the 0:0x1f range */
		if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
		    ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
			return true;

		/*
		 * KVM's PSCI 0.1 doesn't comply with SMCCC, and has
		 * its own function-id base and range
		 */
		if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
			return true;

		return false;
	}
}

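/*
 * Check whether a function ID is enabled by the VM's bitmapped feature
 * firmware registers (KVM_REG_ARM_STD_BMAP and friends). IDs not covered
 * by any bitmap return false and fall back to kvm_smccc_default_allowed().
 */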
static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;

	switch (func_id) {
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
				&smccc_feat->std_bmap);
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				&smccc_feat->std_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
				&smccc_feat->vendor_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
				&smccc_feat->vendor_hyp_bmap);
	default:
		return false;
	}
}

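/*
 * The Arm Architecture Call ranges (0x8000_0000-0x8000_FFFF for SMC32 and
 * 0xC000_0000-0xC000_FFFF for SMC64). These are reserved in the filter so
 * that userspace can never take ownership of them.
 */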
#define SMC32_ARCH_RANGE_BEGIN	ARM_SMCCC_VERSION_FUNC_ID
#define SMC32_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_32,		\
						   0, ARM_SMCCC_FUNC_MASK)

#define SMC64_ARCH_RANGE_BEGIN	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_64,		\
						   0, 0)
#define SMC64_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
						   ARM_SMCCC_SMC_64,		\
						   0, ARM_SMCCC_FUNC_MASK)

static int kvm_smccc_filter_insert_reserved(struct kvm *kvm)
{
	int r;

	/*
	 * Prevent userspace from handling any SMCCC calls in the architecture
	 * range, avoiding the risk of misrepresenting Spectre mitigation status
	 * to the guest.
	 */
	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_destroy;

	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_destroy;

	return 0;
out_destroy:
	mtree_destroy(&kvm->arch.smccc_filter);
	return r;
}

static bool kvm_smccc_filter_configured(struct kvm *kvm)
{
	return !mtree_empty(&kvm->arch.smccc_filter);
}

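/*
 * Install a userspace-provided filter range. Roughly speaking (the KVM API
 * documentation is authoritative; the base/count below are only an
 * illustration), a VMM would do something like:
 *
 *	struct kvm_smccc_filter filter = {
 *		.base		= 0xc6000000,
 *		.nr_functions	= 1,
 *		.action		= KVM_SMCCC_FILTER_FWD_TO_USER,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VM_SMCCC_CTRL,
 *		.attr	= KVM_ARM_VM_SMCCC_FILTER,
 *		.addr	= (__u64)&filter,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Ranges must not overlap, the padding must be zero, and no more entries
 * can be added once the VM has run.
 */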
static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	struct kvm_smccc_filter filter;
	u32 start, end;
	int r;

	if (copy_from_user(&filter, uaddr, sizeof(filter)))
		return -EFAULT;

	if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
		return -EINVAL;

	start = filter.base;
	end = start + filter.nr_functions - 1;

	if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm)) {
		r = -EBUSY;
		goto out_unlock;
	}

	if (!kvm_smccc_filter_configured(kvm)) {
		r = kvm_smccc_filter_insert_reserved(kvm);
		if (WARN_ON_ONCE(r))
			goto out_unlock;
	}

	r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
			       xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return r;
}

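/*
 * Look up the filter action for a function ID. Anything not present in the
 * maple tree (including the case where no filter was ever configured)
 * defaults to KVM_SMCCC_FILTER_HANDLE, i.e. let KVM handle the call.
 */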
static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
{
	unsigned long idx = func_id;
	void *val;

	if (!kvm_smccc_filter_configured(kvm))
		return KVM_SMCCC_FILTER_HANDLE;

	/*
	 * But where's the error handling, you say?
	 *
	 * mt_find() returns NULL if no entry was found, which just so happens
	 * to match KVM_SMCCC_FILTER_HANDLE.
	 */
	val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
	return xa_to_value(val);
}

static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
{
	/*
	 * Intervening actions in the SMCCC filter take precedence over the
	 * pseudo-firmware register bitmaps.
	 */
	u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
	if (action != KVM_SMCCC_FILTER_HANDLE)
		return action;

	if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
	    kvm_smccc_default_allowed(func_id))
		return KVM_SMCCC_FILTER_HANDLE;

	return KVM_SMCCC_FILTER_DENY;
}

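/*
 * Complete a KVM_EXIT_HYPERCALL exit to userspace, flagging whether the
 * guest used the SMC conduit rather than HVC and whether the trapped
 * instruction was 16 bits wide.
 */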
static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
{
	u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
	struct kvm_run *run = vcpu->run;
	u64 flags = 0;

	if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
		flags |= KVM_HYPERCALL_EXIT_SMC;

	if (!kvm_vcpu_trap_il_is32bit(vcpu))
		flags |= KVM_HYPERCALL_EXIT_16BIT;

	run->exit_reason = KVM_EXIT_HYPERCALL;
	run->hypercall = (typeof(run->hypercall)) {
		.nr	= func_id,
		.flags	= flags,
	};
}

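/*
 * Central dispatcher for guest SMCCC calls. Returns 1 to resume the guest
 * with the result of the call in x0-x3, and 0 when the exit has to be
 * completed in userspace (e.g. a filter-forwarded call, or PSCI calls such
 * as SYSTEM_OFF); errors from the PSCI/TRNG backends are propagated as-is.
 */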
int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	u32 func_id = smccc_get_function(vcpu);
	u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
	u32 feature;
	u8 action;
	gpa_t gpa;

	action = kvm_smccc_get_action(vcpu, func_id);
	switch (action) {
	case KVM_SMCCC_FILTER_HANDLE:
		break;
	case KVM_SMCCC_FILTER_DENY:
		goto out;
	case KVM_SMCCC_FILTER_FWD_TO_USER:
		kvm_prepare_hypercall_exit(vcpu, func_id);
		return 0;
	default:
		WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
		goto out;
	}

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val[0] = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (arm64_get_spectre_v2_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (arm64_get_spectre_v4_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				/*
				 * SSBS everywhere: Indicate no firmware
				 * support, as the SSBS support will be
				 * indicated to the guest and the default is
				 * safe.
				 *
				 * Otherwise, expose a permanent mitigation
				 * to the guest, and hide SSBS so that the
				 * guest stays protected.
				 */
				if (cpus_have_final_cap(ARM64_SSBS))
					break;
				fallthrough;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_3:
			switch (arm64_get_spectre_bhb_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_TIME_FEATURES:
			if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				     &smccc_feat->std_hyp_bmap))
				val[0] = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
		val[0] = kvm_hypercall_pv_features(vcpu);
		break;
	case ARM_SMCCC_HV_PV_TIME_ST:
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != INVALID_GPA)
			val[0] = gpa;
		break;
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
		val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
		val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
		val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
		val[0] = smccc_feat->vendor_hyp_bmap;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		kvm_ptp_get_time(vcpu, val);
		break;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_call(vcpu);
	default:
		return kvm_psci_call(vcpu);
	}

out:
	smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
	return 1;
}

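/*
 * Pseudo-firmware registers exposed through the KVM_GET/SET_ONE_REG
 * interface, allowing userspace to save, restore and (within limits)
 * restrict the PSCI version, the Spectre workaround levels and the
 * bitmapped SMCCC feature sets across migration.
 */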
static const u64 kvm_arm_fw_reg_ids[] = {
	KVM_REG_ARM_PSCI_VERSION,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
	KVM_REG_ARM_STD_BMAP,
	KVM_REG_ARM_STD_HYP_BMAP,
	KVM_REG_ARM_VENDOR_HYP_BMAP,
};

void kvm_arm_init_hypercalls(struct kvm *kvm)
{
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;

	smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
	smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
	smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;

	mt_init(&kvm->arch.smccc_filter);
}

void kvm_arm_teardown_hypercalls(struct kvm *kvm)
{
	mtree_destroy(&kvm->arch.smccc_filter);
}

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
		if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
			return -EFAULT;
	}

	return 0;
}

#define KVM_REG_FEATURE_LEVEL_MASK	GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As with the hypercall discovery, we pretend we
			 * don't have any FW mitigation if SSBS is present
			 * at all times.
			 */
			if (cpus_have_final_cap(ARM64_SSBS))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		switch (arm64_get_spectre_bhb_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
	}

	return -EINVAL;
}

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_STD_BMAP:
		val = READ_ONCE(smccc_feat->std_bmap);
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		val = READ_ONCE(smccc_feat->std_hyp_bmap);
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

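/*
 * Update one of the bitmapped feature firmware registers. Only bits that
 * KVM actually implements may be set, and once the VM has run the value
 * can no longer change (although rewriting the identical value still
 * succeeds, so save/restore of a live VM keeps working).
 */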
static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
{
	int ret = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
	unsigned long *fw_reg_bmap, fw_reg_features;

	switch (reg_id) {
	case KVM_REG_ARM_STD_BMAP:
		fw_reg_bmap = &smccc_feat->std_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->std_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
		break;
	default:
		return -ENOENT;
	}

	/* Check for unsupported bit */
	if (val & ~fw_reg_features)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
		ret = -EBUSY;
		goto out;
	}

	WRITE_ONCE(*fw_reg_bmap, val);
out:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

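/*
 * Userspace write handler for the pseudo-firmware registers. A write is
 * rejected if it advertises more than the host provides: the PSCI version
 * must be compatible with the vCPU features, and a Spectre workaround
 * level may be lowered but never raised above get_kernel_wa_level().
 */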
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (KVM_REG_SIZE(reg->id) != sizeof(val))
		return -ENOENT;
	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
		case KVM_ARM_PSCI_1_1:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		return 0;
	case KVM_REG_ARM_STD_BMAP:
	case KVM_REG_ARM_STD_HYP_BMAP:
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
	default:
		return -ENOENT;
	}

	return -EINVAL;
}

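/*
 * VM device attribute plumbing for the SMCCC filter, reached through the
 * VM-scoped KVM_HAS_DEVICE_ATTR / KVM_SET_DEVICE_ATTR ioctls.
 */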
int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return 0;
	default:
		return -ENXIO;
	}
}

int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	void __user *uaddr = (void __user *)attr->addr;

	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return kvm_smccc_set_filter(kvm, uaddr);
	default:
		return -ENXIO;
	}
}