// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 */

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>

void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;

#define INVALID_CPU_ID	UINT_MAX

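/*
 * Boot arguments handed over from the CPU that issues CPU_ON/CPU_SUSPEND to
 * the target CPU when it (re-)enters hyp via kvm_hyp_cpu_entry or
 * kvm_hyp_cpu_resume.
 */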
struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

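/*
 * True iff the host reported that it implements the given PSCI 0.1 function
 * and @func_id matches the function ID it registered for it.
 */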
#define	is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

static bool is_psci_0_1_call(u64 func_id)
{
	return (is_psci_0_1(cpu_suspend, func_id) ||
		is_psci_0_1(cpu_on, func_id) ||
		is_psci_0_1(cpu_off, func_id) ||
		is_psci_0_1(migrate, func_id));
}

static bool is_psci_0_2_call(u64 func_id)
{
	/* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */
	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}

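/* Issue the PSCI call to firmware via SMC and return its x0 result. */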
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}

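/* Forward the host's x0-x3 to firmware unmodified and return the result. */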
static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}

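/* Map an MPIDR to the logical CPU ID assigned at KVM initialization. */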
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int i;

	/* Reject invalid MPIDRs */
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) == mpidr)
			return i;
	}

	return INVALID_CPU_ID;
}

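/*
 * Try to take ownership of a boot_args struct. Paired with
 * release_boot_args(): the acquire/release ordering ensures that accesses
 * to pc/r0 do not leak outside the critical section.
 */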
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}

static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}

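/*
 * Handle CPU_ON on behalf of the host: stash the host's requested entry
 * point and context in the target CPU's boot args, then ask firmware to
 * boot the target into hyp via kvm_hyp_cpu_entry first.
 */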
static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * the set of CPUs that were online at the point of KVM initialization.
	 * Booting other CPUs is rejected because their cpufeatures were not
	 * checked against the finalized capabilities. This could be relaxed
	 * by doing the feature checks in hyp.
	 */
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	boot_args->pc = pc;
	boot_args->r0 = r0;
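	/* Ensure pc/r0 are visible before firmware boots the target CPU. */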
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(&kvm_hyp_cpu_entry),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}

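/*
 * Handle CPU_SUSPEND on behalf of the host, substituting kvm_hyp_cpu_resume
 * as the entry point so the CPU re-enters hyp first when it wakes up.
 */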
static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/*
	 * Will either return directly if the sleep state is shallow, or wake
	 * up at the entry point if it is a deep sleep state.
	 */
	return psci_call(func_id, power_state,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params));
}

static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params), 0);
}

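/*
 * First C code run in hyp when a CPU comes online via CPU_ON or wakes from
 * a deep sleep state: restore the pc/r0 requested by the host and return
 * to it.
 */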
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (is_cpu_on)
		boot_args = this_cpu_ptr(&cpu_on_args);
	else
		boot_args = this_cpu_ptr(&suspend_args);

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

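	/*
	 * cpu_on_args was locked by the CPU that issued CPU_ON; release it
	 * now that pc/r0 have been consumed. suspend_args is never locked.
	 */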
	if (is_cpu_on)
		release_boot_args(boot_args);

	__host_enter(host_ctxt);
}

static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}

static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		return psci_forward(host_ctxt);
	/*
	 * SYSTEM_OFF/RESET should not return according to the spec.
	 * Allow it so as to stay robust to broken firmware.
	 */
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		return psci_forward(host_ctxt);
	case PSCI_0_2_FN64_CPU_SUSPEND:
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		return psci_cpu_on(func_id, host_ctxt);
	default:
		return PSCI_RET_NOT_SUPPORTED;
	}
}

static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_1_0_FN_PSCI_FEATURES:
	case PSCI_1_0_FN_SET_SUSPEND_MODE:
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		return psci_forward(host_ctxt);
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		return psci_system_suspend(func_id, host_ctxt);
	default:
		return psci_0_2_handler(func_id, host_ctxt);
	}
}

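/*
 * Entry point for host SMCs. Returns false if @func_id is not a PSCI call
 * for the firmware's PSCI version, leaving the caller to handle it;
 * otherwise handles the call and stores the result in the host's x0,
 * zeroing x1-x3 since PSCI calls return a single value.
 */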
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}