// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>

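/*
 * Report the machine vendor/arch/impl ID (selected by pair->key) that is
 * common to all CPUs in @cpus, or -1 if the value differs across the set.
 */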
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

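/*
 * Report the RISCV_HWPROBE_KEY_IMA_EXT_0 extension bits that are supported by
 * every CPU in @cpus.
 */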
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector())
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBS);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZBC);

		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZTSO);
		EXT_KEY(ZACAS);
		EXT_KEY(ZICOND);

		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
		}

		if (has_fpu()) {
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
			EXT_KEY(ZFA);
		}
#undef EXT_KEY
	}

	/* Now turn off reporting any features that are missing on some CPU. */
	pair->value &= ~missing;
}

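/* Return true if every CPU in @cpus supports the extension bit(s) in @ext. */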
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

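/*
 * Report the misaligned access performance for @cpus: the probed per-CPU
 * value when probing is enabled, otherwise a value derived from the kernel
 * configuration.
 */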
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SLOW;
}
#endif

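/* Resolve a single key/value pair against the CPUs in @cpus. */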
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

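/*
 * Handle the default hwprobe(2) operation: resolve each requested key to the
 * value that is consistent across the given CPU set (or all online CPUs when
 * no set is provided).
 */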
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

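/*
 * Handle the RISCV_HWPROBE_WHICH_CPUS operation: given fully-specified
 * key/value pairs, shrink the user's CPU set down to the CPUs whose answers
 * match every pair.
 */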
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

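/* Dispatch to the operation selected by @flags. */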
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

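/*
 * Pre-compute the "all online CPUs" hwprobe answers and stash them in the
 * vDSO data page, so the vDSO can answer most probes without a syscall.
 */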
static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

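/*
 * Syscall entry point. An illustrative sketch (not compiled here) of how
 * userspace might call it directly through syscall(2), using the uapi
 * definitions from <asm/hwprobe.h> and <asm/unistd.h>:
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	// cpusetsize == 0 and cpus == NULL mean "all online CPUs".
 *	long ret = syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 *	bool has_v = !ret && (pair.value & RISCV_HWPROBE_IMA_V);
 */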
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}