/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/cpuinfo.c 331988 2018-04-04 06:11:05Z mmel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/elf.h>
#include <machine/md_var.h>

#if __ARM_ARCH >= 6
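/*
 * reinit_mmu() is implemented in assembler (locore-v6.S); it installs the
 * given translation table base and clears/sets the given ACTLR bits.
 */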
void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);

int disable_bp_hardening;
int spectre_v2_safe = 1;
#endif

struct cpuinfo cpuinfo =
{
	/* Use safe defaults until cpuinfo_init() fills in the real values. */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};

static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful: ACTLR cannot be changed unless the CPU was started in
 * secure mode (world), and a write to ACTLR may cause an exception!
 * These quirks are intended for optimizing CPU performance, not for
 * applying errata workarounds. Nobody can expect that a CPU with unfixed
 * errata is stable enough to execute the kernel until the quirks are
 * applied.
 */
static uint32_t cpu_quirks_actlr_mask;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");
76
77static uint32_t cpu_quirks_actlr_set;
78SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
79    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
80    "Bits to be set in ACTLR");
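
/*
 * Example (hypothetical values, via loader.conf):
 *
 *	hw.cpu.quirks.actlr_mask="0x40"
 *	hw.cpu.quirks.actlr_set="0x40"
 *
 * This would force ACTLR bit 6 (the SMP bit on several Cortex-A cores,
 * see cpuinfo_get_actlr_modifier() below) to be set when
 * cpuinfo_reinit_mmu() applies the quirks.
 */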
/* Read and parse the CPU ID scheme. */
void
cpuinfo_init(void)
{
#if __ARM_ARCH >= 6
	uint32_t tmp;
#endif

	/*
	 * Fetch CPU quirks early. The standard fetch for tunable sysctls
	 * is handled by SYSINIT, which is too late for the boot CPU.
	 * Keep the names in sync with the sysctls above.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

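	/*
	 * MIDR layout (new-style ID scheme):
	 *	[31:24] implementer, [23:20] variant (cpuinfo.revision),
	 *	[19:16] architecture, [15:4] primary part number,
	 *	[3:0]   revision (cpuinfo.patch).
	 */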
	cpuinfo.midr = cp15_midr_get();
	/* Test old-style version ID schemes first. */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* Obsolete ARMv2 or ARMv3 CPU. */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* Obsolete ARMv3 CPU. */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM new-style ID scheme. */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* Non-ARM implementer -> must use the new ID scheme. */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
	/* Parse the rest of MIDR. */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR). */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
#if __ARM_ARCH >= 6
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();
#endif

	/* Stop here if the CPU does not use the ARMv7 CPUID scheme. */
	if (cpuinfo.architecture != 0xF)
		return;
#if __ARM_ARCH >= 6
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR exists only on ARM SMP Cortex-A CPUs.
	cpuinfo.cbar = cp15_cbar_get();
*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* REVIDR is not implemented if it reads the same as MIDR. */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;

	/* L1 cache line sizes. */
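	/*
	 * In the ARMv7 CTR format, IminLine/DminLine encode log2 of the
	 * line length in words, so bytes = 1 << (field + 2). The older
	 * CTR format encodes log2(line length / 8 bytes), hence the + 3.
	 */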
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs. */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;	   /* Required for v6+ CPUs. */

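	/*
	 * Each field tested below is a 4-bit feature level: 0 means not
	 * implemented and higher values are cumulative (e.g. for
	 * Divide_instrs, 1 adds SDIV/UDIV in Thumb, 2 adds the ARM
	 * encodings as well).
	 */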
	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF;	/* State1 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF;	/* State3 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF;	/* VMSA */
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
#endif
}

#if __ARM_ARCH >= 6
/*
 * Get the bits that must be set or cleared in the ACTLR register.
 * Note: The bits in the ACTLR register are IMPLEMENTATION DEFINED.
 * It is expected that the SCU is in an operational state before this
 * function is called.
 */
static void
cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
{

	*actlr_mask = 0;
	*actlr_set = 0;

	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A75:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A53:
			/* Nothing to do for AArch32. */
			break;
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A12: /* A12 was merged into A17. */
			/*
			 * Enable SMP mode.
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A15:
			/*
			 * Enable snoop-delayed exclusive handling.
			 * Enable SMP mode.
			 */
			*actlr_mask = (1U << 31) | (1 << 6);
			*actlr_set = (1U << 31) | (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A9:
			/*
			 * Disable exclusive L1/L2 cache control.
			 * Enable SMP mode.
			 * Enable cache and TLB maintenance broadcast.
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_CORTEX_A8:
			/*
			 * Enable L2 cache.
			 * Enable L1 data cache hardware alias checks.
			 */
			*actlr_mask = (1 << 1) | (1 << 0);
			*actlr_set = (1 << 1);
			break;
		case CPU_ARCH_CORTEX_A7:
			/*
			 * Enable SMP mode.
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A5:
			/*
			 * Disable exclusive L1/L2 cache control.
			 * Enable SMP mode.
			 * Enable cache and TLB maintenance broadcast.
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_ARM1176:
			/*
			 * Restrict cache size to 16KB.
			 * Enable the return stack.
			 * Enable dynamic branch prediction.
			 * Enable static branch prediction.
			 */
			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			break;
		}
		return;
	}
}

/* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
void
cpuinfo_reinit_mmu(uint32_t ttb)
{
	uint32_t actlr_mask;
	uint32_t actlr_set;

	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	actlr_mask |= cpu_quirks_actlr_mask;
	actlr_set |= cpu_quirks_actlr_set;
	reinit_mmu(ttb, actlr_mask, actlr_set);
}

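/*
 * Modify ACTLR bits and verify the result by reading the register back.
 * When the kernel runs in non-secure state the write may be silently
 * ignored, so the read-back is the only way to detect failure.
 */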
static bool
modify_actlr(uint32_t clear, uint32_t set)
{
	uint32_t reg, newreg;

	reg = cp15_actlr_get();
	newreg = reg;
	newreg &= ~clear;
	newreg |= set;
	if (reg == newreg)
		return (true);
	cp15_actlr_set(newreg);

	reg = cp15_actlr_get();
	if (reg == newreg)
		return (true);
	return (false);
}

/* Apply or restore BP hardening on the current core. */
static int
apply_bp_hardening(bool enable, int kind, bool actlr, uint32_t set_mask)
{
	if (enable) {
		if (actlr && !modify_actlr(0, set_mask))
			return (-1);
		PCPU_SET(bp_harden_kind, kind);
	} else {
		PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
		if (actlr)
			modify_actlr(~0, PCPU_GET(original_actlr));
		spectre_v2_safe = 0;
	}
	return (0);
}

static void
handle_bp_hardening(bool enable)
{
	int kind;
	char *kind_str;

	kind = PCPU_BP_HARDEN_KIND_NONE;
	/*
	 * Note: Access to ACTLR is locked to the secure world on most
	 * boards, so full BP hardening depends on updated u-boot/firmware,
	 * or may not be possible at all (if the secure monitor lives in
	 * on-chip ROM).
	 */
	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A8:
			/*
			 * For the Cortex-A8, the IBE bit must be set,
			 * otherwise BPIALL is effectively a NOP.
			 * Unfortunately, the Cortex-A8 is also affected by
			 * ARM erratum 687067, which makes BPIALL
			 * non-functional if the IBE bit is set and the
			 * 'Instruction L1 System Array Debug Register 0'
			 * is not 0. That register is not reset on power-up
			 * and is accessible only from the secure world, so
			 * we can neither fix nor detect this condition.
			 * I am afraid that the on-chip ROM based secure
			 * monitor on AM335x (BeagleBone) does not reset
			 * this debug register.
			 */
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, true, 1 << 6) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A9:
		case CPU_ARCH_CORTEX_A12:
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A75:
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, false, 0) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A15:
			/*
			 * For the Cortex-A15, set the 'Enable invalidates
			 * of BTB' bit. BPIALL is still effectively a NOP
			 * even with this bit set, but ICIALLU then also
			 * flushes the branch predictor as a side effect.
			 */
			kind = PCPU_BP_HARDEN_KIND_ICIALLU;
			if (apply_bp_hardening(enable, kind, true, 1 << 0) != 0)
				goto actlr_err;
			break;

		default:
			break;
		}
	} else if (cpuinfo.implementer == CPU_IMPLEMENTER_QCOM) {
		printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative "
		    "branch attacks. !!!\n"
		    "Qualcomm Krait cores are known (or believed) to be "
		    "vulnerable to\n"
		    "speculative branch attacks; no mitigation exists yet.\n",
		    PCPU_GET(cpuid));
		goto unknown_mitigation;
	} else {
		goto unknown_mitigation;
	}

	if (bootverbose) {
		switch (kind) {
		case PCPU_BP_HARDEN_KIND_NONE:
			kind_str = "not necessary";
			break;
		case PCPU_BP_HARDEN_KIND_BPIALL:
			kind_str = "BPIALL";
			break;
		case PCPU_BP_HARDEN_KIND_ICIALLU:
			kind_str = "ICIALLU";
			break;
		default:
			panic("Unknown BP hardening kind (%d).", kind);
		}
		printf("CPU(%d) applied BP hardening: %s\n", PCPU_GET(cpuid),
		    kind_str);
	}

	return;

unknown_mitigation:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	return;

actlr_err:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative branch "
	    "attacks. !!!\n"
	    "We cannot enable the required bit(s) in the ACTLR register\n"
	    "because it is locked by the secure monitor and/or firmware.\n",
	    PCPU_GET(cpuid));
}

void
cpuinfo_init_bp_hardening(void)
{

	/*
	 * Store the original, unmodified ACTLR so we can restore it when
	 * BP hardening is disabled via sysctl.
	 */
	PCPU_SET(original_actlr, cp15_actlr_get());
	handle_bp_hardening(true);
}

static void
bp_hardening_action(void *arg)
{

	handle_bp_hardening(disable_bp_hardening == 0);
}

static int
sysctl_disable_bp_hardening(SYSCTL_HANDLER_ARGS)
{
	int rv;

	rv = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);

	if (!rv && req->newptr) {
		/*
		 * Assume we are safe; each core clears spectre_v2_safe
		 * again from bp_hardening_action() if it cannot apply
		 * (or keep) a mitigation.
		 */
		spectre_v2_safe = 1;
		dmb();
#ifdef SMP
		smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier,
		    bp_hardening_action, NULL, NULL);
#else
		bp_hardening_action(NULL);
#endif
	}

	return (rv);
}

SYSCTL_PROC(_machdep, OID_AUTO, disable_bp_hardening,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &disable_bp_hardening, 0, sysctl_disable_bp_hardening, "I",
    "Disable BP hardening mitigation.");
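
/*
 * Usage (from a root shell):
 *
 *	sysctl machdep.disable_bp_hardening=1	# drop the mitigation
 *	sysctl machdep.disable_bp_hardening=0	# re-apply it on all cores
 *
 * machdep.spectre_v2_safe below reports whether every core still has a
 * working mitigation.
 */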

SYSCTL_INT(_machdep, OID_AUTO, spectre_v2_safe, CTLFLAG_RD,
    &spectre_v2_safe, 0, "System is safe against Spectre Version 2 attacks");

#endif /* __ARM_ARCH >= 6 */