1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2021 Heiko Stuebner <heiko@sntech.de>
4 */
5
6#include <linux/bug.h>
7#include <linux/kernel.h>
8#include <linux/memory.h>
9#include <linux/module.h>
10#include <linux/string.h>
11#include <linux/uaccess.h>
12#include <asm/alternative.h>
13#include <asm/cacheflush.h>
14#include <asm/cpufeature.h>
15#include <asm/dma-noncoherent.h>
16#include <asm/errata_list.h>
17#include <asm/hwprobe.h>
18#include <asm/io.h>
19#include <asm/patch.h>
20#include <asm/vendorid_list.h>
21
/*
 * T-Head c9xx vendor CSR "sxstatus"; the MAEE bit indicates the vendor
 * memory-attribute extension is enabled (see errata_probe_mae()).
 */
#define CSR_TH_SXSTATUS		0x5c0
#define SXSTATUS_MAEE		_AC(0x200000, UL)
24
25static bool errata_probe_mae(unsigned int stage,
26			     unsigned long arch_id, unsigned long impid)
27{
28	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_MAE))
29		return false;
30
31	if (arch_id != 0 || impid != 0)
32		return false;
33
34	if (stage != RISCV_ALTERNATIVES_EARLY_BOOT &&
35	    stage != RISCV_ALTERNATIVES_MODULE)
36		return false;
37
38	if (!(csr_read(CSR_TH_SXSTATUS) & SXSTATUS_MAEE))
39		return false;
40
41	return true;
42}
43
44/*
45 * th.dcache.ipa rs1 (invalidate, physical address)
46 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
47 *   0000001    01010      rs1       000      00000  0001011
48 * th.dcache.iva rs1 (invalidate, virtual address)
49 *   0000001    00110      rs1       000      00000  0001011
50 *
51 * th.dcache.cpa rs1 (clean, physical address)
52 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
53 *   0000001    01001      rs1       000      00000  0001011
54 * th.dcache.cva rs1 (clean, virtual address)
55 *   0000001    00101      rs1       000      00000  0001011
56 *
57 * th.dcache.cipa rs1 (clean then invalidate, physical address)
58 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
59 *   0000001    01011      rs1       000      00000  0001011
60 * th.dcache.civa rs1 (clean then invalidate, virtual address)
61 *   0000001    00111      rs1       000      00000  0001011
62 *
63 * th.sync.s (make sure all cache operations finished)
64 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
65 *   0000000    11001     00000      000      00000  0001011
66 */
/* Pre-encoded vendor instructions (bit layouts documented above), rs1 = a0. */
#define THEAD_INVAL_A0	".long 0x02a5000b"
#define THEAD_CLEAN_A0	".long 0x0295000b"
#define THEAD_FLUSH_A0	".long 0x02b5000b"
#define THEAD_SYNC_S	".long 0x0190000b"

/*
 * Run cache-maintenance operation _op (INVAL/CLEAN/FLUSH) over the range
 * [_start, _start + _size), one _cachesize line at a time, then issue
 * th.sync.s so all operations have completed before returning.
 * The start address is rounded down to a _cachesize boundary; a0 is used
 * as the cursor register and listed as clobbered.
 */
#define THEAD_CMO_OP(_op, _start, _size, _cachesize)			\
asm volatile("mv a0, %1\n\t"						\
	     "j 2f\n\t"							\
	     "3:\n\t"							\
	     THEAD_##_op##_A0 "\n\t"					\
	     "add a0, a0, %0\n\t"					\
	     "2:\n\t"							\
	     "bltu a0, %2, 3b\n\t"					\
	     THEAD_SYNC_S						\
	     : : "r"(_cachesize),					\
		 "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)),	\
		 "r"((unsigned long)(_start) + (_size))			\
	     : "a0")
85
/* Invalidate the D-cache for [paddr, paddr + size) via th.dcache.ipa. */
static void thead_errata_cache_inv(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(INVAL, paddr, size, riscv_cbom_block_size);
}
90
/* Write back (clean) the D-cache for [paddr, paddr + size) via th.dcache.cpa. */
static void thead_errata_cache_wback(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(CLEAN, paddr, size, riscv_cbom_block_size);
}
95
/* Clean then invalidate the D-cache for [paddr, paddr + size) via th.dcache.cipa. */
static void thead_errata_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(FLUSH, paddr, size, riscv_cbom_block_size);
}
100
/* Non-standard cache ops registered for c9xx cores (see errata_probe_cmo()). */
static const struct riscv_nonstd_cache_ops thead_errata_cmo_ops = {
	.wback = &thead_errata_cache_wback,
	.inv = &thead_errata_cache_inv,
	.wback_inv = &thead_errata_cache_wback_inv,
};
106
107static bool errata_probe_cmo(unsigned int stage,
108			     unsigned long arch_id, unsigned long impid)
109{
110	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_CMO))
111		return false;
112
113	if (arch_id != 0 || impid != 0)
114		return false;
115
116	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
117		return false;
118
119	if (stage == RISCV_ALTERNATIVES_BOOT) {
120		riscv_cbom_block_size = L1_CACHE_BYTES;
121		riscv_noncoherent_supported();
122		riscv_noncoherent_register_cache_ops(&thead_errata_cmo_ops);
123	}
124
125	return true;
126}
127
128static bool errata_probe_pmu(unsigned int stage,
129			     unsigned long arch_id, unsigned long impid)
130{
131	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
132		return false;
133
134	/* target-c9xx cores report arch_id and impid as 0 */
135	if (arch_id != 0 || impid != 0)
136		return false;
137
138	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
139		return false;
140
141	return true;
142}
143
144static u32 thead_errata_probe(unsigned int stage,
145			      unsigned long archid, unsigned long impid)
146{
147	u32 cpu_req_errata = 0;
148
149	if (errata_probe_mae(stage, archid, impid))
150		cpu_req_errata |= BIT(ERRATA_THEAD_MAE);
151
152	errata_probe_cmo(stage, archid, impid);
153
154	if (errata_probe_pmu(stage, archid, impid))
155		cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
156
157	return cpu_req_errata;
158}
159
/*
 * Patch every T-Head alternative entry in [begin, end) whose erratum is
 * required on this CPU, as reported by thead_errata_probe().  Entries
 * belonging to other vendors or with out-of-range patch ids are skipped.
 */
void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
			     unsigned long archid, unsigned long impid,
			     unsigned int stage)
{
	struct alt_entry *alt;
	u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
	u32 tmp;
	void *oldptr, *altptr;

	for (alt = begin; alt < end; alt++) {
		if (alt->vendor_id != THEAD_VENDOR_ID)
			continue;
		if (alt->patch_id >= ERRATA_THEAD_NUMBER)
			continue;

		/* Bit corresponding to this entry's erratum. */
		tmp = (1U << alt->patch_id);
		if (cpu_req_errata & tmp) {
			oldptr = ALT_OLD_PTR(alt);
			altptr = ALT_ALT_PTR(alt);

			/* On vm-alternatives, the mmu isn't running yet */
			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
				/* Direct copy; no patching machinery available yet. */
				memcpy(oldptr, altptr, alt->alt_len);
			} else {
				/* Serialize against other text modifications. */
				mutex_lock(&text_mutex);
				patch_text_nosync(oldptr, altptr, alt->alt_len);
				mutex_unlock(&text_mutex);
			}
		}
	}

	/* Early-boot copies bypass patch_text_nosync, so flush the icache here. */
	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		local_flush_icache_all();
}
194