/* SPDX-License-Identifier: GPL-2.0+ */
/*
 *  Cache-handling routines for MIPS CPUs
 *
 *  Copyright (c) 2003	Wolfgang Denk <wd@denx.de>
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/cacheops.h>
#include <asm/cm.h>

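/*
 * f_fill64 stores \val to 64 bytes of memory starting at \offset(\dst),
 * using eight LONG_S stores on 64-bit builds and sixteen when LONGSIZE
 * is 4, so the block covered is 64 bytes either way.
 */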
	.macro	f_fill64 dst, offset, val
	LONG_S	\val, (\offset +  0 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  1 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  2 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  3 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  4 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  5 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  6 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  7 * LONGSIZE)(\dst)
#if LONGSIZE == 4
	LONG_S	\val, (\offset +  8 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset +  9 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 10 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 11 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 12 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 13 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 14 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 15 * LONGSIZE)(\dst)
#endif
	.endm

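/*
 * cache_loop applies cache operation \op to every line in [\curr, \end),
 * stepping by \line_sz. The body always executes at least once, so callers
 * must not enter the loop with a zero-sized range.
 */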
	.macro cache_loop	curr, end, line_sz, op
10:	cache		\op, 0(\curr)
	PTR_ADDU	\curr, \curr, \line_sz
	bne		\curr, \end, 10b
	.endm

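/*
 * l1_info probes one L1 cache's geometry from Config1; \off selects the
 * I- or D-cache field group. The total size works out as
 *   ways * line_size * sets
 * with line_size = 2 << L, ways = A + 1 and sets = 32 << ((S + 1) & 7),
 * the masking handling the S=7 encoding for 32 sets. As an illustrative
 * example, L=4, A=3, S=2 gives 32 bytes * 4 ways * 256 sets = 32 KiB.
 * A zero L field means the cache is absent and \sz is left at zero.
 */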
	.macro	l1_info		sz, line_sz, off
	.set	push
	.set	noat

	mfc0	$1, CP0_CONFIG, 1

	/* detect line size */
	srl	\line_sz, $1, \off + MIPS_CONF1_DL_SHF - MIPS_CONF1_DA_SHF
	andi	\line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHF)
	move	\sz, zero
	beqz	\line_sz, 10f
	li	\sz, 2
	sllv	\line_sz, \sz, \line_sz

	/* detect associativity */
	srl	\sz, $1, \off + MIPS_CONF1_DA_SHF - MIPS_CONF1_DA_SHF
	andi	\sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHF)
	addiu	\sz, \sz, 1

	/* sz *= line_sz */
	mul	\sz, \sz, \line_sz

	/* detect log32(sets) */
	srl	$1, $1, \off + MIPS_CONF1_DS_SHF - MIPS_CONF1_DA_SHF
	andi	$1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHF)
	addiu	$1, $1, 1
	andi	$1, $1, 0x7

	/* sz <<= log32(sets) */
	sllv	\sz, \sz, $1

	/* sz *= 32 */
	li	$1, 32
	mul	\sz, \sz, $1
10:
	.set	pop
	.endm

	/*
	 * The changing of Kernel mode cacheability must be done from KSEG1.
	 * If the code is executing from KSEG0, jump to KSEG1 during the execution
	 * of change_k0_cca. change_k0_cca itself clears all hazards when returning.
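	 *
	 * As an illustrative example (32-bit KSEG layout assumed): if
	 * change_k0_cca links at 0x80001234, masking with CPHYSADDR(~0)
	 * leaves the physical address 0x00001234, and OR-ing in CKSEG1
	 * produces the uncached alias 0xa0001234.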
	 */
	.macro change_k0_cca_kseg1 mode
	PTR_LA	t0, change_k0_cca
	li	t1, CPHYSADDR(~0)
	and	t0, t0, t1
	PTR_LI	t1, CKSEG1
	or	t0, t0, t1
	li	a0, \mode
	jalr	t0
	.endm

/*
 * mips_cache_reset - low level initialisation of the primary caches
 *
 * This routine initialises the primary caches to ensure that they have good
 * parity.  It must be called by the ROM before any cached locations are used
 * to prevent the possibility of data with bad parity being written to memory.
 *
 * To initialise the instruction cache it is essential that a source of data
 * with good parity is available. This routine will initialise an area of
 * memory starting at CONFIG_MIPS_CACHE_INDEX_BASE to be used as a source of
 * parity.
 *
 * Note that this function does not follow the standard calling convention &
 * may clobber typically callee-saved registers.
 *
 * RETURNS: N/A
 *
 */
#define R_RETURN	s0
#define R_IC_SIZE	s1
#define R_IC_LINE	s2
#define R_DC_SIZE	s3
#define R_DC_LINE	s4
#define R_L2_SIZE	s5
#define R_L2_LINE	s6
#define R_L2_BYPASSED	s7
#define R_L2_L2C	t8
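/*
 * The cache parameters are kept mostly in s-registers so that they survive
 * the jalr to change_k0_cca, which is why this function clobbers registers
 * that are normally callee-saved, as noted above.
 */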
LEAF(mips_cache_reset)
	move	R_RETURN, ra

#ifdef CONFIG_MIPS_L2_CACHE
	/*
	 * For there to be an L2 present, Config2 must be present. If it isn't
	 * then we proceed knowing there's no L2 cache.
	 */
	move	R_L2_SIZE, zero
	move	R_L2_LINE, zero
	move	R_L2_BYPASSED, zero
	move	R_L2_L2C, zero
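	/* Config1.M (bit 31) flags Config2's presence; bgez tests that sign bit */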
	mfc0	t0, CP0_CONFIG, 1
	bgez	t0, l2_probe_done

	/*
	 * From MIPSr6 onwards the L2 cache configuration might not be reported
	 * by Config2. The Config5.L2C bit indicates whether this is the case,
	 * and if it is then we need knowledge of where else to look. For cores
	 * from Imagination Technologies this is a CM GCR.
	 */
# if __mips_isa_rev >= 6
	/* Check that Config5 exists */
	mfc0	t0, CP0_CONFIG, 2
	bgez	t0, l2_probe_cop0
	mfc0	t0, CP0_CONFIG, 3
	bgez	t0, l2_probe_cop0
	mfc0	t0, CP0_CONFIG, 4
	bgez	t0, l2_probe_cop0

	/* Check Config5.L2C is set */
	mfc0	t0, CP0_CONFIG, 5
	and	R_L2_L2C, t0, MIPS_CONF5_L2C
	beqz	R_L2_L2C, l2_probe_cop0

	/* Config5.L2C is set */
#  ifdef CONFIG_MIPS_CM
	/* The CM will provide L2 configuration */
	PTR_LI	t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw	t1, GCR_L2_CONFIG(t0)
	bgez	t1, l2_probe_done

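	/*
	 * Decode the GCR-reported geometry: line = 2 << LINESZ,
	 * ways = ASSOC + 1, sets = 64 << SETSZ, so size = line * ways * sets.
	 */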
	ext	R_L2_LINE, t1, \
		GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
	beqz	R_L2_LINE, l2_probe_done
	li	t2, 2
	sllv	R_L2_LINE, t2, R_L2_LINE

	ext	t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
	addiu	t2, t2, 1
	mul	R_L2_SIZE, R_L2_LINE, t2

	ext	t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
	sllv	R_L2_SIZE, R_L2_SIZE, t2
	li	t2, 64
	mul	R_L2_SIZE, R_L2_SIZE, t2

	/* Bypass the L2 cache so that we can init the L1s early */
	or	t1, t1, GCR_L2_CONFIG_BYPASS
	sw	t1, GCR_L2_CONFIG(t0)
	sync
	li	R_L2_BYPASSED, 1

	/* Zero the L2 tag registers */
	sw	zero, GCR_L2_TAG_ADDR(t0)
	sw	zero, GCR_L2_TAG_ADDR_UPPER(t0)
	sw	zero, GCR_L2_TAG_STATE(t0)
	sw	zero, GCR_L2_TAG_STATE_UPPER(t0)
	sw	zero, GCR_L2_DATA(t0)
	sw	zero, GCR_L2_DATA_UPPER(t0)
	sync
#  else
	/* We don't know how to retrieve L2 configuration on this system */
#  endif
	b	l2_probe_done
# endif

	/*
	 * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
	 * cache configuration from the cop0 Config2 register.
	 */
l2_probe_cop0:
	mfc0	t0, CP0_CONFIG, 2

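	/*
	 * Config2 reports the geometry as line = 2 << SL (SL=0 meaning no
	 * L2 at all), ways = SA + 1 and sets = 64 << SS.
	 */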
	srl	R_L2_LINE, t0, MIPS_CONF2_SL_SHF
	andi	R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
	beqz	R_L2_LINE, l2_probe_done
	li	t1, 2
	sllv	R_L2_LINE, t1, R_L2_LINE

	srl	t1, t0, MIPS_CONF2_SA_SHF
	andi	t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
	addiu	t1, t1, 1
	mul	R_L2_SIZE, R_L2_LINE, t1

	srl	t1, t0, MIPS_CONF2_SS_SHF
	andi	t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
	sllv	R_L2_SIZE, R_L2_SIZE, t1
	li	t1, 64
	mul	R_L2_SIZE, R_L2_SIZE, t1

	/* Attempt to bypass the L2 so that we can init the L1s early */
	or	t0, t0, MIPS_CONF2_L2B
	mtc0	t0, CP0_CONFIG, 2
	ehb
	mfc0	t0, CP0_CONFIG, 2
	and	R_L2_BYPASSED, t0, MIPS_CONF2_L2B
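	/*
	 * Reading Config2 back tells us whether the write took effect: on
	 * cores where L2B is not writable the bit reads back as 0 and
	 * R_L2_BYPASSED stays clear.
	 */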

	/* Zero the L2 tag registers */
	mtc0	zero, CP0_TAGLO, 4
	ehb
l2_probe_done:
#endif

#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
	li	R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
	li	R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
#else
	l1_info	R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
#endif

#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
	li	R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
	li	R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
#else
	l1_info	R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
#endif

#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD

	/* Determine the largest L1 cache size */
#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
#if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
	li	v0, CONFIG_SYS_ICACHE_SIZE
#else
	li	v0, CONFIG_SYS_DCACHE_SIZE
#endif
#else
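	/* Branchless max: pick R_DC_SIZE when it exceeds R_IC_SIZE */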
	move	v0, R_IC_SIZE
	sltu	t1, R_IC_SIZE, R_DC_SIZE
	movn	v0, R_DC_SIZE, t1
#endif
	/*
	 * Now clear that much memory, starting from the cache index base
	 * (accessed uncached via KSEG1).
	 */
	PTR_LI		a0, CKSEG1ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	a1, a0, v0
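	/*
	 * a0 is advanced past each 64-byte block first, then f_fill64 stores
	 * into the block just skipped via negative offsets, so the loop exits
	 * as soon as a0 reaches the end address in a1.
	 */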
2:	PTR_ADDIU	a0, 64
	f_fill64	a0, -64, zero
	bne		a0, a1, 2b

#endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */

#ifdef CONFIG_MIPS_L2_CACHE
	/*
	 * If the L2 is bypassed, init the L1 first so that we can execute the
	 * rest of the cache initialisation using the L1 instruction cache.
	 */
	bnez		R_L2_BYPASSED, l1_init

l2_init:
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	t1, t0, R_L2_SIZE
1:	cache		INDEX_STORE_TAG_SD, 0(t0)
	PTR_ADDU	t0, t0, R_L2_LINE
	bne		t0, t1, 1b

	/*
	 * If the L2 was bypassed then we already initialised the L1s before
	 * the L2, so we are now done.
	 */
	bnez		R_L2_BYPASSED, l2_unbypass
#endif

	/*
	 * The TagLo registers used depend upon the CPU implementation, but the
	 * architecture requires that it is safe for software to write to both
	 * TagLo selects 0 & 2 covering supported cases.
	 */
l1_init:
	mtc0		zero, CP0_TAGLO
	mtc0		zero, CP0_TAGLO, 2
	ehb

	/*
	 * The caches are probably in an indeterminate state, so we force good
	 * parity into them by doing an invalidate for each line. If
	 * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
	 * perform a load/fill & a further invalidate for each line, assuming
	 * that the bottom of RAM (having just been cleared) will generate good
	 * parity for the cache.
	 */

	/*
	 * Initialize the I-cache first,
	 */
	blez		R_IC_SIZE, 1f
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	t1, t0, R_IC_SIZE
	/* clear tag to invalidate */
	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* fill once, so data field parity is correct */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_IC_LINE, FILL
	/* invalidate again - prudent but not strictly necessary */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
#endif
	sync

	/*
	 * Enable use of the I-cache by setting Config.K0.
	 */
	change_k0_cca_kseg1 CONF_CM_CACHABLE_NONCOHERENT

	/*
	 * then initialize D-cache.
	 */
1:	blez		R_DC_SIZE, 3f
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	PTR_ADDU	t1, t0, R_DC_SIZE
	/* clear all tags */
	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* load from each line (in cached space) */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
2:	LONG_L		zero, 0(t0)
	PTR_ADDU	t0, R_DC_LINE
	bne		t0, t1, 2b
	/* clear all tags */
	PTR_LI		t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
	cache_loop	t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
#endif
3:

#ifdef CONFIG_MIPS_L2_CACHE
	/* If the L2 isn't bypassed then we're done */
	beqz		R_L2_BYPASSED, return

	/* The L2 is bypassed - go initialise it */
	b		l2_init

l2_unbypass:
# if __mips_isa_rev >= 6
	beqz		R_L2_L2C, 1f

	PTR_LI		t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw		t1, GCR_L2_CONFIG(t0)
	xor		t1, t1, GCR_L2_CONFIG_BYPASS
	sw		t1, GCR_L2_CONFIG(t0)
	sync
	ehb
	b		2f
# endif
1:	mfc0		t0, CP0_CONFIG, 2
	xor		t0, t0, MIPS_CONF2_L2B
	mtc0		t0, CP0_CONFIG, 2
	ehb

2:
# ifdef CONFIG_MIPS_CM
	/* Config3 must exist for a CM to be present */
	mfc0		t0, CP0_CONFIG, 1
	bgez		t0, 2f
	mfc0		t0, CP0_CONFIG, 2
	bgez		t0, 2f

	/* Check Config3.CMGCR to determine CM presence */
	mfc0		t0, CP0_CONFIG, 3
	and		t0, t0, MIPS_CONF3_CMGCR
	beqz		t0, 2f

	/* Change Config.K0 to a coherent CCA */
	change_k0_cca_kseg1 CONF_CM_CACHABLE_COW

	/*
	 * Join the coherent domain such that the caches of this core are kept
	 * coherent with those of other cores.
	 */
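	/* CM >= 3 exposes a single coherence enable bit; older CMs take a domain mask */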
	PTR_LI		t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
	lw		t1, GCR_REV(t0)
	li		t2, GCR_REV_CM3
	li		t3, GCR_Cx_COHERENCE_EN
	bge		t1, t2, 1f
	li		t3, GCR_Cx_COHERENCE_DOM_EN
1:	sw		t3, GCR_Cx_COHERENCE(t0)
	ehb
2:
# endif
#endif

return:
	/* Ensure all cache operations complete before returning */
	sync
	jr	R_RETURN
	END(mips_cache_reset)

LEAF(mips_cache_disable)
	move	R_RETURN, ra
	change_k0_cca_kseg1 CONF_CM_UNCACHED
	jr	R_RETURN
	END(mips_cache_disable)

LEAF(change_k0_cca)
	mfc0		t0, CP0_CONFIG
#if __mips_isa_rev >= 2
	ins		t0, a0, 0, 3
#else
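	/*
	 * Pre-R2 cores lack ins, so splice a0 into the low Config.K0 bits
	 * with the xor/and/xor idiom: bits under CONF_CM_CMASK come from a0,
	 * all others keep their value from t0.
	 */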
	xor		a0, a0, t0
	andi		a0, a0, CONF_CM_CMASK
	xor		t0, a0, t0
#endif
	mtc0		t0, CP0_CONFIG

	jr.hb		ra
	END(change_k0_cca)
