/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

#ifndef CONFIG_CMO_BY_VA_ONLY
/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one cache level by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x16: FEAT_CCIDX
 * x2~x9: clobbered
 */
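/*
 * Worked example (illustrative numbers only, not tied to any specific
 * core): a 4-way, 64 KiB data cache with 64-byte lines has 256 sets.
 * Then:
 *   line length offset   x2 = log2(64) = 6
 *   ways - 1             x3 = 3,  clz(w3) -> x5 = 30
 *   sets - 1             x4 = 255
 * and each DC (C)ISW operand built below is
 *   (way << 30) | (set << 6) | (level << 1)
 * i.e. way in the top bits, set above the line offset, level in
 * bits [3:1], matching the shifts by x5 and x2 and the OR with x12.
 */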
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	ubfx	x2, x6,  #0,  #3	/* x2 <- log2(cache line size)-4 */
	cbz	x16, 3f			/* check for FEAT_CCIDX */
	ubfx	x3, x6,  #3, #21	/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #32, #24	/* x4 <- number of cache sets - 1 */
	b	4f
3:
	ubfx	x3, x6,  #3, #10	/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #13, #15	/* x4 <- number of cache sets - 1 */
4:
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	clz	w5, w3			/* bit position of #ways */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
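/*
 * Minimal usage sketch from C (the prototypes below are assumed here
 * for illustration; callers go through the two wrappers further down,
 * which load the invalidate_only flag into x0 and branch here):
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 *
 *	__asm_flush_dcache_all();	// clean & invalidate every level by set/way
 *	__asm_invalidate_dcache_all();	// invalidate only
 */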
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	ubfx	x11, x10, #24, #3	/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mrs	x16, s3_0_c0_c7_2	/* read value of id_aa64mmfr2_el1 */
	ubfx	x16, x16, #20, #4	/* save FEAT_CCIDX identifier in x16 */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	add	x12, x0, x0, lsl #1	/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
WEAK(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
WEAK(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection

#else	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * Define these so that they actively clash with an implementation
 * accidentally selecting CONFIG_CMO_BY_VA_ONLY
 */

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, xzr
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection
.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, xzr
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection
#endif	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
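/*
 * Minimal caller sketch (hypothetical buffer names; the prototype is
 * assumed to take start and end addresses as unsigned longs):
 *
 *	void __asm_flush_dcache_range(unsigned long start, unsigned long end);
 *
 *	// e.g. push a freshly written buffer out to memory for a device
 *	__asm_flush_dcache_range((unsigned long)buf,
 *				 (unsigned long)buf + len);
 *
 * The start address is aligned down to a cache line below, so data
 * sharing the first line with the buffer is cleaned as well.
 */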
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
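/*
 * Minimal caller sketch (hypothetical names, same assumed prototype
 * style as above):
 *
 *	void __asm_invalidate_dcache_range(unsigned long start, unsigned long end);
 *
 *	// e.g. discard stale lines before reading data a device wrote
 *	__asm_invalidate_dcache_range((unsigned long)buf,
 *				      (unsigned long)buf + len);
 *
 * Whole cache lines are invalidated, so buffers should be cache-line
 * aligned and sized to avoid discarding unrelated data in lines that
 * straddle the range.
 */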
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries (inner shareable).
 */
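/*
 * A sketch of the usual pairing when code has just been copied into
 * memory (hypothetical dst/size; sequence shown for illustration):
 *
 *	__asm_flush_dcache_range(dst, dst + size);	// make the new code visible
 *	__asm_invalidate_icache_all();			// drop stale icache lines
 *
 * followed by an isb before branching to the new code.
 */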
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
WEAK(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
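/*
 * Minimal caller sketch (new_page_table is a hypothetical symbol; the
 * argument is the base address to load into TTBR0):
 *
 *	extern u64 new_page_table[];
 *	__asm_switch_ttbr((ulong)new_page_table);
 *
 * The routine briefly runs with MMU and caches disabled while the TLB
 * is invalidated and TTBR0 is replaced, so its own code is expected to
 * be reachable at its physical address during that window.
 */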
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (live throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection