/*-
 * Copyright (c) 2010 Per Odlund <per.odlund@armagedon.se>
 * Copyright (C) 2011 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include <machine/sysreg.h>

	.cpu cortex-a8

.Lcoherency_level:
	.word	_C_LABEL(arm_cache_loc)
.Lcache_type:
	.word	_C_LABEL(arm_cache_type)
.Larmv7_dcache_line_size:
	.word	_C_LABEL(arm_dcache_min_line_size)
.Larmv7_icache_line_size:
	.word	_C_LABEL(arm_icache_min_line_size)
.Larmv7_idcache_line_size:
	.word	_C_LABEL(arm_idcache_min_line_size)
.Lway_mask:
	.word	0x3ff
.Lmax_index:
	.word	0x7fff
.Lpage_mask:
	.word	0xfff

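/*
 * TTBR0 attribute bits: shareability (S/NOS) and inner/outer cacheability
 * used for translation table walks (ARMv7 TTBR0 format with the
 * Multiprocessing Extensions).
 */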
#define PT_NOS		(1 << 5)
#define PT_S		(1 << 1)
#define PT_INNER_NC	0
#define PT_INNER_WT	(1 << 0)
#define PT_INNER_WB	((1 << 0) | (1 << 6))
#define PT_INNER_WBWA	(1 << 6)
#define PT_OUTER_NC	0
#define PT_OUTER_WT	(2 << 3)
#define PT_OUTER_WB	(3 << 3)
#define PT_OUTER_WBWA	(1 << 3)

#ifdef SMP
#define PT_ATTR	(PT_S|PT_INNER_WBWA|PT_OUTER_WBWA|PT_NOS)
#else
#define PT_ATTR	(PT_INNER_WBWA|PT_OUTER_WBWA)
#endif

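/*
 * Set TTBR0 to the new translation table base passed in r0, applying the
 * walk attributes above, then invalidate the TLB so no stale translations
 * remain.
 */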
ENTRY(armv7_setttb)
	dsb
	orr	r0, r0, #PT_ATTR
	mcr	CP15_TTBR0(r0)
	isb
#ifdef SMP
	mcr	CP15_TLBIALLIS
#else
	mcr	CP15_TLBIALL
#endif
	dsb
	isb
	RET
END(armv7_setttb)

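/*
 * Invalidate the entire unified TLB and the branch predictor; the IS
 * variants broadcast the operation to the other cores when SMP.
 */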
ENTRY(armv7_tlb_flushID)
	dsb
#ifdef SMP
	mcr	CP15_TLBIALLIS
	mcr	CP15_BPIALLIS
#else
	mcr	CP15_TLBIALL
	mcr	CP15_BPIALL
#endif
	dsb
	isb
	mov	pc, lr
END(armv7_tlb_flushID)

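/*
 * Invalidate the TLB entries for a single virtual address (r0, rounded down
 * to a page boundary) and flush the branch predictor.
 */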
ENTRY(armv7_tlb_flushID_SE)
	ldr	r1, .Lpage_mask
	bic	r0, r0, r1
#ifdef SMP
	mcr	CP15_TLBIMVAAIS(r0)
	mcr	CP15_BPIALLIS
#else
	mcr	CP15_TLBIMVA(r0)
	mcr	CP15_BPIALL
#endif
	dsb
	isb
	mov	pc, lr
END(armv7_tlb_flushID_SE)

/*
 * Write back and invalidate the entire data cache by set/way, walking every
 * cache level reported in arm_cache_loc.  Based on the algorithm in the ARM
 * Architecture Reference Manual.
 */
ENTRY(armv7_dcache_wbinv_all)
	stmdb	sp!, {r4, r5, r6, r7, r8, r9}

	/* Get cache level */
	ldr	r0, .Lcoherency_level
	ldr	r3, [r0]
	cmp	r3, #0
	beq	Finished
	/* For each cache level */
	mov	r8, #0
Loop1:
	/* Get cache type for given level */
	mov	r2, r8, lsl #2
	add	r2, r2, r2
	ldr	r0, .Lcache_type
	ldr	r1, [r0, r2]

	/* Get line size */
	and	r2, r1, #7
	add	r2, r2, #4

	/* Get number of ways */
	ldr	r4, .Lway_mask
	ands	r4, r4, r1, lsr #3
	clz	r5, r4

	/* Get max index */
	ldr	r7, .Lmax_index
	ands	r7, r7, r1, lsr #13
Loop2:
	mov	r9, r4
Loop3:
	mov	r6, r8, lsl #1
	orr	r6, r6, r9, lsl r5
	orr	r6, r6, r7, lsl r2

	/* Clean and invalidate data cache by way/index */
	mcr	CP15_DCCISW(r6)
	subs	r9, r9, #1
	bge	Loop3
	subs	r7, r7, #1
	bge	Loop2
Skip:
	add	r8, r8, #1
	cmp	r3, r8
	bne	Loop1
Finished:
	dsb
	ldmia	sp!, {r4, r5, r6, r7, r8, r9}
	RET
END(armv7_dcache_wbinv_all)

ENTRY(armv7_idcache_wbinv_all)
	stmdb	sp!, {lr}
	bl	armv7_dcache_wbinv_all
#ifdef SMP
	mcr	CP15_ICIALLUIS
#else
	mcr	CP15_ICIALLU
#endif
	dsb
	isb
	ldmia	sp!, {lr}
	RET
END(armv7_idcache_wbinv_all)

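/*
 * The range operations below take a starting virtual address in r0 and a
 * length in bytes in r1; the length is rounded up so that every cache line
 * touched by the range is covered.
 */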
ENTRY(armv7_dcache_wb_range)
	ldr	ip, .Larmv7_dcache_line_size
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larmv7_wb_next:
	mcr	CP15_DCCMVAC(r0)
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larmv7_wb_next
	dsb				/* data synchronization barrier */
	RET
END(armv7_dcache_wb_range)

ENTRY(armv7_dcache_wbinv_range)
	ldr	ip, .Larmv7_dcache_line_size
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larmv7_wbinv_next:
	mcr	CP15_DCCIMVAC(r0)
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larmv7_wbinv_next
	dsb				/* data synchronization barrier */
	RET
END(armv7_dcache_wbinv_range)

/*
 * Note: we must not simply invalidate everything here, since that would
 * discard dirty data elsewhere in the cache.  If the range is too big to
 * walk line by line, a write-back/invalidate of the entire cache must be
 * used instead.
 */
ENTRY(armv7_dcache_inv_range)
	ldr	ip, .Larmv7_dcache_line_size
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larmv7_inv_next:
	mcr	CP15_DCIMVAC(r0)
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larmv7_inv_next
	dsb				/* data synchronization barrier */
	RET
END(armv7_dcache_inv_range)

ENTRY(armv7_idcache_wbinv_range)
	ldr	ip, .Larmv7_idcache_line_size
	ldr	ip, [ip]
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
.Larmv7_id_wbinv_next:
	mcr	CP15_ICIMVAU(r0)
	mcr	CP15_DCCIMVAC(r0)
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larmv7_id_wbinv_next
	dsb				/* data synchronization barrier */
	isb				/* instruction synchronization barrier */
	RET
END(armv7_idcache_wbinv_range)

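/*
 * Invalidate the entire instruction cache so that subsequently fetched
 * instructions are read from memory.
 */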
ENTRY_NP(armv7_icache_sync_all)
#ifdef SMP
	mcr	CP15_ICIALLUIS
#else
	mcr	CP15_ICIALLU
#endif
	dsb				/* data synchronization barrier */
	isb				/* instruction synchronization barrier */
	RET
END(armv7_icache_sync_all)

ENTRY_NP(armv7_icache_sync_range)
	ldr	ip, .Larmv7_icache_line_size
	ldr	ip, [ip]
	sub	r3, ip, #1		/* Address need not be aligned, but */
	and	r2, r0, r3		/* round length up if op spans line */
	add	r1, r1, r2		/* boundary: len += addr & linemask; */
.Larmv7_sync_next:
	mcr	CP15_DCCMVAC(r0)
	mcr	CP15_ICIMVAU(r0)
	add	r0, r0, ip
	subs	r1, r1, ip
	bhi	.Larmv7_sync_next
	dsb				/* data synchronization barrier */
	isb				/* instruction synchronization barrier */
	RET
END(armv7_icache_sync_range)

ENTRY(armv7_cpu_sleep)
	dsb				/* data synchronization barrier */
	wfi				/* wait for interrupt */
	RET
END(armv7_cpu_sleep)

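/*
 * Switch to a new address space: load TTBR0 with the new translation table
 * base passed in r0 and invalidate the stale TLB entries.
 */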
ENTRY(armv7_context_switch)
	dsb
	orr	r0, r0, #PT_ATTR

	mcr	CP15_TTBR0(r0)
	isb
#ifdef SMP
	mcr	CP15_TLBIALLIS
#else
	mcr	CP15_TLBIALL
#endif
	dsb
	isb
	RET
END(armv7_context_switch)

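/*
 * Drain the write buffer; on ARMv7 a DSB subsumes the older drain-write-
 * buffer operation.
 */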
ENTRY(armv7_drain_writebuf)
	dsb
	RET
END(armv7_drain_writebuf)

ENTRY(armv7_sev)
	dsb
	sev
	nop
	RET
END(armv7_sev)

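/*
 * Modify the Auxiliary Control Register: r0 is a mask of bits to clear and
 * r1 a mask of bits to toggle (typically used to set bits within the cleared
 * mask).  The previous ACTLR value is returned in r0.
 */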
ENTRY(armv7_auxctrl)
	mrc	CP15_ACTLR(r2)
	bic	r3, r2, r0	/* Clear bits */
	eor	r3, r3, r1	/* XOR bits */

	teq	r2, r3
	mcrne	CP15_ACTLR(r3)
	mov	r0, r2
	RET
END(armv7_auxctrl)

/*
 * Invalidate the entire instruction, data and branch predictor caches.  Used
 * by startup code, which counts on the fact that only r0-r3 and ip are
 * modified and no stack space is used.
 */
ENTRY(armv7_idcache_inv_all)
	mov	r0, #0
	mcr	CP15_CSSELR(r0)		@ set cache level to L1
	mrc	CP15_CCSIDR(r0)

	ubfx	r2, r0, #13, #15	@ get num sets - 1 from CCSIDR
	ubfx	r3, r0, #3, #10		@ get numways - 1 from CCSIDR
	clz	r1, r3			@ number of bits to MSB of way
	lsl	r3, r3, r1		@ shift into position
	mov	ip, #1
	lsl	ip, ip, r1		@ ip now contains the way decr

	ubfx	r0, r0, #0, #3		@ get linesize from CCSIDR
	add	r0, r0, #4		@ apply bias
	lsl	r2, r2, r0		@ shift sets by log2(linesize)
	add	r3, r3, r2		@ merge numsets - 1 with numways - 1
	sub	ip, ip, r2		@ subtract numsets - 1 from way decr
	mov	r1, #1
	lsl	r1, r1, r0		@ r1 now contains the set decr
	mov	r2, ip			@ r2 now contains set way decr

	/* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */
1:	mcr	CP15_DCISW(r3)		@ invalidate line
	movs	r0, r3			@ get current way/set
	beq	2f			@ at 0 means we are done.
	movs	r0, r0, lsl #10		@ clear way bits leaving only set bits
	subne	r3, r3, r1		@ non-zero?, decrement set #
	subeq	r3, r3, r2		@ zero?, decrement way # and restore set count
	b	1b

2:	dsb				@ wait for stores to finish
	mov	r0, #0			@ and ...
	mcr	CP15_ICIALLU		@ invalidate instruction+branch cache
	isb				@ instruction sync barrier
	bx	lr			@ return
END(armv7_idcache_inv_all)

ENTRY_NP(armv7_sleep)
	dsb
	wfi
	bx	lr
END(armv7_sleep)