/*	$NetBSD: cpufunc_asm_sa1.S,v 1.16 2022/10/20 06:58:38 skrll Exp $	*/

/*
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * SA-1 assembly functions for CPU / MMU / TLB specific operations
 */

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

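/*
 * Literal pool entry: the address of the global block_userspace_access
 * flag, which is raised around the cache clean / TTB update sequences
 * below when CACHE_CLEAN_BLOCK_INTR is not defined.
 */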
.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
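 *
 * r0 = new TTB value
 * r1 = non-zero if the caches should be cleaned and the TLB flushed
 *	as part of the change; if zero, only the TTB register is written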
 */
ENTRY(sa1_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
	mrs	r3, cpsr
	orr	r2, r3, #(I32_bit | F32_bit)
	msr	cpsr_all, r2
#else
	ldr	r3, .Lblock_userspace_access
	ldr	r2, [r3]
	orr	ip, r2, #1
	str	ip, [r3]
#endif
	cmp	r1, #0			/* cache clean/flush requested? */
	beq	1f			/* no: just write the new TTB */
	stmfd	sp!, {r0-r3, lr}
	bl	_C_LABEL(sa1_cache_cleanID)
	ldmfd	sp!, {r0-r3, lr}
	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I$ and BTB */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write and fill buffer */
	cmp	r0, #1			/* TTB is never 1: force NE for the mcrne below */

1:	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcrne	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLB */

	/* The cleanID above means we only need to flush the I cache here */
	mcrne	p15, 0, r0, c7, c5, 0	/* invalidate I$ and BTB */

	/* Make sure that pipeline is emptied */
	mov	r0, r0
	mov	r0, r0
#ifdef CACHE_CLEAN_BLOCK_INTR
	msr	cpsr_all, r3
#else
	str	r2, [r3]
#endif
	RET
END(sa1_setttb)

/*
 * TLB functions
 */
ENTRY(sa1_tlb_flushID_SE)
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 0	/* flush I tlb */
#if PAGE_SIZE == 2 * L2_S_SIZE	/* a VM page covers two L2 small pages; flush both */
	add	r0, r0, #L2_S_SIZE
	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
	mcr	p15, 0, r0, c8, c5, 0	/* flush I tlb */
#endif
	mov	pc, lr
END(sa1_tlb_flushID_SE)

/*
 * Cache functions
 */
ENTRY(sa1_cache_flushID)
	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D cache */
	mov	pc, lr
END(sa1_cache_flushID)

ENTRY(sa1_cache_flushI)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
	mov	pc, lr
END(sa1_cache_flushI)

ENTRY(sa1_cache_flushD)
	mcr	p15, 0, r0, c7, c6, 0	/* flush D cache */
	mov	pc, lr
END(sa1_cache_flushD)

ENTRY(sa1_cache_flushD_SE)
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	mov	pc, lr
END(sa1_cache_flushD_SE)

ENTRY(sa1_cache_cleanD_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mov	pc, lr
END(sa1_cache_cleanD_E)

/*
 * Information for the SA-1 cache clean/purge functions:
 *
 *	* Virtual address of the memory region to use
 *	* Size of memory region
 */
	.data

	.global	_C_LABEL(sa1_cache_clean_addr)
_C_LABEL(sa1_cache_clean_addr):
	.word	0xf0000000

	.global	_C_LABEL(sa1_cache_clean_size)
_C_LABEL(sa1_cache_clean_size):
#if defined(CPU_SA1100) || defined(CPU_SA1110)
	.word	0x00004000
#else
	.word	0x00008000
#endif

	.text

.Lsa1_cache_clean_addr:
	.word	_C_LABEL(sa1_cache_clean_addr)
.Lsa1_cache_clean_size:
	.word	_C_LABEL(sa1_cache_clean_size)

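/*
 * Blocking around a cache clean: with CACHE_CLEAN_BLOCK_INTR, IRQs and
 * FIQs are disabled for the duration of the clean; otherwise the global
 * block_userspace_access flag is raised instead and restored afterwards.
 */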
#ifdef CACHE_CLEAN_BLOCK_INTR
#define	SA1_CACHE_CLEAN_BLOCK						\
	mrs	r3, cpsr					;	\
	orr	r0, r3, #(I32_bit | F32_bit)			;	\
	msr	cpsr_all, r0

#define	SA1_CACHE_CLEAN_UNBLOCK						\
	msr	cpsr_all, r3
#else
#define	SA1_CACHE_CLEAN_BLOCK						\
	ldr	r3, .Lblock_userspace_access			;	\
	ldr	ip, [r3]					;	\
	orr	r0, ip, #1					;	\
	str	r0, [r3]

#define	SA1_CACHE_CLEAN_UNBLOCK						\
	str	ip, [r3]
#endif /* CACHE_CLEAN_BLOCK_INTR */

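/*
 * With DOUBLE_CACHE_CLEAN_BANK the clean address is XORed with the
 * clean size and stored back on every pass, so consecutive cleans
 * alternate between two banks of memory.  The intent (as far as the
 * code shows) is that each pass misses in the cache and so displaces
 * dirty lines rather than hitting data left by the previous pass.
 */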
#ifdef DOUBLE_CACHE_CLEAN_BANK
#define	SA1_DOUBLE_CACHE_CLEAN_BANK					\
	eor	r0, r0, r1					;	\
	str	r0, [r2]
#else
#define	SA1_DOUBLE_CACHE_CLEAN_BANK	/* nothing */
#endif /* DOUBLE_CACHE_CLEAN_BANK */

#define	SA1_CACHE_CLEAN_PROLOGUE					\
	SA1_CACHE_CLEAN_BLOCK					;	\
	ldr	r2, .Lsa1_cache_clean_addr			;	\
	ldmia	r2, {r0, r1}					;	\
	SA1_DOUBLE_CACHE_CLEAN_BANK

#define	SA1_CACHE_CLEAN_EPILOGUE					\
	SA1_CACHE_CLEAN_UNBLOCK

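/*
 * sa1_cache_syncI and sa1_cache_purgeID flush the I$ first, then fall
 * through to the D$ clean loop shared with sa1_cache_cleanID,
 * sa1_cache_purgeD and sa1_cache_cleanD.
 */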
ENTRY_NP(sa1_cache_syncI)
ENTRY_NP(sa1_cache_purgeID)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
ENTRY_NP(sa1_cache_cleanID)
ENTRY_NP(sa1_cache_purgeD)
ENTRY(sa1_cache_cleanD)
	SA1_CACHE_CLEAN_PROLOGUE

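	/*
	 * Clean the D$ by loading one word from every 32-byte line of
	 * the clean region (r0 = address, r1 = size, set up by the
	 * prologue); the resulting line fills displace, and write back,
	 * any dirty data in the cache.
	 */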
1:	ldr	r2, [r0], #32
	subs	r1, r1, #32
	bne	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

	SA1_CACHE_CLEAN_EPILOGUE
	mov	pc, lr
END(sa1_cache_cleanD)

ENTRY(sa1_cache_purgeID_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean dcache entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	mov	pc, lr
END(sa1_cache_purgeID_E)

ENTRY(sa1_cache_purgeD_E)
	mcr	p15, 0, r0, c7, c10, 1	/* clean dcache entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	mov	pc, lr
END(sa1_cache_purgeD_E)

/*
 * Soft functions
 */
/* sa1_cache_syncI is identical to sa1_cache_purgeID */

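/*
 * For ranges of 16KB or more it is cheaper to clean/purge the whole
 * cache, so the range functions below branch to their full-cache
 * counterparts.  Otherwise the range is extended to 32-byte cache line
 * boundaries and walked line by line.
 */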
ENTRY(sa1_cache_cleanID_rng)
ENTRY(sa1_cache_cleanD_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_cleanID)

	and	r2, r0, #0x1f
	add	r1, r1, r2
	bic	r0, r0, #0x1f

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mov	pc, lr
END(sa1_cache_cleanD_rng)
END(sa1_cache_cleanID_rng)

ENTRY(sa1_cache_purgeID_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_purgeID)

	and	r2, r0, #0x1f
	add	r1, r1, r2
	bic	r0, r0, #0x1f

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
	mov	pc, lr
END(sa1_cache_purgeID_rng)

ENTRY(sa1_cache_purgeD_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_purgeD)

	and	r2, r0, #0x1f
	add	r1, r1, r2
	bic	r0, r0, #0x1f

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	mcr	p15, 0, r0, c7, c6, 1	/* flush D cache single entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mov	pc, lr
END(sa1_cache_purgeD_rng)

ENTRY(sa1_cache_syncI_rng)
	cmp	r1, #0x4000
	bcs	_C_LABEL(sa1_cache_syncI)

	and	r2, r0, #0x1f
	add	r1, r1, r2
	bic	r0, r0, #0x1f

1:	mcr	p15, 0, r0, c7, c10, 1	/* clean D cache entry */
	add	r0, r0, #32
	subs	r1, r1, #32
	bhi	1b

	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */

	mov	pc, lr
END(sa1_cache_syncI_rng)

/*
 * Context switch.
 *
 * These are the CPU-specific parts of the context switcher cpu_switch()
 * These functions actually perform the TTB reload.
 */
#if defined(CPU_SA110)
ENTRY(sa110_context_switch)
	/*
	 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
	 * Thus the data cache will contain only kernel data and the
	 * instruction cache will contain only kernel code, and all
	 * kernel mappings are shared by all processes.
	 */

	/* Write the TTB */
	mcr	p15, 0, r0, c2, c0, 0

	/* If we have updated the TTB we must flush the TLB */
	mcr	p15, 0, r0, c8, c7, 0	/* flush the I+D tlb */

	/* Make sure that pipeline is emptied */
	mov	r0, r0
	mov	r0, r0
	mov	pc, lr
END(sa110_context_switch)
#endif
