/*-
 * Copyright (c) 2010 Per Odlund <per.odlund@armagedon.se>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARMv6 assembly functions for manipulating caches and other core functions.
 * Based on cpufuncs for v6 and xscale.
 */
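
/*
 * These entry points appear to follow the AAPCS C calling convention:
 * the first argument arrives in r0, the second in r1, and control
 * returns through RET/bx lr.  A hypothetical C-side view (the
 * vaddr_t/vsize_t types are borrowed from the LINTSTUB comments below;
 * no actual declarations appear in this file):
 *
 *	extern void arm_dcache_wb_range(vaddr_t va, vsize_t len);
 *	extern void arm_idcache_wbinv_all(void);
 */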

#include <mach/arm/asm.h>
#include <arm/asm_help.h>
#include <arm/arch.h>

#ifdef _ARM_ARCH_6

#define ENTRY_NP ENTRY
#define _C_LABEL(x) _ ##x

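/*
 * arm_cpu_sleep: idle the core until the next interrupt, using the
 * ARMv6 CP15 wait-for-interrupt operation (c7/c0/4).  Presumed
 * prototype (an assumption, not stated in this file):
 * void arm_cpu_sleep(void);
 */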
ENTRY(arm_cpu_sleep)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c0, 4	/* wait for interrupt */
	RET
END(arm_cpu_sleep)

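/*
 * arm_wait: stall until outstanding CP15 operations have completed.
 * The MRC creates a dependency on CP15 and the ADD consumes its
 * result, forcing the pipeline to wait for the read.
 */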
ENTRY(arm_wait)
	mrc	p15, 0, r0, c2, c0, 0	@ arbitrary read of CP15
	add	r0, r0, #0		@ a stall
	bx	lr
END(arm_wait)

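/*
 * arm_context_switch: switch to a new translation table.  Presumed
 * prototype (an assumption): void arm_context_switch(paddr_t ttb);
 * the new translation table base is expected in r0.
 */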
ENTRY(arm_context_switch)
	/*
	 * We can assume that the caches will only contain kernel addresses
	 * at this point.  So no need to flush them again.
	 */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	mcr	p15, 0, r0, c2, c0, 0	/* set the new TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* and flush the I+D tlbs */

	/* Paranoia -- make sure the pipeline is empty. */
	nop
	nop
	nop
	RET
END(arm_context_switch)

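/*
 * arm_tlb_flushID: invalidate the entire unified (I+D) TLB.
 */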
ENTRY(arm_tlb_flushID)
	mcr	p15, 0, r0, c8, c7, 0	/* flush I+D tlb */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mov	pc, lr
END(arm_tlb_flushID)

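/*
 * arm_tlb_flushID_RANGE: invalidate the I+D TLB entries covering the
 * range [r0, r1).  Both addresses are truncated to 4 KiB page
 * boundaries before the loop.  Presumed prototype (an assumption):
 * void arm_tlb_flushID_RANGE(vaddr_t start, vaddr_t end);
 */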
ENTRY(arm_tlb_flushID_RANGE)
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mov	r0, r0, lsr #12		/* truncate both addresses */
	mov	r1, r1, lsr #12		/* to page boundaries */
	mov	r0, r0, lsl #12
	mov	r1, r1, lsl #12
1:	mcr	p15, 0, r0, c8, c7, 1	/* flush I+D tlb single entry */
	add	r0, r0, #0x1000		/* page size */
	cmp	r0, r1
	bcc	1b
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 4	/* flush prefetch buffer (BPIALL would be c7, c5, 6) */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	bx	lr
END(arm_tlb_flushID_RANGE)

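/*
 * arm_tlb_flushID_SE: invalidate the single I+D TLB entry covering the
 * virtual address in r0.  Presumed prototype (an assumption):
 * void arm_tlb_flushID_SE(vaddr_t va);
 */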
ENTRY(arm_tlb_flushID_SE)
	mcr	p15, 0, r0, c8, c7, 1	/* flush I+D tlb single entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mov	pc, lr
END(arm_tlb_flushID_SE)

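/*
 * arm_tlb_flushID_ASID: invalidate all I+D TLB entries for the ASID in
 * r0 (CP15 c8/c7/2).  Currently compiled out; see the TODO below.
 */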
ENTRY(arm_tlb_flushID_ASID)
#if 0 /* TODO: ARM11 MPCore */
	mcr	p15, 0, r0, c8, c7, 2	/* flush I+D tlb by ASID */
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
#endif
	RET
END(arm_tlb_flushID_ASID)

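/*
 * arm_setttb: install a new translation table base.  r0 holds the new
 * TTB; r1 appears to act as a flag selecting whether to drain the
 * write buffer first and invalidate the TLBs afterwards (an inference
 * from the conditional mcrne instructions, not documented here).
 */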
ENTRY(arm_setttb)
	cmp	r1, #0
	mcrne	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */
	mcrne	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET
END(arm_setttb)

/* Other functions. */

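/*
 * arm_drain_writebuf: data synchronization barrier, via the ARMv6
 * CP15 "drain write buffer" operation (c7/c10/4).
 */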
ENTRY_NP(arm_drain_writebuf)
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mov	pc, lr
END(arm_drain_writebuf)

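/*
 * The block-transfer (mcrr) cache operations below take an address
 * range: the start address goes in r0 and the end address in r1.
 * Each routine computes r1 = r0 + length - 1 first, which suggests
 * the end address is treated as inclusive.
 */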
/* LINTSTUB: void arm_icache_sync_range(vaddr_t, vsize_t); */
ENTRY_NP(arm_icache_sync_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c5	/* invalidate I cache range */
	mcrr	p15, 0, r1, r0, c12	/* clean D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(arm_icache_sync_range)

/* LINTSTUB: void arm_icache_sync_all(void); */
ENTRY_NP(arm_icache_sync_all)
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	mcr	p15, 0, r0, c7, c10, 0	/* Clean D cache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(arm_icache_sync_all)

/* LINTSTUB: void arm_dcache_wb_range(vaddr_t, vsize_t); */
ENTRY(arm_dcache_wb_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c12	/* clean D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(arm_dcache_wb_range)

/* LINTSTUB: void arm_dcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(arm_dcache_wbinv_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c14	/* clean and invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(arm_dcache_wbinv_range)

/*
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache.
 *
 * LINTSTUB: void arm_dcache_inv_range(vaddr_t, vsize_t);
 */
ENTRY(arm_dcache_inv_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c6	/* invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(arm_dcache_inv_range)

/* LINTSTUB: void arm_idcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(arm_idcache_wbinv_range)
	add	r1, r1, r0
	sub	r1, r1, #1
	mcrr	p15, 0, r1, r0, c5	/* invalidate I cache range */
	mcrr	p15, 0, r1, r0, c14	/* clean & invalidate D cache range */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(arm_idcache_wbinv_range)

/* LINTSTUB: void arm_idcache_wbinv_all(void); */
ENTRY_NP(arm_idcache_wbinv_all)
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to purge Dcache. */

/* LINTSTUB: void arm_dcache_wbinv_all(void); */
ENTRY(arm_dcache_wbinv_all)
	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
END(arm_dcache_wbinv_all)
END(arm_idcache_wbinv_all)

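/*
 * arm_set_context_id: write the CP15 Context ID register (c13/c0/1),
 * which holds the current ASID, then flush the prefetch buffer so the
 * change takes effect.  Presumed prototype (an assumption):
 * void arm_set_context_id(uint32_t cid);
 */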
ENTRY(arm_set_context_id)
	mcr	p15, 0, r0, c13, c0, 1	/* set the context id register */
	mcr	p15, 0, r0, c7, c5, 4	/* flush prefetch buffer */
	bx	lr
END(arm_set_context_id)

#endif /* _ARM_ARCH_6 */