/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8

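/*
 * Workaround for ARM erratum 411920 (invalidation of the entire instruction
 * cache can be ineffective on affected ARM11 revisions).  The sequence below
 * issues the "invalidate entire I-cache" operation four times in a row with
 * interrupts disabled, followed by a run of NOPs, as recommended by ARM.
 * Callers in this file reach it with a plain branch (a tail call), so it
 * returns directly to their caller via lr.
 */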
#ifdef CONFIG_ARM_ERRATA_411920
ENTRY(v6_icache_inval_all)
	mov	r0, #0
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
	mov	pc, lr
#endif

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	mov	pc, lr

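/*
 * Usage sketch (assuming a MULTI_CACHE kernel, where the generic macros
 * dispatch through the cpu_cache function table initialised from
 * v6_cache_fns at the end of this file):
 *
 *	flush_cache_all();		// <asm/cacheflush.h>
 *	  -> __cpuc_flush_kern_all()	// == cpu_cache.flush_kern_all
 *	  -> v6_flush_kern_cache_all
 */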
/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	mov	pc, lr

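/*
 * Both entries above are intentionally empty: with a VIPT cache the lines
 * are tagged by physical address, so user-space cache contents do not need
 * to be flushed when an address space is switched or torn down, unlike the
 * VIVT caches of earlier ARM implementations.
 */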
/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
2:
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	mov	pc, lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, just try the next page.
 */
9001:
	mov	r0, r0, lsr #12
	mov	r0, r0, lsl #12
	add	r0, r0, #4096
	b	2b
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

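/*
 * Usage sketch: these entries back flush_icache_range(), which the kernel
 * calls after writing instructions to memory (module loading, breakpoint
 * patching, etc.).  Assuming a MULTI_CACHE kernel:
 *
 *	memcpy(code, insns, len);		// new instructions
 *	flush_icache_range((unsigned long)code,
 *			   (unsigned long)code + len);
 *	  -> __cpuc_coherent_kern_range()	// == cpu_cache.coherent_kern_range
 *	  -> v6_coherent_kern_range / v6_coherent_user_range
 */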
/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the kernel region starting at addr is
 *	written back to memory.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr


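/*
 * Usage sketch: this entry backs __cpuc_flush_dcache_area(), used for
 * example by the flush_dcache_page() paths when a kernel-mapped page's
 * D-cache contents must be written back (assuming a MULTI_CACHE kernel):
 *
 *	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 *	  -> v6_flush_kern_dcache_area
 */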
/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.  Lines only partially covered by the
 *	region are cleaned (written back) first, so that data outside the
 *	region is not lost when they are invalidated.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrneb	r2, [r1, #-1]			@ read for ownership
	strneb	r2, [r1, #-1]			@ write for ownership
#endif
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlo	r2, [r0]			@ read for ownership
	strlo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

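/*
 * Note on CONFIG_DMA_CACHE_RWFO ("read/write for ownership"): on SMP
 * ARM11MPCore systems, cache maintenance operations are not broadcast to
 * the other CPUs.  Reading and then writing a byte of each line first is
 * intended to pull the line into the local CPU's cache in an owned state,
 * so that the following maintenance operation acts on the copy that
 * actually holds the data.
 */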
/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

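/*
 * "Clean" writes dirty lines back to memory but leaves them valid in the
 * cache; it is what a DMA_TO_DEVICE transfer needs before the device reads
 * memory.  "Invalidate" (v6_dma_inv_range above) discards lines so the CPU
 * re-reads memory after a DMA_FROM_DEVICE transfer, and "flush"
 * (v6_dma_flush_range below) does both.
 */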
/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]		@ read for ownership
	strb	r2, [r0]		@ write for ownership
#endif
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlob	r2, [r0]			@ read for ownership
	strlob	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
	b	v6_dma_clean_range
#else
	teq	r2, #DMA_TO_DEVICE
	beq	v6_dma_clean_range
	b	v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)

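/*
 * Usage sketch (assuming a MULTI_CACHE kernel): the streaming DMA API ends
 * up here through dmac_map_area(), roughly:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	  -> dmac_map_area(buf, len, DMA_FROM_DEVICE)	// == cpu_cache.dma_map_area
 *	  -> v6_dma_map_area -> v6_dma_inv_range
 */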
/*
 *	v6_dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	For DMA_FROM_DEVICE and DMA_BIDIRECTIONAL the region is invalidated
 *	again after the transfer; with CONFIG_DMA_CACHE_RWFO the map-time
 *	maintenance is sufficient and this is a no-op.
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range
#endif
	mov	pc, lr
ENDPROC(v6_dma_unmap_area)

	__INITDATA

	.type	v6_cache_fns, #object
ENTRY(v6_cache_fns)
	.long	v6_flush_kern_cache_all
	.long	v6_flush_user_cache_all
	.long	v6_flush_user_cache_range
	.long	v6_coherent_kern_range
	.long	v6_coherent_user_range
	.long	v6_flush_kern_dcache_area
	.long	v6_dma_map_area
	.long	v6_dma_unmap_area
	.long	v6_dma_flush_range
	.size	v6_cache_fns, . - v6_cache_fns

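/*
 * The table above must line up, entry for entry, with struct cpu_cache_fns
 * in <asm/cacheflush.h>.  For reference, the expected layout is roughly:
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long,
 *					 unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_area)(void *, size_t);
 *		void (*dma_map_area)(const void *, size_t, int);
 *		void (*dma_unmap_area)(const void *, size_t, int);
 *		void (*dma_flush_range)(const void *, const void *);
 *	};
 */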