/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
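
/*
 * Worked example (illustrative, assuming 4KB pages, PAGE_SHIFT == 12,
 * and the ARM value SHMLBA == 4 * PAGE_SIZE): CACHE_COLOUR() yields a
 * value in 0..3, e.g.
 *
 *	CACHE_COLOUR(0x40005000) == ((0x40005000 & 0x3fff) >> 12) == 1
 *
 * On a VIPT-aliasing cache, two virtual mappings of the same physical
 * page index the same cache lines only when their colours match.
 */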

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
	defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
	defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
#  define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
#  define MULTI_CACHE 1
//# else
//#  define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
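
/*
 * Each block above selects a single cache model by defining _CACHE,
 * falling back to MULTI_CACHE (runtime dispatch through the
 * cpu_cache_fns vector below) as soon as a second model is also
 * configured: e.g. with both CONFIG_CPU_ARM926T and CONFIG_CPU_XSCALE
 * enabled, the first block sets _CACHE to arm926 and the second,
 * seeing _CACHE already defined, sets MULTI_CACHE instead.  The V6/V7
 * blocks in this tree force MULTI_CACHE unconditionally.
 */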

#ifdef CONFIG_BCM47XX
/*
 * Merged from Linux-2.6.37
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
#else
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
#endif /* CONFIG_BCM47XX */

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
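
/*
 * Typical caller of the DMA hooks is the streaming DMA API
 * (arch/arm/mm/dma-mapping.c), which maps a buffer for device access
 * with, roughly,
 *
 *	dmac_map_area(kaddr, size, DMA_TO_DEVICE);
 *
 * a write-back cache implementation satisfies this by cleaning the
 * range so the device observes the CPU's most recent writes.
 */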

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range
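
/*
 * In a MULTI_CACHE build this vector is filled in once at boot:
 * setup_processor() in arch/arm/kernel/setup.c copies the cache
 * operations of the matching proc_info entry, roughly
 *
 *	cpu_cache = *list->cache;
 *
 * after which e.g. __cpuc_flush_kern_all() is an indirect call
 * through cpu_cache.flush_kern_all.
 */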

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
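
/*
 * __glue() (asm/glue.h) is plain token pasting, so with a single cache
 * model such as
 *
 *	#define _CACHE arm926
 *
 * __cpuc_flush_kern_all resolves at compile time to
 * arm926_flush_kern_cache_all, implemented in arch/arm/mm/proc-arm926.S.
 */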
271
272/*
273 * These are private to the dma-mapping API.  Do not use directly.
274 * Their sole purpose is to ensure that data held in the cache
275 * is visible to DMA, or data written by DMA to system memory is
276 * visible to the CPU.
277 */
278#define dmac_map_area			__glue(_CACHE,_dma_map_area)
279#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
280#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
281
282extern void dmac_map_area(const void *, size_t, int);
283extern void dmac_unmap_area(const void *, size_t, int);
284extern void dmac_flush_range(const void *, const void *);
285
286#endif

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
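
/*
 * The usual caller is access_process_vm() (ptrace and friends):
 * copy_to_user_page() must make the written data visible to the
 * target's user mapping, whereas reads can get away with a plain
 * memcpy() from the kernel alias, as above.
 */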

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
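
/*
 * For illustration, a user-space JIT that has just written code would
 * typically reach this path via the private syscall, e.g.
 *
 *	syscall(__ARM_NR_cacheflush, code, code + len, 0);
 *
 * handled by do_cache_op() in arch/arm/kernel/traps.c.
 */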

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
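
/*
 * In-kernel analogue of the above: code patchers such as the module
 * loader call flush_icache_range() after writing instructions, e.g.
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */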

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
	asm("mcr	p15, 0, %0, c7, c1, 0	@ invalidate I-cache inner shareable\n"
	    :
	    : "r" (0));
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}
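
/*
 * On ARMv7 the two CP15 operations above are named ICIALLUIS
 * (invalidate all instruction caches, Inner Shareable) and ICIALLU
 * (invalidate all instruction caches to the Point of Unification).
 */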

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif