/*
 *  linux/include/asm-arm/proc-armv/cache.h
 *
 *  Copyright (C) 1999-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/mman.h>

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 * Cache handling for 32-bit ARM processors.
 *
 * Note that on ARM, we have a more precise specification than
 * Linux's generic "flush".  We therefore do not use "flush" here,
 * but instead use:
 *
 * clean:      the act of pushing dirty cache entries out to memory.
 * invalidate: the act of discarding data held within the cache,
 *             whether it is dirty or not.
 */

/*
 * Generic I + D cache
 */
#define flush_cache_all()						\
	do {								\
		cpu_cache_clean_invalidate_all();			\
	} while (0)

/* This is always called for current->mm */
#define flush_cache_mm(_mm)						\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_cache_clean_invalidate_all();		\
	} while (0)

#define flush_cache_range(_mm,_start,_end)				\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_cache_clean_invalidate_range((_start) & PAGE_MASK, \
							 PAGE_ALIGN(_end), 1); \
	} while (0)

#define flush_cache_page(_vma,_vmaddr)					\
	do {								\
		if ((_vma)->vm_mm == current->active_mm) {		\
			unsigned long _addr = (_vmaddr) & PAGE_MASK;	\
			cpu_cache_clean_invalidate_range(_addr,		\
				_addr + PAGE_SIZE,			\
				((_vma)->vm_flags & VM_EXEC));		\
		} \
	} while (0)

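/*
 * Illustrative sketch (not taken from this header): the typical caller
 * pattern for flush_cache_page, loosely modelled on the
 * kernel/ptrace.c:access_one_page path referenced further below.
 * Before the kernel touches a user page through its own mapping, the
 * user-space view is expunged from the cache first:
 *
 *	flush_cache_page(vma, addr);		// drop the user-space view
 *	kaddr = page_address(page);		// kernel view of the page
 *	memcpy(kaddr + (addr & ~PAGE_MASK), buf, len);
 *	flush_page_to_ram(page);		// clean the kernel view
 */
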
/*
 * This flushes back any buffered write data.  We have to clean the entries
 * in the cache for this page.  This does not invalidate either I or D caches.
 *
 * Called from:
 * 1. mm/filemap.c:filemap_nopage
 * 2. mm/filemap.c:filemap_nopage
 *    [via do_no_page - ok]
 *
 * 3. mm/memory.c:break_cow
 *    [copy_cow_page doesn't do anything to the cache; insufficient cache
 *     handling.  Need to add flush_dcache_page() here]
 *
 * 4. mm/memory.c:do_swap_page
 *    [read_swap_cache_async doesn't do anything to the cache; insufficient
 *     cache handling.  Need to add flush_dcache_page() here]
 *
 * 5. mm/memory.c:do_anonymous_page
 *    [zero page, never written by kernel - ok]
 *
 * 6. mm/memory.c:do_no_page
 *    [we will be calling update_mmu_cache, which will catch on PG_dcache_dirty]
 *
 * 7. mm/shmem.c:shmem_nopage
 * 8. mm/shmem.c:shmem_nopage
 *    [via do_no_page - ok]
 *
 * 9. fs/exec.c:put_dirty_page
 *    [we call flush_dcache_page prior to this, which will flush out the
 *     kernel virtual addresses from the dcache - ok]
 */
static __inline__ void flush_page_to_ram(struct page *page)
{
	cpu_flush_ram_page(page_address(page));
}

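/*
 * Illustrative sketch (an assumption, not taken from this header): the
 * copy-on-write path in mm/memory.c mentioned above writes the new page
 * through the kernel mapping, so the copied data must be pushed out
 * before the page is handed to user space:
 *
 *	copy_cow_page(old_page, new_page, address);
 *	flush_page_to_ram(new_page);	// clean kernel-view cache entries
 *	establish_pte(vma, address, page_table, pte);
 */
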
/*
 * D cache only
 */

#define invalidate_dcache_range(_s,_e)	cpu_dcache_invalidate_range((_s),(_e))
#define clean_dcache_range(_s,_e)	cpu_dcache_clean_range((_s),(_e))
#define flush_dcache_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)

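/*
 * A minimal sketch (assumed usage, not defined here) of why clean and
 * invalidate are distinct operations, using DMA as the example.  Before
 * a device reads a buffer, dirty lines must be cleaned out to memory;
 * before the CPU reads data a device has written, stale lines must be
 * invalidated:
 *
 *	clean_dcache_range(start, end);		// CPU -> device (DMA out)
 *	// ... start the outbound DMA transfer ...
 *
 *	invalidate_dcache_range(start, end);	// device -> CPU (DMA in)
 *	// ... now safe to read the freshly DMA'd data ...
 */
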
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page->mapping == NULL), or if it has
 * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
 * then we _must_ always clean + invalidate the dcache entries associated
 * with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define mapping_mapped(map)	((map)->i_mmap || (map)->i_mmap_shared)

static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !mapping_mapped(page->mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long virt = (unsigned long)page_address(page);
		cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
	}
}

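/*
 * Illustrative sketch (an assumption, not from this header): a typical
 * flush_dcache_page caller is a filesystem filling a page cache page
 * through the kernel mapping:
 *
 *	kaddr = page_address(page);
 *	memcpy(kaddr, src, PAGE_SIZE);	// kernel writes the page cache page
 *	flush_dcache_page(page);	// flush now, or defer via
 *					// PG_dcache_dirty (see above)
 */
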
#define clean_dcache_entry(_s)		cpu_dcache_clean_entry((unsigned long)(_s))

/*
 * This function is misnamed IMHO.  There are three places where it
 * is called, each of which is preceded immediately by a call to
 * flush_page_to_ram:
 *
 *  1. kernel/ptrace.c:access_one_page
 *     called after we have written to the kernel view of a user page.
 *     The user page has been expunged from the cache by flush_cache_page.
 *     [we don't need to do anything here if we add a call to
 *      flush_dcache_page]
 *
 *  2. mm/memory.c:do_swap_page
 *     called after we have (possibly) written to the kernel view of a
 *     user page, which has previously been removed (i.e., has been through
 *     the swap cache).
 *     [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 *  3. mm/memory.c:do_no_page
 *     [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 * Invalidating the icache at the kernel's virtual page isn't really
 * going to do us much good, since we wouldn't have executed any
 * instructions there.
 */
#define flush_icache_page(vma,pg)	do { } while (0)

/*
 * I cache coherency stuff.
 *
 * This *is not* just icache.  It is to make data written to memory
 * consistent such that instructions fetched from the region are what
 * we expect.
 *
 * This generally means that we have to clean out the Dcache and write
 * buffers, and maybe flush the Icache in the specified range.
 */
#define flush_icache_range(_s,_e)					\
	do {								\
		cpu_icache_invalidate_range((_s), (_e));		\
	} while (0)

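/*
 * Illustrative sketch (an assumption, not from this header): a typical
 * flush_icache_range caller writes instructions through the kernel
 * mapping, e.g. a module loader, and must make them visible to
 * instruction fetch before jumping to them:
 *
 *	memcpy(dst, code, len);			// instructions written as data
 *	flush_icache_range((unsigned long)dst,
 *			   (unsigned long)dst + len);
 */
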
/*
 * TLB flushing.
 *
 *  - flush_tlb_all()			flushes all processes' TLBs
 *  - flush_tlb_mm(mm)			flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr)	flushes TLB for specified page
 *  - flush_tlb_range(mm, start, end)	flushes TLB for specified range of pages
 *
 * We drain the write buffer in here to ensure that the page tables in RAM
 * are really up to date.  It is more efficient to do this here...
 */

/*
 * Notes:
 *  current->active_mm is the currently active memory description.
 *  current->mm == NULL iff we are lazy.
 */
#define flush_tlb_all()							\
	do {								\
		cpu_tlb_invalidate_all();				\
	} while (0)

/*
 * Flush all user virtual address space translations described by `_mm'.
 *
 * Currently, this is always called for current->mm, which should be
 * the same as current->active_mm.  It is not currently called for
 * the lazy TLB case.
 */
#define flush_tlb_mm(_mm)						\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_tlb_invalidate_all();			\
	} while (0)

/*
 * Flush the specified range of user virtual address space translations.
 *
 * _mm may not be current->active_mm, but must not be NULL.
 */
#define flush_tlb_range(_mm,_start,_end)				\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_tlb_invalidate_range((_start), (_end));	\
	} while (0)

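/*
 * Illustrative sketch (an assumption, not from this header): a typical
 * flush_tlb_range caller rewrites a range of ptes, e.g. mprotect, and
 * then drops the now-stale translations:
 *
 *	// ... rewrite the ptes covering [start, end) ...
 *	flush_tlb_range(mm, start, end);	// discard stale translations
 */
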
/*
 * Flush the specified user virtual address space translation.
 */
#define flush_tlb_page(_vma,_page)					\
	do {								\
		if ((_vma)->vm_mm == current->active_mm)		\
			cpu_tlb_invalidate_page((_page),		\
				 ((_vma)->vm_flags & VM_EXEC));		\
	} while (0)

/*
 * If PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);

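/*
 * A minimal sketch of update_mmu_cache (the real definition lives in
 * the mm code, not in this header; names and details here are
 * assumptions), showing how a flush deferred via PG_dcache_dirty is
 * caught when the pte is wired up:
 *
 *	void update_mmu_cache(struct vm_area_struct *vma,
 *			      unsigned long addr, pte_t pte)
 *	{
 *		struct page *page = pte_page(pte);
 *
 *		if (VALID_PAGE(page) && page->mapping &&
 *		    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
 *			unsigned long virt = (unsigned long)page_address(page);
 *			cpu_cache_clean_invalidate_range(virt,
 *							 virt + PAGE_SIZE, 0);
 *		}
 *	}
 */
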
/*
 * Old ARM MEMC stuff.  This supports the reversed mapping handling that
 * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
 */
#define memc_update_all()		do { } while (0)
#define memc_update_mm(mm)		do { } while (0)
#define memc_update_addr(mm,pte,log)	do { } while (0)
#define memc_clear(mm,physaddr)		do { } while (0)