/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

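/*
 * Per-CPU cache operations, all defaulting to no-ops.  The family-specific
 * cache setup code invoked from cpu_cache_init() below overrides these
 * with real implementations once the cache has been probed.
 */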
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

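/*
 * Run a cache operation on every online CPU: broadcast it to the others
 * via IPI, then run it locally.  Preemption is disabled so the local CPU
 * cannot change underneath us.
 */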
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
				       int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}

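/*
 * Write into a user page through a kernel alias.  If the page is mapped
 * and has no deferred flush pending, go through a kmap_coherent() mapping
 * with the same cache colour as the user mapping so no aliased lines are
 * left behind; otherwise write through the given kernel address and defer
 * the flush by marking the page PG_dcache_dirty.
 */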
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

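/*
 * Copy a user highmem page, reading through a coherent kernel mapping of
 * the source when it may still be live in the cache under a user colour.
 * The destination is purged afterwards if its kernel address aliases the
 * user address it will be mapped at.
 */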
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

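/*
 * Called when a PTE is (re)installed.  If a deferred flush was recorded
 * for the backing page via PG_dcache_dirty, write back and invalidate the
 * page now, before userspace can pick up stale lines through the new
 * mapping.
 */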
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

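/*
 * Purge an anonymous page whose kernel address aliases the given user
 * address, so that a subsequent access through either mapping sees
 * current data.
 */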
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

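/*
 * The global flush_*() entry points below simply fan the corresponding
 * local_flush_*() operation out to all online CPUs via
 * cacheop_on_each_cpu().
 */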
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

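/*
 * Derive the aliasing parameters of a virtually indexed cache: any index
 * bits above the page offset select the cache colour.  As a purely
 * illustrative example (not a specific SH part): a cache with 512 sets,
 * 32-byte lines (entry_shift = 5) and 4KiB pages has a 16KiB way, giving
 * alias_mask = 0x3000 and n_aliases = 4.
 */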
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

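/*
 * Boot-time cache setup: compute the aliasing parameters, install the
 * no-op flushers, and then let the matching CPU family hook up its real
 * cache operations.  If the cache is disabled in CCR, the no-ops stay in
 * place since no flushing is needed.
 */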
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}