/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
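/* The lock is taken via the purge_tlb_start()/purge_tlb_end() helpers
 * from <asm/tlbflush.h>, which bracket every pdtlb/pitlb issued in
 * this file. */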

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

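/* Deferred flush handshake: flush_dcache_page() below sets
 * PG_dcache_dirty instead of flushing when a page cache page has no
 * user mappings yet; once a translation is installed we land here and
 * perform the postponed flush.
 */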
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct page *page = pte_page(*ptep);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page(page);
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page(page);
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
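	/* Equivalence check of the shortcut:
	 *   (1 << (cc_block-1)) * (cc_line << (4 + cc_shift))
	 * = cc_line << ((cc_block - 1) + 4 + cc_shift)
	 * = cc_line << (3 + cc_block + cc_shift),
	 * so the macro computes exactly Jim Hull's formula.  For example,
	 * cc_line=2, cc_block=1, cc_shift=3 gives 2 << 7 = 256 bytes. */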

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

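	/* PDC_MODEL_NVA_UNSUPPORTED means the machine cannot handle
	 * non-equivalent virtual aliases, i.e. two mappings of a page in
	 * different cache congruence classes; cf. the congruent-mapping
	 * comment in flush_dcache_page() below. */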
	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pmd_t *pmd;
	pte_t pte;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* We cannot take the pte lock here: flush_cache_page is usually
	 * called with pte lock already held.  Whereas flush_dcache_page
	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
	 * the vma itself is secure, but the pte might come or go racily.
	 */
	pte = *pte_offset_map(pmd, addr);
	/* But pte_unmap() does nothing on this architecture */

	/* Filter out coincidental file entries and swap entries */
	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
		return 0;

	return pte_pfn(pte) == pfn;
}

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access TLB miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they may not
	 * do anything with user space, but we can't
	 * be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	unsigned long addr;
	pgoff_t pgoff;
	unsigned long pfn = page_to_pfn(page);

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* Flush instructions produce non-access TLB misses.
		 * On PA, we nullify these instructions rather than
		 * taking a page fault if the pte doesn't exist.
		 * This is just for speed.  If the page translation
		 * isn't there, there's no point exciting the
		 * nadtlb handler into a nullification frenzy.
		 *
		 * Make sure we really have this page: the private
		 * mappings may cover this area but have COW'd this
		 * particular page.
		 */
		if (translation_exists(mpnt, addr, pfn)) {
			__flush_cache_page(mpnt, addr);
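			/* All aliases of the page are congruent, so one
			 * flush has made every mapping coherent. */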
			break;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

void clear_user_page_asm(void *page, unsigned long vaddr)
{
	unsigned long flags;
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start(flags);
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end(flags);
}

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
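	/* A range flush costs roughly rangetime/size cycles per byte, so
	 * the break-even length against a full flush (alltime cycles) is
	 * alltime / (rangetime/size) = size * alltime / rangetime. */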
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold +
		L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
		parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page(unsigned long);
extern void clear_user_page_asm(void *page, unsigned long vaddr);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	unsigned long flags;

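	/* Purge (rather than flush) the kernel alias: its old contents
	 * are about to be overwritten, so nothing is worth writing back.
	 * The kernel TLB entry is dropped too before the assembly helper
	 * clears the page through a user-congruent alias. */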
	purge_kernel_dcache_page((unsigned long)page);
	purge_tlb_start(flags);
	pdtlb_kernel(page);
	purge_tlb_end(flags);
	clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	/* no coherency needed (all in kmap/kunmap) */
	copy_user_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

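	/* Page count from the aligned start, rounding any tail up. */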
	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		mtsp(sid, 1);
		purge_tlb_start(flags);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	flush_cache_all();
#else
	flush_cache_all_local();
#endif
}

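/* Below the calibrated parisc_cache_flush_threshold a line-by-line
 * range flush is cheaper; past it, flushing the whole cache wins
 * (see parisc_setup_cache_timing() above). */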
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	BUG_ON(!vma->vm_mm->context);

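	/* The user range-flush helpers above act on the current space
	 * (%sr3), so a non-current mm forces a full cache flush. */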
	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr, pfn)))
		__flush_cache_page(vma, vmaddr);
}