/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/arch/microblaze/kernel/cpu/

/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

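/*
 * Two families of enable/disable helpers: CPUs that implement the
 * msrset/msrclr instructions toggle the MSR cache-enable bits directly
 * (*_msr), while older CPUs read-modify-write rmsr through a scratch
 * register (*_nomsr).
 */
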
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}

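/*
 * The nomsr variants clobber r12 as a scratch register (declared in the
 * clobber list); the nops cover the pipeline hazard around the mfs/mts
 * accesses to rmsr.
 */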
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}

/*
 * Helper macro for computing the limits of cache range loops.
 *
 * The end address can be unaligned, which is fine for the C
 * implementation; the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0)
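
/*
 * Example: with 16-byte lines and a 32 kB (0x8000) cache, start = 0x1004
 * and end = 0x9008 become end = min(0x9004, 0x9008) = 0x9004 and
 * start = 0x1000, so at most one full cache's worth of lines is covered.
 */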

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on each cacheline.
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:      " #op " %0, r0;			\
					bgtid   %0, 1b;			\
					addk    %0, %0, %1;		\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)
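
/*
 * The loop counts down: %0 starts at cache_size - line_length, bgtid
 * branches back while it is still positive, and the delay-slot addk
 * subtracts one line length per pass, so every line from the top of the
 * cache down to offset 0 is hit exactly once.
 */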

/* Used for wdc.flush/clear, which can take the offset in rB; that is not
 * possible for plain wdc or wic.
 *
 * The start address is cache-line aligned. The end address may not be:
 * if it is aligned, subtract one cacheline length, because the next
 * cacheline must not be flushed/invalidated; if it is not, align it
 * down, because the whole line is flushed/invalidated anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;\
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;			\
					bgtid	%1, 1b;			\
					addk	%1, %1, %2;		\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)
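
/*
 * Example with 16-byte lines: end = 0x2000 (already aligned) becomes
 * 0x1ff0, leaving the line at 0x2000 untouched; end = 0x2004 becomes
 * 0x2000, since that whole line overlaps the range anyway.
 */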

/* Only the first parameter of OP is used - for wic, wdc. This loop walks
 * upward: cmpu compares the current address against end, bgtid branches
 * back while it is still below, and the delay-slot addk advances by one
 * line length.
 */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int volatile temp = 0;						\
	int align = ~(line_length - 1);					\
	end = ((end & align) == end) ? end - line_length : end & align;\
	WARN_ON(end - start < 0);					\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;			\
					cmpu	%0, %1, %2;		\
					bgtid	%0, 1b;			\
					addk	%1, %1, %3;		\
				" : : "r" (temp), "r" (start), "r" (end),\
					"r" (line_length) : "memory");	\
} while (0)

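/*
 * When ASM_LOOP is defined, the functions below use the hand-written
 * assembly loops above; undefine it to fall back to the plain C loops,
 * which issue one wic/wdc per cache line.
 */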
#define ASM_LOOP

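/*
 * Each range variant below clamps and aligns [start, end) with
 * CACHE_LOOP_LIMITS; the _irq variants additionally disable interrupts
 * and the cache itself around the loop (via the msr or nomsr helpers);
 * the _all variants simply walk the whole cache.
 */
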
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}

static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
							unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
							unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
							unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
					: : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}

/* Shared pointer to the scache variant (WB or WT) selected at boot */
struct scache *mbc;
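
/*
 * Generic code never calls the __* helpers above directly; thin wrappers
 * in asm/cacheflush.h dispatch through mbc, conceptually along the lines
 * of (illustrative only - see that header for the real definitions):
 *
 *	#define flush_icache_range(start, end)	mbc->iflr(start, end);
 *	#define invalidate_dcache()		mbc->din();
 */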

/* New WB cache model */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* The only difference is in the ie, id, de, dd functions */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old WT cache model: disables IRQs and turns the cache off around each op */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New WT cache model for newer MicroBlaze versions */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version codes - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

#define INFO(s)	printk(KERN_INFO "cache: " s "\n")
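
/*
 * Pick the scache variant matching this CPU: MSR instruction support
 * selects the msr vs nomsr helpers, a write-back dcache selects the WB
 * model, and the CPU version code chooses between the old IRQ-disabling
 * WT model and the newer noirq one.
 */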
614
615void microblaze_cache_init(void)
616{
617	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
618		if (cpuinfo.dcache_wb) {
619			INFO("wb_msr");
620			mbc = (struct scache *)&wb_msr;
621			if (cpuinfo.ver_code < CPUVER_7_20_D) {
622				/* MS: problem with signal handling - hw bug */
623				INFO("WB won't work properly");
624			}
625		} else {
626			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
627				INFO("wt_msr_noirq");
628				mbc = (struct scache *)&wt_msr_noirq;
629			} else {
630				INFO("wt_msr");
631				mbc = (struct scache *)&wt_msr;
632			}
633		}
634	} else {
635		if (cpuinfo.dcache_wb) {
636			INFO("wb_nomsr");
637			mbc = (struct scache *)&wb_nomsr;
638			if (cpuinfo.ver_code < CPUVER_7_20_D) {
639				/* MS: problem with signal handling - hw bug */
640				INFO("WB won't work properly");
641			}
642		} else {
643			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
644				INFO("wt_nomsr_noirq");
645				mbc = (struct scache *)&wt_nomsr_noirq;
646			} else {
647				INFO("wt_nomsr");
648				mbc = (struct scache *)&wt_nomsr;
649			}
650		}
651	}
652	/* invalidate_dcache(); */
653	enable_dcache();
654
655	invalidate_icache();
656	enable_icache();
657}
658