/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */

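/*
 * Busy-wait until the controller clears the given mask bits in the
 * polled register; a set bit indicates an operation still in progress.
 */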
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* wait for the operation to complete */
	while (readl_relaxed(reg) & mask)
		;
}

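/*
 * Drain the controller's buffers: writing to the CACHE_SYNC register
 * starts a sync operation, and the wait ensures it has completed.
 */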
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

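/*
 * Single-line maintenance by physical address.  Each helper first
 * waits for any outstanding operation on the register, then writes
 * the address to queue the new operation.
 */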
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

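/*
 * PL310 erratum 588369: on early PL310 revisions the Clean &
 * Invalidate by PA operation does not invalidate lines that are
 * already clean, so a flush must be emulated as a Clean by PA
 * followed by an Invalidate by PA.  Around such sequences, linefills
 * and write-backs are disabled through the Debug Control Register,
 * which on OMAP is only writable via the secure monitor (see
 * debug_writel() below).
 */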
#ifdef CONFIG_PL310_ERRATA_588369
static void debug_writel(unsigned long val)
{
	extern void omap_smc1(u32 fn, u32 arg);

	/*
	 * Texas Instruments secure monitor API to modify the
	 * PL310 Debug Control Register.
	 */
	omap_smc1(0x100, val);
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

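/* Locked cache_sync() wrapper, installed below as outer_cache.sync. */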
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

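/*
 * Invalidate the entire cache by way.  This is a background operation:
 * the controller clears each bit in INV_WAY as the corresponding way
 * finishes, so completion is detected by polling the full way mask.
 * Called only while the L2 is still disabled, as invalidation
 * discards dirty data.
 */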
static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

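/*
 * Invalidate a physical address range.  Lines that straddle the start
 * or end of the range are flushed (clean + invalidate) rather than
 * invalidated, so dirty data outside the requested range is written
 * back instead of being lost.  The lock is dropped and retaken every
 * 4096 bytes to bound the interrupts-off latency of large ranges.
 */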
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

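/*
 * Clean (write back) a physical address range.  Unlike invalidation,
 * cleaning never discards data, so the start address is simply
 * rounded down to a line boundary and the edges are over-cleaned.
 */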
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

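/*
 * Clean and invalidate a physical address range.  Rounding start down
 * is safe because the edge lines are written back before eviction.
 * debug_writel() brackets each block so the erratum workaround, when
 * configured, runs with linefills and write-backs disabled.
 */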
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

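/*
 * Probe the controller type from CACHE_ID, merge the caller's
 * auxiliary control bits, enable the cache unless firmware already
 * has, and hook the range operations into the outer cache API.
 */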
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		/* L310: aux ctrl bit 16 selects 16-way associativity */
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		/* L210: associativity field in aux ctrl bits [16:13] */
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booted in non-secure mode, the registers below are
	 * secure-only and accessing them will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
			 ways, cache_id, aux);
}