/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 * arch/arm/plat-brcm/cache-l310.c - L310 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 * Copyright (c) 2012 Broadcom, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Modified for the L310 and the improved performance it allows.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/interrupt.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>	/* Old register offsets */

#include <typedefs.h>
#include <bcmdefs.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
int l2x0_irq = 32;

static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* wait for the operation to complete */
	while (readl_relaxed(reg) & mask)
		;
}

/*
 * Atomic operations
 *
 * The following are atomic operations:
 * . Clean Line by PA or by Set/Way.
 * . Invalidate Line by PA.
 * . Clean and Invalidate Line by PA or by Set/Way.
 * . Cache Sync.
 * These operations stall the slave ports until they are complete.
 * When these registers are read, bit [0], the C flag,
 * indicates that a background operation is in progress.
 * When written, bit 0 must be zero.
 */
static inline void atomic_cache_sync(void __iomem *base)
{
	cache_wait(base + L2X0_CACHE_SYNC, 1);
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
}

static inline void atomic_clean_line(void __iomem *base, unsigned long addr)
{
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void atomic_inv_line(void __iomem *base, unsigned long addr)
{
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

static inline void atomic_flush_line(void __iomem *base, unsigned long addr)
{
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}

/*
 * Atomic operations do not require the use of the spinlock
 */

static void l2x0_cache_sync(void)
{
	void __iomem *base = l2x0_base;
	atomic_cache_sync(base);
}

static void BCMFASTPATH l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	/* Range edges could contain live dirty data */
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		atomic_flush_line(base, start);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		atomic_flush_line(base, end);
	}

	while (start < end) {
		atomic_inv_line(base, start);
		start += CACHE_LINE_SIZE;
	}
	atomic_cache_sync(base);
}

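/*
 * Worked example for l2x0_inv_range() above (illustrative, assuming the
 * 32-byte line size defined here): for start = 0x1004 and end = 0x2010,
 * the partially covered edge lines at 0x1000 and 0x2000 are cleaned and
 * invalidated, since they may hold live dirty data outside the range,
 * while the fully covered lines 0x1020 through 0x1fe0 are simply
 * invalidated before the final cache sync.
 */
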
static void BCMFASTPATH l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);

	while (start < end) {
		atomic_clean_line(base, start);
		start += CACHE_LINE_SIZE;
	}
	atomic_cache_sync(base);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		atomic_flush_line(base, start);
		start += CACHE_LINE_SIZE;
	}
	atomic_cache_sync(base);
}

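/*
 * Note on the range operations above: "clean" writes dirty lines back to
 * memory but keeps them resident, "invalidate" discards lines without
 * writing them back, and "flush" (clean + invalidate) does both. Each
 * loop finishes with a cache sync so the controller's buffers are
 * drained before the call returns.
 */
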
/*
 * Invalidate by way is a non-atomic, background operation and
 * has to be protected with the spinlock.
 */
static inline void l2x0_inv_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, base + L2X0_INV_WAY);
	cache_wait(base + L2X0_INV_WAY, l2x0_way_mask);
	atomic_cache_sync(base);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static irqreturn_t l2x0_isr(int irq, void *cookie)
{
	u32 reg;

	/* Read pending interrupts */
	reg = readl_relaxed(l2x0_base + L2X0_RAW_INTR_STAT);
	/* Acknowledge the interrupts */
	writel_relaxed(reg, l2x0_base + L2X0_INTR_CLEAR);
	printk(KERN_WARNING "L310: interrupt bits %#x\n", reg);

	return IRQ_HANDLED;
}

unsigned int
l2x0_read_event_cnt(int idx)
{
	unsigned int val;

	if (idx == 1)
		val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT1_VAL);
	else if (idx == 0)
		val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT0_VAL);
	else
		val = -1;

	return val;
}

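/*
 * Illustrative sketch, not part of the original driver: given the event
 * counter configuration in l310_init() below (counter 1 counts total
 * hits, counter 0 counts total read accesses), a caller could derive an
 * approximate hit-rate percentage from the two counters. The helper
 * name and the #if 0 guard are assumptions for illustration only.
 */
#if 0
static unsigned int l2x0_hit_rate_pct(void)
{
	unsigned int hits = l2x0_read_event_cnt(1);
	unsigned int reads = l2x0_read_event_cnt(0);

	/* Guard against a zero denominator before the counters tick */
	return reads ? (hits * 100U) / reads : 0;
}
#endif
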
void __init l310_init(void __iomem *base, u32 aux_val, u32 aux_mask, int irq)
{
	__u32 aux;
	__u32 cache_id;
	int ways;

	l2x0_base = base;
	l2x0_irq = irq;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* This module only supports the L310 */
	BUG_ON((cache_id & L2X0_CACHE_ID_PART_MASK) != L2X0_CACHE_ID_PART_L310);

	/* Determine the number of ways */
	if (aux & (1 << 16))
		ways = 16;
	else
		ways = 8;

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If booting in non-secure mode, accessing the
	 * registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	/* Enable interrupts */
	WARN_ON(request_irq(l2x0_irq, l2x0_isr, 0, "L2C", NULL));
	writel_relaxed(0x00ff, l2x0_base + L2X0_INTR_MASK);

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;

	/* configure counter 1 to count total hits */
	writel_relaxed((2 << 2), l2x0_base + L2X0_EVENT_CNT1_CFG);

	/* configure counter 0 to count total read accesses */
	writel_relaxed((3 << 2), l2x0_base + L2X0_EVENT_CNT0_CFG);

	/* enable event counting */
	writel_relaxed(0x1, l2x0_base + L2X0_EVENT_CNT_CTRL);

	printk(KERN_INFO "L310: cache controller enabled %d ways, "
			"CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
			ways, cache_id, aux);
}
EXPORT_SYMBOL(l2x0_read_event_cnt);
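
/*
 * Illustrative usage sketch, assumed rather than taken from this file:
 * platform setup code would ioremap() the controller registers and pass
 * the base, an AUX_CTRL value/mask pair and the controller IRQ to
 * l310_init(). The function name, physical address and IRQ number below
 * are placeholders.
 */
#if 0
static void __init board_l2_cache_init(void)
{
	void __iomem *base = ioremap(0x19022000, SZ_4K);	/* placeholder */

	if (base)
		l310_init(base, 0, ~0U, 32);	/* keep boot-time AUX_CTRL as-is */
}
#endif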