1/**
2 * @file op_model_mpcore.c
3 * MPCORE Event Monitor Driver
4 * @remark Copyright 2004 ARM SMP Development Team
5 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
6 * @remark Copyright 2000-2004 MontaVista Software Inc
7 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
8 * @remark Copyright 2004 Intel Corporation
9 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
10 * @remark Copyright 2004 Oprofile Authors
11 *
12 * @remark Read the file COPYING
13 *
14 * @author Zwane Mwaikambo
15 *
16 *  Counters:
17 *    0: PMN0 on CPU0, per-cpu configurable event counter
18 *    1: PMN1 on CPU0, per-cpu configurable event counter
19 *    2: CCNT on CPU0
20 *    3: PMN0 on CPU1
21 *    4: PMN1 on CPU1
22 *    5: CCNT on CPU1
 *    6: PMN0 on CPU2
 *    7: PMN1 on CPU2
 *    8: CCNT on CPU2
 *    9: PMN0 on CPU3
 *   10: PMN1 on CPU3
 *   11: CCNT on CPU3
29 *   12-19: configurable SCU event counters
30 */
31
32/* #define DEBUG */
33#include <linux/types.h>
34#include <linux/errno.h>
35#include <linux/sched.h>
36#include <linux/oprofile.h>
37#include <linux/interrupt.h>
38#include <linux/smp.h>
39
40#include <asm/io.h>
41#include <asm/irq.h>
42#include <asm/mach/irq.h>
43#include <asm/hardware.h>
44#include <asm/system.h>
45
46#include "op_counter.h"
47#include "op_arm_model.h"
48#include "op_model_arm11_core.h"
49#include "op_model_mpcore.h"
50
51/*
52 * MPCore SCU event monitor support
53 */
54#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_MPCORE_SCU_BASE + 0x10)
55
56/*
57 * Bitmask of used SCU counters
58 */
59static unsigned int scu_em_used;
60
61/*
62 * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
63 */
64static inline void scu_reset_counter(struct eventmonitor __iomem *emc, unsigned int n)
65{
66	writel(-(u32)counter_config[SCU_COUNTER(n)].count, &emc->MC[n]);
67}
68
69static inline void scu_set_event(struct eventmonitor __iomem *emc, unsigned int n, u32 event)
70{
71	event &= 0xff;
72	writeb(event, &emc->MCEB[n]);
73}
74
75/*
76 * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
77 */
78static irqreturn_t scu_em_interrupt(int irq, void *arg)
79{
80	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
81	unsigned int cnt;
82
83	cnt = irq - IRQ_PMU_SCU0;
84	oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
85	scu_reset_counter(emc, cnt);
86
87	/* Clear overflow flag for this counter */
88	writel(1 << (cnt + 16), &emc->PMCR);
89
90	return IRQ_HANDLED;
91}
92
93/* Configure just the SCU counters that the user has requested */
94static void scu_setup(void)
95{
96	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
97	unsigned int i;
98
99	scu_em_used = 0;
100
101	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
102		if (counter_config[SCU_COUNTER(i)].enabled &&
103		    counter_config[SCU_COUNTER(i)].event) {
104			scu_set_event(emc, i, 0); /* disable counter for now */
105			scu_em_used |= 1 << i;
106		}
107	}
108}
109
110static int scu_start(void)
111{
112	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
113	unsigned int temp, i;
114	unsigned long event;
115	int ret = 0;
116
117	/*
118	 * request the SCU counter interrupts that we need
119	 */
120	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
121		if (scu_em_used & (1 << i)) {
122			ret = request_irq(IRQ_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
123			if (ret) {
124				printk(KERN_ERR "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
125				       IRQ_PMU_SCU0 + i);
126				goto err_free_scu;
127			}
128		}
129	}
130
131	/*
132	 * clear overflow and enable interrupt for all used counters
133	 */
134	temp = readl(&emc->PMCR);
135	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
136		if (scu_em_used & (1 << i)) {
137			scu_reset_counter(emc, i);
138			event = counter_config[SCU_COUNTER(i)].event;
139			scu_set_event(emc, i, event);
140
141			/* clear overflow/interrupt */
142			temp |= 1 << (i + 16);
143			/* enable interrupt*/
144			temp |= 1 << (i + 8);
145		}
146	}
147
148	/* Enable all 8 counters */
149	temp |= PMCR_E;
150	writel(temp, &emc->PMCR);
151
152	return 0;
153
154 err_free_scu:
155	while (i--)
156		free_irq(IRQ_PMU_SCU0 + i, NULL);
157	return ret;
158}
159
160static void scu_stop(void)
161{
162	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
163	unsigned int temp, i;
164
165	/* Disable counter interrupts */
166	/* Don't disable all 8 counters (with the E bit) as they may be in use */
167	temp = readl(&emc->PMCR);
168	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
169		if (scu_em_used & (1 << i))
170			temp &= ~(1 << (i + 8));
171	}
172	writel(temp, &emc->PMCR);
173
174	/* Free counter interrupts and reset counters */
175	for (i = 0; i < NUM_SCU_COUNTERS; i++) {
176		if (scu_em_used & (1 << i)) {
177			scu_reset_counter(emc, i);
178			free_irq(IRQ_PMU_SCU0 + i, NULL);
179		}
180	}
181}
182
/*
 * Argument bundle for em_call_function(): the per-CPU callback to run
 * and a sticky error slot — any CPU that fails records its error code
 * here (see em_func()).
 */
struct em_function_data {
	int (*fn)(void);
	int ret;
};
187
188static void em_func(void *data)
189{
190	struct em_function_data *d = data;
191	int ret = d->fn();
192	if (ret)
193		d->ret = ret;
194}
195
196static int em_call_function(int (*fn)(void))
197{
198	struct em_function_data data;
199
200	data.fn = fn;
201	data.ret = 0;
202
203	preempt_disable();
204	smp_call_function(em_func, &data, 1, 1);
205	em_func(&data);
206	preempt_enable();
207
208	return data.ret;
209}
210
211/*
212 * Glue to stick the individual ARM11 PMUs and the SCU
213 * into the oprofile framework.
214 */
215static int em_setup_ctrs(void)
216{
217	int ret;
218
219	/* Configure CPU counters by cross-calling to the other CPUs */
220	ret = em_call_function(arm11_setup_pmu);
221	if (ret == 0)
222		scu_setup();
223
224	return 0;
225}
226
/* CP15 PMU interrupt for each MPCore CPU, indexed by logical CPU number */
static int arm11_irqs[] = {
	[0]	= IRQ_PMU_CPU0,
	[1]	= IRQ_PMU_CPU1,
	[2]	= IRQ_PMU_CPU2,
	[3]	= IRQ_PMU_CPU3
};
233
234static int em_start(void)
235{
236	int ret;
237
238	ret = arm11_request_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
239	if (ret == 0) {
240		em_call_function(arm11_start_pmu);
241
242		ret = scu_start();
243		if (ret)
244			arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
245	}
246	return ret;
247}
248
/*
 * Stop profiling: halt the per-CPU PMUs via cross-call, release their
 * interrupts, then shut down the SCU counters we were using.
 */
static void em_stop(void)
{
	em_call_function(arm11_stop_pmu);
	arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs));
	scu_stop();
}
255
256/*
257 * Why isn't there a function to route an IRQ to a specific CPU in
258 * genirq?
259 */
260static void em_route_irq(int irq, unsigned int cpu)
261{
262	struct irq_desc *desc = irq_desc + irq;
263	cpumask_t mask = cpumask_of_cpu(cpu);
264
265	spin_lock_irq(&desc->lock);
266	desc->affinity = mask;
267	desc->chip->set_affinity(irq, mask);
268	spin_unlock_irq(&desc->lock);
269}
270
271static int em_setup(void)
272{
273	/*
274	 * Send SCU PMU interrupts to the "owner" CPU.
275	 */
276	em_route_irq(IRQ_PMU_SCU0, 0);
277	em_route_irq(IRQ_PMU_SCU1, 0);
278	em_route_irq(IRQ_PMU_SCU2, 1);
279	em_route_irq(IRQ_PMU_SCU3, 1);
280	em_route_irq(IRQ_PMU_SCU4, 2);
281	em_route_irq(IRQ_PMU_SCU5, 2);
282	em_route_irq(IRQ_PMU_SCU6, 3);
283	em_route_irq(IRQ_PMU_SCU7, 3);
284
285	/*
286	 * Send CP15 PMU interrupts to the owner CPU.
287	 */
288	em_route_irq(IRQ_PMU_CPU0, 0);
289	em_route_irq(IRQ_PMU_CPU1, 1);
290	em_route_irq(IRQ_PMU_CPU2, 2);
291	em_route_irq(IRQ_PMU_CPU3, 3);
292
293	return 0;
294}
295
/*
 * oprofile model descriptor for the MPCore event monitors; hooks this
 * driver's setup/start/stop entry points into the ARM oprofile core.
 */
struct op_arm_model_spec op_mpcore_spec = {
	.init		= em_setup,
	.num_counters	= MPCORE_NUM_COUNTERS,
	.setup_ctrs	= em_setup_ctrs,
	.start		= em_start,
	.stop		= em_stop,
	.name		= "arm/mpcore",
};
304