// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 * 	- xscale1pmu: 2 event counters and a cycle counter
 * 	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */

#ifdef CONFIG_CPU_XSCALE

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};

enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};

static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
};

#define	XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define	XSCALE_CCNT_RESET	0x004
#define	XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)

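/*
 * xscale1 keeps all of its PMU state in a single CP14 register (PMNC):
 * the enable/reset control bits, the overflow and interrupt-enable flags
 * and the two 8-bit event-select fields defined above. The accessors
 * below read and write that register.
 */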
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4 bits and bits 7 and 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

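/*
 * Programming an event on xscale1 means rewriting PMNC: clear the
 * event-select field for the chosen counter, then set the new event
 * number and the counter's interrupt enable. The cycle counter has no
 * event field, so only its interrupt enable is touched.
 */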
static void xscale1pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
}

static void xscale1pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
}

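/*
 * The cycle counter can only ever count cycles, so CCNT requests get the
 * dedicated counter; everything else takes whichever of the two event
 * counters is still free (counter 1 is tried first).
 */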
static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}

static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}

static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
}

static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
}

static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}

static int xscale_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &xscale_perf_map,
				&xscale_perf_cache_map, 0xFF);
}

static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale1";
	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
	cpu_pmu->enable		= xscale1pmu_enable_event;
	cpu_pmu->disable	= xscale1pmu_disable_event;
	cpu_pmu->read_counter	= xscale1pmu_read_counter;
	cpu_pmu->write_counter	= xscale1pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale1pmu_start;
	cpu_pmu->stop		= xscale1pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 3;

	return 0;
}

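/*
 * Unlike xscale1, the xscale2 PMU splits its state across several CP14
 * registers: PMNC keeps only the control bits, while the overflow flags,
 * interrupt enables and event selection each live in their own register
 * (see the accessors below).
 */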
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)

static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
					enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

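/*
 * Enabling and disabling events on xscale2 is done through the dedicated
 * event-select and interrupt-enable registers rather than through PMNC.
 */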
static void xscale2pmu_enable_event(struct perf_event *event)
{
	unsigned long ien, evtsel;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
}

static void xscale2pmu_disable_event(struct perf_event *event)
{
	unsigned long ien, evtsel, of_flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
}

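/*
 * Reuse the xscale1 allocator for the cycle counter and counters 0/1,
 * then fall back to the two extra counters that xscale2 provides.
 */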
static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}

static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
}

static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
}

static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}

static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale2";
	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
	cpu_pmu->enable		= xscale2pmu_enable_event;
	cpu_pmu->disable	= xscale2pmu_disable_event;
	cpu_pmu->read_counter	= xscale2pmu_read_counter;
	cpu_pmu->write_counter	= xscale2pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale2pmu_start;
	cpu_pmu->stop		= xscale2pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 5;

	return 0;
}

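/*
 * Probing is done from the CPUID: the XScale architecture version field
 * selects between the xscale1 and xscale2 implementations.
 */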
static const struct pmu_probe_info xscale_pmu_probe_table[] = {
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel value */ }
};

static int xscale_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
}

static struct platform_driver xscale_pmu_driver = {
	.driver		= {
		.name	= "xscale-pmu",
	},
	.probe		= xscale_pmu_device_probe,
};

builtin_platform_driver(xscale_pmu_driver);
#endif	/* CONFIG_CPU_XSCALE */