// SPDX-License-Identifier: GPL-2.0
/*
 * CPU PMU driver for the Apple M1 and derivatives
 *
 * Copyright (C) 2021 Google LLC
 *
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Most of the information used in this driver was provided by the
 * Asahi Linux project. The rest was experimentally discovered.
 */

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

#include <asm/apple_m1_pmu.h>
#include <asm/irq_regs.h>
#include <asm/perf_event.h>

#define M1_PMU_NR_COUNTERS		10

#define M1_PMU_CFG_EVENT		GENMASK(7, 0)

#define ANY_BUT_0_1			GENMASK(9, 2)
#define ONLY_2_TO_7			GENMASK(7, 2)
#define ONLY_2_4_6			(BIT(2) | BIT(4) | BIT(6))
#define ONLY_5_6_7			(BIT(5) | BIT(6) | BIT(7))
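
/*
 * Each mask above is a bitmap of hardware counters: bit n is set if
 * an event may be scheduled on counter #n. For instance, ONLY_2_4_6
 * allows counters #2, #4 and #6 only, while ANY_BUT_0_1 allows any
 * of counters #2-#9. These masks feed the per-event affinity table
 * below.
 */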

/*
 * Description of the events we actually know about, as well as those with
 * a specific counter affinity. Yes, this is a grand total of two known
 * events, and the rest is anybody's guess.
 *
 * Not all counters can count all events. Counters #0 and #1 are wired to
 * count cycles and instructions respectively, and some events have
 * bizarre mappings (every other counter, or even *one* counter). These
 * restrictions apply equally to both P and E cores.
 *
 * It is worth noting that the PMUs attached to P and E cores are likely
 * to be different because the underlying uarches are different. At the
 * moment, we don't really need to distinguish between the two because we
 * know next to nothing about the events themselves, and we already have
 * per cpu-type PMU abstractions.
 *
 * If we eventually find out that the events are different across
 * implementations, we'll have to introduce per cpu-type tables.
 */
enum m1_pmu_events {
	M1_PMU_PERFCTR_UNKNOWN_01	= 0x01,
	M1_PMU_PERFCTR_CPU_CYCLES	= 0x02,
	M1_PMU_PERFCTR_INSTRUCTIONS	= 0x8c,
	M1_PMU_PERFCTR_UNKNOWN_8d	= 0x8d,
	M1_PMU_PERFCTR_UNKNOWN_8e	= 0x8e,
	M1_PMU_PERFCTR_UNKNOWN_8f	= 0x8f,
	M1_PMU_PERFCTR_UNKNOWN_90	= 0x90,
	M1_PMU_PERFCTR_UNKNOWN_93	= 0x93,
	M1_PMU_PERFCTR_UNKNOWN_94	= 0x94,
	M1_PMU_PERFCTR_UNKNOWN_95	= 0x95,
	M1_PMU_PERFCTR_UNKNOWN_96	= 0x96,
	M1_PMU_PERFCTR_UNKNOWN_97	= 0x97,
	M1_PMU_PERFCTR_UNKNOWN_98	= 0x98,
	M1_PMU_PERFCTR_UNKNOWN_99	= 0x99,
	M1_PMU_PERFCTR_UNKNOWN_9a	= 0x9a,
	M1_PMU_PERFCTR_UNKNOWN_9b	= 0x9b,
	M1_PMU_PERFCTR_UNKNOWN_9c	= 0x9c,
	M1_PMU_PERFCTR_UNKNOWN_9f	= 0x9f,
	M1_PMU_PERFCTR_UNKNOWN_bf	= 0xbf,
	M1_PMU_PERFCTR_UNKNOWN_c0	= 0xc0,
	M1_PMU_PERFCTR_UNKNOWN_c1	= 0xc1,
	M1_PMU_PERFCTR_UNKNOWN_c4	= 0xc4,
	M1_PMU_PERFCTR_UNKNOWN_c5	= 0xc5,
	M1_PMU_PERFCTR_UNKNOWN_c6	= 0xc6,
	M1_PMU_PERFCTR_UNKNOWN_c8	= 0xc8,
	M1_PMU_PERFCTR_UNKNOWN_ca	= 0xca,
	M1_PMU_PERFCTR_UNKNOWN_cb	= 0xcb,
	M1_PMU_PERFCTR_UNKNOWN_f5	= 0xf5,
	M1_PMU_PERFCTR_UNKNOWN_f6	= 0xf6,
	M1_PMU_PERFCTR_UNKNOWN_f7	= 0xf7,
	M1_PMU_PERFCTR_UNKNOWN_f8	= 0xf8,
	M1_PMU_PERFCTR_UNKNOWN_fd	= 0xfd,
	M1_PMU_PERFCTR_LAST		= M1_PMU_CFG_EVENT,

	/*
	 * From this point onwards, these are not actual HW events,
	 * but attributes that get stored in hw->config_base.
	 */
	M1_PMU_CFG_COUNT_USER		= BIT(8),
	M1_PMU_CFG_COUNT_KERNEL		= BIT(9),
};
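
/*
 * Worked example of the packing: a kernel-only count of event 0xf5
 * ends up with hw->config_base == (0xf5 | M1_PMU_CFG_COUNT_KERNEL),
 * i.e. the event number in bits [7:0] and the EL filtering
 * attributes in bits [9:8].
 */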

/*
 * Per-event affinity table. Most events can be installed on counters
 * 2-9, but there are a number of exceptions. Note that this table
 * has been created experimentally, and I wouldn't be surprised if more
 * events had strange affinities.
 */
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
	[0 ... M1_PMU_PERFCTR_LAST]	= ANY_BUT_0_1,
	[M1_PMU_PERFCTR_UNKNOWN_01]	= BIT(7),
	[M1_PMU_PERFCTR_CPU_CYCLES]	= ANY_BUT_0_1 | BIT(0),
	[M1_PMU_PERFCTR_INSTRUCTIONS]	= BIT(7) | BIT(1),
	[M1_PMU_PERFCTR_UNKNOWN_8d]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_8e]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_8f]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_90]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_93]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_94]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_95]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_96]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_97]	= BIT(7),
	[M1_PMU_PERFCTR_UNKNOWN_98]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_99]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9a]	= BIT(7),
	[M1_PMU_PERFCTR_UNKNOWN_9b]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9c]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9f]	= BIT(7),
	[M1_PMU_PERFCTR_UNKNOWN_bf]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c0]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c1]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c4]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c5]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c6]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c8]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_ca]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_cb]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_f5]	= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f6]	= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f7]	= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f8]	= ONLY_2_TO_7,
	[M1_PMU_PERFCTR_UNKNOWN_fd]	= ONLY_2_4_6,
};
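
/*
 * Reading the table: event 0xf5 carries ONLY_2_4_6, so it may only be
 * scheduled on counters #2, #4 or #6, while any event without an
 * explicit entry falls back to the ANY_BUT_0_1 default and may use
 * any of counters #2-#9.
 */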

static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	= M1_PMU_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	= M1_PMU_PERFCTR_INSTRUCTIONS,
	/* No idea about the rest yet */
};

/* sysfs definitions */
static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%04llx\n", pmu_attr->id);
}

#define M1_PMU_EVENT_ATTR(name, config)					\
	PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)

static struct attribute *m1_pmu_event_attrs[] = {
	M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
	M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
	NULL,
};

static const struct attribute_group m1_pmu_events_attr_group = {
	.name = "events",
	.attrs = m1_pmu_event_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *m1_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group m1_pmu_format_attr_group = {
	.name = "format",
	.attrs = m1_pmu_format_attrs,
};
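
/*
 * With the two groups above, each PMU instance gets the usual perf
 * sysfs layout, e.g. (the PMU name depends on the core type):
 *
 *   /sys/bus/event_source/devices/apple_firestorm_pmu/events/cycles
 *	-> "event=0x0002"
 *   /sys/bus/event_source/devices/apple_firestorm_pmu/format/event
 *	-> "config:0-7"
 *
 * so tooling can resolve named events, e.g.:
 *
 *   perf stat -e apple_firestorm_pmu/cycles/ -- <workload>
 */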

/* Low level accessors. No synchronisation. */
#define PMU_READ_COUNTER(_idx)						\
	case _idx:	return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)

#define PMU_WRITE_COUNTER(_val, _idx)					\
	case _idx:							\
		write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1);	\
		return
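
/*
 * For example, PMU_READ_COUNTER(2) expands to:
 *
 *	case 2: return read_sysreg_s(SYS_IMP_APL_PMC2_EL1);
 *
 * The switch statements below therefore compile down to one direct
 * sysreg access per counter, since the IMPDEF register names cannot
 * be indexed at runtime.
 */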

static u64 m1_pmu_read_hw_counter(unsigned int index)
{
	switch (index) {
		PMU_READ_COUNTER(0);
		PMU_READ_COUNTER(1);
		PMU_READ_COUNTER(2);
		PMU_READ_COUNTER(3);
		PMU_READ_COUNTER(4);
		PMU_READ_COUNTER(5);
		PMU_READ_COUNTER(6);
		PMU_READ_COUNTER(7);
		PMU_READ_COUNTER(8);
		PMU_READ_COUNTER(9);
	}

	BUG();
}

static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
{
	switch (index) {
		PMU_WRITE_COUNTER(val, 0);
		PMU_WRITE_COUNTER(val, 1);
		PMU_WRITE_COUNTER(val, 2);
		PMU_WRITE_COUNTER(val, 3);
		PMU_WRITE_COUNTER(val, 4);
		PMU_WRITE_COUNTER(val, 5);
		PMU_WRITE_COUNTER(val, 6);
		PMU_WRITE_COUNTER(val, 7);
		PMU_WRITE_COUNTER(val, 8);
		PMU_WRITE_COUNTER(val, 9);
	}

	BUG();
}

#define get_bit_offset(index, mask)	(__ffs(mask) + (index))
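
/*
 * get_bit_offset() turns a counter index into an absolute bit position
 * within a register field that doesn't necessarily start at bit 0.
 * Assuming the layout in asm/apple_m1_pmu.h (e.g. PMCR0_CNT_ENABLE_0_7
 * at GENMASK(7, 0) and PMCR0_CNT_ENABLE_8_9 at GENMASK(33, 32)),
 * counter #3 maps to bit 3, while counter #8 (passed as index - 8 == 0)
 * maps to bit 32.
 */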

static void __m1_pmu_enable_counter(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}

static void m1_pmu_enable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, true);
}

static void m1_pmu_disable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, false);
}

static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}

static void m1_pmu_enable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, true);
}

static void m1_pmu_disable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, false);
}

static void m1_pmu_configure_counter(unsigned int index, u8 event,
				     bool user, bool kernel)
{
	u64 val, user_bit, kernel_bit;
	int shift;

	switch (index) {
	case 0 ... 7:
		user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
		kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
		break;
	case 8 ... 9:
		user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
		kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);

	if (user)
		val |= user_bit;
	else
		val &= ~user_bit;

	if (kernel)
		val |= kernel_bit;
	else
		val &= ~kernel_bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);

	/*
	 * Counters 0 and 1 have fixed events. For anything else,
	 * place the event at the expected location in the relevant
	 * register (PMESR0 holds the event configuration for counters
	 * 2-5, and PMESR1 for counters 6-9).
	 */
	switch (index) {
	case 0 ... 1:
		break;
	case 2 ... 5:
		shift = (index - 2) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
		break;
	case 6 ... 9:
		shift = (index - 6) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
		break;
	}
}
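
/*
 * Worked example: programming event 0x01 on counter #7 sets the EL0
 * and/or EL1 filter bits for counter #7 in PMCR1, then read-modify-
 * writes 0x01 into byte 1 of PMESR1 (shift = (7 - 6) * 8 = 8),
 * leaving the event selectors of counters #6, #8 and #9 untouched.
 */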

/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
	bool user, kernel;
	u8 evt;

	evt = event->hw.config_base & M1_PMU_CFG_EVENT;
	user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
	kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;

	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();

	m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
	m1_pmu_enable_counter(event->hw.idx);
	m1_pmu_enable_counter_interrupt(event->hw.idx);
	isb();
}

static void m1_pmu_disable_event(struct perf_event *event)
{
	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();
}

static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	u64 overflow, state;
	int idx;

	overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
	if (!overflow) {
		/* Spurious interrupt? */
		state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
		state &= ~PMCR0_IACT;
		write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
		isb();
		return IRQ_NONE;
	}

	cpu_pmu->stop(cpu_pmu);

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		struct perf_sample_data data;

		if (!event)
			continue;

		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, event->hw.last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			m1_pmu_disable_event(event);
	}

	cpu_pmu->start(cpu_pmu);

	return IRQ_HANDLED;
}

static u64 m1_pmu_read_counter(struct perf_event *event)
{
	return m1_pmu_read_hw_counter(event->hw.idx);
}

static void m1_pmu_write_counter(struct perf_event *event, u64 value)
{
	m1_pmu_write_hw_counter(value, event->hw.idx);
	isb();
}

static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
	unsigned long affinity = m1_pmu_event_affinity[evtype];
	int idx;

	/*
	 * Place the event on the first free counter that can count
	 * this event.
	 *
	 * We could do a better job if we had a view of all the events
	 * counting on the PMU at any given time, and by placing the
	 * most constraining events first.
	 */
	for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	return -EAGAIN;
}
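
/*
 * Example: with counters #2 and #4 already taken, an event whose
 * affinity is ONLY_2_4_6 lands on counter #6; if #6 is busy too, the
 * event gets -EAGAIN even though other (incompatible) counters may
 * still be free.
 */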

static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}

static void __m1_pmu_set_mode(u8 mode)
{
	u64 val;

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
	val &= ~(PMCR0_IMODE | PMCR0_IACT);
	val |= FIELD_PREP(PMCR0_IMODE, mode);
	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
	isb();
}
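
/*
 * PMCR0_IMODE selects how counter overflow is signalled. This driver
 * only ever switches between PMCR0_IMODE_OFF (no interrupt) and
 * PMCR0_IMODE_FIQ (overflow delivered as a FIQ); the other modes
 * described in asm/apple_m1_pmu.h are left unused.
 */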

static void m1_pmu_start(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_FIQ);
}

static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_OFF);
}

static int m1_pmu_map_event(struct perf_event *event)
{
	/*
	 * Although the counters are 48bit wide, bit 47 is what
	 * triggers the overflow interrupt. Advertise the counters
	 * being 47bit wide to mimic the behaviour of the ARM PMU.
	 */
	event->hw.flags |= ARMPMU_EVT_47BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}

static int m2_pmu_map_event(struct perf_event *event)
{
	/*
	 * Same deal as the above, except that M2 has 64bit counters.
	 * Which, as far as we're concerned, actually means 63 bits.
	 * Yes, this is getting awkward.
	 */
	event->hw.flags |= ARMPMU_EVT_63BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
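
/*
 * Illustration of the width quirk: advertising the narrower width
 * makes the generic arm_pmu code cap the maximum sampling period so
 * that a programmed count overflows into the interrupt-triggering bit
 * (bit 47 on M1, bit 63 on M2) exactly when the period elapses.
 */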

static void m1_pmu_reset(void *info)
{
	int i;

	__m1_pmu_set_mode(PMCR0_IMODE_OFF);

	for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
		m1_pmu_disable_counter(i);
		m1_pmu_disable_counter_interrupt(i);
		m1_pmu_write_hw_counter(0, i);
	}

	isb();
}

static int m1_pmu_set_event_filter(struct hw_perf_event *event,
				   struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (!attr->exclude_guest) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}
	if (!attr->exclude_kernel)
		config_base |= M1_PMU_CFG_COUNT_KERNEL;
	if (!attr->exclude_user)
		config_base |= M1_PMU_CFG_COUNT_USER;

	event->config_base = config_base;

	return 0;
}
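
/*
 * In perf_event_attr terms: exclude_guest must be set (the PMU offers
 * no guest/host filtering), while exclude_user/exclude_kernel map
 * directly onto the EL0/EL1 filtering attributes. E.g. perf's ":u"
 * event modifier sets exclude_kernel and yields an EL0-only count.
 */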

static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
{
	cpu_pmu->handle_irq	  = m1_pmu_handle_irq;
	cpu_pmu->enable		  = m1_pmu_enable_event;
	cpu_pmu->disable	  = m1_pmu_disable_event;
	cpu_pmu->read_counter	  = m1_pmu_read_counter;
	cpu_pmu->write_counter	  = m1_pmu_write_counter;
	cpu_pmu->get_event_idx	  = m1_pmu_get_event_idx;
	cpu_pmu->clear_event_idx  = m1_pmu_clear_event_idx;
	cpu_pmu->start		  = m1_pmu_start;
	cpu_pmu->stop		  = m1_pmu_stop;

	if (flags & ARMPMU_EVT_47BIT) {
		cpu_pmu->map_event = m1_pmu_map_event;
	} else if (flags & ARMPMU_EVT_63BIT) {
		cpu_pmu->map_event = m2_pmu_map_event;
	} else {
		/*
		 * WARN_ON() returns the truth value of its condition,
		 * not an error code, so return -EINVAL explicitly.
		 */
		WARN_ON(1);
		return -EINVAL;
	}

	cpu_pmu->reset		  = m1_pmu_reset;
	cpu_pmu->set_event_filter = m1_pmu_set_event_filter;

	cpu_pmu->num_events	  = M1_PMU_NR_COUNTERS;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
	return 0;
}

/* Device driver gunk */
static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_icestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}

static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_firestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}

static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_avalanche_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}

static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_blizzard_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}

static const struct of_device_id m1_pmu_of_device_ids[] = {
	{ .compatible = "apple,avalanche-pmu",	.data = m2_pmu_avalanche_init, },
	{ .compatible = "apple,blizzard-pmu",	.data = m2_pmu_blizzard_init, },
	{ .compatible = "apple,icestorm-pmu",	.data = m1_pmu_ice_init, },
	{ .compatible = "apple,firestorm-pmu",	.data = m1_pmu_fire_init, },
	{ },
};
MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
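
/*
 * For reference, a matching devicetree node looks roughly like this
 * (illustrative sketch only; the authoritative layout lives in the
 * platform DTs and the apple,*-pmu binding):
 *
 *	pmu-e {
 *		compatible = "apple,icestorm-pmu";
 *		interrupt-parent = <&aic>;
 *		interrupts = <AIC_FIQ AIC_CPU_PMU_E IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */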

static int m1_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
}

static struct platform_driver m1_pmu_driver = {
	.driver		= {
		.name			= "apple-m1-cpu-pmu",
		.of_match_table		= m1_pmu_of_device_ids,
		.suppress_bind_attrs	= true,
	},
	.probe		= m1_pmu_device_probe,
};

module_platform_driver(m1_pmu_driver);