// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC UC (unified cache) uncore Hardware event counters support
 *
 * Copyright (C) 2023 HiSilicon Limited
 *
 * This code is based on the uncore PMUs like hisi_uncore_l3c_pmu.
 */
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>

#include "hisi_uncore_pmu.h"

/* Dynamic CPU hotplug state used by UC PMU */
static enum cpuhp_state hisi_uc_pmu_online;

/* UC register definition */
#define HISI_UC_INT_MASK_REG		0x0800
#define HISI_UC_INT_STS_REG		0x0808
#define HISI_UC_INT_CLEAR_REG		0x080c
#define HISI_UC_TRACETAG_CTRL_REG	0x1b2c
#define HISI_UC_TRACETAG_REQ_MSK	GENMASK(9, 7)
#define HISI_UC_TRACETAG_MARK_EN	BIT(0)
#define HISI_UC_TRACETAG_REQ_EN		(HISI_UC_TRACETAG_MARK_EN | BIT(2))
#define HISI_UC_TRACETAG_SRCID_EN	BIT(3)
#define HISI_UC_SRCID_CTRL_REG		0x1b40
#define HISI_UC_SRCID_MSK		GENMASK(14, 1)
#define HISI_UC_EVENT_CTRL_REG		0x1c00
#define HISI_UC_EVENT_TRACETAG_EN	BIT(29)
#define HISI_UC_EVENT_URING_MSK		GENMASK(28, 27)
#define HISI_UC_EVENT_GLB_EN		BIT(26)
#define HISI_UC_VERSION_REG		0x1cf0
#define HISI_UC_EVTYPE_REGn(n)		(0x1d00 + (n) * 4)
#define HISI_UC_EVTYPE_MASK		GENMASK(7, 0)
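/* 64-bit counter registers, one per hardware counter at an 8-byte stride */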
#define HISI_UC_CNTR_REGn(n)		(0x1e00 + (n) * 8)

#define HISI_UC_NR_COUNTERS		0x8
#define HISI_UC_V2_NR_EVENTS		0xFF
#define HISI_UC_CNTR_REG_BITS		64

#define HISI_UC_RD_REQ_TRACETAG		0x4
#define HISI_UC_URING_EVENT_MIN		0x47
#define HISI_UC_URING_EVENT_MAX		0x59

HISI_PMU_EVENT_ATTR_EXTRACTOR(rd_req_en, config1, 0, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(uring_channel, config1, 5, 4);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid, config1, 19, 6);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_en, config1, 20, 20);
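
/*
 * The filter fields above are passed through perf_event_attr::config1,
 * e.g. (with a hypothetical PMU instance name):
 *   perf stat -e hisi_sccl1_uc0_0/event=0x47,uring_channel=0x1/
 */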

static int hisi_uc_pmu_check_filter(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);

	if (hisi_get_srcid_en(event) && !hisi_get_rd_req_en(event)) {
		dev_err(uc_pmu->dev,
			"srcid_en depends on rd_req_en being enabled!\n");
		return -EINVAL;
	}

	if (!hisi_get_uring_channel(event))
		return 0;

	if ((HISI_GET_EVENTID(event) < HISI_UC_URING_EVENT_MIN) ||
	    (HISI_GET_EVENTID(event) > HISI_UC_URING_EVENT_MAX))
		dev_warn(uc_pmu->dev,
			 "Only events [%#x ~ %#x] support channel filtering!",
			 HISI_UC_URING_EVENT_MIN, HISI_UC_URING_EVENT_MAX);

	return 0;
}

static void hisi_uc_pmu_config_req_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_rd_req_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* The request-type has been configured */
	if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == HISI_UC_RD_REQ_TRACETAG)
		return;

	/* Set request-type for tracetag, only read request is supported! */
	val &= ~HISI_UC_TRACETAG_REQ_MSK;
	val |= FIELD_PREP(HISI_UC_TRACETAG_REQ_MSK, HISI_UC_RD_REQ_TRACETAG);
	val |= HISI_UC_TRACETAG_REQ_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
}

static void hisi_uc_pmu_clear_req_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_rd_req_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Do nothing, the request-type tracetag has been cleaned up */
	if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == 0)
		return;

	/* Clear request-type */
	val &= ~HISI_UC_TRACETAG_REQ_MSK;
	val &= ~HISI_UC_TRACETAG_REQ_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
}

static void hisi_uc_pmu_config_srcid_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_srcid_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Do nothing, the source id has been configured */
	if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val))
		return;

	/* Enable source id tracetag */
	val |= HISI_UC_TRACETAG_SRCID_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

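	/* Program the source ID to be matched against */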
	val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
	val &= ~HISI_UC_SRCID_MSK;
	val |= FIELD_PREP(HISI_UC_SRCID_MSK, hisi_get_srcid(event));
	writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);

	/* Depends on the request-type tracetag being enabled */
	hisi_uc_pmu_config_req_tracetag(event);
}

static void hisi_uc_pmu_clear_srcid_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_srcid_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Do nothing, the source id has been cleaned up */
	if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val) == 0)
		return;

	hisi_uc_pmu_clear_req_tracetag(event);

	/* Disable source id tracetag */
	val &= ~HISI_UC_TRACETAG_SRCID_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

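	/* Clear the programmed source ID match value */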
	val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
	val &= ~HISI_UC_SRCID_MSK;
	writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
}

static void hisi_uc_pmu_config_uring_channel(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 uring_channel = hisi_get_uring_channel(event);
	u32 val;

	/* Do nothing if the channel is unset or zero (the default) */
	if (uring_channel == 0)
		return;

	val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);

	/* Do nothing, the uring_channel has been configured */
	if (uring_channel == FIELD_GET(HISI_UC_EVENT_URING_MSK, val))
		return;

	val &= ~HISI_UC_EVENT_URING_MSK;
	val |= FIELD_PREP(HISI_UC_EVENT_URING_MSK, uring_channel);
	writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}

static void hisi_uc_pmu_clear_uring_channel(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	/* Do nothing if the channel is unset or zero (the default) */
	if (hisi_get_uring_channel(event) == 0)
		return;

	val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);

	/* Do nothing, the uring_channel has been cleaned up */
	if (FIELD_GET(HISI_UC_EVENT_URING_MSK, val) == 0)
		return;

	val &= ~HISI_UC_EVENT_URING_MSK;
	writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}

static void hisi_uc_pmu_enable_filter(struct perf_event *event)
{
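	/* config1 == 0 means no filter fields were specified */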
	if (event->attr.config1 == 0)
		return;

	hisi_uc_pmu_config_uring_channel(event);
	hisi_uc_pmu_config_req_tracetag(event);
	hisi_uc_pmu_config_srcid_tracetag(event);
}

static void hisi_uc_pmu_disable_filter(struct perf_event *event)
{
	if (event->attr.config1 == 0)
		return;

	hisi_uc_pmu_clear_srcid_tracetag(event);
	hisi_uc_pmu_clear_req_tracetag(event);
	hisi_uc_pmu_clear_uring_channel(event);
}

static void hisi_uc_pmu_write_evtype(struct hisi_pmu *uc_pmu, int idx, u32 type)
{
	u32 val;

	/*
	 * Select the appropriate event select register.
	 * There are 2 32-bit event select registers for the
	 * 8 hardware counters, each event code is 8-bit wide.
	 */
	val = readl(uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
	val &= ~(HISI_UC_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
	val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
	writel(val, uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
}

static void hisi_uc_pmu_start_counters(struct hisi_pmu *uc_pmu)
{
	u32 val;

	val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
	val |= HISI_UC_EVENT_GLB_EN;
	writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}

static void hisi_uc_pmu_stop_counters(struct hisi_pmu *uc_pmu)
{
	u32 val;

	val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
	val &= ~HISI_UC_EVENT_GLB_EN;
	writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}

static void hisi_uc_pmu_enable_counter(struct hisi_pmu *uc_pmu,
					struct hw_perf_event *hwc)
{
	u32 val;

	/* Enable counter index */
	val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
	val |= (1 << hwc->idx);
	writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}

static void hisi_uc_pmu_disable_counter(struct hisi_pmu *uc_pmu,
					struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index */
	val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
	val &= ~(1 << hwc->idx);
	writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}

static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
				    struct hw_perf_event *hwc)
{
	return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}

static bool hisi_uc_pmu_get_glb_en_state(struct hisi_pmu *uc_pmu)
{
	u32 val;

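	/*
	 * On v2, the global enable also turns on the counter clock, see
	 * the erratum note in hisi_uc_pmu_write_counter().
	 */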
	val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
	return !!FIELD_GET(HISI_UC_EVENT_GLB_EN, val);
}

static void hisi_uc_pmu_write_counter_normal(struct hisi_pmu *uc_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
	writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}

static void hisi_uc_pmu_write_counter_quirk_v2(struct hisi_pmu *uc_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
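	/*
	 * Briefly enable the PMU so the counter clock is running while the
	 * value is written, then stop it again to restore the disabled state.
	 */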
	hisi_uc_pmu_start_counters(uc_pmu);
	hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
	hisi_uc_pmu_stop_counters(uc_pmu);
}

static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
	bool enable = hisi_uc_pmu_get_glb_en_state(uc_pmu);
	bool erratum = uc_pmu->identifier == HISI_PMU_V2;

	/*
	 * HiSilicon UC PMU v2 suffers from erratum 162700402: the PMU
	 * counter cannot be set because the counter clock is missing in
	 * power-saving mode, which leads to wrong or inaccurate counts.
	 * The clock can be turned on via the PMU global enable control.
	 * Both the irq handler and pmu_start() call this function to set
	 * the period. In irq context the PMU is already enabled, so the
	 * counter can be written directly. Otherwise the PMU is disabled,
	 * so enable it to turn on the counter clock, write the period,
	 * and then restore the PMU enable state; the counter holds its
	 * value without a clock.
	 */
	if (enable || !erratum)
		hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
	else
		hisi_uc_pmu_write_counter_quirk_v2(uc_pmu, hwc, val);
}

static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
					   struct hw_perf_event *hwc)
{
	u32 val;

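	/* Clear the mask bit to unmask this counter's overflow interrupt */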
	val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
	val &= ~(1 << hwc->idx);
	writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
}

static void hisi_uc_pmu_disable_counter_int(struct hisi_pmu *uc_pmu,
					    struct hw_perf_event *hwc)
{
	u32 val;

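	/* Set the mask bit to mask this counter's overflow interrupt */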
	val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
	val |= (1 << hwc->idx);
	writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
}

static u32 hisi_uc_pmu_get_int_status(struct hisi_pmu *uc_pmu)
{
	return readl(uc_pmu->base + HISI_UC_INT_STS_REG);
}

static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
{
	writel(1 << idx, uc_pmu->base + HISI_UC_INT_CLEAR_REG);
}

static int hisi_uc_pmu_init_data(struct platform_device *pdev,
				 struct hisi_pmu *uc_pmu)
{
	/*
	 * Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
	 * identify the topology information of UC PMU devices in the chip.
	 * There are several CCLs per SCCL and 4 UC PMUs per CCL.
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &uc_pmu->sccl_id)) {
		dev_err(&pdev->dev, "Cannot read uc sccl-id!\n");
		return -EINVAL;
	}

	if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
				     &uc_pmu->ccl_id)) {
		dev_err(&pdev->dev, "Cannot read uc ccl-id!\n");
		return -EINVAL;
	}

	if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
				     &uc_pmu->sub_id)) {
		dev_err(&pdev->dev, "Cannot read uc sub-id!\n");
		return -EINVAL;
	}

	uc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(uc_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for uc_pmu resource\n");
		return PTR_ERR(uc_pmu->base);
	}

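	/* Cache the hardware version; v2 needs the counter-write quirk */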
	uc_pmu->identifier = readl(uc_pmu->base + HISI_UC_VERSION_REG);

	return 0;
}

static struct attribute *hisi_uc_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	HISI_PMU_FORMAT_ATTR(rd_req_en, "config1:0-0"),
	HISI_PMU_FORMAT_ATTR(uring_channel, "config1:4-5"),
	HISI_PMU_FORMAT_ATTR(srcid, "config1:6-19"),
	HISI_PMU_FORMAT_ATTR(srcid_en, "config1:20-20"),
	NULL
};

static const struct attribute_group hisi_uc_pmu_format_group = {
	.name = "format",
	.attrs = hisi_uc_pmu_format_attr,
};

static struct attribute *hisi_uc_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(sq_time,		0x00),
	HISI_PMU_EVENT_ATTR(pq_time,		0x01),
	HISI_PMU_EVENT_ATTR(hbm_time,		0x02),
	HISI_PMU_EVENT_ATTR(iq_comp_time_cring,	0x03),
	HISI_PMU_EVENT_ATTR(iq_comp_time_uring,	0x05),
	HISI_PMU_EVENT_ATTR(cpu_rd,		0x10),
	HISI_PMU_EVENT_ATTR(cpu_rd64,		0x17),
	HISI_PMU_EVENT_ATTR(cpu_rs64,		0x19),
	HISI_PMU_EVENT_ATTR(cpu_mru,		0x1c),
	HISI_PMU_EVENT_ATTR(cycles,		0x95),
	HISI_PMU_EVENT_ATTR(spipe_hit,		0xb3),
	HISI_PMU_EVENT_ATTR(hpipe_hit,		0xdb),
	HISI_PMU_EVENT_ATTR(cring_rxdat_cnt,	0xfa),
	HISI_PMU_EVENT_ATTR(cring_txdat_cnt,	0xfb),
	HISI_PMU_EVENT_ATTR(uring_rxdat_cnt,	0xfc),
	HISI_PMU_EVENT_ATTR(uring_txdat_cnt,	0xfd),
	NULL
};

static const struct attribute_group hisi_uc_pmu_events_group = {
	.name = "events",
	.attrs = hisi_uc_pmu_events_attr,
};

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
	.attrs = hisi_uc_pmu_cpumask_attrs,
};

static struct device_attribute hisi_uc_pmu_identifier_attr =
	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);

static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
	&hisi_uc_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group hisi_uc_pmu_identifier_group = {
	.attrs = hisi_uc_pmu_identifier_attrs,
};

static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
	&hisi_uc_pmu_format_group,
	&hisi_uc_pmu_events_group,
	&hisi_uc_pmu_cpumask_attr_group,
	&hisi_uc_pmu_identifier_group,
	NULL
};

static const struct hisi_uncore_ops hisi_uncore_uc_pmu_ops = {
	.check_filter		= hisi_uc_pmu_check_filter,
	.write_evtype		= hisi_uc_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_uc_pmu_start_counters,
	.stop_counters		= hisi_uc_pmu_stop_counters,
	.enable_counter		= hisi_uc_pmu_enable_counter,
	.disable_counter	= hisi_uc_pmu_disable_counter,
	.enable_counter_int	= hisi_uc_pmu_enable_counter_int,
	.disable_counter_int	= hisi_uc_pmu_disable_counter_int,
	.write_counter		= hisi_uc_pmu_write_counter,
	.read_counter		= hisi_uc_pmu_read_counter,
	.get_int_status		= hisi_uc_pmu_get_int_status,
	.clear_int_status	= hisi_uc_pmu_clear_int_status,
	.enable_filter		= hisi_uc_pmu_enable_filter,
	.disable_filter		= hisi_uc_pmu_disable_filter,
};

static int hisi_uc_pmu_dev_probe(struct platform_device *pdev,
				 struct hisi_pmu *uc_pmu)
{
	int ret;

	ret = hisi_uc_pmu_init_data(pdev, uc_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(uc_pmu, pdev);
	if (ret)
		return ret;

	uc_pmu->pmu_events.attr_groups = hisi_uc_pmu_attr_groups;
	uc_pmu->check_event = HISI_UC_EVTYPE_MASK;
	uc_pmu->ops = &hisi_uncore_uc_pmu_ops;
	uc_pmu->counter_bits = HISI_UC_CNTR_REG_BITS;
	uc_pmu->num_counters = HISI_UC_NR_COUNTERS;
	uc_pmu->dev = &pdev->dev;
	uc_pmu->on_cpu = -1;

	return 0;
}

static void hisi_uc_pmu_remove_cpuhp_instance(void *hotplug_node)
{
	cpuhp_state_remove_instance_nocalls(hisi_uc_pmu_online, hotplug_node);
}

static void hisi_uc_pmu_unregister_pmu(void *pmu)
{
	perf_pmu_unregister(pmu);
}

static int hisi_uc_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *uc_pmu;
	char *name;
	int ret;

	uc_pmu = devm_kzalloc(&pdev->dev, sizeof(*uc_pmu), GFP_KERNEL);
	if (!uc_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, uc_pmu);

	ret = hisi_uc_pmu_dev_probe(pdev, uc_pmu);
	if (ret)
		return ret;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
			      uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
	if (!name)
		return -ENOMEM;

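	/* Bind the PMU to an online CPU and register for hotplug migration */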
	ret = cpuhp_state_add_instance(hisi_uc_pmu_online, &uc_pmu->node);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Error registering hotplug\n");

	ret = devm_add_action_or_reset(&pdev->dev,
				       hisi_uc_pmu_remove_cpuhp_instance,
				       &uc_pmu->node);
	if (ret)
		return ret;

	hisi_pmu_init(uc_pmu, THIS_MODULE);

	ret = perf_pmu_register(&uc_pmu->pmu, name, -1);
	if (ret)
		return ret;

	return devm_add_action_or_reset(&pdev->dev,
					hisi_uc_pmu_unregister_pmu,
					&uc_pmu->pmu);
}

static const struct acpi_device_id hisi_uc_pmu_acpi_match[] = {
	{ "HISI0291", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_uc_pmu_acpi_match);

static struct platform_driver hisi_uc_pmu_driver = {
	.driver = {
		.name = "hisi_uc_pmu",
		.acpi_match_table = hisi_uc_pmu_acpi_match,
		/*
		 * We have not worked out a safe bind/unbind process yet.
		 * Forcefully unbinding during sampling will lead to a
		 * kernel panic, so this is not supported for now.
		 */
		.suppress_bind_attrs = true,
	},
	.probe = hisi_uc_pmu_probe,
};

static int __init hisi_uc_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/hisi/uc:online",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret < 0) {
		pr_err("UC PMU: Error setting up hotplug, ret = %d\n", ret);
		return ret;
	}
	hisi_uc_pmu_online = ret;

	ret = platform_driver_register(&hisi_uc_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(hisi_uc_pmu_online);

	return ret;
}
module_init(hisi_uc_pmu_module_init);

static void __exit hisi_uc_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_uc_pmu_driver);
	cpuhp_remove_multi_state(hisi_uc_pmu_online);
}
module_exit(hisi_uc_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");