// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC CPA (Coherency Protocol Agent) hardware event counters support
 *
 * Copyright (C) 2022 HiSilicon Limited
 * Author: Qi Liu <liuqi115@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */

#define pr_fmt(fmt) "cpa pmu: " fmt
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* CPA register definition */
#define CPA_PERF_CTRL		0x1c00
#define CPA_EVENT_CTRL		0x1c04
#define CPA_INT_MASK		0x1c70
#define CPA_INT_STATUS		0x1c78
#define CPA_INT_CLEAR		0x1c7c
#define CPA_EVENT_TYPE0		0x1c80
#define CPA_VERSION		0x1cf0
#define CPA_CNT0_LOWER		0x1d00
#define CPA_CFG_REG		0x0534

/* CPA operation command */
#define CPA_PERF_CTRL_EN	BIT_ULL(0)
#define CPA_EVTYPE_MASK		0xffUL
#define CPA_PM_CTRL		BIT_ULL(9)

/* CPA has 8 counters */
#define CPA_NR_COUNTERS		0x8
#define CPA_COUNTER_BITS	64
#define CPA_NR_EVENTS		0xff
#define CPA_REG_OFFSET		0x8

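/*
 * Each of the 8 hardware counters is 64 bits wide and laid out contiguously
 * from CPA_CNT0_LOWER, CPA_REG_OFFSET bytes apart.
 */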
static u32 hisi_cpa_pmu_get_counter_offset(int idx)
{
	return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET);
}

static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu,
				     struct hw_perf_event *hwc)
{
	return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
}

static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu,
				       struct hw_perf_event *hwc, u64 val)
{
	writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
}

static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx,
				      u32 type)
{
	u32 reg, reg_idx, shift, val;

	/*
	 * Select the appropriate event select register (CPA_EVENT_TYPE0/1).
	 * There are 2 event select registers for the 8 hardware counters.
	 * Each event code is 8 bits wide: the first 4 hardware counters use
	 * CPA_EVENT_TYPE0 and the last 4 hardware counters use
	 * CPA_EVENT_TYPE1.
	 */
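	/*
	 * Worked example (illustrative only): for idx = 5 the code below
	 * selects CPA_EVENT_TYPE1 (CPA_EVENT_TYPE0 + 4), with reg_idx = 1
	 * and shift = 8, so the event code lands in bits [15:8].
	 */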
	reg = CPA_EVENT_TYPE0 + (idx / 4) * 4;
	reg_idx = idx % 4;
	shift = CPA_REG_OFFSET * reg_idx;

	/* Write event code to CPA_EVENT_TYPEx Register */
	val = readl(cpa_pmu->base + reg);
	val &= ~(CPA_EVTYPE_MASK << shift);
	val |= type << shift;
	writel(val, cpa_pmu->base + reg);
}

static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_PERF_CTRL);
	val |= CPA_PERF_CTRL_EN;
	writel(val, cpa_pmu->base + CPA_PERF_CTRL);
}

static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_PERF_CTRL);
	val &= ~(CPA_PERF_CTRL_EN);
	writel(val, cpa_pmu->base + CPA_PERF_CTRL);
}

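/*
 * CPA_PM_CTRL in CPA_CFG_REG controls the device's power management: the
 * helpers below set the bit to disable PM and clear it to re-enable PM.
 * As noted in the probe path, PM must stay disabled while the PMU counts,
 * so the bit is held set from probe until remove (or on probe failure).
 */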
static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_CFG_REG);
	val |= CPA_PM_CTRL;
	writel(val, cpa_pmu->base + CPA_CFG_REG);
}

static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_CFG_REG);
	val &= ~(CPA_PM_CTRL);
	writel(val, cpa_pmu->base + CPA_CFG_REG);
}

static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu,
					struct hw_perf_event *hwc)
{
	u32 val;

	/* Enable counter index in CPA_EVENT_CTRL register */
	val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
	val |= 1 << hwc->idx;
	writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
}

static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu,
					 struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index in CPA_EVENT_CTRL register */
	val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
	val &= ~(1UL << hwc->idx);
	writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
}

static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu,
					    struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable interrupt */
	val = readl(cpa_pmu->base + CPA_INT_MASK);
	val &= ~(1UL << hwc->idx);
	writel(val, cpa_pmu->base + CPA_INT_MASK);
}

static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask interrupt */
	val = readl(cpa_pmu->base + CPA_INT_MASK);
	val |= 1 << hwc->idx;
	writel(val, cpa_pmu->base + CPA_INT_MASK);
}

static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu)
{
	return readl(cpa_pmu->base + CPA_INT_STATUS);
}

static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx)
{
	writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR);
}

static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = {
	{ "HISI0281", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);

static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
				  struct hisi_pmu *cpa_pmu)
{
	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &cpa_pmu->sicl_id)) {
		dev_err(&pdev->dev, "Cannot read sicl-id\n");
		return -EINVAL;
	}

	if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
				     &cpa_pmu->index_id)) {
		dev_err(&pdev->dev, "Cannot read idx-id\n");
		return -EINVAL;
	}

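	/*
	 * The CPA is a SICL-attached agent with no CPU cluster affinity,
	 * so ccl_id/sccl_id are marked as unused here.
	 */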
	cpa_pmu->ccl_id = -1;
	cpa_pmu->sccl_id = -1;
	cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cpa_pmu->base))
		return PTR_ERR(cpa_pmu->base);

	cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION);

	return 0;
}

static struct attribute *hisi_cpa_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-15"),
	NULL
};

static const struct attribute_group hisi_cpa_pmu_format_group = {
	.name = "format",
	.attrs = hisi_cpa_pmu_format_attr,
};

static struct attribute *hisi_cpa_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(cpa_cycles,		0x00),
	HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat,	0x61),
	HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat,	0x62),
	HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat,	0xE1),
	HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat,	0xE2),
	NULL
};

static const struct attribute_group hisi_cpa_pmu_events_group = {
	.name = "events",
	.attrs = hisi_cpa_pmu_events_attr,
};
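
/*
 * The named events above are exposed through sysfs. An illustrative
 * invocation (assuming an instance registered as hisi_sicl0_cpa0; the
 * actual name depends on the platform's SICL and index IDs):
 *
 *   perf stat -a -e hisi_sicl0_cpa0/cpa_cycles/ -e hisi_sicl0_cpa0/cpa_p0_rd_dat/ sleep 1
 *
 * Raw codes may also be given via the "event" format field, e.g. event=0x61.
 */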

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
	.attrs = hisi_cpa_pmu_cpumask_attrs,
};

static struct device_attribute hisi_cpa_pmu_identifier_attr =
	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);

static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
	&hisi_cpa_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group hisi_cpa_pmu_identifier_group = {
	.attrs = hisi_cpa_pmu_identifier_attrs,
};

static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
	&hisi_cpa_pmu_format_group,
	&hisi_cpa_pmu_events_group,
	&hisi_cpa_pmu_cpumask_attr_group,
	&hisi_cpa_pmu_identifier_group,
	NULL
};

static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = {
	.write_evtype		= hisi_cpa_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_cpa_pmu_start_counters,
	.stop_counters		= hisi_cpa_pmu_stop_counters,
	.enable_counter		= hisi_cpa_pmu_enable_counter,
	.disable_counter	= hisi_cpa_pmu_disable_counter,
	.enable_counter_int	= hisi_cpa_pmu_enable_counter_int,
	.disable_counter_int	= hisi_cpa_pmu_disable_counter_int,
	.write_counter		= hisi_cpa_pmu_write_counter,
	.read_counter		= hisi_cpa_pmu_read_counter,
	.get_int_status		= hisi_cpa_pmu_get_int_status,
	.clear_int_status	= hisi_cpa_pmu_clear_int_status,
};

static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev,
				  struct hisi_pmu *cpa_pmu)
{
	int ret;

	ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev);
	if (ret)
		return ret;

	cpa_pmu->counter_bits = CPA_COUNTER_BITS;
	cpa_pmu->check_event = CPA_NR_EVENTS;
	cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups;
	cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops;
	cpa_pmu->num_counters = CPA_NR_COUNTERS;
	cpa_pmu->dev = &pdev->dev;
	cpa_pmu->on_cpu = -1;

	return 0;
}

static int hisi_cpa_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *cpa_pmu;
	char *name;
	int ret;

	cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL);
	if (!cpa_pmu)
		return -ENOMEM;

	ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu);
	if (ret)
		return ret;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
			      cpa_pmu->sicl_id, cpa_pmu->index_id);
	if (!name)
		return -ENOMEM;

	hisi_pmu_init(cpa_pmu, THIS_MODULE);

	/* Power Management should be disabled before using CPA PMU. */
	hisi_cpa_pmu_disable_pm(cpa_pmu);
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
				       &cpa_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		hisi_cpa_pmu_enable_pm(cpa_pmu);
		return ret;
	}

	ret = perf_pmu_register(&cpa_pmu->pmu, name, -1);
	if (ret) {
		dev_err(cpa_pmu->dev, "PMU register failed\n");
		cpuhp_state_remove_instance_nocalls(
			CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node);
		hisi_cpa_pmu_enable_pm(cpa_pmu);
		return ret;
	}

	platform_set_drvdata(pdev, cpa_pmu);
	return ret;
}

static void hisi_cpa_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&cpa_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
					    &cpa_pmu->node);
	hisi_cpa_pmu_enable_pm(cpa_pmu);
}

static struct platform_driver hisi_cpa_pmu_driver = {
	.driver = {
		.name = "hisi_cpa_pmu",
		.acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = hisi_cpa_pmu_probe,
	.remove_new = hisi_cpa_pmu_remove,
};

static int __init hisi_cpa_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
				      "AP_PERF_ARM_HISI_CPA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("setup hotplug failed: %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_cpa_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);

	return ret;
}
module_init(hisi_cpa_pmu_module_init);

static void __exit hisi_cpa_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_cpa_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
}
module_exit(hisi_cpa_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");