// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Siemens System Memory Buffer driver.
 * Copyright(c) 2022, HiSilicon Limited.
 */

#include <linux/atomic.h>
#include <linux/acpi.h>
#include <linux/circ_buf.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "ultrasoc-smb.h"

DEFINE_CORESIGHT_DEVLIST(sink_devs, "ultra_smb");

#define ULTRASOC_SMB_DSM_UUID	"82ae1283-7f6a-4cbe-aa06-53e8fb24db18"

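/* Check whether the SMB logical buffer currently holds any trace data. */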
static bool smb_buffer_not_empty(struct smb_drv_data *drvdata)
{
	u32 buf_status = readl(drvdata->base + SMB_LB_INT_STS_REG);

	return FIELD_GET(SMB_LB_INT_STS_NOT_EMPTY_MSK, buf_status);
}

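/*
 * Recompute the amount of pending trace data from the hardware write pointer
 * and the cached read pointer. When both pointers are equal, the NOT_EMPTY
 * status flag disambiguates a completely full buffer from an empty one.
 */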
static void smb_update_data_size(struct smb_drv_data *drvdata)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	u32 buf_wrptr;

	buf_wrptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG) -
			  sdb->buf_hw_base;

	/* Buffer is full */
	if (buf_wrptr == sdb->buf_rdptr && smb_buffer_not_empty(drvdata)) {
		sdb->data_size = sdb->buf_size;
		return;
	}

	/* Otherwise, the buffer operates in circular mode */
	sdb->data_size = CIRC_CNT(buf_wrptr, sdb->buf_rdptr,
				  sdb->buf_size);
}

/*
 * Advance the read pointer by @nbytes (wrapping back to the start of the
 * buffer if necessary) after data has been read or discarded, and update
 * the available data size accordingly.
 */
static void smb_update_read_ptr(struct smb_drv_data *drvdata, u32 nbytes)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;

	sdb->buf_rdptr += nbytes;
	sdb->buf_rdptr %= sdb->buf_size;
	writel(sdb->buf_hw_base + sdb->buf_rdptr,
	       drvdata->base + SMB_LB_RD_ADDR_REG);

	sdb->data_size -= nbytes;
}

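/*
 * Purge any data left in the hardware path and resynchronize the read
 * pointer with the write pointer so that the next session starts cleanly.
 */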
static void smb_reset_buffer(struct smb_drv_data *drvdata)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	u32 write_ptr;

	/*
	 * We must flush and discard any data left in the hardware path
	 * to avoid corrupting the next session.
	 * Note: the write pointer will never exceed the read pointer.
	 */
	writel(SMB_LB_PURGE_PURGED, drvdata->base + SMB_LB_PURGE_REG);

	/* Reset SMB logical buffer status flags */
	writel(SMB_LB_INT_STS_RESET, drvdata->base + SMB_LB_INT_STS_REG);

	write_ptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG);

	/* Do nothing, no data left in the hardware path */
	if (!write_ptr || write_ptr == sdb->buf_rdptr + sdb->buf_hw_base)
		return;

	/*
	 * The SMB_LB_WR_ADDR_REG register is read-only; synchronize the
	 * read pointer to the write pointer instead.
	 */
	writel(write_ptr, drvdata->base + SMB_LB_RD_ADDR_REG);
	sdb->buf_rdptr = write_ptr - sdb->buf_hw_base;
}

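/*
 * The misc device gives exclusive read access to the captured trace data:
 * opening fails with -EBUSY while another reader is active or while the
 * sink is still enabled by a trace session.
 */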
static int smb_open(struct inode *inode, struct file *file)
{
	struct smb_drv_data *drvdata = container_of(file->private_data,
					struct smb_drv_data, miscdev);

	guard(spinlock)(&drvdata->spinlock);

	if (drvdata->reading)
		return -EBUSY;

	if (drvdata->csdev->refcnt)
		return -EBUSY;

	smb_update_data_size(drvdata);
	drvdata->reading = true;

	return 0;
}

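/*
 * Copy pending trace data to user space. Each call returns at most the data
 * up to the end of the linear buffer; callers are expected to keep reading
 * until 0 is returned.
 */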
static ssize_t smb_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct smb_drv_data *drvdata = container_of(file->private_data,
					struct smb_drv_data, miscdev);
	struct smb_data_buffer *sdb = &drvdata->sdb;
	struct device *dev = &drvdata->csdev->dev;
	ssize_t to_copy = 0;

	if (!len)
		return 0;

	if (!sdb->data_size)
		return 0;

	to_copy = min(sdb->data_size, len);

	/* Only copy up to the end of the buffer if the read pointer would wrap */
	if (sdb->buf_rdptr + to_copy > sdb->buf_size)
		to_copy = sdb->buf_size - sdb->buf_rdptr;

	if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
		dev_dbg(dev, "Failed to copy data to user\n");
		return -EFAULT;
	}

	*ppos += to_copy;
	smb_update_read_ptr(drvdata, to_copy);
	if (!sdb->data_size)
		smb_reset_buffer(drvdata);

	dev_dbg(dev, "%zu bytes copied\n", to_copy);
	return to_copy;
}

static int smb_release(struct inode *inode, struct file *file)
{
	struct smb_drv_data *drvdata = container_of(file->private_data,
					struct smb_drv_data, miscdev);

	guard(spinlock)(&drvdata->spinlock);
	drvdata->reading = false;

	return 0;
}

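/*
 * File operations behind the misc device node (typically /dev/ultra_smb<N>),
 * used to drain the SMB from user space after a sysfs mode session.
 */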
static const struct file_operations smb_fops = {
	.owner		= THIS_MODULE,
	.open		= smb_open,
	.read		= smb_read,
	.release	= smb_release,
	.llseek		= no_llseek,
};

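/*
 * The buffer size is exported read-only through the sink's "mgmt" sysfs
 * group, e.g. /sys/bus/coresight/devices/ultra_smb0/mgmt/buf_size.
 */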
static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "0x%lx\n", drvdata->sdb.buf_size);
}
static DEVICE_ATTR_RO(buf_size);

static struct attribute *smb_sink_attrs[] = {
	coresight_simple_reg32(read_pos, SMB_LB_RD_ADDR_REG),
	coresight_simple_reg32(write_pos, SMB_LB_WR_ADDR_REG),
	coresight_simple_reg32(buf_status, SMB_LB_INT_STS_REG),
	&dev_attr_buf_size.attr,
	NULL
};

static const struct attribute_group smb_sink_group = {
	.attrs = smb_sink_attrs,
	.name = "mgmt",
};

static const struct attribute_group *smb_sink_groups[] = {
	&smb_sink_group,
	NULL
};

static void smb_enable_hw(struct smb_drv_data *drvdata)
{
	writel(SMB_GLB_EN_HW_ENABLE, drvdata->base + SMB_GLB_EN_REG);
}

static void smb_disable_hw(struct smb_drv_data *drvdata)
{
	writel(0x0, drvdata->base + SMB_GLB_EN_REG);
}

static void smb_enable_sysfs(struct coresight_device *csdev)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (coresight_get_mode(csdev) != CS_MODE_DISABLED)
		return;

	smb_enable_hw(drvdata);
	coresight_set_mode(csdev, CS_MODE_SYSFS);
}

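/*
 * In perf mode the sink is claimed on behalf of the pid owning the session;
 * sessions started by other processes are refused with -EBUSY until the
 * sink is released again.
 */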
static int smb_enable_perf(struct coresight_device *csdev, void *data)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);
	pid_t pid;

	if (!buf)
		return -EINVAL;

	/* Get a handle on the pid of the target process */
	pid = buf->pid;

	/* Device is already in use by another session */
	if (drvdata->pid != -1 && drvdata->pid != pid)
		return -EBUSY;

	if (drvdata->pid == -1) {
		smb_enable_hw(drvdata);
		drvdata->pid = pid;
		coresight_set_mode(csdev, CS_MODE_PERF);
	}

	return 0;
}

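/*
 * Common enable path for both sysfs and perf modes. A sink already enabled
 * in one mode cannot be enabled in the other at the same time.
 */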
static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
		      void *data)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret = 0;

	guard(spinlock)(&drvdata->spinlock);

	/* Do nothing, the trace data is being read by another interface now */
	if (drvdata->reading)
		return -EBUSY;

	/* Do nothing, the SMB is already enabled in another mode */
	if (coresight_get_mode(csdev) != CS_MODE_DISABLED &&
	    coresight_get_mode(csdev) != mode)
		return -EBUSY;

	switch (mode) {
	case CS_MODE_SYSFS:
		smb_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = smb_enable_perf(csdev, data);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	csdev->refcnt++;
	dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");

	return ret;
}

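/* Drop one reference and only disable the hardware once the last user is gone. */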
static int smb_disable(struct coresight_device *csdev)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);

	guard(spinlock)(&drvdata->spinlock);

	if (drvdata->reading)
		return -EBUSY;

	csdev->refcnt--;
	if (csdev->refcnt)
		return -EBUSY;

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);

	smb_disable_hw(drvdata);

	/* Dissociate from the target process. */
	drvdata->pid = -1;
	coresight_set_mode(csdev, CS_MODE_DISABLED);
	dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");

	return 0;
}

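/*
 * Allocate the cs_buffers descriptor for a perf session, preferably on the
 * NUMA node of the traced CPU, and record the owner pid used for sink
 * arbitration.
 */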
static void *smb_alloc_buffer(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite)
{
	struct cs_buffers *buf;
	int node;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;
	buf->pid = task_pid_nr(event->owner);

	return buf;
}

static void smb_free_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

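/*
 * Copy everything the SMB captured into the perf AUX pages, starting at
 * @head and wrapping around both the SMB buffer and the page array as
 * needed, then reset the hardware buffer.
 */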
static void smb_sync_perf_buffer(struct smb_drv_data *drvdata,
				 struct cs_buffers *buf,
				 unsigned long head)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	char **dst_pages = (char **)buf->data_pages;
	unsigned long to_copy;
	long pg_idx, pg_offset;

	pg_idx = head >> PAGE_SHIFT;
	pg_offset = head & (PAGE_SIZE - 1);

	while (sdb->data_size) {
		unsigned long pg_space = PAGE_SIZE - pg_offset;

		to_copy = min(sdb->data_size, pg_space);

		/* Only copy up to the end of the buffer if the read pointer would wrap */
		if (sdb->buf_rdptr + to_copy > sdb->buf_size)
			to_copy = sdb->buf_size - sdb->buf_rdptr;

		memcpy(dst_pages[pg_idx] + pg_offset,
			      sdb->buf_base + sdb->buf_rdptr, to_copy);

		pg_offset += to_copy;
		if (pg_offset >= PAGE_SIZE) {
			pg_offset = 0;
			pg_idx++;
			pg_idx %= buf->nr_pages;
		}
		smb_update_read_ptr(drvdata, to_copy);
	}

	smb_reset_buffer(drvdata);
}

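/*
 * Drain the SMB into the perf ring buffer: stop the hardware, drop the
 * oldest data if more was captured than the handle can hold, and return
 * the number of bytes made available to perf.
 */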
static unsigned long smb_update_buffer(struct coresight_device *csdev,
				       struct perf_output_handle *handle,
				       void *sink_config)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct smb_data_buffer *sdb = &drvdata->sdb;
	struct cs_buffers *buf = sink_config;
	unsigned long data_size;
	bool lost = false;

	if (!buf)
		return 0;

	guard(spinlock)(&drvdata->spinlock);

	/* Don't do anything if another tracer is using this sink. */
	if (csdev->refcnt != 1)
		return 0;

	smb_disable_hw(drvdata);
	smb_update_data_size(drvdata);

	/*
	 * The SMB buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the offset so
	 * that we get the latest trace data.
	 */
	if (sdb->data_size > handle->size) {
		smb_update_read_ptr(drvdata, sdb->data_size - handle->size);
		lost = true;
	}

	data_size = sdb->data_size;
	smb_sync_perf_buffer(drvdata, buf, handle->head);
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	return data_size;
}

static const struct coresight_ops_sink smb_cs_ops = {
	.enable		= smb_enable,
	.disable	= smb_disable,
	.alloc_buffer	= smb_alloc_buffer,
	.free_buffer	= smb_free_buffer,
	.update_buffer	= smb_update_buffer,
};

static const struct coresight_ops cs_ops = {
	.sink_ops	= &smb_cs_ops,
};

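/*
 * Map the system memory region backing the SMB (the MEM resource indexed
 * by SMB_BUF_ADDR_RES) and record its physical base and size.
 */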
static int smb_init_data_buffer(struct platform_device *pdev,
				struct smb_data_buffer *sdb)
{
	struct resource *res;
	void *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, SMB_BUF_ADDR_RES);
	if (!res) {
		dev_err(&pdev->dev, "SMB device failed to get resource\n");
		return -EINVAL;
	}

	sdb->buf_rdptr = 0;
	sdb->buf_hw_base = FIELD_GET(SMB_BUF_ADDR_LO_MSK, res->start);
	sdb->buf_size = resource_size(res);
	if (sdb->buf_size == 0)
		return -EINVAL;

	/*
	 * The buffer is a chunk of system memory, so use a classic
	 * cacheable mapping for better performance.
	 */
	base = devm_memremap(&pdev->dev, sdb->buf_hw_base, sdb->buf_size,
				MEMREMAP_WB);
	if (IS_ERR(base))
		return PTR_ERR(base);

	sdb->buf_base = base;

	return 0;
}

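/* Disable the SMB and program the default buffer and interrupt configuration. */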
static void smb_init_hw(struct smb_drv_data *drvdata)
{
	smb_disable_hw(drvdata);

	writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
	writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
	writel(SMB_GLB_CFG_DEFAULT, drvdata->base + SMB_GLB_CFG_REG);
	writel(SMB_GLB_INT_CFG, drvdata->base + SMB_GLB_INT_REG);
	writel(SMB_LB_INT_CTRL_CFG, drvdata->base + SMB_LB_INT_CTRL_REG);
}

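/*
 * Register the SMB both as a coresight sink and as a misc device so that
 * the captured trace data can be read back from user space.
 */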
static int smb_register_sink(struct platform_device *pdev,
			     struct smb_drv_data *drvdata)
{
	struct coresight_platform_data *pdata = NULL;
	struct coresight_desc desc = { 0 };
	int ret;

	pdata = coresight_get_platform_data(&pdev->dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = &cs_ops;
	desc.pdata = pdata;
	desc.dev = &pdev->dev;
	desc.groups = smb_sink_groups;
	desc.name = coresight_alloc_device_name(&sink_devs, &pdev->dev);
	if (!desc.name) {
		dev_err(&pdev->dev, "Failed to alloc coresight device name");
		return -ENOMEM;
	}
	desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &smb_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		dev_err(&pdev->dev, "Failed to register misc, ret=%d\n", ret);
	}

	return ret;
}

static void smb_unregister_sink(struct smb_drv_data *drvdata)
{
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}

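/*
 * Ask the platform firmware to enable or disable the UltraSoc components
 * upstream of the SMB: the _DSM method identified by ULTRASOC_SMB_DSM_UUID
 * is evaluated with function index 1 (enable) or 0 (disable).
 */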
static int smb_config_inport(struct device *dev, bool enable)
{
	u64 func = enable ? 1 : 0;
	union acpi_object *obj;
	guid_t guid;
	u64 rev = 0;

	/*
	 * Use DSM calls to enable or disable the UltraSoc hardware on the
	 * tracing path, so that the UltraSoc packet format is never exposed.
	 */
	if (guid_parse(ULTRASOC_SMB_DSM_UUID, &guid)) {
		dev_err(dev, "Get GUID failed\n");
		return -EINVAL;
	}

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, rev, func, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed\n");
		return -ENODEV;
	}

	ACPI_FREE(obj);

	return 0;
}

static int smb_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct smb_drv_data *drvdata;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->base = devm_platform_ioremap_resource(pdev, SMB_REG_ADDR_RES);
	if (IS_ERR(drvdata->base)) {
		dev_err(dev, "Failed to ioremap resource\n");
		return PTR_ERR(drvdata->base);
	}

	smb_init_hw(drvdata);

	ret = smb_init_data_buffer(pdev, &drvdata->sdb);
	if (ret) {
		dev_err(dev, "Failed to init buffer, ret = %d\n", ret);
		return ret;
	}

	ret = smb_config_inport(dev, true);
	if (ret)
		return ret;

	smb_reset_buffer(drvdata);
	platform_set_drvdata(pdev, drvdata);
	spin_lock_init(&drvdata->spinlock);
	drvdata->pid = -1;

	ret = smb_register_sink(pdev, drvdata);
	if (ret) {
		smb_config_inport(&pdev->dev, false);
		dev_err(dev, "Failed to register SMB sink\n");
		return ret;
	}

	return 0;
}

static void smb_remove(struct platform_device *pdev)
{
	struct smb_drv_data *drvdata = platform_get_drvdata(pdev);

	smb_unregister_sink(drvdata);

	smb_config_inport(&pdev->dev, false);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id ultrasoc_smb_acpi_match[] = {
	{"HISI03A1", 0, 0, 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, ultrasoc_smb_acpi_match);
#endif

static struct platform_driver smb_driver = {
	.driver = {
		.name = "ultrasoc-smb",
		.acpi_match_table = ACPI_PTR(ultrasoc_smb_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = smb_probe,
	.remove_new = smb_remove,
};
module_platform_driver(smb_driver);

MODULE_DESCRIPTION("UltraSoc SMB CoreSight driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Jonathan Zhou <jonathan.zhouwen@huawei.com>");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");