// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Linaro Ltd
 */
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/soc/qcom/qcom_aoss.h>

#define CREATE_TRACE_POINTS
#include "trace-aoss.h"

#define QMP_DESC_MAGIC			0x0
#define QMP_DESC_VERSION		0x4
#define QMP_DESC_FEATURES		0x8

/* AOP-side offsets */
#define QMP_DESC_UCORE_LINK_STATE	0xc
#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
#define QMP_DESC_UCORE_CH_STATE		0x14
#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
#define QMP_DESC_UCORE_MBOX_OFFSET	0x20

/* Linux-side offsets */
#define QMP_DESC_MCORE_LINK_STATE	0x24
#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
#define QMP_DESC_MCORE_CH_STATE		0x2c
#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
#define QMP_DESC_MCORE_MBOX_SIZE	0x34
#define QMP_DESC_MCORE_MBOX_OFFSET	0x38

#define QMP_STATE_UP			GENMASK(15, 0)
#define QMP_STATE_DOWN			GENMASK(31, 16)

#define QMP_MAGIC			0x4d41494c /* mail */
#define QMP_VERSION			1

/* 64 bytes is enough for the requests and keeps the length 4-byte aligned */
#define QMP_MSG_LEN			64

#define QMP_NUM_COOLING_RESOURCES	2

#define QMP_DEBUGFS_FILES		4

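/* The cooling devices only support two states, so their maximum state is 1 */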
static bool qmp_cdev_max_state = 1;

struct qmp_cooling_device {
	struct thermal_cooling_device *cdev;
	struct qmp *qmp;
	char *name;
	bool state;
};

/**
 * struct qmp - driver state for QMP implementation
 * @msgram: iomem referencing the message RAM used for communication
 * @dev: reference to QMP device
 * @mbox_client: mailbox client used to ring the doorbell on transmit
 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 * @offset: offset within @msgram where messages should be written
 * @size: maximum size of the messages to be transmitted
 * @event: wait_queue for synchronization with the IRQ
 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 * @qdss_clk: QDSS clock hw struct
 * @cooling_devs: thermal cooling devices
 * @debugfs_root: directory for the developer/tester interface
 * @debugfs_files: array of individual debugfs entries under debugfs_root
 */
struct qmp {
	void __iomem *msgram;
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	size_t offset;
	size_t size;

	wait_queue_head_t event;

	struct mutex tx_lock;

	struct clk_hw qdss_clk;
	struct qmp_cooling_device *cooling_devs;
	struct dentry *debugfs_root;
	struct dentry *debugfs_files[QMP_DEBUGFS_FILES];
};

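/*
 * Ring the doorbell towards the AOSS. The mailbox channel is only used to
 * raise the IPC interrupt; completion is tracked through the message RAM
 * state, so tx is reported as done immediately.
 */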
static void qmp_kick(struct qmp *qmp)
{
	mbox_send_message(qmp->mbox_chan, NULL);
	mbox_client_txdone(qmp->mbox_chan, 0);
}

static bool qmp_magic_valid(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
}

static bool qmp_link_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_mcore_channel_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_ucore_channel_up(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
}

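/*
 * Open the communication channel towards the AOSS: validate the descriptor,
 * bring the local (mcore) link and channel up, ack the remote (ucore) state
 * and wait for the corresponding remote acks, kicking the doorbell after
 * each state change.
 */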
static int qmp_open(struct qmp *qmp)
{
	int ret;
	u32 val;

	if (!qmp_magic_valid(qmp)) {
		dev_err(qmp->dev, "QMP magic doesn't match\n");
		return -EINVAL;
	}

	val = readl(qmp->msgram + QMP_DESC_VERSION);
	if (val != QMP_VERSION) {
		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
		return -EINVAL;
	}

	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
	if (!qmp->size) {
		dev_err(qmp->dev, "invalid mailbox size\n");
		return -EINVAL;
	}

	/* Ack remote core's link state */
	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);

	/* Set local core's link state to up */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack link\n");
		goto timeout_close_link;
	}

	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't open channel\n");
		goto timeout_close_channel;
	}

	/* Ack remote core's channel state */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack channel\n");
		goto timeout_close_channel;
	}

	return 0;

timeout_close_channel:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

timeout_close_link:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);

	return -ETIMEDOUT;
}

static void qmp_close(struct qmp *qmp)
{
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);
}

static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}

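/*
 * The length word at the start of the mailbox area is expected to be cleared
 * by the remote once it has consumed a message, so a zero length means the
 * previous message has been picked up.
 */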
static bool qmp_message_empty(struct qmp *qmp)
{
	return readl(qmp->msgram + qmp->offset) == 0;
}

/**
 * qmp_send() - send a message to the AOSS
 * @qmp: qmp context
 * @fmt: format string for message to be sent
 * @...: arguments for the format string
 *
 * Transmit message to AOSS and wait for the AOSS to acknowledge the message.
 * The formatted message must not be longer than the mailbox size. Access is
 * synchronized by this implementation.
 *
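 * An illustrative call, reusing one of the message formats found elsewhere
 * in this driver; freq_mhz stands in for a caller-provided value:
 *
 *	ret = qmp_send(qmp, "{class: ddr, res: fixed, val: %u}", freq_mhz);
 *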
 * Return: 0 on success, negative errno on failure
 */
int __printf(2, 3) qmp_send(struct qmp *qmp, const char *fmt, ...)
{
	char buf[QMP_MSG_LEN];
	long time_left;
	va_list args;
	int len;
	int ret;

	if (WARN_ON(IS_ERR_OR_NULL(qmp) || !fmt))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (WARN_ON(len >= sizeof(buf)))
		return -EINVAL;

	mutex_lock(&qmp->tx_lock);

	trace_aoss_send(buf);

	/* The message RAM only implements 32-bit accesses */
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 buf, sizeof(buf) / sizeof(u32));
	writel(sizeof(buf), qmp->msgram + qmp->offset);

	/* Read back length to confirm data written in message RAM */
	readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,
						     qmp_message_empty(qmp), HZ);
	if (!time_left) {
		dev_err(qmp->dev, "ucore did not pick up the message\n");
		ret = -ETIMEDOUT;

		/* Clear message from buffer */
		writel(0, qmp->msgram + qmp->offset);
	} else {
		ret = 0;
	}

	trace_aoss_send_done(buf, ret);

	mutex_unlock(&qmp->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(qmp_send);

static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
	static const char *buf = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	return qmp_send(qmp, buf);
}

static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
{
	static const char *buf = "{class: clock, res: qdss, val: 0}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	qmp_send(qmp, buf);
}

static const struct clk_ops qmp_qdss_clk_ops = {
	.prepare = qmp_qdss_clk_prepare,
	.unprepare = qmp_qdss_clk_unprepare,
};

static int qmp_qdss_clk_add(struct qmp *qmp)
{
	static const struct clk_init_data qdss_init = {
		.ops = &qmp_qdss_clk_ops,
		.name = "qdss",
	};
	int ret;

	qmp->qdss_clk.init = &qdss_init;
	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "failed to register qdss clock\n");
		return ret;
	}

	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
				     &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "unable to register of clk hw provider\n");
		clk_hw_unregister(&qmp->qdss_clk);
	}

	return ret;
}

static void qmp_qdss_clk_remove(struct qmp *qmp)
{
	of_clk_del_provider(qmp->dev->of_node);
	clk_hw_unregister(&qmp->qdss_clk);
}

static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = qmp_cdev_max_state;
	return 0;
}

static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;

	*state = qmp_cdev->state;
	return 0;
}

static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
	bool cdev_state;
	int ret;

	/* Normalize state */
	cdev_state = !!state;

	if (qmp_cdev->state == state)
		return 0;

	ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
		       qmp_cdev->name, cdev_state ? "on" : "off");
	if (!ret)
		qmp_cdev->state = cdev_state;

	return ret;
}

static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
	.get_max_state = qmp_cdev_get_max_state,
	.get_cur_state = qmp_cdev_get_cur_state,
	.set_cur_state = qmp_cdev_set_cur_state,
};

static int qmp_cooling_device_add(struct qmp *qmp,
				  struct qmp_cooling_device *qmp_cdev,
				  struct device_node *node)
{
	char *cdev_name = (char *)node->name;

	qmp_cdev->qmp = qmp;
	qmp_cdev->state = !qmp_cdev_max_state;
	qmp_cdev->name = cdev_name;
	qmp_cdev->cdev = devm_thermal_of_cooling_device_register
				(qmp->dev, node,
				cdev_name,
				qmp_cdev, &qmp_cooling_device_ops);

	if (IS_ERR(qmp_cdev->cdev))
		dev_err(qmp->dev, "unable to register %s cooling device\n",
			cdev_name);

	return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
}

static int qmp_cooling_devices_register(struct qmp *qmp)
{
	struct device_node *np, *child;
	int count = 0;
	int ret;

	np = qmp->dev->of_node;

	qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
					 sizeof(*qmp->cooling_devs),
					 GFP_KERNEL);

	if (!qmp->cooling_devs)
		return -ENOMEM;

	for_each_available_child_of_node(np, child) {
		if (!of_property_present(child, "#cooling-cells"))
			continue;
		ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
					     child);
		if (ret) {
			of_node_put(child);
			goto unroll;
		}
	}

	if (!count)
		devm_kfree(qmp->dev, qmp->cooling_devs);

	return 0;

unroll:
	while (--count >= 0)
		thermal_cooling_device_unregister
			(qmp->cooling_devs[count].cdev);
	devm_kfree(qmp->dev, qmp->cooling_devs);

	return ret;
}

static void qmp_cooling_devices_remove(struct qmp *qmp)
{
	int i;

	for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
		thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}

/**
 * qmp_get() - get a qmp handle from a device
 * @dev: client device pointer
 *
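 * A typical client, whose device tree node carries a "qcom,qmp" phandle,
 * looks the handle up once during probe and releases it again with
 * qmp_put(); pdev below stands in for the client's platform device:
 *
 *	qmp = qmp_get(&pdev->dev);
 *	if (IS_ERR(qmp))
 *		return PTR_ERR(qmp);
 *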
 * Return: handle to qmp device on success, ERR_PTR() on failure
 */
struct qmp *qmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct qmp *qmp;

	if (!dev || !dev->of_node)
		return ERR_PTR(-EINVAL);

	np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-EINVAL);

	qmp = platform_get_drvdata(pdev);

	if (!qmp) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}
	return qmp;
}
EXPORT_SYMBOL_GPL(qmp_get);

/**
 * qmp_put() - release a qmp handle
 * @qmp: qmp handle obtained from qmp_get()
 */
void qmp_put(struct qmp *qmp)
{
	/*
	 * Match get_device() inside of_find_device_by_node() in
	 * qmp_get()
	 */
	if (!IS_ERR_OR_NULL(qmp))
		put_device(qmp->dev);
}
EXPORT_SYMBOL_GPL(qmp_put);

struct qmp_debugfs_entry {
	const char *name;
	const char *fmt;
	bool is_bool;
	const char *true_val;
	const char *false_val;
};

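/*
 * The index of each entry below matches the index of its dentry in
 * qmp->debugfs_files, which is how qmp_debugfs_write() maps a written file
 * back to its message template.
 */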
static const struct qmp_debugfs_entry qmp_debugfs_entries[QMP_DEBUGFS_FILES] = {
	{ "ddr_frequency_mhz", "{class: ddr, res: fixed, val: %u}", false },
	{ "prevent_aoss_sleep", "{class: aoss_slp, res: sleep: %s}", true, "enable", "disable" },
	{ "prevent_cx_collapse", "{class: cx_mol, res: cx, val: %s}", true, "mol", "off" },
	{ "prevent_ddr_collapse", "{class: ddr_mol, res: ddr, val: %s}", true, "mol", "off" },
};

static ssize_t qmp_debugfs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *pos)
{
	const struct qmp_debugfs_entry *entry = NULL;
	struct qmp *qmp = file->private_data;
	char buf[QMP_MSG_LEN];
	unsigned int uint_val;
	const char *str_val;
	bool bool_val;
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
		if (qmp->debugfs_files[i] == file->f_path.dentry) {
			entry = &qmp_debugfs_entries[i];
			break;
		}
	}
	if (WARN_ON(!entry))
		return -EFAULT;

	if (entry->is_bool) {
		ret = kstrtobool_from_user(user_buf, count, &bool_val);
		if (ret)
			return ret;

		str_val = bool_val ? entry->true_val : entry->false_val;

		ret = snprintf(buf, sizeof(buf), entry->fmt, str_val);
		if (ret >= sizeof(buf))
			return -EINVAL;
	} else {
		ret = kstrtou32_from_user(user_buf, count, 0, &uint_val);
		if (ret)
			return ret;

		ret = snprintf(buf, sizeof(buf), entry->fmt, uint_val);
		if (ret >= sizeof(buf))
			return -EINVAL;
	}

	ret = qmp_send(qmp, buf);
	if (ret < 0)
		return ret;

	return count;
}

static const struct file_operations qmp_debugfs_fops = {
	.open = simple_open,
	.write = qmp_debugfs_write,
};

static void qmp_debugfs_create(struct qmp *qmp)
{
	const struct qmp_debugfs_entry *entry;
	int i;

	qmp->debugfs_root = debugfs_create_dir("qcom_aoss", NULL);

	for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
		entry = &qmp_debugfs_entries[i];

		qmp->debugfs_files[i] = debugfs_create_file(entry->name, 0200,
							    qmp->debugfs_root,
							    qmp,
							    &qmp_debugfs_fops);
	}
}

static int qmp_probe(struct platform_device *pdev)
{
	struct qmp *qmp;
	int irq;
	int ret;

	qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
	if (!qmp)
		return -ENOMEM;

	qmp->dev = &pdev->dev;
	init_waitqueue_head(&qmp->event);
	mutex_init(&qmp->tx_lock);

	qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qmp->msgram))
		return PTR_ERR(qmp->msgram);

	qmp->mbox_client.dev = &pdev->dev;
	qmp->mbox_client.knows_txdone = true;
	qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
	if (IS_ERR(qmp->mbox_chan)) {
		dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
		return PTR_ERR(qmp->mbox_chan);
	}

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
			       "aoss-qmp", qmp);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto err_free_mbox;
	}

	ret = qmp_open(qmp);
	if (ret < 0)
		goto err_free_mbox;

	ret = qmp_qdss_clk_add(qmp);
	if (ret)
		goto err_close_qmp;

	ret = qmp_cooling_devices_register(qmp);
	if (ret)
		dev_err(&pdev->dev, "failed to register aoss cooling devices\n");

	platform_set_drvdata(pdev, qmp);

	qmp_debugfs_create(qmp);

	return 0;

err_close_qmp:
	qmp_close(qmp);
err_free_mbox:
	mbox_free_channel(qmp->mbox_chan);

	return ret;
}

static void qmp_remove(struct platform_device *pdev)
{
	struct qmp *qmp = platform_get_drvdata(pdev);

	debugfs_remove_recursive(qmp->debugfs_root);

	qmp_qdss_clk_remove(qmp);
	qmp_cooling_devices_remove(qmp);

	qmp_close(qmp);
	mbox_free_channel(qmp->mbox_chan);
}

static const struct of_device_id qmp_dt_match[] = {
	{ .compatible = "qcom,sc7180-aoss-qmp", },
	{ .compatible = "qcom,sc7280-aoss-qmp", },
	{ .compatible = "qcom,sdm845-aoss-qmp", },
	{ .compatible = "qcom,sm8150-aoss-qmp", },
	{ .compatible = "qcom,sm8250-aoss-qmp", },
	{ .compatible = "qcom,sm8350-aoss-qmp", },
	{ .compatible = "qcom,aoss-qmp", },
	{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);

static struct platform_driver qmp_driver = {
	.driver = {
		.name		= "qcom_aoss_qmp",
		.of_match_table	= qmp_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qmp_probe,
	.remove_new = qmp_remove,
};
module_platform_driver(qmp_driver);

MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
MODULE_LICENSE("GPL v2");