/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as an OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * The default idle time is 2 seconds. This parameter can
 * be overridden at run time by writing to
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

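/*
 * Capability flags carried in the OF/ACPI match data and queried via
 * hidma_test_capability(). HIDMA_MSI_CAP marks hardware that signals
 * completions through MSI; HIDMA_IDENTITY_CAP marks newer hardware that
 * reports its channel id at a different TRCA offset (see hidma_probe()).
 */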
enum hidma_cap {
	HIDMA_MSI_CAP = 1,
	HIDMA_IDENTITY_CAP,
};

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	return 0;
}

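/*
 * Tasklet fallback for hidma_issue_pending(): if the asynchronous
 * pm_runtime_get() fails there, resume the device synchronously from
 * tasklet context and then start the channel.
 */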
static void hidma_issue_task(struct tasklet_struct *t)
{
	struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in the hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

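/*
 * Cookies in the half-open window (last_success, last_used] belong to
 * transactions that terminated with an error; any other cookie the core
 * already marked complete finished successfully. The two branches handle
 * dma_cookie_t wrapping around its maximum value.
 */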
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

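/*
 * Allocate nr_descriptors software descriptors for this channel, each
 * backed by a hardware TRE. A positive return value signals success to
 * the dmaengine core.
 */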
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;
	u64 byte_pattern, fill_pattern;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

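	/*
	 * Only the low byte of 'value' is meaningful for a memset
	 * transfer; replicate it across all eight bytes of the 64-bit
	 * fill pattern handed to the hardware.
	 */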
	byte_pattern = value & 0xff;
	fill_pattern =	(byte_pattern << 56) |
			(byte_pattern << 48) |
			(byte_pattern << 40) |
			(byte_pattern << 32) |
			(byte_pattern << 24) |
			(byte_pattern << 16) |
			(byte_pattern << 8) |
			byte_pattern;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     fill_pattern, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

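/*
 * Pause the hardware, fail all outstanding descriptors back to their
 * owners (callbacks run with a NULL result), then re-enable the channel.
 * hidma_terminate_all() additionally reinitializes the hardware.
 */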
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct hidma_dev *mdev = dev_get_drvdata(dev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ
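/*
 * Invoked by the MSI core with the message for each allocated vector.
 * Only vector 0 is programmed into the event channel registers; the
 * vectors are allocated consecutively, and hidma_chirq_handler_msi()
 * converts a virq's offset from msi_virqbase back into a cause bit.
 */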
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct device *dev = dmadev->ddev.dev;
	int i, virq;

	for (i = 0; i < HIDMA_MSI_INTS; i++) {
		virq = msi_get_virq(dev, i);
		if (virq)
			devm_free_irq(dev, virq, &dmadev->lldev);
	}

	platform_device_msi_free_irqs_all(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	int rc, i, virq;

	rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
						     hidma_write_msi_msg);
	if (rc)
		return rc;

	for (i = 0; i < HIDMA_MSI_INTS; i++) {
		virq = msi_get_virq(&pdev->dev, i);
		rc = devm_request_irq(&pdev->dev, virq,
				       hidma_chirq_handler_msi,
				       0, "qcom-hidma-msi",
				       &dmadev->lldev);
		if (rc)
			break;
		if (!i)
			dmadev->msi_virqbase = virq;
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for (--i; i >= 0; i--) {
			virq = msi_get_virq(&pdev->dev, i);
			devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
		}
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	} else {
		/* all vectors requested: switch the low-level driver to MSI mode */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	return rc;
#else
	return -EINVAL;
#endif
}

static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
	enum hidma_cap cap;

	cap = (uintptr_t) device_get_match_data(dev);
	return cap ? ((cap & test_cap) > 0) : 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
	if (IS_ERR(trca)) {
		rc = PTR_ERR(trca);
		goto bailout;
	}

	evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
	if (IS_ERR(evca)) {
		rc = PTR_ERR(evca);
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = chirq;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
	else
		dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
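	/*
	 * Prefer MSI when the hardware advertises the capability; if MSI
	 * setup fails, fall back to the wired channel interrupt below.
	 */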
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_setup(&dmadev->task, hidma_issue_task);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static void hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
	{.compatible = "qcom,hidma-1.2",
	 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove_new = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");