// SPDX-License-Identifier: GPL-2.0+
/*
 * Direct Memory Access U-Class driver
 *
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <mugunthanvnm@ti.com>
 *
 * Author: Mugunthan V N <mugunthanvnm@ti.com>
 */

#define LOG_CATEGORY UCLASS_DMA

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <asm/cache.h>
#include <dm/read.h>
#include <dma-uclass.h>
#include <linux/dma-mapping.h>
#include <dt-structs.h>
#include <errno.h>
#include <linux/printk.h>

#ifdef CONFIG_DMA_CHANNELS
static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
{
	return (struct dma_ops *)dev->driver->ops;
}

# if CONFIG_IS_ENABLED(OF_CONTROL)
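/*
 * Default "dmas" argument translation: accept at most one argument cell
 * and use it as the channel id, defaulting to 0 when no cell is given.
 */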
static int dma_of_xlate_default(struct dma *dma,
				struct ofnode_phandle_args *args)
{
	debug("%s(dma=%p)\n", __func__, dma);

	if (args->args_count > 1) {
		pr_err("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		dma->id = args->args[0];
	else
		dma->id = 0;

	return 0;
}

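/*
 * Look up the @index'th entry of @dev's "dmas" property, resolve the
 * phandle via "#dma-cells", translate the arguments into a channel id
 * through the provider's of_xlate op (or the default translation) and
 * finally request the channel. Returns 0 on success, negative on error.
 */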
int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
{
	int ret;
	struct ofnode_phandle_args args;
	struct udevice *dev_dma;
	const struct dma_ops *ops;

	debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);

	assert(dma);
	dma->dev = NULL;

	ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
					 &args);
	if (ret) {
		pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
	if (ret) {
		pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	dma->dev = dev_dma;

	ops = dma_dev_ops(dev_dma);

	if (ops->of_xlate)
		ret = ops->of_xlate(dma, &args);
	else
		ret = dma_of_xlate_default(dma, &args);
	if (ret) {
		pr_err("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return dma_request(dev_dma, dma);
}

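/*
 * Look up a channel by its name in "dma-names" and defer to
 * dma_get_by_index() with the matching index.
 */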
int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
{
	int index;

	debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
	dma->dev = NULL;

	index = dev_read_stringlist_search(dev, "dma-names", name);
	if (index < 0) {
		pr_err("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return dma_get_by_index(dev, index, dma);
}
# endif /* OF_CONTROL */

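/*
 * Bind the channel handle to @dev and call the driver's request op.
 * Drivers that provide no request op implicitly succeed.
 */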
int dma_request(struct udevice *dev, struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dev);

	debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);

	dma->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(dma);
}

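/* Release the channel; drivers without an rfree op implicitly succeed. */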
int dma_free(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->rfree)
		return 0;

	return ops->rfree(dma);
}

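/* Enable (start) the channel; -ENOSYS if the driver has no enable op. */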
int dma_enable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->enable)
		return -ENOSYS;

	return ops->enable(dma);
}

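/* Disable (stop) the channel; -ENOSYS if the driver has no disable op. */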
int dma_disable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->disable)
		return -ENOSYS;

	return ops->disable(dma);
}

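/*
 * Hand a receive buffer of @size bytes at @dst to the driver for a later
 * dma_receive(); returns -1 if the driver has no prepare_rcv_buf op.
 */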
int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->prepare_rcv_buf)
		return -1;

	return ops->prepare_rcv_buf(dma, dst, size);
}

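/*
 * Receive from the channel: the driver sets *@dst to the filled buffer and
 * may fill the caller-provided @metadata; drivers conventionally return the
 * number of bytes received. Returns -ENOSYS if there is no receive op.
 */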
int dma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->receive)
		return -ENOSYS;

	return ops->receive(dma, dst, metadata);
}

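/*
 * Submit @len bytes at @src for transmission, together with optional
 * driver-specific @metadata. Returns -ENOSYS if the driver has no send op.
 */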
int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->send)
		return -ENOSYS;

	return ops->send(dma, src, len, metadata);
}

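/*
 * Query the driver-specific configuration item identified by @cfg_id and
 * return it through @cfg_data; -ENOSYS if the driver has no get_cfg op.
 */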
int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->get_cfg)
		return -ENOSYS;

	return ops->get_cfg(dma, cfg_id, cfg_data);
}
#endif /* CONFIG_DMA_CHANNELS */

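/*
 * Find the first DMA device whose supported-transfer mask covers
 * @transfer_type; returns -EPROTONOSUPPORT if none is found.
 */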
int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	struct udevice *dev;

	for (uclass_first_device(UCLASS_DMA, &dev); dev;
	     uclass_next_device(&dev)) {
		struct dma_dev_priv *uc_priv;

		uc_priv = dev_get_uclass_priv(dev);
		if (uc_priv->supported & transfer_type)
			break;
	}

	if (!dev) {
		pr_debug("No DMA device found that supports %x type\n",
			 transfer_type);
		return -EPROTONOSUPPORT;
	}

	*devp = dev;

	return 0;
}

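/*
 * Copy @len bytes from @src to @dst using the first DMA device that
 * supports memory-to-memory transfers, performing the cache maintenance
 * needed to keep the CPU and DMA views of both buffers coherent.
 */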
int dma_memcpy(void *dst, void *src, size_t len)
{
	struct udevice *dev;
	const struct dma_ops *ops;
	dma_addr_t destination;
	dma_addr_t source;
	int ret;

	ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
	if (ret < 0)
		return ret;

	ops = device_get_ops(dev);
	if (!ops->transfer)
		return -ENOSYS;

	/* Clean the areas, so no writeback into the RAM races with DMA */
	destination = dma_map_single(dst, len, DMA_FROM_DEVICE);
	source = dma_map_single(src, len, DMA_TO_DEVICE);

	ret = ops->transfer(dev, DMA_MEM_TO_MEM, destination, source, len);

	/* Clean+Invalidate the areas after, so we can see DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
	dma_unmap_single(source, len, DMA_TO_DEVICE);

	return ret;
}

UCLASS_DRIVER(dma) = {
	.id		= UCLASS_DMA,
	.name		= "dma",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto	= sizeof(struct dma_dev_priv),
};
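/*
 * Minimal usage sketch for a client driver (illustrative only: the "tx"
 * channel name, buffer and length are placeholders and error handling is
 * trimmed):
 *
 *	struct dma dma_tx;
 *
 *	if (!dma_get_by_name(dev, "tx", &dma_tx)) {
 *		dma_enable(&dma_tx);
 *		dma_send(&dma_tx, buf, len, NULL);
 *		dma_disable(&dma_tx);
 *		dma_free(&dma_tx);
 *	}
 */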