1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *  Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
4 *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6#include <linux/platform_device.h>
7#include <linux/slab.h>
8#include <linux/err.h>
9#include <linux/init.h>
10#include <linux/list.h>
11#include <linux/io.h>
12#include <linux/of.h>
13#include <linux/of_dma.h>
14#include <linux/of_platform.h>
15
#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
/*
 * Crossbar variant identifiers. The of_device_id entries below point
 * into this table so that ti_dma_xbar_probe() can dispatch to the
 * matching variant-specific probe via *(u32 *)match->data.
 */
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};
22
/* Top-level match table: selects the crossbar variant (DRA7 or AM335x) */
static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};
34
35/* Crossbar on AM335x/AM437x family */
36#define TI_AM335X_XBAR_LINES	64
37
/* Per-device state for the AM335x/AM437x crossbar */
struct ti_am335x_xbar_data {
	void __iomem *iomem; /* mapped TPCC_EVT_MUX register space */

	struct dma_router dmarouter; /* registered with of_dma_router_register() */

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};
46
/*
 * One active routing: returned from ti_am335x_xbar_route_allocate() as
 * route_data and released in ti_am335x_xbar_free().
 */
struct ti_am335x_xbar_map {
	u16 dma_line; /* eDMA request line the event is muxed onto */
	u8 mux_val;   /* crossbar event number written to the mux register */
};
51
52static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
53{
54	/*
55	 * TPCC_EVT_MUX_60_63 register layout is different than the
56	 * rest, in the sense, that event 63 is mapped to lowest byte
57	 * and event 60 is mapped to highest, handle it separately.
58	 */
59	if (event >= 60 && event <= 63)
60		writeb_relaxed(val, iomem + (63 - event % 4));
61	else
62		writeb_relaxed(val, iomem + event);
63}
64
65static void ti_am335x_xbar_free(struct device *dev, void *route_data)
66{
67	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
68	struct ti_am335x_xbar_map *map = route_data;
69
70	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
71		map->mux_val, map->dma_line);
72
73	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
74	kfree(map);
75}
76
77static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
78					   struct of_dma *ofdma)
79{
80	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
81	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
82	struct ti_am335x_xbar_map *map;
83
84	if (dma_spec->args_count != 3)
85		return ERR_PTR(-EINVAL);
86
87	if (dma_spec->args[2] >= xbar->xbar_events) {
88		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
89			dma_spec->args[2]);
90		return ERR_PTR(-EINVAL);
91	}
92
93	if (dma_spec->args[0] >= xbar->dma_requests) {
94		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
95			dma_spec->args[0]);
96		return ERR_PTR(-EINVAL);
97	}
98
99	/* The of_node_put() will be done in the core for the node */
100	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
101	if (!dma_spec->np) {
102		dev_err(&pdev->dev, "Can't get DMA master\n");
103		return ERR_PTR(-EINVAL);
104	}
105
106	map = kzalloc(sizeof(*map), GFP_KERNEL);
107	if (!map) {
108		of_node_put(dma_spec->np);
109		return ERR_PTR(-ENOMEM);
110	}
111
112	map->dma_line = (u16)dma_spec->args[0];
113	map->mux_val = (u8)dma_spec->args[2];
114
115	dma_spec->args[2] = 0;
116	dma_spec->args_count = 2;
117
118	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
119		map->mux_val, map->dma_line);
120
121	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
122
123	return map;
124}
125
/* DMA masters the AM335x crossbar can sit in front of */
static const struct of_device_id ti_am335x_master_match[] __maybe_unused = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};
130
131static int ti_am335x_xbar_probe(struct platform_device *pdev)
132{
133	struct device_node *node = pdev->dev.of_node;
134	const struct of_device_id *match;
135	struct device_node *dma_node;
136	struct ti_am335x_xbar_data *xbar;
137	void __iomem *iomem;
138	int i, ret;
139
140	if (!node)
141		return -ENODEV;
142
143	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
144	if (!xbar)
145		return -ENOMEM;
146
147	dma_node = of_parse_phandle(node, "dma-masters", 0);
148	if (!dma_node) {
149		dev_err(&pdev->dev, "Can't get DMA master node\n");
150		return -ENODEV;
151	}
152
153	match = of_match_node(ti_am335x_master_match, dma_node);
154	if (!match) {
155		dev_err(&pdev->dev, "DMA master is not supported\n");
156		of_node_put(dma_node);
157		return -EINVAL;
158	}
159
160	if (of_property_read_u32(dma_node, "dma-requests",
161				 &xbar->dma_requests)) {
162		dev_info(&pdev->dev,
163			 "Missing XBAR output information, using %u.\n",
164			 TI_AM335X_XBAR_LINES);
165		xbar->dma_requests = TI_AM335X_XBAR_LINES;
166	}
167	of_node_put(dma_node);
168
169	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
170		dev_info(&pdev->dev,
171			 "Missing XBAR input information, using %u.\n",
172			 TI_AM335X_XBAR_LINES);
173		xbar->xbar_events = TI_AM335X_XBAR_LINES;
174	}
175
176	iomem = devm_platform_ioremap_resource(pdev, 0);
177	if (IS_ERR(iomem))
178		return PTR_ERR(iomem);
179
180	xbar->iomem = iomem;
181
182	xbar->dmarouter.dev = &pdev->dev;
183	xbar->dmarouter.route_free = ti_am335x_xbar_free;
184
185	platform_set_drvdata(pdev, xbar);
186
187	/* Reset the crossbar */
188	for (i = 0; i < xbar->dma_requests; i++)
189		ti_am335x_xbar_write(xbar->iomem, i, 0);
190
191	ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
192				     &xbar->dmarouter);
193
194	return ret;
195}
196
197/* Crossbar on DRA7xx family */
198#define TI_DRA7_XBAR_OUTPUTS	127
199#define TI_DRA7_XBAR_INPUTS	256
200
/* Per-device state for the DRA7xx crossbar */
struct ti_dra7_xbar_data {
	void __iomem *iomem; /* mapped crossbar mux register space */

	struct dma_router dmarouter; /* registered with of_dma_router_register() */
	struct mutex mutex;          /* protects dma_inuse */
	unsigned long *dma_inuse;    /* bitmap of allocated/reserved outputs */

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset; /* added to the output number in the rewritten dma_spec */
};
213
/*
 * One active routing: returned from ti_dra7_xbar_route_allocate() as
 * route_data and released in ti_dra7_xbar_free().
 */
struct ti_dra7_xbar_map {
	u16 xbar_in;  /* crossbar input (requested DMA event) */
	int xbar_out; /* crossbar output picked from the dma_inuse bitmap */
};
218
219static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
220{
221	writew_relaxed(val, iomem + (xbar * 2));
222}
223
224static void ti_dra7_xbar_free(struct device *dev, void *route_data)
225{
226	struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
227	struct ti_dra7_xbar_map *map = route_data;
228
229	dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
230		map->xbar_in, map->xbar_out);
231
232	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
233	mutex_lock(&xbar->mutex);
234	clear_bit(map->xbar_out, xbar->dma_inuse);
235	mutex_unlock(&xbar->mutex);
236	kfree(map);
237}
238
/*
 * Router allocate callback: pick a free crossbar output from the
 * dma_inuse bitmap, program it to forward the requested input
 * (dma_spec->args[0]), and rewrite @dma_spec so the DMA master sees
 * the selected output line (plus dma_offset).
 *
 * Returns the map (freed in ti_dra7_xbar_free()) or an ERR_PTR.
 */
static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_dra7_xbar_map *map;

	if (dma_spec->args[0] >= xbar->xbar_requests) {
		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
			dma_spec->args[0]);
		/* Drop the reference taken by of_find_device_by_node() */
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		put_device(&pdev->dev);
		return ERR_PTR(-ENOMEM);
	}

	/* Claim the first free output under the bitmap lock */
	mutex_lock(&xbar->mutex);
	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
					    xbar->dma_requests);
	if (map->xbar_out == xbar->dma_requests) {
		mutex_unlock(&xbar->mutex);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		kfree(map);
		of_node_put(dma_spec->np);
		put_device(&pdev->dev);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);

	map->xbar_in = (u16)dma_spec->args[0];

	/* Hand the master the routed line (dma_offset adjusts numbering) */
	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;

	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

	return map;
}
293
#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1
/*
 * Per-master offset added to the selected crossbar output when the
 * dma_spec is rewritten (see ti_dra7_xbar_route_allocate()); eDMA
 * lines are used as-is, sDMA lines are shifted by one.
 */
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};
300
/*
 * DMA masters the DRA7 crossbar can sit in front of; .data selects the
 * request-line numbering offset from ti_dma_offset[].
 */
static const struct of_device_id ti_dra7_master_match[] __maybe_unused = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};
316
/* Mark bits [offset, offset + len) of bitmap @p as in use. */
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	int i;

	for (i = len - 1; i >= 0; i--)
		set_bit(offset + i, p);
}
322
/*
 * Probe the DRA7xx crossbar: validate the DMA master, size the output
 * bitmap, honour reserved ranges and the safe mux value from DT, map
 * the registers, park every free output and register the DMA router.
 */
static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	u32 safe_val;
	int sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	/* Outputs: how many request lines the master exposes */
	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	/* One bit per output; tracks allocated and reserved lines */
	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	/* Inputs: how many requests are wired into the crossbar */
	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	/* Optional mux value for parked lines; defaults to 0 (kzalloc) */
	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;


	/* Pre-reserve output ranges the crossbar must never hand out */
	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		/* Property is a flat list of <start length> pairs */
		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			kfree(rsv_events);
			return ret;
		}

		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}

	iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	/* match->data points into ti_dma_offset[] for this master type */
	xbar->dma_offset = *(u32 *)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}
440
441static int ti_dma_xbar_probe(struct platform_device *pdev)
442{
443	const struct of_device_id *match;
444	int ret;
445
446	match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
447	if (unlikely(!match))
448		return -EINVAL;
449
450	switch (*(u32 *)match->data) {
451	case TI_XBAR_DRA7:
452		ret = ti_dra7_xbar_probe(pdev);
453		break;
454	case TI_XBAR_AM335X:
455		ret = ti_am335x_xbar_probe(pdev);
456		break;
457	default:
458		dev_err(&pdev->dev, "Unsupported crossbar\n");
459		ret = -ENODEV;
460		break;
461	}
462
463	return ret;
464}
465
/* Single platform driver covering both crossbar variants */
static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = ti_dma_xbar_match,
	},
	.probe	= ti_dma_xbar_probe,
};
473
/*
 * Registered at arch_initcall level rather than module_init —
 * presumably so the router exists before the DMA masters and their
 * clients probe; confirm against platform init ordering.
 */
static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);
479