// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
 *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
#include "k3-psil-priv.h"

#define K3_UDMA_MAX_RFLOWS 1024

struct udma_chan;

enum k3_dma_type {
	DMA_TYPE_UDMA = 0,
	DMA_TYPE_BCDMA,
	DMA_TYPE_PKTDMA,
};

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_BCHANRT,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_RCHAN,
	MMR_TCHAN,
	MMR_RFLOW,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
	[MMR_RCHAN] = "rchan",
	[MMR_TCHAN] = "tchan",
	[MMR_RFLOW] = "rflow",
};

struct udma_tchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */
};

#define udma_bchan udma_tchan

struct udma_rflow {
	void __iomem *reg_rflow;
	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;

	int id;
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32		BIT(0)
#define UDMA_FLAG_PDMA_BURST		BIT(1)
#define UDMA_FLAG_TDTYPE		BIT(2)

struct udma_match_data {
	enum k3_dma_type type;
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	struct udma_oes_offsets oes;

	u8 tpl_levels;
	u32 level_start_idx[];
};

enum udma_rm_range {
	RM_RANGE_BCHAN = 0,
	RM_RANGE_TCHAN,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_TFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32  tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32  tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_match_data *match_data;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan_config {
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	int remote_thread_id;
	u32 atype;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_direction dir;

	unsigned int pkt_mode:1; /* TR or packet mode */
	unsigned int needs_epib:1; /* whether EPIB is needed for the communication */
	unsigned int enable_acc32:1;
	unsigned int enable_burst:1;
	unsigned int notdpkt:1; /* Suppress sending TDC packet */
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	struct udma_chan_config config;

	u32 id;

	struct cppi5_host_desc_t *desc_tx;
	bool in_use;
	void	*desc_rx;
	u32	num_rx_bufs;
	u32	desc_rx_cur;
};

#define UDMA_CH_1000(ch)		((ch) * 0x1000)
#define UDMA_CH_100(ch)			((ch) * 0x100)
#define UDMA_CH_40(ch)			((ch) * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

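/*
 * Editorial note on the thread numbering used below: PSI-L source threads
 * are numbered from the DMA's psil_base, while destination threads carry an
 * extra offset bit (UDMA_PSIL_DST_THREAD_ID_OFFSET here; compare the
 * "| 0x8000" in udma_alloc_chan_resources() further down), so the
 * pair/unpair requests always receive a destination-qualified thread ID.
 */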
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

#include "k3-udma-u-boot.c"

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rflow->fd_ring;
		ring2 = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1,
					      k3_nav_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

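/*
 * The RT counter registers decrement by the amount written to them, so
 * writing back the value that was just read zeroes a counter; that is what
 * the read/write pairs below rely on.
 */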
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc->tchan,
						UDMA_TCHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc->tchan,
					   UDMA_TCHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s: TIMEOUT!\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped, TIMEOUT!\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s: TIMEOUT!\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped, TIMEOUT!\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	udma_reset_counters(uc);
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
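
/*
 * For reference, UDMA_RESERVE_RESOURCE(tchan) above expands (roughly) to:
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       int id)
 *	{
 *		if (id >= 0) {
 *			if (test_bit(id, ud->tchan_map)) {
 *				dev_err(ud->dev, "tchan%d is in use\n", id);
 *				return ERR_PTR(-ENOENT);
 *			}
 *		} else {
 *			id = find_first_zero_bit(ud->tchan_map, ud->tchan_cnt);
 *			if (id == ud->tchan_cnt)
 *				return ERR_PTR(-ENOENT);
 *		}
 *
 *		__set_bit(id, ud->tchan_map);
 *		return &ud->tchans[id];
 *	}
 */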

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA has support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			__clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		__set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		if (uc->tchan->tflow_id >= 0)
			__clear_bit(uc->tchan->tflow_id, ud->tflow_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_tchan *tchan;
	int ring_idx, ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	tchan = uc->tchan;
	if (tchan->tflow_id >= 0)
		ring_idx = tchan->tflow_id;
	else
		ring_idx = ud->bchan_cnt + tchan->id;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
						&uc->tchan->t_ring,
						&uc->tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
		k3_nav_ringacc_ring_free(uc->rflow->r_ring);
		uc->rflow->fd_ring = NULL;
		uc->rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);

	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt)
		fd_ring_id = ud->tflow_cnt + rflow->id;
	else
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
						&rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_nav_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
		return ret;
	}

	/*
	 * The TI SCI call above handles the firewall configuration; the cfg
	 * register configuration still has to be done locally in the
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_tchan_raw(uc);

	return 0;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (ud->match_data->type == DMA_TYPE_UDMA &&
	    uc->rflow->id != uc->rchan->id &&
	    uc->config.dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);
		return ret;
	}

	/*
	 * The TI SCI call above handles the firewall configuration; the cfg
	 * register configuration still has to be done locally in the
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_rchan_raw(uc);

	return 0;
}

static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		uc->config.pkt_mode = false;
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Cannot happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->config.dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed (%d)\n", ret);
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->config.remote_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Hard reset UDMA channel */
	udma_stop_hard(uc);
	udma_reset_counters(uc);

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->config.remote_thread_id = -1;
	uc->config.dir = DMA_MEM_TO_MEM;
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	u32 cap2, cap3, cap4;
	int i;

	ud->mmrs[MMR_GCFG] = dev_read_addr_name_ptr(dev, mmr_names[MMR_GCFG]);
	if (!ud->mmrs[MMR_GCFG])
		return -EINVAL;

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

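	/*
	 * Capability register layout, as decoded by the switch below (the
	 * field positions follow from the masks and shifts used there):
	 *   CAP2 (0x28): bits [8:0], [17:9] and [26:18] hold channel counts,
	 *		  with the field meaning depending on the DMA type;
	 *   CAP3 (0x2c): bits [13:0] hold the rflow count;
	 *   CAP4 (0x30): bits [13:0] hold the tflow count (PKTDMA only).
	 */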
	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->echan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_BCDMA:
		ud->bchan_cnt = cap2 & 0x1ff;
		ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_PKTDMA:
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tflow_cnt = cap4 & 0x3fff;
		break;
	default:
		return -EINVAL;
	}

	for (i = 1; i < MMR_LAST; i++) {
		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
			continue;
		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
			continue;
		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
			continue;

		ud->mmrs[i] = dev_read_addr_name_ptr(dev, mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

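/*
 * A note on the resource bitmaps set up below: a set bit means "not
 * available to this driver". The maps start out filled, the ranges granted
 * via TISCI are cleared, and the __udma_reserve_*() helpers then allocate
 * by finding a zero bit and setting it.
 */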
static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int i;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same IDs as RX channels are reserved to be used
	 * as default flows if the remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	return 0;
}

static int bcdma_setup_resources(struct udma_dev *ud)
{
	int i;
	struct udevice *dev = ud->dev;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* bchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->bchan_map, ud->bchan_cnt);
	} else {
		bitmap_fill(ud->bchan_map, ud->bchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->bchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

static int pktdma_setup_resources(struct udma_dev *ud)
{
	int i;
	struct udevice *dev = ud->dev;
	struct ti_sci_resource *rm_res;
	struct ti_sci_resource_desc *rm_desc;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
				     sizeof(unsigned long),
				     GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_map)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_map, ud->rflow_cnt);
	} else {
		bitmap_fill(ud->rflow_map, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

static int setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_dbg(dev,
			"Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			ch_count,
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt),
			ud->rflow_cnt - bitmap_weight(ud->rflow_map,
						      ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_dbg(dev,
			"Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			ch_count,
			ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						      ud->bchan_cnt),
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_dbg(dev,
			"Channels: %d (tchan: %u, rchan: %u)\n",
			ch_count,
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ud->match_data = (void *)dev_get_driver_data(dev);
	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ud->psil_base = ud->match_data->psil_base;

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
						   "ti,ringacc", &tmp);
		ud->ringacc = dev_get_priv(tmp);
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
	}
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->dev = dev;
	ret = setup_resources(ud);
	if (ret < 0)
		return ret;

	ud->ch_count = ret;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
		rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->config.remote_thread_id = -1;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		snprintf(uc->name, sizeof(uc->name), "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 dev->name,
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return 0;
}

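/*
 * udma_push_to_ring() below writes one 8-byte ring element (every ring in
 * this driver is configured with K3_NAV_RINGACC_RING_ELSIZE_8). The pointer
 * value is transferred into a u64 with memcpy() so the same code works for
 * 32-bit and 64-bit pointers without casting games.
 */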
static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}

static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				 dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 *		tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}
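	/*
	 * Worked example for the split above, with hypothetical values: for
	 * len = 200000 and src/dest 8-byte aligned, align_to is capped at 3,
	 * so tr0_cnt0 = SZ_64K - BIT(3) = 65528. tr0 then moves
	 * tr0_cnt1 = 200000 / 65528 = 3 rows of 65528 bytes (196584 bytes in
	 * total) and tr1 moves the remaining tr1_cnt0 = 3416 bytes.
	 */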

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return NULL;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}

#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	int ret = 0;

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}

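/*
 * Reserve a bchan, either the one requested via @id or, for id < 0, the
 * first free one in the bitmap. The bit stays set until bcdma_put_bchan()
 * releases it.
 */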
static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
{
	if (id >= 0) {
		if (test_bit(id, ud->bchan_map)) {
			dev_err(ud->dev, "bchan%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
		if (id == ud->bchan_cnt)
			return ERR_PTR(-ENOENT);
	}
	__set_bit(id, ud->bchan_map);
	return &ud->bchans[id];
}

static int bcdma_get_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		/* Re-use is not an error, so don't log it as one */
		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
			uc->id, uc->bchan->id);
		return 0;
	}

	uc->bchan = __bcdma_reserve_bchan(ud, -1);
	if (IS_ERR(uc->bchan))
		return PTR_ERR(uc->bchan);

	uc->tchan = uc->bchan;

	return 0;
}

static void bcdma_put_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
			uc->bchan->id);
		__clear_bit(uc->bchan->id, ud->bchan_map);
		uc->bchan = NULL;
		uc->tchan = NULL;
	}
}

static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
	if (!uc->bchan)
		return;

	k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
	k3_nav_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;

	bcdma_put_bchan(uc);
}

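/*
 * Request the bchan's t_ring/tc_ring pair and configure the t_ring in
 * RING mode with 16 eight-byte elements; each pushed descriptor address
 * occupies one element.
 */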
static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = bcdma_get_bchan(uc);
	if (ret)
		return ret;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
						&uc->bchan->t_ring,
						&uc->bchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
err_ring:
	bcdma_put_bchan(uc);

	return ret;
}

static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret = 0;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config

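/*
 * PKTDMA RX setup is two TISCI calls: configure the rchan itself, then
 * the default flow that routes received packets to the channel's rings.
 */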
static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	int ret = 0;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = uc->rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 0;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
			ret);

	return ret;
}

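/*
 * In U-Boot, BCDMA channels are only used for MEM_TO_MEM copies over a
 * bchan; slave (dev) transfers on BCDMA are not wired up here, hence
 * the single supported case below.
 */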
static int bcdma_alloc_chan_resources(struct udma_chan *uc)
{
	int ret;

	uc->config.pkt_mode = false;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non-synchronized - mem-to-mem transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = bcdma_alloc_bchan_resources(uc);
		if (ret)
			return ret;

		ret = bcdma_tisci_m2m_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	udma_reset_rings(uc);

	return 0;

err_res_free:
	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	return ret;
}

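/*
 * PSI-L threads are global: the source thread is the side producing the
 * data, and destination thread IDs carry K3_PSIL_DST_THREAD_ID_OFFSET.
 * For TX the local tchan is the source; for RX the remote peer is.
 */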
static int pktdma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	switch (uc->config.dir) {
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		ret = pktdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		ret = pktdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	udma_reset_rings(uc);

	if (uc->tchan)
		dev_dbg(ud->dev,
			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->tchan->id, uc->tchan->tflow_id,
			uc->config.remote_thread_id);
	else if (uc->rchan)
		dev_dbg(ud->dev,
			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->rchan->id, uc->rflow->id,
			uc->config.remote_thread_id);
	return 0;

err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	return ret;
}

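/*
 * dma_ops.transfer: one-shot blocking memcpy on channel 0, which is
 * reserved for this purpose. The channel is set up, the copy is pushed
 * and polled to completion, then the channel is torn down again.
 */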
static int udma_transfer(struct udevice *dev, int direction,
			 dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel 0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_alloc_chan_resources(uc);
		break;
	default:
		return -EINVAL;
	}
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, dst, src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		udma_free_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		bcdma_free_bchan_resources(uc);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

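/*
 * dma_ops.request: claim the channel resources and pre-allocate one TX
 * host descriptor, or UDMA_RX_DESC_NUM RX host descriptors, sized from
 * the PSI-L endpoint configuration resolved in udma_of_xlate().
 */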
static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ucc = &uc->config;
	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_alloc_chan_resources(uc);
		break;
	default:
		return -EINVAL;
	}
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	if (uc->config.dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
		if (!uc->desc_tx)
			return -ENOMEM;
		memset(uc->desc_tx, 0, ucc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		if (!uc->desc_rx)
			return -ENOMEM;
		memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->config.dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	udma_navss_psil_unpair(ud, uc->config.src_thread,
			       uc->config.dst_thread);

	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	return udma_start(uc);
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pkt_type and dest_tag are supplied by the caller via metadata */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

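/*
 * dma_ops.receive: pop one completed RX descriptor from the flow's
 * r_ring, invalidate the caches covering it and its buffer, and hand
 * the buffer back to the caller. Returns the packet length, 0 when no
 * packet is pending, or a negative error code.
 */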
static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];
	ucc = &uc->config;

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/*
	 * Invalidate the descriptor. Cast before adding hdesc_size: the
	 * range is hdesc_size bytes, not hdesc_size descriptor structs.
	 */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)desc_rx + ucc->hdesc_size);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate the received data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

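/*
 * dma_ops.of_xlate: pick the first unused channel and configure it from
 * the PSI-L endpoint database entry for the thread ID given in the DT
 * phandle argument; the destination-thread flag in the ID determines
 * the transfer direction.
 */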
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_chan_config *ucc;
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	struct psil_endpoint_config *ep_config;
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	ucc = &uc->config;
	ucc->remote_thread_id = args->args[0];
	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
		ucc->dir = DMA_MEM_TO_DEV;
	else
		ucc->dir = DMA_DEV_TO_MEM;

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			uc->config.remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		return -EINVAL;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
	    ep_config->mapped_channel_id >= 0) {
		ucc->mapped_channel_id = ep_config->mapped_channel_id;
		ucc->default_flow_id = ep_config->default_flow_id;
	} else {
		ucc->mapped_channel_id = -1;
		ucc->default_flow_id = -1;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;

	ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
						ucc->psd_size, 0);
	ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, ucc->needs_epib,
		 ucc->psd_size, ucc->metadata_size,
		 ucc->remote_thread_id);

	return 0;
}

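/*
 * dma_ops.prepare_rcv_buf: recycle one of the UDMA_RX_DESC_NUM
 * pre-allocated RX host descriptors, attach the caller's buffer to it
 * and push it to the free-descriptor ring so the hardware can fill it.
 */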
static int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rflow->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.rfree		= udma_rfree,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

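/*
 * Per-SoC match data: DMA flavour, PSI-L thread base, memcpy support
 * and the output-event/throughput-level layout that differs between
 * NAVSS (AM65x/J721E) and DMSS (AM64) instances.
 */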
static struct udma_match_data am654_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.oes = {
		.udma_rchan = 0x200,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 8, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.oes = {
		.udma_rchan = 0x200,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.udma_rchan = 0x400,
	},
	.tpl_levels = 3,
	.level_start_idx = {
		[0] = 16, /* Normal channels */
		[1] = 4, /* High Throughput channels */
		[2] = 0, /* Ultra High Throughput channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.udma_rchan = 0x400,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.bcdma_bchan_data = 0x2200,
		.bcdma_bchan_ring = 0x2400,
		.bcdma_tchan_data = 0x2800,
		.bcdma_tchan_ring = 0x2a00,
		.bcdma_rchan_data = 0x2e00,
		.bcdma_rchan_ring = 0x3000,
	},
	/* No throughput levels */
};

static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.pktdma_tchan_flow = 0x1200,
		.pktdma_rchan_flow = 0x1600,
	},
	/* No throughput levels */
};

static const struct udevice_id udma_ids[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = (ulong)&am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = (ulong)&am654_mcu_data,
	},
	{
		.compatible = "ti,j721e-navss-main-udmap",
		.data = (ulong)&j721e_main_data,
	},
	{
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = (ulong)&j721e_mcu_data,
	},
	{
		.compatible = "ti,am64-dmss-bcdma",
		.data = (ulong)&am64_bcdma_data,
	},
	{
		.compatible = "ti,am64-dmss-pktdma",
		.data = (ulong)&am64_pktdma_data,
	},
	{ /* Sentinel */ },
};

U_BOOT_DRIVER(ti_udma) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto	= sizeof(struct udma_dev),
};