// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct device chan_dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32  hdesc_size;
	bool epib;
	u32  psdata_size;
	u32  swdata_size;
	u32  atype_asel;
	struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;

	int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32  swdata_size;
	int  flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

static void k3_udma_chan_dev_release(struct device *dev)
{
	/* The struct containing the device is devm managed */
}

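/*
 * Per-channel "devices" created by this glue layer are registered under this
 * class. For PKTDMA with ASEL-based addressing the per-channel device (marked
 * DMA coherent with a 48-bit DMA mask below) is what
 * k3_udma_glue_{tx,rx}_get_dma_device() hands back to client drivers for
 * descriptor and buffer mapping.
 */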
static struct class k3_udma_glue_devclass = {
	.name		= "k3_udma_glue_chan",
	.dev_release	= k3_udma_chan_dev_release,
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
					    bool tx_chn)
{
	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		return PTR_ERR(common->ep_config);
	}

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
		bool tx_chn)
{
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);
	if (ret)
		goto out_put_spec;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel asel: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}

		common->atype_asel = dma_spec.args[1];
	}

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static int
of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
				bool tx_chn, u32 thread_id)
{
	int ret = 0;

	if (unlikely(!udmax_np))
		return -EINVAL;

	ret = of_k3_udma_glue_parse(udmax_np, common);
	if (ret)
		goto out_put_spec;

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);

out_put_spec:
	of_node_put(udmax_np);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

static int
k3_udma_glue_request_tx_chn_common(struct device *dev,
				   struct k3_udma_glue_tx_channel *tx_chn,
				   struct k3_udma_glue_tx_channel_cfg *cfg)
{
	int ret;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
	else
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		return ret;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
	else
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		return ret;
	}

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		return ret;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		return ret;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		return ret;
	}

	k3_udma_glue_dump_tx_chn(tx_chn);

	return 0;
}

struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
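
/*
 * Typical TX setup, as a minimal sketch only (the "foo" names, ring sizes and
 * element size are made up for illustration; real clients fill in the
 * k3_ring_cfg fields they need):
 *
 *	static struct k3_ring_cfg ring_cfg = {
 *		.size = 128,
 *		.elm_size = K3_RINGACC_RING_ELSIZE_8,
 *		.mode = K3_RINGACC_RING_MODE_RING,
 *	};
 *	struct k3_udma_glue_tx_channel_cfg cfg = {
 *		.swdata_size = sizeof(struct foo_swdata),
 *		.tx_cfg = ring_cfg,
 *		.txcq_cfg = ring_cfg,
 *	};
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 *
 * "tx0" must match an entry in the client node's "dma-names" property.
 */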

struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
					  struct k3_udma_glue_tx_channel_cfg *cfg,
					  struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
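
/*
 * TX submission/completion sketch (illustrative only; the foo_* descriptor
 * pool helpers are hypothetical):
 *
 *	desc = foo_desc_alloc(pool, &desc_dma);
 *	... fill the CPPI5 host descriptor and map the data buffer ...
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
 *
 * and in the TXCQ completion path:
 *
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		foo_desc_complete(pool, desc_dma);
 *
 * push decrements and pop increments the free_pkts budget, so pushing onto a
 * full completion queue fails with -ENOMEM instead of overflowing the TXCQ.
 */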

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	int ret;

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
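
/*
 * One possible shutdown sequence, as a sketch (foo/foo_tx_cleanup are the
 * caller's cleanup cookie and callback; the exact ordering of disable vs.
 * ring drain is up to the client driver):
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *	k3_udma_glue_reset_tx_chn(tx_chn, foo, foo_tx_cleanup);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 *	k3_udma_glue_release_tx_chn(tx_chn);
 */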

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	struct device *dev = tx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/*
	 * The TXQ reset needs special handling: the TXQ is an input ring for
	 * UDMA and its state is cached by UDMA, so:
	 * 1) save the TXQ occupancy
	 * 2) drain the TXQ and call the .cleanup() callback for each descriptor
	 * 3) reset the TXQ with the DMA-aware ring reset
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	/* reset TXCQ as it is not input for udma - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
	} else {
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
	}

	if (!tx_chn->virq)
		return -ENXIO;

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

struct device *
	k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
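
/*
 * With PKTDMA and a non-zero ASEL the ASEL value lives in the address bits
 * above K3_ADDRESS_ASEL_SHIFT, so DMA addresses stored inside CPPI5
 * descriptors (buffer and linked-descriptor pointers) must be converted in
 * both directions. Sketch, assuming the cppi5_hdesc_* helpers from
 * <linux/dma/ti-cppi5.h>:
 *
 *	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &buf_dma);
 *	cppi5_hdesc_attach_buf(desc, buf_dma, len, buf_dma, len);
 *
 * and on completion, before unmapping:
 *
 *	cppi5_hdesc_get_obuf(desc, &buf_dma, &buf_len);
 *	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn, &buf_dma);
 *	dma_unmap_single(dma_dev, buf_dma, buf_len, DMA_TO_DEVICE);
 */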

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with the current
	 * sysfw and udmax implementation, so just configure it to an invalid
	 * value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = 0;
	} else {
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	struct psil_endpoint_config *ep_cfg;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax))
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
	else
		rx_chn->udma_rchan_id = -1;

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;
		int flow_end;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			dev_err(dev, "Invalid flow range requested\n");
			ret = -EINVAL;
			goto err;
		}
		rx_chn->flow_id_base = flow_start;
	} else {
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
	}

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static int
k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
					  struct k3_udma_glue_rx_channel_cfg *cfg,
					  struct device *dev)
{
	int ret, i;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows)
		return -ENOMEM;

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
		     rx_chn->common.src_thread, rx_chn->flow_id_base);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		return ret;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under the control of a remote CPU core, so
	 * Linux can only request it and manipulate it via its dedicated RX
	 * flows.
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
						 struct k3_udma_glue_rx_channel_cfg *cfg,
						 struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under the control of a remote CPU core, so
	 * Linux can only request it and manipulate it via its dedicated RX
	 * flows.
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
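
/*
 * RX setup sketch with a single default flow (illustrative only; "rx0", the
 * ring sizes and struct foo_swdata are made up, and the ring configs are not
 * filled in completely):
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
 *
 *	flow_cfg.rx_cfg.size = 128;
 *	flow_cfg.rxfdq_cfg.size = 128;
 *	flow_cfg.rx_error_handling = true;
 *
 *	cfg.swdata_size = sizeof(struct foo_swdata);
 *	cfg.flow_id_base = -1;	(-1 lets the glue allocate GP rflows)
 *	cfg.flow_id_num = 1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *
 * Additional flows (flow_id_num > 1, no def_flow_cfg) are configured later
 * with k3_udma_glue_rx_flow_init().
 */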

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int ret;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
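
/*
 * RX shutdown sketch, mirroring the TX side (ordering and the foo_* names are
 * illustrative only):
 *
 *	k3_udma_glue_tdown_rx_chn(rx_chn, true);
 *	for (i = 0; i < flow_num; i++)
 *		k3_udma_glue_reset_rx_chn(rx_chn, i, foo, foo_rx_cleanup, false);
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 *	k3_udma_glue_release_rx_chn(rx_chn);
 */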

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
		u32 flow_num, void *data,
		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		goto do_reset;

	/*
	 * The RX FDQ reset needs special handling: the FDQ is an input ring
	 * for UDMA and its state is cached by UDMA, so:
	 * 1) save the RX FDQ occupancy
	 * 2) drain the RX FDQ and call the .cleanup() callback for each
	 *    descriptor
	 * 3) reset the RX FDQ with the DMA-aware ring reset
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
	k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
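
/*
 * RX processing sketch, e.g. from a NAPI poll handler (foo_* is hypothetical):
 *
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, flow_idx, &desc_dma)) {
 *		desc = foo_desc_from_dma(pool, desc_dma);
 *		... consume the packet ...
 *		foo_rx_refill(...);	pushes a fresh descriptor back with
 *					k3_udma_glue_push_rx_chn()
 *	}
 *
 * Completed descriptors are popped from the flow's RX ring; free descriptors
 * are pushed onto the same flow's free descriptor queue (FDQ).
 */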

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
	} else {
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
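
/*
 * The returned value is a Linux virq which the client requests itself, e.g.
 * (sketch; foo_rx_irq_handler is the client's handler):
 *
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
 *	if (irq <= 0)
 *		return irq ? irq : -ENXIO;
 *	ret = devm_request_irq(dev, irq, foo_rx_irq_handler,
 *			       IRQF_TRIGGER_HIGH, dev_name(dev), foo);
 */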

struct device *
	k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
{
	return class_register(&k3_udma_glue_devclass);
}

module_init(k3_udma_glue_class_init);
MODULE_LICENSE("GPL v2");