// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

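/*
 * Per-channel transfer-complete handler, expected to run from the eDMA
 * interrupt path (hence the plain spin_lock on vchan.lock). Completes
 * the current descriptor, or fires the cyclic callback, then starts the
 * next queued transfer if the channel went idle.
 */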
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all was called before this interrupt fired */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
		fsl_chan->idle = true;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}

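/*
 * eDMA v3/v4 (split register layout): program the channel system bus
 * read/write attributes and the channel mux, then set CHn_CSR[ERQ] so
 * the channel responds to hardware service requests.
 */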
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val, flags;

	flags = fsl_edma_drvflags(fsl_chan);
	val = edma_readl_chreg(fsl_chan, ch_sbr);
	/* Remote/local are swapped wrongly on the i.MX8 QM audio eDMA */
	if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
		if (!fsl_chan->is_rxchan)
			val |= EDMA_V3_CH_SBR_RD;
		else
			val |= EDMA_V3_CH_SBR_WR;
	} else {
		if (fsl_chan->is_rxchan)
			val |= EDMA_V3_CH_SBR_RD;
		else
			val |= EDMA_V3_CH_SBR_WR;
	}

	if (fsl_chan->is_remote)
		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);

	edma_writel_chreg(fsl_chan, val, ch_sbr);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
		/*
		 * ch_mux: with the exception of 0, attempts to write a value
		 * already in use are forced to 0.
		 */
		if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
			edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
	}

	val = edma_readl_chreg(fsl_chan, ch_csr);
	val |= EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

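/*
 * Enable the hardware service request for this channel: dispatch to the
 * v3 path on split-register variants, otherwise set the SEEI/SERQ
 * registers byte-wise, either through the endian-aware edma_writeb()
 * wrapper or with raw iowrite8().
 */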
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_enable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/*
		 * ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
	u32 flags;

	flags = fsl_edma_drvflags(fsl_chan);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
		edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);

	val &= ~EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_disable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/*
		 * ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

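/*
 * Route (or unroute, when @enable is false) the request line @slot to
 * this channel through the DMAMUX block that owns it. A no-op on
 * controllers without a DMAMUX.
 */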
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	if (!dmamux_nr)
		return;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}

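/*
 * Convert a dma_slave_buswidth value to the TCD_ATTR SSIZE/DSIZE
 * encoding (log2 of the width), replicated into both fields. For
 * example, a 4-byte bus width gives ffs(4) - 1 = 2 in both SSIZE and
 * DSIZE, i.e. 32-bit transfers.
 */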
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

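/*
 * dmaengine device_terminate_all callback: stop the hardware request,
 * drop the in-flight descriptor and free everything still queued on the
 * virtual channel.
 */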
int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
		pm_runtime_allow(fsl_chan->pd_dev);

	return 0;
}

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

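/*
 * Map the peripheral FIFO address for slave transfers. The mapping is
 * cached in the channel and reused until the direction changes or the
 * slave config is rewritten, at which point it is torn down by
 * fsl_edma_unprep_slave_dma().
 */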
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}

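/*
 * Compute the number of bytes not yet transferred. The total length is
 * the sum of nbytes * biter over all TCDs of the descriptor; when the
 * transfer is in progress, the hardware source/destination address is
 * used to locate the active TCD and how far the engine got inside it.
 */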
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr, old_addr;
	size_t len, size;
	u32 nbytes = 0;
	int i;

	/* Calculate the total size of this descriptor */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
		len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
	}

	if (!in_progress)
		return len;

	/* The 64-bit read is not atomic; retry when the high 32 bits changed */
	do {
		if (dir == DMA_MEM_TO_DEV) {
			old_addr = edma_read_tcdreg(fsl_chan, saddr);
			cur_addr = edma_read_tcdreg(fsl_chan, saddr);
		} else {
			old_addr = edma_read_tcdreg(fsl_chan, daddr);
			cur_addr = edma_read_tcdreg(fsl_chan, daddr);
		}
	} while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));

	/* Find the TCD that is still in flight and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);

		size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);

		if (dir == DMA_MEM_TO_DEV)
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
		else
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}

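/*
 * Load a software TCD into the channel's TCD registers. CSR is cleared
 * first and written last, presumably so the channel cannot start, or
 * follow a stale scatter-gather link, while the other fields are only
 * partially programmed.
 */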
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
{
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, the TCD registers must be loaded in
	 * big- or little-endian format according to the endian model of
	 * the eDMA engine; that swap is performed by the specific
	 * edma_write functions.
	 */
	edma_write_tcdreg(fsl_chan, 0, csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, soff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
	edma_cp_tcd_to_reg(fsl_chan, tcd, slast);

	edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
	edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
	edma_cp_tcd_to_reg(fsl_chan, tcd, doff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);

	csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);

	if (fsl_chan->is_sw) {
		csr |= EDMA_TCD_CSR_START;
		fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
	}

	/*
	 * On eDMAv3, the CHn_CSR[DONE] bit must be cleared before
	 * TCDn_CSR[ESG] is enabled; eDMAv4 has no such requirement.
	 * Changing MLINK requires clearing CHn_CSR[DONE] on both eDMAv3
	 * and eDMAv4.
	 */
	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
		(csr & EDMA_TCD_CSR_E_SG)) ||
	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
		(csr & EDMA_TCD_CSR_E_LINK)))
		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}

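/*
 * Fill one in-memory TCD: addresses, minor/major loop counts and the
 * CSR flags (major-loop interrupt, disable-request-on-completion,
 * scatter-gather enable) requested by the caller.
 */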
static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
		       struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
		       u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
		       u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	struct dma_slave_config *cfg = &fsl_chan->cfg;
	u16 csr = 0;
	u32 burst;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So the values are stored in little endian in memory, and
	 * fsl_edma_set_tcd_regs() does the swap when loading them.
	 */
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);

	if (fsl_chan->is_multi_fifo) {
		/* set mloff to support multiple FIFOs */
		burst = cfg->direction == DMA_DEV_TO_MEM ?
				cfg->src_maxburst : cfg->dst_maxburst;
		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
		/* enable DMLOE/SMLOE */
		if (cfg->direction == DMA_MEM_TO_DEV) {
			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
		} else {
			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
		}
	}

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	if (fsl_chan->is_rxchan)
		csr |= EDMA_TCD_CSR_ACTIVE;

	if (fsl_chan->is_sw)
		csr |= EDMA_TCD_CSR_START;

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
		int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
				fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

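/*
 * Prepare a cyclic transfer: one TCD per period, with each TCD's
 * dlast_sga scatter-gather link pointing at the next TCD and the last
 * one wrapping back to the first, so the ring runs until terminated.
 */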
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	bool major_int = true;
	int sg_len, i;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get the next TCD's physical address for the SG link */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = fsl_chan->is_multi_fifo ? 4 : 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = fsl_chan->is_multi_fifo ? 4 : 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = doff = 0;
			major_int = false;
		}

		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, major_int, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

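/*
 * Prepare a slave scatter-gather transfer: one TCD per sg entry,
 * chained through dlast_sga. Only the last TCD raises the major-loop
 * interrupt and disables the hardware request once the whole list
 * completes.
 */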
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = 0;
			doff = 0;
		}

		/*
		 * Choose a suitable burst length if sg_dma_len is not a
		 * multiple of the burst length, so that the whole transfer
		 * length is a multiple of the minor loop (burst length).
		 */
		if (sg_dma_len(sg) % nbytes) {
			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
			u32 burst = (direction == DMA_DEV_TO_MEM) ?
						fsl_chan->cfg.src_maxburst :
						fsl_chan->cfg.dst_maxburst;
			int j;

			for (j = burst; j > 1; j--) {
				if (!(sg_dma_len(sg) % (j * width))) {
					nbytes = j * width;
					break;
				}
			}
			/* Set the burst size to 1 if there is no suitable one */
			if (j == 1)
				nbytes = width;
		}
		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

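/*
 * Prepare a memory-to-memory copy. A single TCD with a 32-byte access
 * size suffices because it matches the copy_align and max_seg_size the
 * driver advertises; the transfer is software-started (is_sw).
 */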
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						     dma_addr_t dma_dst, dma_addr_t dma_src,
						     size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;

	/* Matches copy_align and max_seg_size, so one TCD is enough */
	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

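/*
 * Start the next queued descriptor, if any. The caller must hold
 * vchan.lock, as asserted below.
 */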
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

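/*
 * dmaengine device_alloc_chan_resources callback: create the
 * per-channel DMA pool that TCDs are allocated from, sized for the
 * 64-bit TCD layout when the controller uses it.
 */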
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
				sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
	fsl_chan->srcid = 0;
}

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				&dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets differ
 * from those of the 64-channel ColdFire mcf5441x eDMA.
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called from xxx_edma_probe() just after the edma
 * "version" and "membase" fields have been set appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}
}

MODULE_LICENSE("GPL v2");