// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
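/* Mask of the DONE and NFB bits in the M2M interrupt register */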
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

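/*
 * A single hardware buffer is limited to 0xffff bytes (the MAXCNTx/BCRx
 * byte counters are 16 bits wide), so larger transfers are split into
 * DMA_MAX_CHAN_BYTES sized chunks by the prep functions below.
 */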
#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 * @slave_config: slave configuration
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
	struct dma_slave_config		slave_config;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with the
 * channel lock held and interrupts disabled, so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[] __counted_by(num_channels);
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * The EP93xx User's Guide states that we must perform a dummy read
	 * after a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}

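/*
 * Returns the channel control FSM state (one of M2P_STATE_*) from bits
 * [5:4] of the M2P channel status register.
 */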
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

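/*
 * Programs the next active descriptor into whichever of the two hardware
 * buffer register sets (MAXCNT0/BASE0 or MAXCNT1/BASE1) is free and toggles
 * @buffer so that the other set is used on the next call.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */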
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

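/*
 * Pushes the active chain to the hardware using the channel's double
 * buffering: the first buffer is programmed and the STALL interrupt enabled;
 * if more descriptors remain, the second buffer is programmed as well and
 * the NFB (next frame buffer) interrupt is enabled too.
 */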
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors
		 * back to the client, so we just report the error here and
		 * continue as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, deciding
	 * based on the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer,
		 * which leads to problems since we then don't get the DONE
		 * interrupt.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

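/*
 * Programs the next active descriptor into whichever of the two M2M buffer
 * register sets (SAR_BASE0/DAR_BASE0/BCR0 or their *1 counterparts) is free
 * and toggles @buffer for the next call.
 */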
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For an M2M channel this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes the EP93xx M2M DMA asserts the DONE interrupt
 * while the DMA channel is still running (channel Buffer FSM in DMA_BUF_ON
 * state, and channel Control FSM in DMA_MEM_RD state, observed at least in
 * IDE-DMA operation). In effect, disabling the channel when only the DONE
 * bit is set could stop a currently running DMA transfer. To avoid this, we
 * use the Buffer FSM and Control FSM to check the current state of the DMA
 * channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with the DMA channel state, determines the action to take in the
	 * interrupt handler.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the M2M DMA Buffer FSM and Control FSM to check the current
	 * state of the DMA channel. Using the DONE and NFB bits from the
	 * channel status register or bits from the channel interrupt register
	 * is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

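/*
 * Returns the first acked descriptor from the channel's free list,
 * reinitialized for reuse, or NULL if none is available.
 */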
static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

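/* Returns @desc and all descriptors chained to it back to the free list. */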
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

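/*
 * Channel tasklet: completes a finished descriptor chain, starts the next
 * queued transaction and invokes the client callback with the channel lock
 * released.
 */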
static void ep93xx_dma_tasklet(struct tasklet_struct *t)
{
	struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If ep93xx_dma_terminate_all() was called before we get to run, the
	 * active list has become empty. If that happens we aren't supposed
	 * to do anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for the non-cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}

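/*
 * Per-channel interrupt handler: delegates to the M2P/M2M specific
 * hw_interrupt() method and schedules the tasklet when a chain completes
 * (or after every period for cyclic transfers).
 */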
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor into
	 * the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_prepare_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable_unprepare(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable_unprepare(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for a cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	memcpy(&edmac->slave_config, config, sizeof(*config));

	return 0;
}

static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	int ret, i;

	edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];

			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
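/* License string chosen to match the SPDX identifier at the top of the file */
MODULE_LICENSE("GPL");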