/*
 *
 * BRIEF MODULE DESCRIPTION
 *      The Descriptor Based DMA channel manager that first appeared
 *	on the Au1550.  I started with dma.c, but I think all that is
 *	left is this initial comment :-)
 *
 * Copyright 2004 Embedded Edge, LLC
 *	dan@embeddededge.com
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>	/* for udelay() */
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined. We keep an internal structure
 * of devices using these channels, along with additional
 * information.
 *
 * We allocate the descriptors and allow access to them through various
 * functions.  The drivers allocate the data buffers and assign them
 * to the descriptors.
 */
static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);

/* I couldn't find a macro that did this... */
#define ALIGN_ADDR(x, a)	((((u32)(x)) + (a-1)) & ~(a-1))
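
/*
 * Typical use by a driver (illustrative sketch only, not lifted from any
 * particular in-tree user): allocate a channel for a source/destination
 * device pair, size the descriptor ring, queue a buffer and kick it off.
 * "my_destid", "my_callback", "my_arg", "my_dma_addr" and "count" below
 * are driver-supplied placeholders.
 *
 *	u32 chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS, my_destid,
 *					   my_callback, my_arg);
 *	au1xxx_dbdma_set_devwidth(chan, 16);
 *	au1xxx_dbdma_ring_alloc(chan, 8);
 *	au1xxx_dbdma_put_source(chan, my_dma_addr, count, DDMA_FLAGS_IE);
 *	au1xxx_dbdma_start(chan);
 */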

static dbdma_global_t *dbdma_gptr =
			(dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
static int dbdma_initialized;

static dbdev_tab_t *dbdev_tab;

static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
	/* UARTS */
	{ AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
	{ AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
	{ AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8, 0x11400000, 0, 0 },

	/* EXT DMA */
	{ AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },

	/* USB DEV */
	{ AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN,  4, 8, 0x10200000, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN,  4, 8, 0x10200010, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN,  4, 8, 0x10200014, 0, 0 },

	/* PSCs */
	{ AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,  0, 0, 0x11a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,  0, 0, 0x11b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,  0, 0, 0x10a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,  0, 0, 0x10b0001c, 0, 0 },

	{ AU1550_DSCR_CMD0_PCI_WRITE,  0, 0, 0, 0x00000000, 0, 0 },  /* PCI */
	{ AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */

	/* MAC 0 */
	{ AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	/* MAC 1 */
	{ AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};

static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
	{ AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
	{ AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
	{ AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8, 0x11200000, 0, 0 },

	{ AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

	{ AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8, 0x10600004, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  4, 8, 0x10680004, 0, 0 },

	{ AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN,  4, 32, 0x10300008, 0, 0 },
	{ AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },

	{ AU1200_DSCR_CMD0_PSC0_TX,   DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC0_RX,   DEV_FLAGS_IN,  0, 16, 0x11a0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_TX,   DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_RX,   DEV_FLAGS_IN,  0, 16, 0x11b0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ AU1200_DSCR_CMD0_CIM_RXA,  DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_RXB,  DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_RXC,  DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};

static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
	{ AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8,  0x10100004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8,  0x10100000, 0, 0 },
	{ AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8,  0x10101004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8,  0x10101000, 0, 0 },
	{ AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8,  0x10102004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN,  0, 8,  0x10102000, 0, 0 },
	{ AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8,  0x10103004, 0, 0 },
	{ AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8,  0x10103000, 0, 0 },

	{ AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8,  0x10600000, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8,  0x10600004, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8,  0x10601000, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  8, 8,  0x10601004, 0, 0 },

	{ AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN,    4, 32, 0x10300008, 0, 0 },
	{ AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT,   4, 32, 0x10300004, 0, 0 },

	{ AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0001c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,   0, 16, 0x10a0001c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0101c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,   0, 16, 0x10a0101c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0201c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,   0, 16, 0x10a0201c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0301c, 0, 0 },
	{ AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,   0, 16, 0x10a0301c, 0, 0 },

	{ AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE,   0, 0,  0x00000000, 0, 0 },
	{ AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

	{ AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8,  0x10602000, 0, 0 },
	{ AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN,  4, 8,  0x10602004, 0, 0 },

	{ AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE,  0, 32, 0x14001810, 0, 0 },

	{ AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};

/* 32 predefined plus 32 custom */
#define DBDEV_TAB_SIZE		64

static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];

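/* Look up a device table entry by device id; returns NULL if not found. */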
static dbdev_tab_t *find_dbdev_id(u32 id)
{
	int i;
	dbdev_tab_t *p;
	for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
		p = &dbdev_tab[i];
		if (p->dev_id == id)
			return p;
	}
	return NULL;
}

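/*
 * Return the kernel virtual address of the descriptor that follows @dp
 * in the ring (the hardware next-pointer translated back from its
 * physical address).
 */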
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
}
EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);

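/*
 * Register a driver-supplied device in a free slot of the device table
 * (a slot whose dev_id is ~0).  Returns the new custom device id, or 0
 * if the table is full.
 */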
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
{
	u32 ret = 0;
	dbdev_tab_t *p;
	static u16 new_id = 0x1000;

	p = find_dbdev_id(~0);
	if (NULL != p) {
		memcpy(p, dev, sizeof(dbdev_tab_t));
		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
		ret = p->dev_id;
		new_id++;
#if 0
		printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
				  p->dev_id, p->dev_flags, p->dev_physaddr);
#endif
	}

	return ret;
}
EXPORT_SYMBOL(au1xxx_ddma_add_device);

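/* Remove a previously added device from the table and mark its slot free. */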
void au1xxx_ddma_del_device(u32 devid)
{
	dbdev_tab_t *p = find_dbdev_id(devid);

	if (p != NULL) {
		memset(p, 0, sizeof(dbdev_tab_t));
		p->dev_id = ~0;
	}
}
EXPORT_SYMBOL(au1xxx_ddma_del_device);

/* Allocate a channel and return a non-zero channel handle on success. */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
       void (*callback)(int, void *), void *callparam)
{
	unsigned long	flags;
	u32		used, chan;
	u32		dcp;
	int		i;
	dbdev_tab_t	*stp, *dtp;
	chan_tab_t	*ctp;
	au1x_dma_chan_t *cp;

	/*
	 * Channel allocation has to wait until dbdma_setup() has run,
	 * because the interrupt handler can't be registered successfully
	 * during early board setup.
	 */
	if (!dbdma_initialized)
		return 0;

	stp = find_dbdev_id(srcid);
	if (stp == NULL)
		return 0;
	dtp = find_dbdev_id(destid);
	if (dtp == NULL)
		return 0;

	used = 0;

	/* Check to see if we can get both channels. */
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
	     (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
		/* Got source */
		stp->dev_flags |= DEV_FLAGS_INUSE;
		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
		     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
			/* Got destination */
			dtp->dev_flags |= DEV_FLAGS_INUSE;
		} else {
			/* Can't get dest.  Release src. */
			stp->dev_flags &= ~DEV_FLAGS_INUSE;
			used++;
		}
	} else
		used++;
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (used)
		return 0;

	/* Let's see if we can allocate a channel for it. */
	ctp = NULL;
	chan = 0;
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	for (i = 0; i < NUM_DBDMA_CHANS; i++)
		if (chan_tab_ptr[i] == NULL) {
			/*
			 * If kmalloc() fails, it is handled below the
			 * same as a channel not being available.
			 */
			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
			chan_tab_ptr[i] = ctp;
			break;
		}
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (ctp != NULL) {
		memset(ctp, 0, sizeof(chan_tab_t));
		ctp->chan_index = chan = i;
		dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
		dcp += (0x0100 * chan);
		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
		cp = (au1x_dma_chan_t *)dcp;
		ctp->chan_src = stp;
		ctp->chan_dest = dtp;
		ctp->chan_callback = callback;
		ctp->chan_callparam = callparam;

		/* Initialize channel configuration. */
		i = 0;
		if (stp->dev_intlevel)
			i |= DDMA_CFG_SED;
		if (stp->dev_intpolarity)
			i |= DDMA_CFG_SP;
		if (dtp->dev_intlevel)
			i |= DDMA_CFG_DED;
		if (dtp->dev_intpolarity)
			i |= DDMA_CFG_DP;
		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
			(dtp->dev_flags & DEV_FLAGS_SYNC))
				i |= DDMA_CFG_SYNC;
		cp->ddma_cfg = i;
		wmb(); /* drain writebuffer */

		/*
		 * Return a non-zero value that can be used to find the channel
		 * information in subsequent operations.
		 */
		return (u32)(&chan_tab_ptr[chan]);
	}

	/* Release devices */
	stp->dev_flags &= ~DEV_FLAGS_INUSE;
	dtp->dev_flags &= ~DEV_FLAGS_INUSE;

	return 0;
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);

/*
 * Set the device width if source or destination is a FIFO.
 * Should be 8, 16, or 32 bits.
 */
u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
{
	u32		rv;
	chan_tab_t	*ctp;
	dbdev_tab_t	*stp, *dtp;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	rv = 0;

	if (stp->dev_flags & DEV_FLAGS_IN) {	/* Source in fifo */
		rv = stp->dev_devwidth;
		stp->dev_devwidth = bits;
	}
	if (dtp->dev_flags & DEV_FLAGS_OUT) {	/* Destination out fifo */
		rv = dtp->dev_devwidth;
		dtp->dev_devwidth = bits;
	}

	return rv;
}
EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);

/* Allocate a descriptor ring, initializing as much as possible. */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
	int			i;
	u32			desc_base, srcid, destid;
	u32			cmd0, cmd1, src1, dest1;
	u32			src0, dest0;
	chan_tab_t		*ctp;
	dbdev_tab_t		*stp, *dtp;
	au1x_ddma_desc_t	*dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	/*
	 * The descriptors must be 32-byte aligned.  The allocation may
	 * already give us such an address; by trying the exact size
	 * first we usually avoid wasting a larger slab of memory.
	 */
	desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
				       GFP_KERNEL|GFP_DMA);
	if (desc_base == 0)
		return 0;

	if (desc_base & 0x1f) {
		/*
		 * Not aligned: free it, allocate again with room to
		 * spare, and round the base address up.
		 */
		kfree((const void *)desc_base);
		i = entries * sizeof(au1x_ddma_desc_t);
		i += (sizeof(au1x_ddma_desc_t) - 1);
		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
		if (desc_base == 0)
			return 0;

		ctp->cdb_membase = desc_base;
		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
	} else
		ctp->cdb_membase = desc_base;

	dp = (au1x_ddma_desc_t *)desc_base;

	/* Keep track of the base descriptor. */
	ctp->chan_desc_base = dp;

	/* Initialize the rings with as much information as we know. */
	srcid = stp->dev_id;
	destid = dtp->dev_id;

	cmd0 = cmd1 = src1 = dest1 = 0;
	src0 = dest0 = 0;

	cmd0 |= DSCR_CMD0_SID(srcid);
	cmd0 |= DSCR_CMD0_DID(destid);
	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);

	/* Is it mem to mem transfer? */
	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
		cmd0 |= DSCR_CMD0_MEM;

	switch (stp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
		break;
	}

	switch (dtp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
		break;
	}

	/*
	 * If the device is marked as an in/out FIFO, ensure it is
	 * set non-coherent.
	 */
	if (stp->dev_flags & DEV_FLAGS_IN)
		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
	if (dtp->dev_flags & DEV_FLAGS_OUT)
		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */

	/*
	 * Set up source1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (stp->dev_tsize) {
	case 1:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
		break;
	case 2:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
		break;
	case 4:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
		break;
	}

	/* If source input is FIFO, set static address. */
	if (stp->dev_flags & DEV_FLAGS_IN) {
		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
		else
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
	}

	if (stp->dev_physaddr)
		src0 = stp->dev_physaddr;

	/*
	 * Set up dest1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (dtp->dev_tsize) {
	case 1:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
		break;
	case 2:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
		break;
	case 4:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
		break;
	}

	/* If destination output is FIFO, set static address. */
	if (dtp->dev_flags & DEV_FLAGS_OUT) {
		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
		else
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
	}

	if (dtp->dev_physaddr)
		dest0 = dtp->dev_physaddr;

#if 0
		printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
				  "source1:%x dest0:%x dest1:%x\n",
				  dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
				  src1, dest0, dest1);
#endif
	for (i = 0; i < entries; i++) {
		dp->dscr_cmd0 = cmd0;
		dp->dscr_cmd1 = cmd1;
		dp->dscr_source0 = src0;
		dp->dscr_source1 = src1;
		dp->dscr_dest0 = dest0;
		dp->dscr_dest1 = dest1;
		dp->dscr_stat = 0;
		dp->sw_context = 0;
		dp->sw_status = 0;
		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
		dp++;
	}

	/* Make last descriptor point to the first. */
	dp--;
	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	return (u32)ctp->chan_desc_base;
}
EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);

/*
 * Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count.  Normally used
 * for memory to fifo transfers.
 */
u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *(chan_tab_t **)chanid;

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the
	 * descriptor, so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */
	dp->dscr_source0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 */
	if (!dma_default_coherent)
		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_source);

/* Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;

	/* I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/* We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/* If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count */

	/* Check flags  */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	dp->dscr_dest0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
#if 0
	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
			  dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 */
	if (!dma_default_coherent)
		dma_cache_inv(KSEG0ADDR(buf), nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_dest);

/*
 * Get a destination buffer into the DMA ring.
 * Normally used to get a full buffer from the ring during fifo
 * to memory transfers.  This does not set the valid bit, you will
 * have to put another destination buffer to keep the DMA going.
 */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;
	u32			rv;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the
	 * descriptor, so no locking should be needed.
	 */
	dp = ctp->get_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Return buffer address and byte count. */
	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
	*nbytes = dp->dscr_cmd1;
	rv = dp->dscr_stat;

	/* Get next descriptor pointer. */
	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);

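/*
 * Disable a channel and busy-wait (up to ~100us) for the controller to
 * report it halted, then clear the current-descriptor-valid and doorbell
 * status bits.
 */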
void au1xxx_dbdma_stop(u32 chanid)
{
	chan_tab_t	*ctp;
	au1x_dma_chan_t *cp;
	int halt_timeout = 0;

	ctp = *((chan_tab_t **)chanid);

	cp = ctp->chan_ptr;
	cp->ddma_cfg &= ~DDMA_CFG_EN;	/* Disable channel */
	wmb(); /* drain writebuffer */
	while (!(cp->ddma_stat & DDMA_STAT_H)) {
		udelay(1);
		halt_timeout++;
		if (halt_timeout > 100) {
			printk(KERN_WARNING "warning: DMA channel won't halt\n");
			break;
		}
	}
	/* clear current desc valid and doorbell */
	cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
	wmb(); /* drain writebuffer */
}
EXPORT_SYMBOL(au1xxx_dbdma_stop);

/*
 * Start using the current descriptor pointer.  If the DBDMA encounters
 * a non-valid descriptor, it will stop.  In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void au1xxx_dbdma_start(u32 chanid)
{
	chan_tab_t	*ctp;
	au1x_dma_chan_t *cp;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;
	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
	cp->ddma_cfg |= DDMA_CFG_EN;	/* Enable channel */
	wmb(); /* drain writebuffer */
	cp->ddma_dbell = 0;
	wmb(); /* drain writebuffer */
}
EXPORT_SYMBOL(au1xxx_dbdma_start);

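/*
 * Stop a channel and reset its descriptor ring: all ring pointers go back
 * to the base descriptor and every descriptor's valid bit is cleared.
 */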
void au1xxx_dbdma_reset(u32 chanid)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;

	au1xxx_dbdma_stop(chanid);

	ctp = *((chan_tab_t **)chanid);
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	/* Run through the descriptors and reset the valid indicator. */
	dp = ctp->chan_desc_base;

	do {
		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
		/*
		 * Reset our software status -- this is used to determine
		 * if a descriptor is in use by upper-level software, since
		 * posting can clear the 'V' bit.
		 */
		dp->sw_status = 0;
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);

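/*
 * Return the channel's current byte count register.  Only meaningful
 * while the channel is stopped.
 */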
u32 au1xxx_get_dma_residue(u32 chanid)
{
	chan_tab_t	*ctp;
	au1x_dma_chan_t *cp;
	u32		rv;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;

	/* This is only valid if the channel is stopped. */
	rv = cp->ddma_bytecnt;
	wmb(); /* drain writebuffer */

	return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);

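/*
 * Release a channel: halt it, free the descriptor ring memory, mark the
 * source/destination devices as no longer in use and free the channel
 * table entry.
 */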
void au1xxx_dbdma_chan_free(u32 chanid)
{
	chan_tab_t	*ctp;
	dbdev_tab_t	*stp, *dtp;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	au1xxx_dbdma_stop(chanid);

	kfree((void *)ctp->cdb_membase);

	stp->dev_flags &= ~DEV_FLAGS_INUSE;
	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
	chan_tab_ptr[ctp->chan_index] = NULL;

	kfree(ctp);
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_free);

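/*
 * DBDMA interrupt handler: find the lowest pending channel, acknowledge
 * its interrupt, invoke the driver callback (if any) and advance the
 * channel's current descriptor pointer.
 */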
static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
{
	u32 intstat;
	u32 chan_index;
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;
	au1x_dma_chan_t *cp;

	intstat = dbdma_gptr->ddma_intstat;
	wmb(); /* drain writebuffer */
	chan_index = __ffs(intstat);

	ctp = chan_tab_ptr[chan_index];
	cp = ctp->chan_ptr;
	dp = ctp->cur_ptr;

	/* Reset interrupt. */
	cp->ddma_irq = 0;
	wmb(); /* drain writebuffer */

	if (ctp->chan_callback)
		ctp->chan_callback(irq, ctp->chan_callparam);

	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	return IRQ_RETVAL(1);
}

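/* Dump a channel's registers and its entire descriptor ring (debug aid). */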
void au1xxx_dbdma_dump(u32 chanid)
{
	chan_tab_t	 *ctp;
	au1x_ddma_desc_t *dp;
	dbdev_tab_t	 *stp, *dtp;
	au1x_dma_chan_t	 *cp;
	u32 i		 = 0;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	cp = ctp->chan_ptr;

	printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
			  (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
			  dtp - dbdev_tab);
	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
			  (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
			  (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));

	printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
	printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
			  cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
	printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
			  cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
			  cp->ddma_bytecnt);

	/* Run through the descriptors */
	dp = ctp->chan_desc_base;

	do {
		printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
				  i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
		printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
				  dp->dscr_source0, dp->dscr_source1,
				  dp->dscr_dest0, dp->dscr_dest1);
		printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
				  dp->dscr_stat, dp->dscr_nxtptr);
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}

/* Put a descriptor into the DMA ring.
 * This updates the source/destination pointers and byte count.
 */
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	u32 nbytes = 0;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the
	 * descriptor, so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer addresses and byte count. */
	dp->dscr_dest0 = dscr->dscr_dest0;
	dp->dscr_source0 = dscr->dscr_source0;
	dp->dscr_dest1 = dscr->dscr_dest1;
	dp->dscr_source1 = dscr->dscr_source1;
	dp->dscr_cmd1 = dscr->dscr_cmd1;
	nbytes = dscr->dscr_cmd1;
	/* Allow the caller to specify if an interrupt is generated */
	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}


static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];

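/*
 * Syscore suspend: save the global configuration registers and each
 * channel's registers, halt all channels and mask their interrupts.
 */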
static int alchemy_dbdma_suspend(void)
{
	int i;
	void __iomem *addr;

	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
	alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
	alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
	alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);

	/* save channel configurations */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
		alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
		alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
		alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
		alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
		alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
		alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);

		/* halt channel */
		__raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
		wmb();
		while (!(__raw_readl(addr + 0x14) & 1))
			wmb();

		addr += 0x100;	/* next channel base */
	}
	/* disable channel interrupts */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	__raw_writel(0, addr + 0x0c);
	wmb();

	return 0;
}

static void alchemy_dbdma_resume(void)
{
	int i;
	void __iomem *addr;

	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	__raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
	__raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
	__raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
	__raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);

	/* restore channel configurations */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
		__raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
		__raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
		__raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
		__raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
		__raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
		__raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
		wmb();
		addr += 0x100;	/* next channel base */
	}
}

static struct syscore_ops alchemy_dbdma_syscore_ops = {
	.suspend	= alchemy_dbdma_suspend,
	.resume		= alchemy_dbdma_resume,
};

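/*
 * One-time setup: allocate the device table, seed it with the SoC's
 * predefined devices, initialize the global DBDMA registers and hook up
 * the controller interrupt.
 */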
static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
{
	int ret;

	dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
	if (!dbdev_tab)
		return -ENOMEM;

	memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
	for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
		dbdev_tab[ret].dev_id = ~0;

	dbdma_gptr->ddma_config = 0;
	dbdma_gptr->ddma_throttle = 0;
	dbdma_gptr->ddma_inten = 0xffff;
	wmb(); /* drain writebuffer */

	ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
	if (ret)
		printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
	else {
		dbdma_initialized = 1;
		register_syscore_ops(&alchemy_dbdma_syscore_ops);
	}

	return ret;
}

static int __init alchemy_dbdma_init(void)
{
	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1550:
		return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
	case ALCHEMY_CPU_AU1200:
		return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);
	case ALCHEMY_CPU_AU1300:
		return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab);
	}
	return 0;
}
subsys_initcall(alchemy_dbdma_init);
