/*
 * BRIEF MODULE DESCRIPTION
 *	Defines for using and allocating DMA channels on the Alchemy
 *	Au1x00 MIPS processors.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 *  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef __ASM_AU1000_DMA_H
#define __ASM_AU1000_DMA_H

#include <linux/io.h>		/* need byte IO */
#include <linux/spinlock.h>	/* And spinlocks */
#include <linux/delay.h>
#include <asm/system.h>

#define NUM_AU1000_DMA_CHANNELS	8

/* DMA Channel Base Addresses */
#define DMA_CHANNEL_BASE	0xB4002000
#define DMA_CHANNEL_LEN		0x00000100

/* DMA Channel Register Offsets */
#define DMA_MODE_SET		0x00000000
#define DMA_MODE_READ		DMA_MODE_SET
#define DMA_MODE_CLEAR		0x00000004
/* DMA Mode register bits follow */
#define DMA_DAH_MASK		(0x0f << 20)
#define DMA_DID_BIT		16
#define DMA_DID_MASK		(0x0f << DMA_DID_BIT)
#define DMA_DS			(1 << 15)
#define DMA_BE			(1 << 13)
#define DMA_DR			(1 << 12)
#define DMA_TS8			(1 << 11)
#define DMA_DW_BIT		9
#define DMA_DW_MASK		(0x03 << DMA_DW_BIT)
#define DMA_DW8			(0 << DMA_DW_BIT)
#define DMA_DW16		(1 << DMA_DW_BIT)
#define DMA_DW32		(2 << DMA_DW_BIT)
#define DMA_NC			(1 << 8)
#define DMA_IE			(1 << 7)
#define DMA_HALT		(1 << 6)
#define DMA_GO			(1 << 5)
#define DMA_AB			(1 << 4)
#define DMA_D1			(1 << 3)
#define DMA_BE1			(1 << 2)
#define DMA_D0			(1 << 1)
#define DMA_BE0			(1 << 0)

#define DMA_PERIPHERAL_ADDR	0x00000008
#define DMA_BUFFER0_START	0x0000000C
#define DMA_BUFFER1_START	0x00000014
#define DMA_BUFFER0_COUNT	0x00000010
#define DMA_BUFFER1_COUNT	0x00000018
#define DMA_BAH_BIT	16
#define DMA_BAH_MASK	(0x0f << DMA_BAH_BIT)
#define DMA_COUNT_BIT	0
#define DMA_COUNT_MASK	(0xffff << DMA_COUNT_BIT)

/* DMA Device IDs follow */
enum {
	DMA_ID_UART0_TX = 0,
	DMA_ID_UART0_RX,
	DMA_ID_GP04,
	DMA_ID_GP05,
	DMA_ID_AC97C_TX,
	DMA_ID_AC97C_RX,
	DMA_ID_UART3_TX,
	DMA_ID_UART3_RX,
	DMA_ID_USBDEV_EP0_RX,
	DMA_ID_USBDEV_EP0_TX,
	DMA_ID_USBDEV_EP2_TX,
	DMA_ID_USBDEV_EP3_TX,
	DMA_ID_USBDEV_EP4_RX,
	DMA_ID_USBDEV_EP5_RX,
	DMA_ID_I2S_TX,
	DMA_ID_I2S_RX,
	DMA_NUM_DEV
};

/* DMA Device IDs for the 2nd bank (Au1100) follow */
enum {
	DMA_ID_SD0_TX = 0,
	DMA_ID_SD0_RX,
	DMA_ID_SD1_TX,
	DMA_ID_SD1_RX,
	DMA_NUM_DEV_BANK2
};

struct dma_chan {
	int dev_id;		/* this channel is allocated if >= 0, */
				/* free otherwise */
	unsigned int io;
	const char *dev_str;
	int irq;
	void *irq_dev;
	unsigned int fifo_addr;
	unsigned int mode;
};

/* These are in arch/mips/au1000/common/dma.c */
extern struct dma_chan au1000_dma_table[];
extern int request_au1000_dma(int dev_id,
			      const char *dev_str,
			      irq_handler_t irqhandler,
			      unsigned long irqflags,
			      void *irq_dev_id);
extern void free_au1000_dma(unsigned int dmanr);
extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
				int length, int *eof, void *data);
extern void dump_au1000_dma_channel(unsigned int dmanr);
extern spinlock_t au1000_dma_spin_lock;

static inline struct dma_chan *get_dma_chan(unsigned int dmanr)
{
	if (dmanr >= NUM_AU1000_DMA_CHANNELS ||
	    au1000_dma_table[dmanr].dev_id < 0)
		return NULL;
	return &au1000_dma_table[dmanr];
}

static inline unsigned long claim_dma_lock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&au1000_dma_spin_lock, flags);
	return flags;
}

static inline void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&au1000_dma_spin_lock, flags);
}
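
/*
 * Example (not part of the original header): a minimal sketch of how a
 * driver might claim a channel and serialize its register programming
 * with the DMA spinlock above.  "my_dma_isr" and "my_dev" are hypothetical
 * placeholders for the driver's interrupt handler and device cookie.
 *
 *	int ch;
 *	unsigned long flags;
 *
 *	ch = request_au1000_dma(DMA_ID_AC97C_TX, "ac97 tx",
 *				my_dma_isr, 0, my_dev);
 *	if (ch < 0)
 *		return ch;		(no channel available)
 *
 *	flags = claim_dma_lock();
 *	... program the channel with the helpers below ...
 *	release_dma_lock(flags);
 *
 *	free_au1000_dma(ch);		(when the driver is done)
 */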

/*
 * Set the DMA buffer enable bits in the mode register.
 */
static inline void enable_dma_buffer0(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(DMA_BE0, chan->io + DMA_MODE_SET);
}

static inline void enable_dma_buffer1(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(DMA_BE1, chan->io + DMA_MODE_SET);
}

static inline void enable_dma_buffers(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET);
}

static inline void start_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(DMA_GO, chan->io + DMA_MODE_SET);
}

#define DMA_HALT_POLL 0x5000

static inline void halt_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);
	int i;

	if (!chan)
		return;
	au_writel(DMA_GO, chan->io + DMA_MODE_CLEAR);

	/* Poll the halt bit */
	for (i = 0; i < DMA_HALT_POLL; i++)
		if (au_readl(chan->io + DMA_MODE_READ) & DMA_HALT)
			break;
	if (i == DMA_HALT_POLL)
		printk(KERN_INFO "halt_dma: HALT poll expired!\n");
}

static inline void disable_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;

	halt_dma(dmanr);

	/* Now we can disable the buffers */
	au_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR);
}

static inline int dma_halted(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 1;
	return (au_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0;
}

/* Initialize a DMA channel. */
static inline void init_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);
	u32 mode;

	if (!chan)
		return;

	disable_dma(dmanr);

	/* Set device FIFO address */
	au_writel(CPHYSADDR(chan->fifo_addr), chan->io + DMA_PERIPHERAL_ADDR);

	mode = chan->mode | (chan->dev_id << DMA_DID_BIT);
	if (chan->irq)
		mode |= DMA_IE;

	au_writel(~mode, chan->io + DMA_MODE_CLEAR);
	au_writel(mode,  chan->io + DMA_MODE_SET);
}

/*
 * Set mode for a specific DMA channel
 */
static inline void set_dma_mode(unsigned int dmanr, unsigned int mode)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	/*
	 * set_dma_mode is only allowed to change endianness, direction,
	 * transfer size, device FIFO width, and coherency settings.
	 * Make sure anything else is masked off.
	 */
	mode &= (DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
	chan->mode &= ~(DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
	chan->mode |= mode;
}

static inline unsigned int get_dma_mode(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 0;
	return chan->mode;
}

static inline int get_dma_active_buffer(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return -1;
	return (au_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0;
}

/*
 * Set the device FIFO address for a specific DMA channel - only
 * applicable to GP04 and GP05. All the other devices have fixed
 * FIFO addresses.
 */
static inline void set_dma_fifo_addr(unsigned int dmanr, unsigned int a)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;

	if (chan->mode & DMA_DS)	/* second bank of device IDs */
		return;

	if (chan->dev_id != DMA_ID_GP04 && chan->dev_id != DMA_ID_GP05)
		return;

	au_writel(CPHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR);
}

/*
 * Clear the DMA buffer done bits in the mode register.
 */
static inline void clear_dma_done0(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(DMA_D0, chan->io + DMA_MODE_CLEAR);
}

static inline void clear_dma_done1(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(DMA_D1, chan->io + DMA_MODE_CLEAR);
}

/*
 * This does nothing - not applicable to Au1000 DMA.
 */
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
}

/*
 * Set Buffer 0 transfer address for specific DMA channel.
 */
static inline void set_dma_addr0(unsigned int dmanr, unsigned int a)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(a, chan->io + DMA_BUFFER0_START);
}

/*
 * Set Buffer 1 transfer address for specific DMA channel.
 */
static inline void set_dma_addr1(unsigned int dmanr, unsigned int a)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	au_writel(a, chan->io + DMA_BUFFER1_START);
}

/*
 * Set Buffer 0 transfer size (max 64k) for a specific DMA channel.
 */
static inline void set_dma_count0(unsigned int dmanr, unsigned int count)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	count &= DMA_COUNT_MASK;
	au_writel(count, chan->io + DMA_BUFFER0_COUNT);
}

/*
 * Set Buffer 1 transfer size (max 64k) for a specific DMA channel.
 */
static inline void set_dma_count1(unsigned int dmanr, unsigned int count)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	count &= DMA_COUNT_MASK;
	au_writel(count, chan->io + DMA_BUFFER1_COUNT);
}

/*
 * Set both buffer transfer sizes (max 64k) for a specific DMA channel.
 */
static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	count &= DMA_COUNT_MASK;
	au_writel(count, chan->io + DMA_BUFFER0_COUNT);
	au_writel(count, chan->io + DMA_BUFFER1_COUNT);
}
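
/*
 * Example (not part of the original header): one plausible sequence for
 * programming a double-buffered transfer on a channel "ch" obtained from
 * request_au1000_dma().  "buf0_phys" and "buf1_phys" are hypothetical
 * physical addresses of DMA-safe buffers (e.g. from dma_map_single()).
 * Note that the counts are in transfers of the programmed FIFO width,
 * not bytes (see the width scaling in get_dma_residue() below).
 *
 *	set_dma_mode(ch, DMA_DW8 | DMA_NC);	(8-bit width, non-coherent)
 *	init_dma(ch);				(writes mode and FIFO address)
 *	set_dma_addr0(ch, buf0_phys);
 *	set_dma_count0(ch, 0x1000);
 *	set_dma_addr1(ch, buf1_phys);
 *	set_dma_count1(ch, 0x1000);
 *	enable_dma_buffers(ch);
 *	start_dma(ch);
 */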

/*
 * Returns the buffer done bits (DMA_D0 and/or DMA_D1) from the mode
 * register; zero if neither buffer has completed or the channel is
 * invalid.
 */
static inline unsigned int get_dma_buffer_done(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 0;
	return au_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1);
}

/*
 * Returns the DMA channel's Buffer Done IRQ number.
 */
static inline int get_dma_done_irq(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return -1;
	return chan->irq;
}

/*
 * Get DMA residue count. Returns the number of _bytes_ left to transfer.
 */
static inline int get_dma_residue(unsigned int dmanr)
{
	int curBufCntReg, count;
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 0;

	curBufCntReg = (au_readl(chan->io + DMA_MODE_READ) & DMA_AB) ?
	    DMA_BUFFER1_COUNT : DMA_BUFFER0_COUNT;

	count = au_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK;

	if ((chan->mode & DMA_DW_MASK) == DMA_DW16)
		count <<= 1;
	else if ((chan->mode & DMA_DW_MASK) == DMA_DW32)
		count <<= 2;

	return count;
}
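
/*
 * Example (not part of the original header): a sketch of the buffer-done
 * handling a driver's interrupt handler might perform, refilling whichever
 * buffer just completed.  "ch", "next_phys" and the count are hypothetical.
 *
 *	unsigned int done = get_dma_buffer_done(ch);
 *
 *	if (done & DMA_D0) {
 *		clear_dma_done0(ch);
 *		set_dma_addr0(ch, next_phys);
 *		set_dma_count0(ch, 0x1000);
 *		enable_dma_buffer0(ch);
 *	}
 *	if (done & DMA_D1) {
 *		clear_dma_done1(ch);
 *		set_dma_addr1(ch, next_phys);
 *		set_dma_count1(ch, 0x1000);
 *		enable_dma_buffer1(ch);
 *	}
 */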

#endif /* __ASM_AU1000_DMA_H */