• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/arm/plat-omap/
1/*
2 * linux/arch/arm/plat-omap/dma.c
3 *
4 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7 * Graphics DMA and LCD DMA graphics tranformations
8 * by Imre Deak <imre.deak@nokia.com>
9 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12 *
13 * Copyright (C) 2009 Texas Instruments
14 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15 *
16 * Support functions for the OMAP internal DMA channels.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation.
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/errno.h>
29#include <linux/interrupt.h>
30#include <linux/irq.h>
31#include <linux/io.h>
32#include <linux/slab.h>
33#include <linux/delay.h>
34
35#include <asm/system.h>
36#include <mach/hardware.h>
37#include <plat/dma.h>
38
39#include <plat/tc.h>
40
41#undef DEBUG
42
#ifndef CONFIG_ARCH_OMAP1
/*
 * Lifecycle states for a logical channel used by the OMAP2+ chaining
 * support: allocated, parameters programmed, started, queued, not yet
 * started, paused, or linked into a chain.
 */
enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
	DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
};

/* Aggregate run state of a whole DMA chain. */
enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif
50
/* Channel flag bit: a transfer is currently active on the channel */
#define OMAP_DMA_ACTIVE			0x01
/* Write-1-to-clear mask for the OMAP2+ per-channel status register (CSR) */
#define OMAP2_DMA_CSR_CLEAR_MASK	0xffe

/* Base of the OMAP1 functional mux registers used by get/set_gdma_dev() */
#define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)

/* Non-zero while the DMA module is in OMAP1510-compatibility mode */
static int enable_1510_mode;

/*
 * Snapshot of global OMAP2+ DMA registers; presumably saved/restored
 * around context loss by code outside this chunk — TODO confirm.
 */
static struct omap_dma_global_context_registers {
	u32 dma_irqenable_l0;
	u32 dma_ocp_sysconfig;
	u32 dma_gcr;
} omap_dma_global_context;
63
/* Software state for one logical DMA channel. */
struct omap_dma_lch {
	int next_lch;		/* next channel in a link chain, or -1 */
	int dev_id;		/* requesting device id; -1 when the channel is free */
	u16 saved_csr;		/* saved channel status — writer not visible in this chunk */
	u16 enabled_irqs;	/* CICR bits programmed by omap_enable_channel_irq() */
	const char *dev_name;	/* owner name recorded at request time */
	void (*callback)(int lch, u16 ch_status, void *data);
	void *data;		/* opaque cookie passed back to callback */

#ifndef CONFIG_ARCH_OMAP1
	/* required for Dynamic chaining */
	int prev_linked_ch;	/* previous channel in the circular chain */
	int next_linked_ch;	/* next channel in the circular chain, or -1 */
	int state;		/* DMA_CH_* lifecycle state */
	int chain_id;		/* owning chain id, or -1 when unchained */

	int status;
#endif
	long flags;		/* OMAP_DMA_ACTIVE etc. */
};
84
/* Book-keeping for one chain of linked DMA channels (OMAP2+ only). */
struct dma_link_info {
	int *linked_dmach_q;	/* channel numbers belonging to the chain */
	int no_of_lchs_linked;	/* number of channels in the chain */

	int q_count;		/* entries currently queued */
	int q_tail;		/* insertion index (see OMAP_DMA_CHAIN_INCQTAIL) */
	int q_head;		/* removal index (see OMAP_DMA_CHAIN_INCQHEAD) */

	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;		/* chaining mode — set by chain API outside this chunk */

};

/* Per-chain state table indexed by chain id; allocated elsewhere. */
static struct dma_link_info *dma_linked_lch;
99
#ifndef CONFIG_ARCH_OMAP1

/* Chain handling macros */

/* Reset a chain's queue to empty. */
#define OMAP_DMA_CHAIN_QINIT(chain_id)					\
	do {								\
		dma_linked_lch[chain_id].q_head =			\
		dma_linked_lch[chain_id].q_tail =			\
		dma_linked_lch[chain_id].q_count = 0;			\
	} while (0)

/* True when every linked channel is queued. */
#define OMAP_DMA_CHAIN_QFULL(chain_id)					\
		(dma_linked_lch[chain_id].no_of_lchs_linked ==		\
		dma_linked_lch[chain_id].q_count)

/*
 * True when only one free slot remains in the queue.
 *
 * Fixed: this predicate was wrapped in do { } while (0), which both
 * discarded its value and made any use site a syntax error (the inner
 * expression had no terminating semicolon).  It is now an expression,
 * consistent with OMAP_DMA_CHAIN_QFULL/QEMPTY.
 */
#define OMAP_DMA_CHAIN_QLAST(chain_id)					\
		((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==	\
		dma_linked_lch[chain_id].q_count)

/* True when nothing is queued on the chain. */
#define OMAP_DMA_CHAIN_QEMPTY(chain_id)					\
		(0 == dma_linked_lch[chain_id].q_count)

/* Advance a queue index, wrapping at the number of linked channels. */
#define __OMAP_DMA_CHAIN_INCQ(end)					\
	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)

/* Consume one queued entry. */
#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head);	\
		dma_linked_lch[chain_id].q_count--;			\
	} while (0)

/* Queue one entry. */
#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail);	\
		dma_linked_lch[chain_id].q_count++; \
	} while (0)
#endif
133
/* Total logical channels (incl. reserved) and channels usable by this code */
static int dma_lch_count;
static int dma_chan_count;
/* Channels set aside for other users, configured at init time */
static int omap_dma_reserve_channels;

/* Protects dma_chan[] allocation state and the shared IRQENABLE_L0 register */
static spinlock_t dma_chan_lock;
/* Per-channel software state, indexed by logical channel number */
static struct omap_dma_lch *dma_chan;
/* ioremapped base of the DMA controller register block */
static void __iomem *omap_dma_base;

/* OMAP1: interrupt line for each logical DMA channel */
static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
	INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
	INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
	INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
	INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
	INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};
149
/* Forward declarations for helpers used before their definitions. */
static inline void disable_lnk(int lch);
static void omap_disable_channel_irq(int lch);
static inline void omap_enable_channel_irq(int lch);

/*
 * Log that a code path is not implemented for 24xx.
 *
 * Fixed: the expansion previously ended with a semicolon, so the usual
 * "REVISIT_24XX();" call expanded to two statements and would break an
 * un-braced if/else body.  The semicolon now belongs to the call site.
 */
#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
						__func__)
156
/*
 * Register accessors.  OMAP1 registers are 16 bits wide at OMAP1_DMA_*
 * offsets; OMAP2+ registers are 32 bits wide at OMAP_DMA4_* offsets.
 * The register name is token-pasted onto the per-class prefix, so
 * callers pass bare names such as CCR(lch) or IRQENABLE_L0.
 */
#define dma_read(reg)							\
({									\
	u32 __val;							\
	if (cpu_class_is_omap1())					\
		__val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg);	\
	else								\
		__val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg);	\
	__val;								\
})

#define dma_write(val, reg)						\
({									\
	if (cpu_class_is_omap1())					\
		__raw_writew((u16)(val), omap_dma_base + OMAP1_DMA_##reg); \
	else								\
		__raw_writel((val), omap_dma_base + OMAP_DMA4_##reg);	\
})
174
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
/* Non-15xx kernels: 1510 compatibility mode can never be active. */
#define omap_dma_in_1510_mode()		0
#endif
184
#ifdef CONFIG_ARCH_OMAP1
/*
 * Read which device is routed to global DMA request line @req.
 * Each FUNC_MUX register packs five 6-bit fields; values are stored
 * biased by one (0 means "device 1" after the +1 below).
 */
static inline int get_gdma_dev(int req)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;

	return ((omap_readl(reg) >> shift) & 0x3f) + 1;
}

/* Route device @dev to global DMA request line @req via the OMAP1 mux. */
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);
	l |= (dev - 1) << shift;
	omap_writel(l, reg);
}
#else
/* No global-request muxing outside OMAP1. */
#define set_gdma_dev(req, dev)	do {} while (0)
#endif
208
209/* Omap1 only */
210static void clear_lch_regs(int lch)
211{
212	int i;
213	void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);
214
215	for (i = 0; i < 0x2c; i += 2)
216		__raw_writew(0, lch_base + i);
217}
218
/*
 * Set the DMA priority for a channel.
 *
 * OMAP1: the priority lives in the traffic-controller register selected
 * by @dst_port; @priority (0-15) is written into bits 11:8 of it.
 * OMAP2+: @dst_port is ignored and @priority is treated as a boolean
 * controlling the high-priority bit (bit 6) of the channel's CCR.
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (cpu_class_is_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}

	if (cpu_class_is_omap2()) {
		u32 ccr;

		ccr = dma_read(CCR(lch));
		if (priority)
			ccr |= (1 << 6);
		else
			ccr &= ~(1 << 6);
		dma_write(ccr, CCR(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_priority);
260
/*
 * Program the basic transfer shape of a channel: element width (CSDP
 * bits 1:0), elements per frame (CEN), frames per block (CFN), and the
 * hardware synchronization configuration.  @dma_trigger selects the
 * OMAP2+ request line; @src_or_dst_synch picks source vs destination
 * (or prefetching destination) synchronization.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP(lch));

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* OMAP1: frame sync is CCR bit 5 ... */
		ccr = dma_read(CCR(lch));
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR(lch));

		/* ... and block sync is CCR2 bit 2 */
		ccr = dma_read(CCR2(lch));
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2(lch));
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR(lch));

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* upper trigger bits land at 19:20, lower 5 bits at 4:0 */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		dma_write(val, CCR(lch));
	}

	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
323
/*
 * Configure constant-fill or transparent-copy mode for a channel.
 * Not available in 1510 mode (BUG_ON).  @color is the fill /
 * transparency value: split across COLOR_L/COLOR_U on OMAP1, a 24-bit
 * COLOR register on OMAP2+.
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		w = dma_read(CCR2(lch));
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2(lch));

		w = dma_read(LCH_CTRL(lch));
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			dma_write((u16)color, COLOR_L(lch));
			dma_write((u16)(color >> 16), COLOR_U(lch));
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		val = dma_read(CCR(lch));
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR(lch));

		/* OMAP2+ COLOR register holds only 24 bits */
		color &= 0xffffff;
		dma_write(color, COLOR(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_color_mode);
384
385void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
386{
387	if (cpu_class_is_omap2()) {
388		u32 csdp;
389
390		csdp = dma_read(CSDP(lch));
391		csdp &= ~(0x3 << 16);
392		csdp |= (mode << 16);
393		dma_write(csdp, CSDP(lch));
394	}
395}
396EXPORT_SYMBOL(omap_set_dma_write_mode);
397
398void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
399{
400	if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
401		u32 l;
402
403		l = dma_read(LCH_CTRL(lch));
404		l &= ~0x7;
405		l |= mode;
406		dma_write(l, LCH_CTRL(lch));
407	}
408}
409EXPORT_SYMBOL(omap_set_dma_channel_mode);
410
/*
 * Program the source side of a transfer: port (OMAP1 only), addressing
 * mode (CCR bits 13:12), start address, and element/frame indexes.
 * Note that src_port is only for omap1.
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		u16 w;

		w = dma_read(CSDP(lch));
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		dma_write(w, CSDP(lch));
	}

	l = dma_read(CCR(lch));
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	dma_write(l, CCR(lch));

	/* OMAP1 splits the 32-bit address over two 16-bit registers */
	if (cpu_class_is_omap1()) {
		dma_write(src_start >> 16, CSSA_U(lch));
		dma_write((u16)src_start, CSSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(src_start, CSSA(lch));

	dma_write(src_ei, CSEI(lch));
	dma_write(src_fi, CSFI(lch));
}
EXPORT_SYMBOL(omap_set_dma_src_params);
444
445void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
446{
447	omap_set_dma_transfer_params(lch, params->data_type,
448				     params->elem_count, params->frame_count,
449				     params->sync_mode, params->trigger,
450				     params->src_or_dst_synch);
451	omap_set_dma_src_params(lch, params->src_port,
452				params->src_amode, params->src_start,
453				params->src_ei, params->src_fi);
454
455	omap_set_dma_dest_params(lch, params->dst_port,
456				 params->dst_amode, params->dst_start,
457				 params->dst_ei, params->dst_fi);
458	if (params->read_prio || params->write_prio)
459		omap_dma_set_prio_lch(lch, params->read_prio,
460				      params->write_prio);
461}
462EXPORT_SYMBOL(omap_set_dma_params);
463
/* OMAP1 only: program the source element/frame indexes; no-op on OMAP2+. */
void omap_set_dma_src_index(int lch, int eidx, int fidx)
{
	if (!cpu_class_is_omap2()) {
		dma_write(eidx, CSEI(lch));
		dma_write(fidx, CSFI(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_src_index);
473
474void omap_set_dma_src_data_pack(int lch, int enable)
475{
476	u32 l;
477
478	l = dma_read(CSDP(lch));
479	l &= ~(1 << 6);
480	if (enable)
481		l |= (1 << 6);
482	dma_write(l, CSDP(lch));
483}
484EXPORT_SYMBOL(omap_set_dma_src_data_pack);
485
/*
 * Set the source burst size (CSDP bits 8:7).  The encodings differ
 * between OMAP1 and OMAP2+; burst-8 and burst-16 are unsupported on
 * OMAP1 and hit BUG() via the deliberate case fallthroughs below.
 */
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2()) {
			burst = 0x2;
			break;
		}
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 * fall through
		 */
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		BUG();
	}

	l |= (burst << 7);
	dma_write(l, CSDP(lch));
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
530
/*
 * Program the destination side of a transfer: port (OMAP1 only),
 * addressing mode (CCR bits 15:14), start address, and element/frame
 * indexes.  Note that dest_port is only for OMAP1.
 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		l = dma_read(CSDP(lch));
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		dma_write(l, CSDP(lch));
	}

	l = dma_read(CCR(lch));
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	dma_write(l, CCR(lch));

	/* OMAP1 splits the 32-bit address over two 16-bit registers */
	if (cpu_class_is_omap1()) {
		dma_write(dest_start >> 16, CDSA_U(lch));
		dma_write(dest_start, CDSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(dest_start, CDSA(lch));

	dma_write(dst_ei, CDEI(lch));
	dma_write(dst_fi, CDFI(lch));
}
EXPORT_SYMBOL(omap_set_dma_dest_params);
562
/* OMAP1 only: program the destination element/frame indexes; no-op on OMAP2+. */
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
	if (!cpu_class_is_omap2()) {
		dma_write(eidx, CDEI(lch));
		dma_write(fidx, CDFI(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_dest_index);
572
573void omap_set_dma_dest_data_pack(int lch, int enable)
574{
575	u32 l;
576
577	l = dma_read(CSDP(lch));
578	l &= ~(1 << 13);
579	if (enable)
580		l |= 1 << 13;
581	dma_write(l, CSDP(lch));
582}
583EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
584
/*
 * Set the destination burst size (CSDP bits 15:14).  Encodings differ
 * between OMAP1 and OMAP2+; burst-16 is unsupported on OMAP1 and falls
 * through to the error path.
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	dma_write(l, CSDP(lch));
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
626
/*
 * Clear any stale channel status and then enable the interrupt causes
 * recorded in dma_chan[lch].enabled_irqs (written to CICR).
 */
static inline void omap_enable_channel_irq(int lch)
{
	u32 status;

	/* Clear CSR */
	if (cpu_class_is_omap1())
		status = dma_read(CSR(lch));	/* the read itself clears it */
	else if (cpu_class_is_omap2())
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));

	/* Enable some nice interrupts. */
	dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
}
640
/* Mask every interrupt cause on an OMAP2+ channel; no-op on OMAP1. */
static void omap_disable_channel_irq(int lch)
{
	if (!cpu_class_is_omap2())
		return;

	dma_write(0, CICR(lch));
}
646
647void omap_enable_dma_irq(int lch, u16 bits)
648{
649	dma_chan[lch].enabled_irqs |= bits;
650}
651EXPORT_SYMBOL(omap_enable_dma_irq);
652
653void omap_disable_dma_irq(int lch, u16 bits)
654{
655	dma_chan[lch].enabled_irqs &= ~bits;
656}
657EXPORT_SYMBOL(omap_disable_dma_irq);
658
/*
 * Point a channel's CLNK_CTRL at its successor and set ENABLE_LNK
 * (bit 15) so the successor starts when this channel completes.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL(lch));

	/* OMAP1: clear the STOP_LNK bit before (re)programming the link */
	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* OMAP2+ chains use next_linked_ch rather than next_lch */
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	dma_write(l, CLNK_CTRL(lch));
}
680
/*
 * Break a channel's hardware link and mask its interrupts, then mark
 * the channel inactive in software.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL(lch));

	/* Disable interrupts */
	if (cpu_class_is_omap1()) {
		dma_write(0, CICR(lch));
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	dma_write(l, CLNK_CTRL(lch));
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
703
704static inline void omap2_enable_irq_lch(int lch)
705{
706	u32 val;
707	unsigned long flags;
708
709	if (!cpu_class_is_omap2())
710		return;
711
712	spin_lock_irqsave(&dma_chan_lock, flags);
713	val = dma_read(IRQENABLE_L0);
714	val |= 1 << lch;
715	dma_write(val, IRQENABLE_L0);
716	spin_unlock_irqrestore(&dma_chan_lock, flags);
717}
718
719static inline void omap2_disable_irq_lch(int lch)
720{
721	u32 val;
722	unsigned long flags;
723
724	if (!cpu_class_is_omap2())
725		return;
726
727	spin_lock_irqsave(&dma_chan_lock, flags);
728	val = dma_read(IRQENABLE_L0);
729	val &= ~(1 << lch);
730	dma_write(val, IRQENABLE_L0);
731	spin_unlock_irqrestore(&dma_chan_lock, flags);
732}
733
/*
 * Allocate a free logical DMA channel.
 *
 * @dev_id:     hardware sync device id; 0 requests a software-triggered
 *              channel
 * @dev_name:   owner name recorded for diagnostics
 * @callback:   invoked from the DMA interrupt path (may be NULL)
 * @data:       opaque cookie passed back to @callback
 * @dma_ch_out: on success, receives the allocated channel number
 *
 * Returns 0 on success or -EBUSY when no channel is free.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		/* first free channel wins; only dev_id == 0 stops the scan early */
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	/* claim the channel while still holding the lock */
	chan->dev_id = dev_id;

	if (cpu_class_is_omap1())
		clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		dma_write(dev_id | (1 << 10), CCR(free_ch));
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		dma_write(dev_id, CCR(free_ch));
	}

	if (cpu_class_is_omap2()) {
		omap2_enable_irq_lch(free_ch);
		omap_enable_channel_irq(free_ch);
		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
		dma_write(1 << free_ch, IRQSTATUS_L0);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
813
/*
 * Release a channel obtained with omap_request_dma(): stop the
 * hardware, mask and acknowledge its interrupts, then mark the slot
 * free under dma_chan_lock.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
	}

	if (cpu_class_is_omap2()) {
		omap2_disable_irq_lch(lch);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
		dma_write(1 << lch, IRQSTATUS_L0);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
		omap_clear_dma(lch);
	}

	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
853
854/**
855 * @brief omap_dma_set_global_params : Set global priority settings for dma
856 *
857 * @param arb_rate
858 * @param max_fifo_depth
859 * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
860 * 						   DMA_THREAD_RESERVE_ONET
861 * 						   DMA_THREAD_RESERVE_TWOT
862 * 						   DMA_THREAD_RESERVE_THREET
863 */
864void
865omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
866{
867	u32 reg;
868
869	if (!cpu_class_is_omap2()) {
870		printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
871		return;
872	}
873
874	if (max_fifo_depth == 0)
875		max_fifo_depth = 1;
876	if (arb_rate == 0)
877		arb_rate = 1;
878
879	reg = 0xff & max_fifo_depth;
880	reg |= (0x3 & tparams) << 12;
881	reg |= (arb_rate & 0xff) << 16;
882
883	dma_write(reg, GCR);
884}
885EXPORT_SYMBOL(omap_dma_set_global_params);
886
887/**
888 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
889 *
890 * @param lch
891 * @param read_prio - Read priority
892 * @param write_prio - Write priority
893 * Both of the above can be set with one of the following values :
894 * 	DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
895 */
896int
897omap_dma_set_prio_lch(int lch, unsigned char read_prio,
898		      unsigned char write_prio)
899{
900	u32 l;
901
902	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
903		printk(KERN_ERR "Invalid channel id\n");
904		return -EINVAL;
905	}
906	l = dma_read(CCR(lch));
907	l &= ~((1 << 6) | (1 << 26));
908	if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
909		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
910	else
911		l |= ((read_prio & 0x1) << 6);
912
913	dma_write(l, CCR(lch));
914
915	return 0;
916}
917EXPORT_SYMBOL(omap_dma_set_prio_lch);
918
/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 *
 * Runs with local interrupts disabled so the irq handler cannot touch
 * the half-cleared channel.
 */
void omap_clear_dma(int lch)
{
	unsigned long flags;

	local_irq_save(flags);

	if (cpu_class_is_omap1()) {
		u32 l;

		/* stop the channel by clearing the enable bit */
		l = dma_read(CCR(lch));
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));

		/* Clear pending interrupts */
		l = dma_read(CSR(lch));
	}

	if (cpu_class_is_omap2()) {
		int i;
		/* OMAP2+: zero the whole per-channel register window */
		void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
		for (i = 0; i < 0x44; i += 4)
			__raw_writel(0, lch_base + i);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(omap_clear_dma);
950
951void omap_start_dma(int lch)
952{
953	u32 l;
954
955	/*
956	 * The CPC/CDAC register needs to be initialized to zero
957	 * before starting dma transfer.
958	 */
959	if (cpu_is_omap15xx())
960		dma_write(0, CPC(lch));
961	else
962		dma_write(0, CDAC(lch));
963
964	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
965		int next_lch, cur_lch;
966		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
967
968		dma_chan_link_map[lch] = 1;
969		/* Set the link register of the first channel */
970		enable_lnk(lch);
971
972		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
973		cur_lch = dma_chan[lch].next_lch;
974		do {
975			next_lch = dma_chan[cur_lch].next_lch;
976
977			/* The loop case: we've been here already */
978			if (dma_chan_link_map[cur_lch])
979				break;
980			/* Mark the current channel */
981			dma_chan_link_map[cur_lch] = 1;
982
983			enable_lnk(cur_lch);
984			omap_enable_channel_irq(cur_lch);
985
986			cur_lch = next_lch;
987		} while (next_lch != -1);
988	} else if (cpu_is_omap242x() ||
989		(cpu_is_omap243x() &&  omap_type() <= OMAP2430_REV_ES1_0)) {
990
991		/* Errata: Need to write lch even if not using chaining */
992		dma_write(lch, CLNK_CTRL(lch));
993	}
994
995	omap_enable_channel_irq(lch);
996
997	l = dma_read(CCR(lch));
998
999	if (cpu_is_omap2420() ||
1000	    (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
1001		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1002
1003	l |= OMAP_DMA_CCR_EN;
1004	dma_write(l, CCR(lch));
1005
1006	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1007}
1008EXPORT_SYMBOL(omap_start_dma);
1009
/*
 * Stop a transfer on @lch.  On OMAP3 with source/destination
 * synchronization active, works around errata i541 by forcing the DMA
 * module out of standby while the FIFO drains.  Any software link
 * chain headed by @lch is torn down as well.
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	if (cpu_class_is_omap1())
		dma_write(0, CICR(lch));

	l = dma_read(CCR(lch));
	/* OMAP3 Errata i541: sDMA FIFO draining does not finish */
	if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = dma_read(OCP_SYSCONFIG);
		sys_cf = l;
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		dma_write(l , OCP_SYSCONFIG);

		l = dma_read(CCR(lch));
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));

		/* Wait for sDMA FIFO drain (up to ~500us) */
		l = dma_read(CCR(lch));
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = dma_read(CCR(lch));
		}
		if (i >= 100)
			printk(KERN_ERR "DMA drain did not complete on "
					"lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		dma_write(sys_cf, OCP_SYSCONFIG);
	} else {
		/* common case: just clear the enable bit */
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));
	}

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];

		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
1075
1076/*
1077 * Allows changing the DMA callback function or data. This may be needed if
1078 * the driver shares a single DMA channel for multiple dma triggers.
1079 */
1080int omap_set_dma_callback(int lch,
1081			  void (*callback)(int lch, u16 ch_status, void *data),
1082			  void *data)
1083{
1084	unsigned long flags;
1085
1086	if (lch < 0)
1087		return -ENODEV;
1088
1089	spin_lock_irqsave(&dma_chan_lock, flags);
1090	if (dma_chan[lch].dev_id == -1) {
1091		printk(KERN_ERR "DMA callback for not set for free channel\n");
1092		spin_unlock_irqrestore(&dma_chan_lock, flags);
1093		return -EINVAL;
1094	}
1095	dma_chan[lch].callback = callback;
1096	dma_chan[lch].data = data;
1097	spin_unlock_irqrestore(&dma_chan_lock, flags);
1098
1099	return 0;
1100}
1101EXPORT_SYMBOL(omap_set_dma_callback);
1102
/*
 * Returns current physical source address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CSSA_L register overflow inbetween the two reads resulting
 * in incorrect return value.
 */
dma_addr_t omap_get_dma_src_pos(int lch)
{
	dma_addr_t offset = 0;

	if (cpu_is_omap15xx())
		offset = dma_read(CPC(lch));
	else
		offset = dma_read(CSAC(lch));

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!cpu_is_omap15xx() && offset == 0)
		offset = dma_read(CSAC(lch));

	/* OMAP1 registers are 16-bit; fold in the upper address half */
	if (cpu_class_is_omap1())
		offset |= (dma_read(CSSA_U(lch)) << 16);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_src_pos);
1133
/*
 * Returns current physical destination address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CDSA_L register overflow inbetween the two reads resulting
 * in incorrect return value.
 */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t offset = 0;

	if (cpu_is_omap15xx())
		offset = dma_read(CPC(lch));
	else
		offset = dma_read(CDAC(lch));

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!cpu_is_omap15xx() && offset == 0)
		offset = dma_read(CDAC(lch));

	/* OMAP1 registers are 16-bit; fold in the upper address half */
	if (cpu_class_is_omap1())
		offset |= (dma_read(CDSA_U(lch)) << 16);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_dst_pos);
1164
1165int omap_get_dma_active_status(int lch)
1166{
1167	return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0;
1168}
1169EXPORT_SYMBOL(omap_get_dma_active_status);
1170
1171int omap_dma_running(void)
1172{
1173	int lch;
1174
1175	if (cpu_class_is_omap1())
1176		if (omap_lcd_dma_running())
1177			return 1;
1178
1179	for (lch = 0; lch < dma_chan_count; lch++)
1180		if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN)
1181			return 1;
1182
1183	return 0;
1184}
1185
/*
 * lch_queue DMA will start right after lch_head one is finished.
 * For this DMA link to start, you still need to start (see omap_start_dma)
 * the first one. That will fire up the entire queue.
 */
void omap_dma_link_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		/* 1510 can only auto-repeat a channel onto itself */
		if (lch_head == lch_queue) {
			dma_write(dma_read(CCR(lch_head)) | (3 << 8),
								CCR(lch_head));
			return;
		}
		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
		BUG();
		return;
	}

	if ((dma_chan[lch_head].dev_id == -1) ||
	    (dma_chan[lch_queue].dev_id == -1)) {
		printk(KERN_ERR "omap_dma: trying to link "
		       "non requested channels\n");
		dump_stack();
	}

	/* link takes effect in hardware when enable_lnk() runs at start */
	dma_chan[lch_head].next_lch = lch_queue;
}
EXPORT_SYMBOL(omap_dma_link_lch);
1214
/*
 * Once the DMA queue is stopped, we can destroy it.
 */
void omap_dma_unlink_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		/* 1510: undo the self-repeat set up by omap_dma_link_lch() */
		if (lch_head == lch_queue) {
			dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
								CCR(lch_head));
			return;
		}
		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
		BUG();
		return;
	}

	if (dma_chan[lch_head].next_lch != lch_queue ||
	    dma_chan[lch_head].next_lch == -1) {
		printk(KERN_ERR "omap_dma: trying to unlink "
		       "non linked channels\n");
		dump_stack();
	}

	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
	    (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
		printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
		       "before unlinking\n");
		dump_stack();
	}

	dma_chan[lch_head].next_lch = -1;
}
EXPORT_SYMBOL(omap_dma_unlink_lch);
1248
1249/*----------------------------------------------------------------------------*/
1250
1251#ifndef CONFIG_ARCH_OMAP1
1252/* Create chain of DMA channesls */
1253static void create_dma_lch_chain(int lch_head, int lch_queue)
1254{
1255	u32 l;
1256
1257	/* Check if this is the first link in chain */
1258	if (dma_chan[lch_head].next_linked_ch == -1) {
1259		dma_chan[lch_head].next_linked_ch = lch_queue;
1260		dma_chan[lch_head].prev_linked_ch = lch_queue;
1261		dma_chan[lch_queue].next_linked_ch = lch_head;
1262		dma_chan[lch_queue].prev_linked_ch = lch_head;
1263	}
1264
1265	/* a link exists, link the new channel in circular chain */
1266	else {
1267		dma_chan[lch_queue].next_linked_ch =
1268					dma_chan[lch_head].next_linked_ch;
1269		dma_chan[lch_queue].prev_linked_ch = lch_head;
1270		dma_chan[lch_head].next_linked_ch = lch_queue;
1271		dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
1272					lch_queue;
1273	}
1274
1275	l = dma_read(CLNK_CTRL(lch_head));
1276	l &= ~(0x1f);
1277	l |= lch_queue;
1278	dma_write(l, CLNK_CTRL(lch_head));
1279
1280	l = dma_read(CLNK_CTRL(lch_queue));
1281	l &= ~(0x1f);
1282	l |= (dma_chan[lch_queue].next_linked_ch);
1283	dma_write(l, CLNK_CTRL(lch_queue));
1284}
1285
1286/**
1287 * @brief omap_request_dma_chain : Request a chain of DMA channels
1288 *
1289 * @param dev_id - Device id using the dma channel
1290 * @param dev_name - Device name
1291 * @param callback - Call back function
1292 * @chain_id -
1293 * @no_of_chans - Number of channels requested
1294 * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1295 * 					      OMAP_DMA_DYNAMIC_CHAIN
1296 * @params - Channel parameters
1297 *
1298 * @return - Success : 0
1299 * 	     Failure: -EINVAL/-ENOMEM
1300 */
1301int omap_request_dma_chain(int dev_id, const char *dev_name,
1302			   void (*callback) (int lch, u16 ch_status,
1303					     void *data),
1304			   int *chain_id, int no_of_chans, int chain_mode,
1305			   struct omap_dma_channel_params params)
1306{
1307	int *channels;
1308	int i, err;
1309
1310	/* Is the chain mode valid ? */
1311	if (chain_mode != OMAP_DMA_STATIC_CHAIN
1312			&& chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1313		printk(KERN_ERR "Invalid chain mode requested\n");
1314		return -EINVAL;
1315	}
1316
1317	if (unlikely((no_of_chans < 1
1318			|| no_of_chans > dma_lch_count))) {
1319		printk(KERN_ERR "Invalid Number of channels requested\n");
1320		return -EINVAL;
1321	}
1322
1323	/*
1324	 * Allocate a queue to maintain the status of the channels
1325	 * in the chain
1326	 */
1327	channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1328	if (channels == NULL) {
1329		printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1330		return -ENOMEM;
1331	}
1332
1333	/* request and reserve DMA channels for the chain */
1334	for (i = 0; i < no_of_chans; i++) {
1335		err = omap_request_dma(dev_id, dev_name,
1336					callback, NULL, &channels[i]);
1337		if (err < 0) {
1338			int j;
1339			for (j = 0; j < i; j++)
1340				omap_free_dma(channels[j]);
1341			kfree(channels);
1342			printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1343			return err;
1344		}
1345		dma_chan[channels[i]].prev_linked_ch = -1;
1346		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1347
1348		/*
1349		 * Allowing client drivers to set common parameters now,
1350		 * so that later only relevant (src_start, dest_start
1351		 * and element count) can be set
1352		 */
1353		omap_set_dma_params(channels[i], &params);
1354	}
1355
1356	*chain_id = channels[0];
1357	dma_linked_lch[*chain_id].linked_dmach_q = channels;
1358	dma_linked_lch[*chain_id].chain_mode = chain_mode;
1359	dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1360	dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1361
1362	for (i = 0; i < no_of_chans; i++)
1363		dma_chan[channels[i]].chain_id = *chain_id;
1364
1365	/* Reset the Queue pointers */
1366	OMAP_DMA_CHAIN_QINIT(*chain_id);
1367
1368	/* Set up the chain */
1369	if (no_of_chans == 1)
1370		create_dma_lch_chain(channels[0], channels[0]);
1371	else {
1372		for (i = 0; i < (no_of_chans - 1); i++)
1373			create_dma_lch_chain(channels[i], channels[i + 1]);
1374	}
1375
1376	return 0;
1377}
1378EXPORT_SYMBOL(omap_request_dma_chain);
1379
1380/**
1381 * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1382 * params after setting it. Dont do this while dma is running!!
1383 *
1384 * @param chain_id - Chained logical channel id.
1385 * @param params
1386 *
1387 * @return - Success : 0
1388 * 	     Failure : -EINVAL
1389 */
1390int omap_modify_dma_chain_params(int chain_id,
1391				struct omap_dma_channel_params params)
1392{
1393	int *channels;
1394	u32 i;
1395
1396	/* Check for input params */
1397	if (unlikely((chain_id < 0
1398			|| chain_id >= dma_lch_count))) {
1399		printk(KERN_ERR "Invalid chain id\n");
1400		return -EINVAL;
1401	}
1402
1403	/* Check if the chain exists */
1404	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1405		printk(KERN_ERR "Chain doesn't exists\n");
1406		return -EINVAL;
1407	}
1408	channels = dma_linked_lch[chain_id].linked_dmach_q;
1409
1410	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1411		/*
1412		 * Allowing client drivers to set common parameters now,
1413		 * so that later only relevant (src_start, dest_start
1414		 * and element count) can be set
1415		 */
1416		omap_set_dma_params(channels[i], &params);
1417	}
1418
1419	return 0;
1420}
1421EXPORT_SYMBOL(omap_modify_dma_chain_params);
1422
1423/**
1424 * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1425 *
1426 * @param chain_id
1427 *
1428 * @return - Success : 0
1429 * 	     Failure : -EINVAL
1430 */
1431int omap_free_dma_chain(int chain_id)
1432{
1433	int *channels;
1434	u32 i;
1435
1436	/* Check for input params */
1437	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1438		printk(KERN_ERR "Invalid chain id\n");
1439		return -EINVAL;
1440	}
1441
1442	/* Check if the chain exists */
1443	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1444		printk(KERN_ERR "Chain doesn't exists\n");
1445		return -EINVAL;
1446	}
1447
1448	channels = dma_linked_lch[chain_id].linked_dmach_q;
1449	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1450		dma_chan[channels[i]].next_linked_ch = -1;
1451		dma_chan[channels[i]].prev_linked_ch = -1;
1452		dma_chan[channels[i]].chain_id = -1;
1453		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1454		omap_free_dma(channels[i]);
1455	}
1456
1457	kfree(channels);
1458
1459	dma_linked_lch[chain_id].linked_dmach_q = NULL;
1460	dma_linked_lch[chain_id].chain_mode = -1;
1461	dma_linked_lch[chain_id].chain_state = -1;
1462
1463	return (0);
1464}
1465EXPORT_SYMBOL(omap_free_dma_chain);
1466
1467/**
1468 * @brief omap_dma_chain_status - Check if the chain is in
1469 * active / inactive state.
1470 * @param chain_id
1471 *
1472 * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1473 * 	     Failure : -EINVAL
1474 */
1475int omap_dma_chain_status(int chain_id)
1476{
1477	/* Check for input params */
1478	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1479		printk(KERN_ERR "Invalid chain id\n");
1480		return -EINVAL;
1481	}
1482
1483	/* Check if the chain exists */
1484	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1485		printk(KERN_ERR "Chain doesn't exists\n");
1486		return -EINVAL;
1487	}
1488	pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1489			dma_linked_lch[chain_id].q_count);
1490
1491	if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1492		return OMAP_DMA_CHAIN_INACTIVE;
1493
1494	return OMAP_DMA_CHAIN_ACTIVE;
1495}
1496EXPORT_SYMBOL(omap_dma_chain_status);
1497
1498/**
1499 * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1500 * set the params and start the transfer.
1501 *
1502 * @param chain_id
1503 * @param src_start - buffer start address
1504 * @param dest_start - Dest address
1505 * @param elem_count
1506 * @param frame_count
1507 * @param callbk_data - channel callback parameter data.
1508 *
1509 * @return  - Success : 0
1510 * 	      Failure: -EINVAL/-EBUSY
1511 */
1512int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1513			int elem_count, int frame_count, void *callbk_data)
1514{
1515	int *channels;
1516	u32 l, lch;
1517	int start_dma = 0;
1518
1519	/*
1520	 * if buffer size is less than 1 then there is
1521	 * no use of starting the chain
1522	 */
1523	if (elem_count < 1) {
1524		printk(KERN_ERR "Invalid buffer size\n");
1525		return -EINVAL;
1526	}
1527
1528	/* Check for input params */
1529	if (unlikely((chain_id < 0
1530			|| chain_id >= dma_lch_count))) {
1531		printk(KERN_ERR "Invalid chain id\n");
1532		return -EINVAL;
1533	}
1534
1535	/* Check if the chain exists */
1536	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1537		printk(KERN_ERR "Chain doesn't exist\n");
1538		return -EINVAL;
1539	}
1540
1541	/* Check if all the channels in chain are in use */
1542	if (OMAP_DMA_CHAIN_QFULL(chain_id))
1543		return -EBUSY;
1544
1545	/* Frame count may be negative in case of indexed transfers */
1546	channels = dma_linked_lch[chain_id].linked_dmach_q;
1547
1548	/* Get a free channel */
1549	lch = channels[dma_linked_lch[chain_id].q_tail];
1550
1551	/* Store the callback data */
1552	dma_chan[lch].data = callbk_data;
1553
1554	/* Increment the q_tail */
1555	OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1556
1557	/* Set the params to the free channel */
1558	if (src_start != 0)
1559		dma_write(src_start, CSSA(lch));
1560	if (dest_start != 0)
1561		dma_write(dest_start, CDSA(lch));
1562
1563	/* Write the buffer size */
1564	dma_write(elem_count, CEN(lch));
1565	dma_write(frame_count, CFN(lch));
1566
1567	/*
1568	 * If the chain is dynamically linked,
1569	 * then we may have to start the chain if its not active
1570	 */
1571	if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1572
1573		/*
1574		 * In Dynamic chain, if the chain is not started,
1575		 * queue the channel
1576		 */
1577		if (dma_linked_lch[chain_id].chain_state ==
1578						DMA_CHAIN_NOTSTARTED) {
1579			/* Enable the link in previous channel */
1580			if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1581								DMA_CH_QUEUED)
1582				enable_lnk(dma_chan[lch].prev_linked_ch);
1583			dma_chan[lch].state = DMA_CH_QUEUED;
1584		}
1585
1586		/*
1587		 * Chain is already started, make sure its active,
1588		 * if not then start the chain
1589		 */
1590		else {
1591			start_dma = 1;
1592
1593			if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1594							DMA_CH_STARTED) {
1595				enable_lnk(dma_chan[lch].prev_linked_ch);
1596				dma_chan[lch].state = DMA_CH_QUEUED;
1597				start_dma = 0;
1598				if (0 == ((1 << 7) & dma_read(
1599					CCR(dma_chan[lch].prev_linked_ch)))) {
1600					disable_lnk(dma_chan[lch].
1601						    prev_linked_ch);
1602					pr_debug("\n prev ch is stopped\n");
1603					start_dma = 1;
1604				}
1605			}
1606
1607			else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1608							== DMA_CH_QUEUED) {
1609				enable_lnk(dma_chan[lch].prev_linked_ch);
1610				dma_chan[lch].state = DMA_CH_QUEUED;
1611				start_dma = 0;
1612			}
1613			omap_enable_channel_irq(lch);
1614
1615			l = dma_read(CCR(lch));
1616
1617			if ((0 == (l & (1 << 24))))
1618				l &= ~(1 << 25);
1619			else
1620				l |= (1 << 25);
1621			if (start_dma == 1) {
1622				if (0 == (l & (1 << 7))) {
1623					l |= (1 << 7);
1624					dma_chan[lch].state = DMA_CH_STARTED;
1625					pr_debug("starting %d\n", lch);
1626					dma_write(l, CCR(lch));
1627				} else
1628					start_dma = 0;
1629			} else {
1630				if (0 == (l & (1 << 7)))
1631					dma_write(l, CCR(lch));
1632			}
1633			dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1634		}
1635	}
1636
1637	return 0;
1638}
1639EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1640
1641/**
1642 * @brief omap_start_dma_chain_transfers - Start the chain
1643 *
1644 * @param chain_id
1645 *
1646 * @return - Success : 0
1647 * 	     Failure : -EINVAL/-EBUSY
1648 */
1649int omap_start_dma_chain_transfers(int chain_id)
1650{
1651	int *channels;
1652	u32 l, i;
1653
1654	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1655		printk(KERN_ERR "Invalid chain id\n");
1656		return -EINVAL;
1657	}
1658
1659	channels = dma_linked_lch[chain_id].linked_dmach_q;
1660
1661	if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1662		printk(KERN_ERR "Chain is already started\n");
1663		return -EBUSY;
1664	}
1665
1666	if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1667		for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1668									i++) {
1669			enable_lnk(channels[i]);
1670			omap_enable_channel_irq(channels[i]);
1671		}
1672	} else {
1673		omap_enable_channel_irq(channels[0]);
1674	}
1675
1676	l = dma_read(CCR(channels[0]));
1677	l |= (1 << 7);
1678	dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1679	dma_chan[channels[0]].state = DMA_CH_STARTED;
1680
1681	if ((0 == (l & (1 << 24))))
1682		l &= ~(1 << 25);
1683	else
1684		l |= (1 << 25);
1685	dma_write(l, CCR(channels[0]));
1686
1687	dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1688
1689	return 0;
1690}
1691EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1692
1693/**
1694 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1695 *
1696 * @param chain_id
1697 *
1698 * @return - Success : 0
1699 * 	     Failure : EINVAL
1700 */
1701int omap_stop_dma_chain_transfers(int chain_id)
1702{
1703	int *channels;
1704	u32 l, i;
1705	u32 sys_cf;
1706
1707	/* Check for input params */
1708	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1709		printk(KERN_ERR "Invalid chain id\n");
1710		return -EINVAL;
1711	}
1712
1713	/* Check if the chain exists */
1714	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1715		printk(KERN_ERR "Chain doesn't exists\n");
1716		return -EINVAL;
1717	}
1718	channels = dma_linked_lch[chain_id].linked_dmach_q;
1719
1720	/*
1721	 * DMA Errata:
1722	 * Special programming model needed to disable DMA before end of block
1723	 */
1724	sys_cf = dma_read(OCP_SYSCONFIG);
1725	l = sys_cf;
1726	/* Middle mode reg set no Standby */
1727	l &= ~((1 << 12)|(1 << 13));
1728	dma_write(l, OCP_SYSCONFIG);
1729
1730	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1731
1732		/* Stop the Channel transmission */
1733		l = dma_read(CCR(channels[i]));
1734		l &= ~(1 << 7);
1735		dma_write(l, CCR(channels[i]));
1736
1737		/* Disable the link in all the channels */
1738		disable_lnk(channels[i]);
1739		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1740
1741	}
1742	dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1743
1744	/* Reset the Queue pointers */
1745	OMAP_DMA_CHAIN_QINIT(chain_id);
1746
1747	/* Errata - put in the old value */
1748	dma_write(sys_cf, OCP_SYSCONFIG);
1749
1750	return 0;
1751}
1752EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1753
1754/* Get the index of the ongoing DMA in chain */
1755/**
1756 * @brief omap_get_dma_chain_index - Get the element and frame index
1757 * of the ongoing DMA in chain
1758 *
1759 * @param chain_id
1760 * @param ei - Element index
1761 * @param fi - Frame index
1762 *
1763 * @return - Success : 0
1764 * 	     Failure : -EINVAL
1765 */
1766int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1767{
1768	int lch;
1769	int *channels;
1770
1771	/* Check for input params */
1772	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1773		printk(KERN_ERR "Invalid chain id\n");
1774		return -EINVAL;
1775	}
1776
1777	/* Check if the chain exists */
1778	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1779		printk(KERN_ERR "Chain doesn't exists\n");
1780		return -EINVAL;
1781	}
1782	if ((!ei) || (!fi))
1783		return -EINVAL;
1784
1785	channels = dma_linked_lch[chain_id].linked_dmach_q;
1786
1787	/* Get the current channel */
1788	lch = channels[dma_linked_lch[chain_id].q_head];
1789
1790	*ei = dma_read(CCEN(lch));
1791	*fi = dma_read(CCFN(lch));
1792
1793	return 0;
1794}
1795EXPORT_SYMBOL(omap_get_dma_chain_index);
1796
1797/**
1798 * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1799 * ongoing DMA in chain
1800 *
1801 * @param chain_id
1802 *
1803 * @return - Success : Destination position
1804 * 	     Failure : -EINVAL
1805 */
1806int omap_get_dma_chain_dst_pos(int chain_id)
1807{
1808	int lch;
1809	int *channels;
1810
1811	/* Check for input params */
1812	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1813		printk(KERN_ERR "Invalid chain id\n");
1814		return -EINVAL;
1815	}
1816
1817	/* Check if the chain exists */
1818	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1819		printk(KERN_ERR "Chain doesn't exists\n");
1820		return -EINVAL;
1821	}
1822
1823	channels = dma_linked_lch[chain_id].linked_dmach_q;
1824
1825	/* Get the current channel */
1826	lch = channels[dma_linked_lch[chain_id].q_head];
1827
1828	return dma_read(CDAC(lch));
1829}
1830EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1831
1832/**
1833 * @brief omap_get_dma_chain_src_pos - Get the source position
1834 * of the ongoing DMA in chain
1835 * @param chain_id
1836 *
1837 * @return - Success : Destination position
1838 * 	     Failure : -EINVAL
1839 */
1840int omap_get_dma_chain_src_pos(int chain_id)
1841{
1842	int lch;
1843	int *channels;
1844
1845	/* Check for input params */
1846	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1847		printk(KERN_ERR "Invalid chain id\n");
1848		return -EINVAL;
1849	}
1850
1851	/* Check if the chain exists */
1852	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1853		printk(KERN_ERR "Chain doesn't exists\n");
1854		return -EINVAL;
1855	}
1856
1857	channels = dma_linked_lch[chain_id].linked_dmach_q;
1858
1859	/* Get the current channel */
1860	lch = channels[dma_linked_lch[chain_id].q_head];
1861
1862	return dma_read(CSAC(lch));
1863}
1864EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1865#endif	/* ifndef CONFIG_ARCH_OMAP1 */
1866
1867/*----------------------------------------------------------------------------*/
1868
1869#ifdef CONFIG_ARCH_OMAP1
1870
/*
 * Service one pending event on an OMAP1 logical channel.
 *
 * On 1510 silicon, channels 6+ report their status through the upper
 * bits (>> 7) of channels 0-2's CSR; those bits are stashed in
 * saved_csr when a low channel is serviced, and consumed when this
 * function is re-entered with ch >= 6.
 *
 * Returns 1 if an event was handled, 0 if nothing was pending or the
 * interrupt was spurious.
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	if (enable_1510_mode && ch >= 6) {
		/* Consume the status stashed by the low channel's pass */
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = dma_read(CSR(ch));
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		/* Stash the shadow channel's bits for a later pass */
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	/* Low 6 bits carry the event flags; nothing set = nothing to do */
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		printk(KERN_WARNING "Spurious interrupt from DMA channel "
		       "%d (CSR %04x)\n", ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		printk(KERN_WARNING "DMA timeout with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		printk(KERN_WARNING "DMA synchronization event drop occurred "
		       "with device %d\n", dma_chan[ch].dev_id);
	/* Block completion: the channel is no longer active */
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}
1904
1905static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1906{
1907	int ch = ((int) dev_id) - 1;
1908	int handled = 0;
1909
1910	for (;;) {
1911		int handled_now = 0;
1912
1913		handled_now += omap1_dma_handle_ch(ch);
1914		if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1915			handled_now += omap1_dma_handle_ch(ch + 6);
1916		if (!handled_now)
1917			break;
1918		handled += handled_now;
1919	}
1920
1921	return handled ? IRQ_HANDLED : IRQ_NONE;
1922}
1923
1924#else
1925#define omap1_dma_irq_handler	NULL
1926#endif
1927
1928#ifdef CONFIG_ARCH_OMAP2PLUS
1929
/*
 * Service one OMAP2+ logical channel from the shared sDMA interrupt:
 * log (and where needed clean up) error conditions, acknowledge the
 * channel's status bits, advance the chain state if the channel belongs
 * to a chain, then invoke the client callback with the status.
 */
static int omap2_dma_handle_ch(int ch)
{
	u32 status = dma_read(CSR(ch));

	if (!status) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
				ch);
		/* Ack the L0 interrupt bit anyway so we don't re-trigger */
		dma_write(1 << ch, IRQSTATUS_L0);
		return 0;
	}
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
					"channel %d\n", status, ch);
		return 0;
	}
	if (unlikely(status & OMAP_DMA_DROP_IRQ))
		printk(KERN_INFO
		       "DMA synchronization event drop occurred with device "
		       "%d\n", dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
		printk(KERN_INFO "DMA transaction error with device %d\n",
		       dma_chan[ch].dev_id);
		if (cpu_class_is_omap2()) {
			/*
			 * Errata: sDMA Channel is not disabled
			 * after a transaction error. So we explicitely
			 * disable the channel
			 */
			u32 ccr;

			ccr = dma_read(CCR(ch));
			ccr &= ~OMAP_DMA_CCR_EN;
			dma_write(ccr, CCR(ch));
			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
		}
	}
	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
		printk(KERN_INFO "DMA secure error with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
		printk(KERN_INFO "DMA misaligned error with device %d\n",
		       dma_chan[ch].dev_id);

	/* Clear the handled status bits and ack the shared IRQ line */
	dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
	dma_write(1 << ch, IRQSTATUS_L0);

	/* If the ch is not chained then chain_id will be -1 */
	if (dma_chan[ch].chain_id != -1) {
		int chain_id = dma_chan[ch].chain_id;
		dma_chan[ch].state = DMA_CH_NOTSTARTED;
		/*
		 * NOTE(review): CLNK_CTRL bit 15 is presumably the link
		 * enable bit -- if set, the linked successor is now
		 * running; confirm against the TRM.
		 */
		if (dma_read(CLNK_CTRL(ch)) & (1 << 15))
			dma_chan[dma_chan[ch].next_linked_ch].state =
							DMA_CH_STARTED;
		if (dma_linked_lch[chain_id].chain_mode ==
						OMAP_DMA_DYNAMIC_CHAIN)
			disable_lnk(ch);

		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
			OMAP_DMA_CHAIN_INCQHEAD(chain_id);

		/* Re-read status for the callback after chain handling */
		status = dma_read(CSR(ch));
	}

	dma_write(status, CSR(ch));

	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, status, dma_chan[ch].data);

	return 0;
}
2002
2003/* STATUS register count is from 1-32 while our is 0-31 */
2004static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2005{
2006	u32 val, enable_reg;
2007	int i;
2008
2009	val = dma_read(IRQSTATUS_L0);
2010	if (val == 0) {
2011		if (printk_ratelimit())
2012			printk(KERN_WARNING "Spurious DMA IRQ\n");
2013		return IRQ_HANDLED;
2014	}
2015	enable_reg = dma_read(IRQENABLE_L0);
2016	val &= enable_reg; /* Dispatch only relevant interrupts */
2017	for (i = 0; i < dma_lch_count && val != 0; i++) {
2018		if (val & 1)
2019			omap2_dma_handle_ch(i);
2020		val >>= 1;
2021	}
2022
2023	return IRQ_HANDLED;
2024}
2025
/* IRQ action for the single shared OMAP2+ sDMA interrupt line. */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
	.flags = IRQF_DISABLED
};
2031
2032#else
/* Empty placeholder so the setup_irq() reference in omap_init_dma()
 * still compiles on non-OMAP2+ builds (the call is never reached). */
static struct irqaction omap24xx_dma_irq;
2034#endif
2035
2036/*----------------------------------------------------------------------------*/
2037
2038void omap_dma_global_context_save(void)
2039{
2040	omap_dma_global_context.dma_irqenable_l0 =
2041		dma_read(IRQENABLE_L0);
2042	omap_dma_global_context.dma_ocp_sysconfig =
2043		dma_read(OCP_SYSCONFIG);
2044	omap_dma_global_context.dma_gcr = dma_read(GCR);
2045}
2046
2047void omap_dma_global_context_restore(void)
2048{
2049	int ch;
2050
2051	dma_write(omap_dma_global_context.dma_gcr, GCR);
2052	dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2053		OCP_SYSCONFIG);
2054	dma_write(omap_dma_global_context.dma_irqenable_l0,
2055		IRQENABLE_L0);
2056
2057	/*
2058	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
2059	 * after secure sram context save and restore. Hence we need to
2060	 * manually clear those IRQs to avoid spurious interrupts. This
2061	 * affects only secure devices.
2062	 */
2063	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2064		dma_write(0x3 , IRQSTATUS_L0);
2065
2066	for (ch = 0; ch < dma_chan_count; ch++)
2067		if (dma_chan[ch].dev_id != -1)
2068			omap_clear_dma(ch);
2069}
2070
2071/*----------------------------------------------------------------------------*/
2072
/*
 * One-time sDMA controller setup: map the register space, size and
 * allocate the per-channel bookkeeping, reset every channel, and claim
 * the interrupt lines (per-channel on OMAP1, one shared line on OMAP2+).
 */
static int __init omap_init_dma(void)
{
	unsigned long base;
	int ch, r;

	/* Pick the register base and logical channel count per SoC family */
	if (cpu_class_is_omap1()) {
		base = OMAP1_DMA_BASE;
		dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap24xx()) {
		base = OMAP24XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap34xx()) {
		base = OMAP34XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap44xx()) {
		base = OMAP44XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else {
		pr_err("DMA init failed for unsupported omap\n");
		return -ENODEV;
	}

	omap_dma_base = ioremap(base, SZ_4K);
	BUG_ON(!omap_dma_base);

	/* Honour the "omap_dma_reserve_ch=" boot argument (OMAP2+ only) */
	if (cpu_class_is_omap2() && omap_dma_reserve_channels
			&& (omap_dma_reserve_channels <= dma_lch_count))
		dma_lch_count = omap_dma_reserve_channels;

	dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
				GFP_KERNEL);
	if (!dma_chan) {
		r = -ENOMEM;
		goto out_unmap;
	}

	/* Chain bookkeeping is only used by the OMAP2+ chain API */
	if (cpu_class_is_omap2()) {
		dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
						dma_lch_count, GFP_KERNEL);
		if (!dma_linked_lch) {
			r = -ENOMEM;
			goto out_free;
		}
	}

	if (cpu_is_omap15xx()) {
		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
		dma_chan_count = 9;
		enable_1510_mode = 1;
	} else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
		printk(KERN_INFO "OMAP DMA hardware version %d\n",
		       dma_read(HW_ID));
		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
		       (dma_read(CAPS_0_U) << 16) |
		       dma_read(CAPS_0_L),
		       (dma_read(CAPS_1_U) << 16) |
		       dma_read(CAPS_1_L),
		       dma_read(CAPS_2), dma_read(CAPS_3),
		       dma_read(CAPS_4));
		if (!enable_1510_mode) {
			u16 w;

			/* Disable OMAP 3.0/3.1 compatibility mode. */
			w = dma_read(GSCR);
			w |= 1 << 3;
			dma_write(w, GSCR);
			dma_chan_count = 16;
		} else
			dma_chan_count = 9;
	} else if (cpu_class_is_omap2()) {
		u8 revision = dma_read(REVISION) & 0xff;
		printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
		       revision >> 4, revision & 0xf);
		dma_chan_count = dma_lch_count;
	} else {
		/* Unreachable given the base-address checks above */
		dma_chan_count = 0;
		return 0;
	}

	spin_lock_init(&dma_chan_lock);

	/* Reset every channel and mark it unallocated */
	for (ch = 0; ch < dma_chan_count; ch++) {
		omap_clear_dma(ch);
		if (cpu_class_is_omap2())
			omap2_disable_irq_lch(ch);

		dma_chan[ch].dev_id = -1;
		dma_chan[ch].next_lch = -1;

		/* 1510: channels 6+ have no IRQ line of their own */
		if (ch >= 6 && enable_1510_mode)
			continue;

		if (cpu_class_is_omap1()) {
			/*
			 * request_irq() doesn't like dev_id (ie. ch) being
			 * zero, so we have to kludge around this.
			 */
			r = request_irq(omap1_dma_irq[ch],
					omap1_dma_irq_handler, 0, "DMA",
					(void *) (ch + 1));
			if (r != 0) {
				int i;

				printk(KERN_ERR "unable to request IRQ %d "
				       "for DMA (error %d)\n",
				       omap1_dma_irq[ch], r);
				/* Release the IRQs claimed so far */
				for (i = 0; i < ch; i++)
					free_irq(omap1_dma_irq[i],
						 (void *) (i + 1));
				goto out_free;
			}
		}
	}

	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
				DMA_DEFAULT_FIFO_DEPTH, 0);

	/* OMAP2+ uses a single shared interrupt line for all channels */
	if (cpu_class_is_omap2()) {
		int irq;
		if (cpu_is_omap44xx())
			irq = OMAP44XX_IRQ_SDMA_0;
		else
			irq = INT_24XX_SDMA_IRQ0;
		setup_irq(irq, &omap24xx_dma_irq);
	}

	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		/* Enable smartidle idlemodes and autoidle */
		u32 v = dma_read(OCP_SYSCONFIG);
		v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
				DMA_SYSCONFIG_SIDLEMODE_MASK |
				DMA_SYSCONFIG_AUTOIDLE);
		v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_AUTOIDLE);
		dma_write(v , OCP_SYSCONFIG);
		/* reserve dma channels 0 and 1 in high security devices */
		if (cpu_is_omap34xx() &&
			(omap_type() != OMAP2_DEVICE_TYPE_GP)) {
			printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
					"HS ROM code\n");
			dma_chan[0].dev_id = 0;
			dma_chan[1].dev_id = 1;
		}
	}

	return 0;

out_free:
	kfree(dma_chan);

out_unmap:
	iounmap(omap_dma_base);

	return r;
}
2230
2231arch_initcall(omap_init_dma);
2232
2233/*
2234 * Reserve the omap SDMA channels using cmdline bootarg
2235 * "omap_dma_reserve_ch=". The valid range is 1 to 32
2236 */
2237static int __init omap_dma_cmdline_reserve_ch(char *str)
2238{
2239	if (get_option(&str, &omap_dma_reserve_channels) != 1)
2240		omap_dma_reserve_channels = 0;
2241	return 1;
2242}
2243
2244__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2245