1/*
2 *  linux/arch/arm/mach-imx/dma.c
3 *
4 *  imx DMA registration and IRQ dispatching
5 *
6 *  This program is free software; you can redistribute it and/or modify
7 *  it under the terms of the GNU General Public License version 2 as
8 *  published by the Free Software Foundation.
9 *
10 *  2004-03-03 Sascha Hauer <sascha@saschahauer.de>
11 *             initial version heavily inspired by
12 *             linux/arch/arm/mach-pxa/dma.c
13 *
14 *  2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
15 *             Changed to support scatter gather DMA
16 *             by taking Russell's code from RiscPC
17 *
18 *  2006-05-31 Pavel Pisa <pisa@cmp.felk.cvut.cz>
19 *             Corrected error handling code.
20 *
21 */
22
23#undef DEBUG
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/interrupt.h>
29#include <linux/errno.h>
30
31#include <asm/system.h>
32#include <asm/irq.h>
33#include <asm/hardware.h>
34#include <asm/dma.h>
35#include <asm/arch/imx-dma.h>
36
/* Per-channel driver state, indexed by channel number (exported below) */
struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
38
/**
 * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
 * @dma_ch: i.MX DMA channel number
 * @lastcount: number of bytes transferred during last transfer
 *
 * Functions prepares DMA controller for next sg data chunk transfer.
 * The @lastcount argument informs function about number of bytes transferred
 * during last block. Zero value can be used for @lastcount to setup DMA
 * for the first chunk.
 *
 * Return value: number of bytes programmed for the next chunk, or zero
 * when the sg list (or the @resbytes transfer budget) is exhausted, or
 * when the channel is not allocated or has no sg list attached.
 */
static inline int imx_dma_sg_next(imx_dmach_t dma_ch, unsigned int lastcount)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
	unsigned int nextcount;
	unsigned int nextaddr;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for  not allocated channel %d\n",
		       __FUNCTION__, dma_ch);
		return 0;
	}

	/* account for the bytes moved by the previous chunk */
	imxdma->resbytes -= lastcount;

	if (!imxdma->sg) {
		pr_debug("imxdma%d: no sg data\n", dma_ch);
		return 0;
	}

	/* advance the byte counter within the current sg entry */
	imxdma->sgbc += lastcount;
	if ((imxdma->sgbc >= imxdma->sg->length) || !imxdma->resbytes) {
		if ((imxdma->sgcount <= 1) || !imxdma->resbytes) {
			/* whole list consumed or byte budget exhausted */
			pr_debug("imxdma%d: sg transfer limit reached\n",
				 dma_ch);
			imxdma->sgcount=0;
			imxdma->sg = NULL;
			return 0;
		} else {
			/* step to the next sg entry */
			imxdma->sgcount--;
			imxdma->sg++;
			imxdma->sgbc = 0;
		}
	}
	nextcount = imxdma->sg->length - imxdma->sgbc;
	nextaddr = imxdma->sg->dma_address + imxdma->sgbc;

	/* never program more than the remaining transfer budget */
	if(imxdma->resbytes < nextcount)
		nextcount = imxdma->resbytes;

	/* for READ, memory is the destination; otherwise it is the source */
	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
		DAR(dma_ch) = nextaddr;
	else
		SAR(dma_ch) = nextaddr;

	CNTR(dma_ch) = nextcount;
	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, size 0x%08x\n",
		 dma_ch, DAR(dma_ch), SAR(dma_ch), CNTR(dma_ch));

	return nextcount;
}
99
100/*
101 * imx_dma_setup_sg_base - scatter-gather DMA emulation
102 * @dma_ch: i.MX DMA channel number
103 * @sg: pointer to the scatter-gather list/vector
104 * @sgcount: scatter-gather list hungs count
105 *
106 * Functions sets up i.MX DMA state for emulated scatter-gather transfer
107 * and sets up channel registers to be ready for the first chunk
108 */
109static int
110imx_dma_setup_sg_base(imx_dmach_t dma_ch,
111		      struct scatterlist *sg, unsigned int sgcount)
112{
113	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
114
115	imxdma->sg = sg;
116	imxdma->sgcount = sgcount;
117	imxdma->sgbc = 0;
118	return imx_dma_sg_next(dma_ch, 0);
119}
120
121/**
122 * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from device transfer
123 * @dma_ch: i.MX DMA channel number
124 * @dma_address: the DMA/physical memory address of the linear data block
125 *		to transfer
126 * @dma_length: length of the data block in bytes
127 * @dev_addr: physical device port address
128 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
129 *           or %DMA_MODE_WRITE from memory to the device
130 *
131 * The function setups DMA channel source and destination addresses for transfer
132 * specified by provided parameters. The scatter-gather emulation is disabled,
133 * because linear data block
134 * form the physical address range is transferred.
135 * Return value: if incorrect parameters are provided -%EINVAL.
136 *		Zero indicates success.
137 */
138int
139imx_dma_setup_single(imx_dmach_t dma_ch, dma_addr_t dma_address,
140		     unsigned int dma_length, unsigned int dev_addr,
141		     dmamode_t dmamode)
142{
143	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
144
145	imxdma->sg = NULL;
146	imxdma->sgcount = 0;
147	imxdma->dma_mode = dmamode;
148	imxdma->resbytes = dma_length;
149
150	if (!dma_address) {
151		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
152		       dma_ch);
153		return -EINVAL;
154	}
155
156	if (!dma_length) {
157		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
158		       dma_ch);
159		return -EINVAL;
160	}
161
162	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
163		pr_debug("imxdma%d: mx_dma_setup_single2dev dma_addressg=0x%08x dma_length=%d dev_addr=0x%08x for read\n",
164			dma_ch, (unsigned int)dma_address, dma_length,
165			dev_addr);
166		SAR(dma_ch) = dev_addr;
167		DAR(dma_ch) = (unsigned int)dma_address;
168	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
169		pr_debug("imxdma%d: mx_dma_setup_single2dev dma_addressg=0x%08x dma_length=%d dev_addr=0x%08x for write\n",
170			dma_ch, (unsigned int)dma_address, dma_length,
171			dev_addr);
172		SAR(dma_ch) = (unsigned int)dma_address;
173		DAR(dma_ch) = dev_addr;
174	} else {
175		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
176		       dma_ch);
177		return -EINVAL;
178	}
179
180	CNTR(dma_ch) = dma_length;
181
182	return 0;
183}
184
185/**
186 * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
187 * @dma_ch: i.MX DMA channel number
188 * @sg: pointer to the scatter-gather list/vector
189 * @sgcount: scatter-gather list hungs count
190 * @dma_length: total length of the transfer request in bytes
191 * @dev_addr: physical device port address
192 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
193 *           or %DMA_MODE_WRITE from memory to the device
194 *
195 * The function sets up DMA channel state and registers to be ready for transfer
196 * specified by provided parameters. The scatter-gather emulation is set up
197 * according to the parameters.
198 *
199 * The full preparation of the transfer requires setup of more register
200 * by the caller before imx_dma_enable() can be called.
201 *
202 * %BLR(dma_ch) holds transfer burst length in bytes, 0 means 64 bytes
203 *
204 * %RSSR(dma_ch) has to be set to the DMA request line source %DMA_REQ_xxx
205 *
206 * %CCR(dma_ch) has to specify transfer parameters, the next settings is typical
207 * for linear or simple scatter-gather transfers if %DMA_MODE_READ is specified
208 *
209 * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
210 *
211 * The typical setup for %DMA_MODE_WRITE is specified by next options combination
212 *
213 * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
214 *
215 * Be careful here and do not mistakenly mix source and target device
216 * port sizes constants, they are really different:
217 * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
218 * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
219 *
220 * Return value: if incorrect parameters are provided -%EINVAL.
221 * Zero indicates success.
222 */
223int
224imx_dma_setup_sg(imx_dmach_t dma_ch,
225		 struct scatterlist *sg, unsigned int sgcount, unsigned int dma_length,
226		 unsigned int dev_addr, dmamode_t dmamode)
227{
228	int res;
229	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
230
231	imxdma->sg = NULL;
232	imxdma->sgcount = 0;
233	imxdma->dma_mode = dmamode;
234	imxdma->resbytes = dma_length;
235
236	if (!sg || !sgcount) {
237		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg epty sg list\n",
238		       dma_ch);
239		return -EINVAL;
240	}
241
242	if (!sg->length) {
243		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
244		       dma_ch);
245		return -EINVAL;
246	}
247
248	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
249		pr_debug("imxdma%d: mx_dma_setup_sg2dev sg=%p sgcount=%d total length=%d dev_addr=0x%08x for read\n",
250			dma_ch, sg, sgcount, dma_length, dev_addr);
251		SAR(dma_ch) = dev_addr;
252	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
253		pr_debug("imxdma%d: mx_dma_setup_sg2dev sg=%p sgcount=%d total length=%d dev_addr=0x%08x for write\n",
254			dma_ch, sg, sgcount, dma_length, dev_addr);
255		DAR(dma_ch) = dev_addr;
256	} else {
257		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
258		       dma_ch);
259		return -EINVAL;
260	}
261
262	res = imx_dma_setup_sg_base(dma_ch, sg, sgcount);
263	if (res <= 0) {
264		printk(KERN_ERR "imxdma%d: no sg chunk ready\n", dma_ch);
265		return -EINVAL;
266	}
267
268	return 0;
269}
270
271/**
272 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification handlers
273 * @dma_ch: i.MX DMA channel number
274 * @irq_handler: the pointer to the function called if the transfer
275 *		ends successfully
276 * @err_handler: the pointer to the function called if the premature
277 *		end caused by error occurs
278 * @data: user specified value to be passed to the handlers
279 */
280int
281imx_dma_setup_handlers(imx_dmach_t dma_ch,
282		       void (*irq_handler) (int, void *),
283		       void (*err_handler) (int, void *, int),
284		       void *data)
285{
286	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
287	unsigned long flags;
288
289	if (!imxdma->name) {
290		printk(KERN_CRIT "%s: called for  not allocated channel %d\n",
291		       __FUNCTION__, dma_ch);
292		return -ENODEV;
293	}
294
295	local_irq_save(flags);
296	DISR = (1 << dma_ch);
297	imxdma->irq_handler = irq_handler;
298	imxdma->err_handler = err_handler;
299	imxdma->data = data;
300	local_irq_restore(flags);
301	return 0;
302}
303
/**
 * imx_dma_enable - function to start i.MX DMA channel operation
 * @dma_ch: i.MX DMA channel number
 *
 * The channel has to be allocated by driver through imx_dma_request()
 * or imx_dma_request_by_prio() function.
 * The transfer parameters has to be set to the channel registers through
 * call of the imx_dma_setup_single() or imx_dma_setup_sg() function
 * and registers %BLR(dma_ch), %RSSR(dma_ch) and %CCR(dma_ch) has to
 * be set prior this function call by the channel user.
 */
void imx_dma_enable(imx_dmach_t dma_ch)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", dma_ch);

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for  not allocated channel %d\n",
		       __FUNCTION__, dma_ch);
		return;
	}

	local_irq_save(flags);
	/* ack stale interrupt, unmask this channel, then start the engine */
	DISR = (1 << dma_ch);
	DIMR &= ~(1 << dma_ch);
	CCR(dma_ch) |= CCR_CEN;
	local_irq_restore(flags);
}
334
/**
 * imx_dma_disable - stop, finish i.MX DMA channel operatin
 * @dma_ch: i.MX DMA channel number
 *
 * Masks the channel interrupt, clears the channel enable bit and
 * acknowledges any pending interrupt (reverse of imx_dma_enable()).
 */
void imx_dma_disable(imx_dmach_t dma_ch)
{
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", dma_ch);

	local_irq_save(flags);
	/* mask channel interrupt, stop the engine, ack pending interrupt */
	DIMR |= (1 << dma_ch);
	CCR(dma_ch) &= ~CCR_CEN;
	DISR = (1 << dma_ch);
	local_irq_restore(flags);
}
351
352/**
353 * imx_dma_request - request/allocate specified channel number
354 * @dma_ch: i.MX DMA channel number
355 * @name: the driver/caller own non-%NULL identification
356 */
357int imx_dma_request(imx_dmach_t dma_ch, const char *name)
358{
359	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
360	unsigned long flags;
361
362	/* basic sanity checks */
363	if (!name)
364		return -EINVAL;
365
366	if (dma_ch >= IMX_DMA_CHANNELS) {
367		printk(KERN_CRIT "%s: called for  non-existed channel %d\n",
368		       __FUNCTION__, dma_ch);
369		return -EINVAL;
370	}
371
372	local_irq_save(flags);
373	if (imxdma->name) {
374		local_irq_restore(flags);
375		return -ENODEV;
376	}
377
378	imxdma->name = name;
379	imxdma->irq_handler = NULL;
380	imxdma->err_handler = NULL;
381	imxdma->data = NULL;
382	imxdma->sg = NULL;
383	local_irq_restore(flags);
384	return 0;
385}
386
387/**
388 * imx_dma_free - release previously acquired channel
389 * @dma_ch: i.MX DMA channel number
390 */
391void imx_dma_free(imx_dmach_t dma_ch)
392{
393	unsigned long flags;
394	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
395
396	if (!imxdma->name) {
397		printk(KERN_CRIT
398		       "%s: trying to free channel %d which is already freed\n",
399		       __FUNCTION__, dma_ch);
400		return;
401	}
402
403	local_irq_save(flags);
404	/* Disable interrupts */
405	DIMR |= (1 << dma_ch);
406	CCR(dma_ch) &= ~CCR_CEN;
407	imxdma->name = NULL;
408	local_irq_restore(flags);
409}
410
411/**
412 * imx_dma_request_by_prio - find and request some of free channels best suiting requested priority
413 * @dma_ch: i.MX DMA channel number
414 * @name: the driver/caller own non-%NULL identification
415 * @prio: one of the hardware distinguished priority level:
416 *        %DMA_PRIO_HIGH, %DMA_PRIO_MEDIUM, %DMA_PRIO_LOW
417 *
418 * This function tries to find free channel in the specified priority group
419 * if the priority cannot be achieved it tries to look for free channel
420 * in the higher and then even lower priority groups.
421 *
422 * Return value: If there is no free channel to allocate, -%ENODEV is returned.
423 *               Zero value indicates successful channel allocation.
424 */
425int
426imx_dma_request_by_prio(imx_dmach_t * pdma_ch, const char *name,
427			imx_dma_prio prio)
428{
429	int i;
430	int best;
431
432	switch (prio) {
433	case (DMA_PRIO_HIGH):
434		best = 8;
435		break;
436	case (DMA_PRIO_MEDIUM):
437		best = 4;
438		break;
439	case (DMA_PRIO_LOW):
440	default:
441		best = 0;
442		break;
443	}
444
445	for (i = best; i < IMX_DMA_CHANNELS; i++) {
446		if (!imx_dma_request(i, name)) {
447			*pdma_ch = i;
448			return 0;
449		}
450	}
451
452	for (i = best - 1; i >= 0; i--) {
453		if (!imx_dma_request(i, name)) {
454			*pdma_ch = i;
455			return 0;
456		}
457	}
458
459	printk(KERN_ERR "%s: no free DMA channel found\n", __FUNCTION__);
460
461	return -ENODEV;
462}
463
/*
 * dma_err_handler - shared interrupt handler for all DMA error conditions
 *
 * Reads the four error status registers (burst timeout, request timeout,
 * transfer error, buffer overflow), acknowledges them per channel by
 * writing the channel bit back, and dispatches to the channel's
 * err_handler if one is installed; otherwise drops the sg list and
 * logs a warning.
 */
static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
	int i, disr = DISR;
	struct imx_dma_channel *channel;
	/* OR of the four status registers: one bit per channel with any error */
	unsigned int err_mask = DBTOSR | DRTOSR | DSESR | DBOSR;
	int errcode;

	/* acknowledge interrupts only for the channels that show an error */
	DISR = disr & err_mask;
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if(!(err_mask & (1 << i)))
			continue;
		channel = &imx_dma_channels[i];
		errcode = 0;

		/* each status register is write-one-to-clear per channel */
		if (DBTOSR & (1 << i)) {
			DBTOSR = (1 << i);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (DRTOSR & (1 << i)) {
			DRTOSR = (1 << i);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (DSESR & (1 << i)) {
			DSESR = (1 << i);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (DBOSR & (1 << i)) {
			DBOSR = (1 << i);
			errcode |= IMX_DMA_ERR_BUFFER;
		}

		/*
		 * The cleaning of @sg field would be questionable
		 * there, because its value can help to compute
		 * remaining/transferred bytes count in the handler
		 */
		/*imx_dma_channels[i].sg = NULL;*/

		if (channel->name && channel->err_handler) {
			channel->err_handler(i, channel->data, errcode);
			continue;
		}

		/* no handler installed: abandon the sg transfer and complain */
		imx_dma_channels[i].sg = NULL;

		printk(KERN_WARNING
		       "DMA timeout on channel %d (%s) -%s%s%s%s\n",
		       i, channel->name,
		       errcode&IMX_DMA_ERR_BURST?    " burst":"",
		       errcode&IMX_DMA_ERR_REQUEST?  " request":"",
		       errcode&IMX_DMA_ERR_TRANSFER? " transfer":"",
		       errcode&IMX_DMA_ERR_BUFFER?   " buffer":"");
	}
	return IRQ_HANDLED;
}
519
/*
 * dma_irq_handler - shared completion interrupt handler for all channels
 *
 * For every channel flagged in DISR: if more sg chunks remain, restart
 * the channel on the next chunk; otherwise call the user's irq_handler.
 */
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i, disr = DISR;

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		     disr);

	/* acknowledge all flagged channel interrupts at once */
	DISR = disr;
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			struct imx_dma_channel *channel = &imx_dma_channels[i];
			if (channel->name) {
				/* CNTR(i) holds the byte count just transferred */
				if (imx_dma_sg_next(i, CNTR(i))) {
					/* more sg chunks: pulse CEN to restart */
					CCR(i) &= ~CCR_CEN;
					mb();
					CCR(i) |= CCR_CEN;
				} else {
					/* transfer finished: notify the user */
					if (channel->irq_handler)
						channel->irq_handler(i,
							channel->data);
				}
			} else {
				/*
				 * IRQ for an unregistered DMA channel:
				 * it was already acknowledged by the DISR
				 * write above, so just warn about it.
				 */
				printk(KERN_WARNING
				       "spurious IRQ for DMA channel %d\n", i);
			}
		}
	}
	return IRQ_HANDLED;
}
553
554static int __init imx_dma_init(void)
555{
556	int ret;
557	int i;
558
559	/* reset DMA module */
560	DCR = DCR_DRST;
561
562	ret = request_irq(DMA_INT, dma_irq_handler, 0, "DMA", NULL);
563	if (ret) {
564		printk(KERN_CRIT "Wow!  Can't register IRQ for DMA\n");
565		return ret;
566	}
567
568	ret = request_irq(DMA_ERR, dma_err_handler, 0, "DMA", NULL);
569	if (ret) {
570		printk(KERN_CRIT "Wow!  Can't register ERRIRQ for DMA\n");
571		free_irq(DMA_INT, NULL);
572	}
573
574	/* enable DMA module */
575	DCR = DCR_DEN;
576
577	/* clear all interrupts */
578	DISR = (1 << IMX_DMA_CHANNELS) - 1;
579
580	/* enable interrupts */
581	DIMR = (1 << IMX_DMA_CHANNELS) - 1;
582
583	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
584		imx_dma_channels[i].sg = NULL;
585		imx_dma_channels[i].dma_num = i;
586	}
587
588	return ret;
589}
590
/* register the DMA core early, before drivers that request channels */
arch_initcall(imx_dma_init);

/* public API exported for DMA client drivers */
EXPORT_SYMBOL(imx_dma_setup_single);
EXPORT_SYMBOL(imx_dma_setup_sg);
EXPORT_SYMBOL(imx_dma_setup_handlers);
EXPORT_SYMBOL(imx_dma_enable);
EXPORT_SYMBOL(imx_dma_disable);
EXPORT_SYMBOL(imx_dma_request);
EXPORT_SYMBOL(imx_dma_free);
EXPORT_SYMBOL(imx_dma_request_by_prio);
EXPORT_SYMBOL(imx_dma_channels);
602