/*
 *  linux/arch/arm/plat-mxc/dma-v1.c
 *
 *  i.MX DMA registration and IRQ dispatching
 *
 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/dma-v1.h>

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

/*
 * struct imx_dma_channel - i.MX specific DMA extension
 * @name: name specified by DMA client
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error condition
 * @data: client's context data for callbacks
 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower than or equal to the sum of SG mapped chunk sizes)
 * @sgcount: number of chunks to be read/written
 *
 * The structure is used for i.MX DMA processing. It would probably be better
 * to use @struct dma_struct for external interfacing in the future and keep
 * @struct imx_dma_channel only as an extension to it.
 */

struct imx_dma_channel {
	const char *name;
	void (*irq_handler) (int, void *);
	void (*err_handler) (int, void *, int errcode);
	void (*prog_handler) (int, void *, struct scatterlist *);
	void *data;
	unsigned int dma_mode;
	struct scatterlist *sg;
	unsigned int resbytes;
	int dma_num;

	int in_use;

	u32 ccr_from_device;
	u32 ccr_to_device;

	struct timer_list watchdog;

	int hw_chaining;
};

static void __iomem *imx_dmav1_baseaddr;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}

static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];

static struct clk *dma_clk;

static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}

/*
 * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long now;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for non-allocated channel %d\n",
		       __func__, channel);
		return 0;
	}

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(channel));

	imx_dmav1_writel(now, DMA_CNTR(channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", channel,
		 imx_dmav1_readl(DMA_DAR(channel)),
		 imx_dmav1_readl(DMA_SAR(channel)),
		 imx_dmav1_readl(DMA_CNTR(channel)));

	return now;
}

/**
 * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from
 * device transfer
 *
 * @channel: i.MX DMA channel number
 * @dma_address: the DMA/physical memory address of the linear data block
 *		to transfer
 * @dma_length: length of the data block in bytes
 * @dev_addr: physical device port address
 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
 *           or %DMA_MODE_WRITE from memory to the device
 *
 * Return value: if incorrect parameters are provided -%EINVAL.
 *		Zero indicates success.
 */
int
imx_dma_setup_single(int channel, dma_addr_t dma_address,
		     unsigned int dma_length, unsigned int dev_addr,
		     unsigned int dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	imxdma->sg = NULL;
	imxdma->dma_mode = dmamode;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       channel);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			"dev_addr=0x%08x for read\n",
			channel, __func__, (unsigned int)dma_address,
			dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(dma_address, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			"dev_addr=0x%08x for write\n",
			channel, __func__, (unsigned int)dma_address,
			dma_length, dev_addr);

		imx_dmav1_writel(dma_address, DMA_SAR(channel));
		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_to_device,
				DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dmav1_writel(dma_length, DMA_CNTR(channel));

	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_single);

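/*
 * Example (illustrative sketch only, not part of the driver): a minimal
 * memory-to-device transfer using imx_dma_setup_single().  The channel is
 * assumed to be already requested and configured via imx_dma_config_channel()
 * and imx_dma_config_burstlen(); "buf_phys", "fifo_phys", "my_done" and
 * "my_data" are hypothetical placeholders supplied by the client driver.
 *
 *	imx_dma_setup_handlers(channel, my_done, NULL, my_data);
 *	ret = imx_dma_setup_single(channel, buf_phys, len, fifo_phys,
 *				   DMA_MODE_WRITE);
 *	if (!ret)
 *		imx_dma_enable(channel);
 */
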
/**
 * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
 * @channel: i.MX DMA channel number
 * @sg: pointer to the scatter-gather list/vector
 * @sgcount: number of entries in the scatter-gather list
 * @dma_length: total length of the transfer request in bytes
 * @dev_addr: physical device port address
 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
 *           or %DMA_MODE_WRITE from memory to the device
 *
 * The function sets up DMA channel state and registers to be ready for
 * transfer specified by provided parameters. The scatter-gather emulation
 * is set up according to the parameters.
 *
 * The full preparation of the transfer requires setup of more registers
 * by the caller before imx_dma_enable() can be called.
 *
 * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes
 *
 * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx
 *
 * %CCR(channel) has to specify transfer parameters, the following setting
 * is typical for linear or simple scatter-gather transfers if %DMA_MODE_READ
 * is specified
 *
 * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
 *
 * The typical setup for %DMA_MODE_WRITE is specified by the following
 * combination of options
 *
 * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
 *
 * Be careful here and do not mix up the source and destination device
 * port size constants, they are really different:
 * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
 * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
 *
 * Return value: if incorrect parameters are provided -%EINVAL.
 * Zero indicates success.
 */
int
imx_dma_setup_sg(int channel,
		 struct scatterlist *sg, unsigned int sgcount,
		 unsigned int dma_length, unsigned int dev_addr,
		 unsigned int dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (imxdma->in_use)
		return -EBUSY;

	imxdma->sg = sg;
	imxdma->dma_mode = dmamode;
	imxdma->resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for read\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for write\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dma_sg_next(channel, sg);

	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_sg);

int
imx_dma_config_channel(int channel, unsigned int config_port,
	unsigned int config_mem, unsigned int dmareq, int hw_chaining)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	u32 dreq = 0;

	imxdma->hw_chaining = 0;

	if (hw_chaining) {
		imxdma->hw_chaining = 1;
		if (!imx_dma_hw_chain(imxdma))
			return -EINVAL;
	}

	if (dmareq)
		dreq = CCR_REN;

	imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
	imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;

	imx_dmav1_writel(dmareq, DMA_RSSR(channel));

	return 0;
}
EXPORT_SYMBOL(imx_dma_config_channel);

void imx_dma_config_burstlen(int channel, unsigned int burstlen)
{
	imx_dmav1_writel(burstlen, DMA_BLR(channel));
}
EXPORT_SYMBOL(imx_dma_config_burstlen);

/**
 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
 * handlers
 * @channel: i.MX DMA channel number
 * @irq_handler: the pointer to the function called if the transfer
 *		ends successfully
 * @err_handler: the pointer to the function called if the transfer
 *		ends prematurely due to an error
 * @data: user specified value to be passed to the handlers
 */
int
imx_dma_setup_handlers(int channel,
		       void (*irq_handler) (int, void *),
		       void (*err_handler) (int, void *, int),
		       void *data)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for non-allocated channel %d\n",
		       __func__, channel);
		return -ENODEV;
	}

	local_irq_save(flags);
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdma->irq_handler = irq_handler;
	imxdma->err_handler = err_handler;
	imxdma->data = data;
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_handlers);

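/*
 * Example (illustrative sketch only, not part of the driver): the typical
 * call sequence for a device-to-memory scatter-gather transfer as described
 * in the imx_dma_setup_sg() documentation above.  The request line
 * "DMA_REQ_xxx", the port/memory configuration constants (assumed to come
 * from <mach/dma-v1.h>) and "my_irq"/"my_err"/"my_data" are hypothetical
 * placeholders to be replaced by the client driver.
 *
 *	int ch = imx_dma_request_by_prio("my-driver", DMA_PRIO_HIGH);
 *
 *	if (ch < 0)
 *		return ch;
 *	imx_dma_config_channel(ch, IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
 *			       IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
 *			       DMA_REQ_xxx, 0);
 *	imx_dma_config_burstlen(ch, 16);
 *	imx_dma_setup_handlers(ch, my_irq, my_err, my_data);
 *	imx_dma_setup_sg(ch, sg, sgcount, total_len, fifo_phys,
 *			 DMA_MODE_READ);
 *	imx_dma_enable(ch);
 */
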
/**
 * imx_dma_setup_progression_handler - setup i.MX DMA channel progression
 * handler
 * @channel: i.MX DMA channel number
 * @prog_handler: the pointer to the function called if the transfer progresses
 */
int
imx_dma_setup_progression_handler(int channel,
			void (*prog_handler) (int, void*, struct scatterlist*))
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for non-allocated channel %d\n",
		       __func__, channel);
		return -ENODEV;
	}

	local_irq_save(flags);
	imxdma->prog_handler = prog_handler;
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_progression_handler);

/**
 * imx_dma_enable - function to start i.MX DMA channel operation
 * @channel: i.MX DMA channel number
 *
 * The channel has to be allocated by the driver through the imx_dma_request()
 * or imx_dma_request_by_prio() function.
 * The transfer parameters have to be set to the channel registers through
 * a call of the imx_dma_setup_single() or imx_dma_setup_sg() function
 * and the registers %BLR(channel), %RSSR(channel) and %CCR(channel) have to
 * be set prior to this function call by the channel user.
 */
void imx_dma_enable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for non-allocated channel %d\n",
		       __func__, channel);
		return;
	}

	if (imxdma->in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

#ifdef CONFIG_ARCH_MX2
	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdma->sg && imx_dma_hw_chain(imxdma)) {
		imxdma->sg = sg_next(imxdma->sg);
		if (imxdma->sg) {
			u32 tmp;
			imx_dma_sg_next(channel, imxdma->sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}
#endif
	imxdma->in_use = 1;

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_enable);

/**
 * imx_dma_disable - stop, finish i.MX DMA channel operation
 * @channel: i.MX DMA channel number
 */
void imx_dma_disable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imx_dma_hw_chain(imxdma))
		del_timer(&imxdma->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdma->in_use = 0;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_disable);

#ifdef CONFIG_ARCH_MX2
static void imx_dma_watchdog(unsigned long chno)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	imxdma->sg = NULL;

	if (imxdma->err_handler)
		imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
}
#endif

static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
	int i, disr;
	struct imx_dma_channel *imxdma;
	unsigned int err_mask;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR)  |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		imxdma = &imx_dma_channels[i];
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		if (imxdma->name && imxdma->err_handler) {
			imxdma->err_handler(i, imxdma->data, errcode);
			continue;
		}

		imx_dma_channels[i].sg = NULL;

		printk(KERN_WARNING
		       "DMA timeout on channel %d (%s) -%s%s%s%s\n",
		       i, imxdma->name,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

static void dma_irq_handle_channel(int chno)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];

	if (!imxdma->name) {
		/*
		 * IRQ for an unregistered DMA channel:
		 * let's clear the interrupts and disable it.
		 */
		printk(KERN_WARNING
		       "spurious IRQ for DMA channel %d\n", chno);
		return;
	}

	if (imxdma->sg) {
		u32 tmp;
		struct scatterlist *current_sg = imxdma->sg;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			imx_dma_sg_next(chno, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imx_dma_hw_chain(imxdma)) {
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma->prog_handler)
				imxdma->prog_handler(chno, imxdma->data,
						current_sg);

			return;
		}

		if (imx_dma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	if (imxdma->irq_handler)
		imxdma->irq_handler(chno, imxdma->data);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i, disr;

#ifdef CONFIG_ARCH_MX2
	if (cpu_is_mx21() || cpu_is_mx27())
		dma_err_handler(irq, dev_id);
#endif

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		     disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(i);
	}

	return IRQ_HANDLED;
}

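/*
 * Example (illustrative sketch only, not part of the driver): client
 * callbacks with the signatures the dispatchers above expect.  "my_device"
 * and the completion handling are hypothetical; the errcode passed to the
 * error handler is a bitmask of the IMX_DMA_ERR_* values raised above.
 *
 *	static void my_dma_irq(int channel, void *data)
 *	{
 *		struct my_device *dev = data;
 *		// transfer finished, start the next request ...
 *	}
 *
 *	static void my_dma_err(int channel, void *data, int errcode)
 *	{
 *		if (errcode & IMX_DMA_ERR_TIMEOUT)
 *			imx_dma_disable(channel);
 *		// report the failure and clean up ...
 *	}
 */
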
/**
 * imx_dma_request - request/allocate specified channel number
 * @channel: i.MX DMA channel number
 * @name: the driver/caller own non-%NULL identification
 */
int imx_dma_request(int channel, const char *name)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;
	int ret = 0;

	/* basic sanity checks */
	if (!name)
		return -EINVAL;

	if (channel >= IMX_DMA_CHANNELS) {
		printk(KERN_CRIT "%s: called for non-existent channel %d\n",
		       __func__, channel);
		return -EINVAL;
	}

	local_irq_save(flags);
	if (imxdma->name) {
		local_irq_restore(flags);
		return -EBUSY;
	}
	memset(imxdma, 0, sizeof(*imxdma));
	imxdma->name = name;
	local_irq_restore(flags); /* request_irq() can block */

#ifdef CONFIG_ARCH_MX2
	if (cpu_is_mx21() || cpu_is_mx27()) {
		ret = request_irq(MX2x_INT_DMACH0 + channel,
				dma_irq_handler, 0, "DMA", NULL);
		if (ret) {
			imxdma->name = NULL;
			pr_crit("Can't register IRQ %d for DMA channel %d\n",
					MX2x_INT_DMACH0 + channel, channel);
			return ret;
		}
		init_timer(&imxdma->watchdog);
		imxdma->watchdog.function = &imx_dma_watchdog;
		imxdma->watchdog.data = channel;
	}
#endif

	return ret;
}
EXPORT_SYMBOL(imx_dma_request);

/**
 * imx_dma_free - release previously acquired channel
 * @channel: i.MX DMA channel number
 */
void imx_dma_free(int channel)
{
	unsigned long flags;
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (!imxdma->name) {
		printk(KERN_CRIT
		       "%s: trying to free a channel that is not allocated: %d\n",
		       __func__, channel);
		return;
	}

	local_irq_save(flags);
	/* Disable interrupts */
	imx_dma_disable(channel);
	imxdma->name = NULL;

#ifdef CONFIG_ARCH_MX2
	if (cpu_is_mx21() || cpu_is_mx27())
		free_irq(MX2x_INT_DMACH0 + channel, NULL);
#endif

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_free);

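/*
 * Example (illustrative sketch only, not part of the driver): allocating a
 * channel by priority (see imx_dma_request_by_prio() below) and releasing it
 * again on teardown.  "my_probe"/"my_remove" and the stored channel number
 * are hypothetical client-driver details.
 *
 *	// in my_probe():
 *	channel = imx_dma_request_by_prio("my-driver", DMA_PRIO_MEDIUM);
 *	if (channel < 0)
 *		return channel;		// -ENODEV: no free channel
 *
 *	// in my_remove(): imx_dma_free() also disables the channel
 *	imx_dma_free(channel);
 */
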
/**
 * imx_dma_request_by_prio - find and request some of free channels best
 * suiting requested priority
 * @name: the driver/caller own non-%NULL identification
 * @prio: one of the %DMA_PRIO_HIGH, %DMA_PRIO_MEDIUM or %DMA_PRIO_LOW values
 *
 * This function tries to find a free channel in the specified priority group.
 * If the requested priority cannot be achieved, it looks for a free channel
 * in the higher and then in the lower priority groups.
 *
 * Return value: If there is no free channel to allocate, -%ENODEV is returned.
 *               On successful allocation the channel number is returned.
 */
int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
{
	int i;
	int best;

	switch (prio) {
	case (DMA_PRIO_HIGH):
		best = 8;
		break;
	case (DMA_PRIO_MEDIUM):
		best = 4;
		break;
	case (DMA_PRIO_LOW):
	default:
		best = 0;
		break;
	}

	for (i = best; i < IMX_DMA_CHANNELS; i++)
		if (!imx_dma_request(i, name))
			return i;

	for (i = best - 1; i >= 0; i--)
		if (!imx_dma_request(i, name))
			return i;

	printk(KERN_ERR "%s: no free DMA channel found\n", __func__);

	return -ENODEV;
}
EXPORT_SYMBOL(imx_dma_request_by_prio);

static int __init imx_dma_init(void)
{
	int ret = 0;
	int i;

#ifdef CONFIG_ARCH_MX1
	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else
#endif
#ifdef CONFIG_MACH_MX21
	if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else
#endif
#ifdef CONFIG_MACH_MX27
	if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
#endif
		BUG();

	dma_clk = clk_get(NULL, "dma");
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

#ifdef CONFIG_ARCH_MX1
	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
		if (ret) {
			pr_crit("Wow! Can't register IRQ for DMA\n");
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL);
		if (ret) {
			pr_crit("Wow! Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			return ret;
		}
	}
#endif
	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		imx_dma_channels[i].sg = NULL;
		imx_dma_channels[i].dma_num = i;
	}

	return ret;
}

arch_initcall(imx_dma_init);