/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/edma.h>

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define CHMAP_EXIST	BIT(24)

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512

/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];

static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	edma_write(ctlr, offset, val);
}
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
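
/*
 * Illustrative note (not part of the original driver): the accessors above
 * address one 32-bit word of a PaRAM slot at PARM_OFFSET(slot) plus the
 * field offset, so a single field can be updated in place.  For example,
 * breaking the link of slot 64 on controller 0 could be written as
 *
 *	edma_parm_or(0, PARM_LINK_BCNTRLD, 64, 0xffff);
 *
 * which is what edma_unlink() below does for the slot it is given; the
 * controller and slot numbers here are made up.
 */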

/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;
	enum dma_event_q	default_queue;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_unused bit for each channel is set unless the
	 * channel is in use on this platform; working that out takes
	 * a bit of SoC-specific initialization code.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}

static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}

/**
 * map_dmach_param - Maps channel number to param entry number
 *
 * This maps the DMA channel number to a param entry number.  In
 * other words, using the DMA channel mapping registers, a param entry
 * can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 *
 */
static void __init map_dmach_param(unsigned ctlr)
{
	int i;
	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}

static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}

static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
		irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}

/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_irq_handler\n");

	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
	    (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
		return IRQ_NONE;

	while (1) {
		int j;
		if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
				edma_shadow0_read_array(ctlr, SH_IER, 0))
			j = 0;
		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
				edma_shadow0_read_array(ctlr, SH_IER, 1))
			j = 1;
		else
			break;
		dev_dbg(data, "IPR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_IPR, j));
		for (i = 0; i < 32; i++) {
			int k = (j << 5) + i;
			if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
					&& (edma_shadow0_read_array(ctlr,
							SH_IER, j) & BIT(i))) {
				/* Clear the corresponding IPR bits */
				edma_shadow0_write_array(ctlr, SH_ICR, j,
							BIT(i));
				if (edma_cc[ctlr]->intr_data[k].callback)
					edma_cc[ctlr]->intr_data[k].callback(
						k, DMA_COMPLETE,
						edma_cc[ctlr]->intr_data[k].data);
			}
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;
		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;
				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
							j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].callback) {
						edma_cc[ctlr]->intr_data[k].callback(
							k, DMA_CC_ERROR,
							edma_cc[ctlr]->intr_data[k].data);
					}
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
					edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
							BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
					edma_read(ctlr, EDMA_CCERR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled	false	/* disabled as long as they're NOPs */

static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}

static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}

static int reserve_contiguous_slots(int ctlr, unsigned int id,
				     unsigned int num_slots,
				     unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set if we run out of
	 * parameter RAM slots, i.e. we find a set of contiguous
	 * parameter RAM slots but do not find the exact number
	 * requested before reaching the total number of parameter
	 * RAM slots.
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	for (j = start_slot; j < stop_slot; j++)
		if (test_bit(j, tmp_inuse))
			clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, ctlr;

	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
					edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}

/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */

/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
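
/*
 * Illustrative sketch of the allocation flow described above; it is not
 * taken from an in-tree driver.  "foo_dma_callback" and "foo" are
 * hypothetical names and the error handling is abbreviated.
 *
 *	static void foo_dma_callback(unsigned channel, u16 ch_status,
 *				     void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		if (ch_status == DMA_COMPLETE)
 *			complete(&foo->dma_done);
 *	}
 *
 *	...
 *	foo->channel = edma_alloc_channel(-1, foo_dma_callback, foo,
 *					  EVENTQ_DEFAULT);
 *	if (foo->channel < 0)
 *		return foo->channel;
 *	...
 *	edma_free_channel(foo->channel);
 *
 * Passing -1 asks for any unmapped channel; a driver wired to a hardware
 * event would pass that event's channel number instead.
 */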

/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);

/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
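
/*
 * Illustrative sketch (hypothetical driver code): a slot allocated with
 * edma_alloc_slot() is typically the target of a link from a channel's own
 * slot, so the channel reloads a fresh transfer when the current one ends:
 *
 *	link = edma_alloc_slot(EDMA_CTLR(chan), EDMA_SLOT_ANY);
 *	if (link < 0)
 *		goto err;
 *	edma_write_slot(link, &reload_param);
 *	edma_link(chan, link);
 *
 * "chan" is a channel returned by edma_alloc_channel() and "reload_param"
 * is a struct edmacc_param prepared by the caller; both are assumed here.
 */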

/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 *	The API will return the starting slot of the set of contiguous
 *	parameter RAM slots that has been requested.
 *
 * @id: can only be EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
 *	or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 * @slot: the starting parameter RAM slot; it must be passed if @id is
 *	EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 *
 * If @id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the @slot that is passed as an
 * argument to the API.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially starts
 * looking for a set of contiguous parameter RAM slots from the @slot that
 * is passed as an argument to the API.  On failure the API will try to
 * find a set of contiguous parameter RAM slots from the remaining parameter
 * RAM slots.
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
	/*
	 * The start slot requested should be greater than
	 * the number of channels and lesser than the total number
	 * of slots
	 */
	if ((id != EDMA_CONT_PARAMS_ANY) &&
		(slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots))
		return -EINVAL;

	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels
	 */
	if (count < 1 || count >
		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
		return -EINVAL;

	switch (id) {
	case EDMA_CONT_PARAMS_ANY:
		return reserve_contiguous_slots(ctlr, id, count,
						 edma_cc[ctlr]->num_channels);
	case EDMA_CONT_PARAMS_FIXED_EXACT:
	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
		return reserve_contiguous_slots(ctlr, id, count, slot);
	default:
		return -EINVAL;
	}

}
EXPORT_SYMBOL(edma_alloc_cont_slots);

/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM slot of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots().
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the
 * edma_alloc_cont_slots() API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);
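
/*
 * Illustrative sketch (values made up): request any eight contiguous slots
 * on controller 0, then release them again once whatever was built on top
 * of them is torn down:
 *
 *	slot = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 8);
 *	if (slot < 0)
 *		return slot;
 *	...
 *	edma_free_cont_slots(slot, 8);
 */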

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
				enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);

/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
				 enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);

/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
	struct edmacc_param temp;
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
	if (src != NULL)
		*src = temp.src;
	if (dst != NULL)
		*dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);

/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);

/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
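
/*
 * Illustrative sketch (one possible configuration, not from an in-tree
 * driver): a transfer that drains a contiguous memory buffer into a fixed
 * peripheral FIFO register, with bcnt == 1 so each event moves one
 * acnt-byte array, could index the addresses as
 *
 *	edma_set_src_index(slot, acnt, acnt);
 *	edma_set_dest_index(slot, 0, 0);
 *
 * i.e. the source steps through memory while the destination register
 * address never changes.  "slot" and "acnt" are assumed to match the
 * edma_set_transfer_params() call made for the same slot (see below).
 */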

/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);

/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);

/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);

	if (from >= edma_cc[ctlr]->num_slots)
		return;
	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
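
/*
 * Illustrative sketch (hypothetical field values): instead of the piecemeal
 * edma_set_*() calls, a driver can fill in a struct edmacc_param and write
 * the whole slot at once, or use a previously read slot as a template:
 *
 *	struct edmacc_param p;
 *
 *	edma_read_slot(slot, &p);
 *	p.src = buf_dma;
 *	p.a_b_cnt = (1 << 16) | period_bytes;
 *	p.ccnt = 1;
 *	edma_write_slot(slot, &p);
 *
 * "buf_dma" and "period_bytes" stand in for whatever the caller is
 * transferring; the field layout follows struct edmacc_param in
 * <mach/edma.h>, matching the PARM_* offsets at the top of this file.
 */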

/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);

/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);

/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);

/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
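
/*
 * Illustrative sketch (hypothetical driver code): once the channel's PaRAM
 * slot describes the transfer, an event-triggered channel only needs
 * edma_start(), while a channel with no hardware event gets one manual
 * trigger per edma_start() call.  Pausing merely masks further events:
 *
 *	ret = edma_start(chan);
 *	if (ret)
 *		return ret;
 *	...
 *	edma_pause(chan);	(while the stream is temporarily stopped)
 *	edma_resume(chan);
 *	...
 *	edma_stop(chan);
 *
 * "chan" is the value returned by edma_alloc_channel().
 */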

/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * When @channel is a channel, any active transfer is paused and
 * all pending hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);


void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);

/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 * Arguments:
 *	channel - channel number
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;
	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);

/*-----------------------------------------------------------------------*/

static int __init edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info **info = pdev->dev.platform_data;
	const s8 (*queue_priority_mapping)[2];
	const s8 (*queue_tc_mapping)[2];
	int i, j, off, ln, found = 0;
	int status = -1;
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	int irq[EDMA_MAX_CC] = {0, 0};
	int err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource *r[EDMA_MAX_CC] = {NULL};
	resource_size_t len[EDMA_MAX_CC];
	char res_name[10];
	char irq_name[10];

	if (!info)
		return -ENODEV;

	for (j = 0; j < EDMA_MAX_CC; j++) {
		sprintf(res_name, "edma_cc%d", j);
		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						res_name);
		if (!r[j] || !info[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		len[j] = resource_size(r[j]);

		r[j] = request_mem_region(r[j]->start, len[j],
			dev_name(&pdev->dev));
		if (!r[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
		if (!edmacc_regs_base[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edma_cc[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
		if (!edma_cc[j]) {
			status = -ENOMEM;
			goto fail1;
		}
		memset(edma_cc[j], 0, sizeof(struct edma));

		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
							EDMA_MAX_DMACH);
		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
							EDMA_MAX_PARAMENTRY);
		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
							EDMA_MAX_CC);

		edma_cc[j]->default_queue = info[j]->default_queue;
		if (!edma_cc[j]->default_queue)
			edma_cc[j]->default_queue = EVENTQ_1;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {

			/* Clear the reserved channels in unused list */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						edma_cc[j]->edma_unused);
				}
			}

			/* Set the reserved slots in inuse list */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						edma_cc[j]->edma_inuse);
				}
			}
		}

		sprintf(irq_name, "edma%d", j);
		irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_start = irq[j];
		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
					&pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				irq[j], status);
			goto fail;
		}

		sprintf(irq_name, "edma%d_err", j);
		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_end = err_irq[j];
		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
					"edma_error", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				err_irq[j], status);
			goto fail;
		}

		/* Everything lives on transfer controller 1 until otherwise
		 * specified. This way, long transfers on the low priority queue
		 * started by the codec engine will not cause audio defects.
		 */
		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, EVENTQ_1);

		queue_tc_mapping = info[j]->queue_tc_mapping;
		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Event queue to TC mapping */
		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
			map_queue_tc(j, queue_tc_mapping[i][0],
					queue_tc_mapping[i][1]);

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						queue_priority_mapping[i][0],
						queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < info[j]->n_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		arch_num_cc++;
	}

	if (tc_errs_handled) {
		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
					"edma_tc0", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT0, status);
			return status;
		}
		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
					"edma_tc1", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
				IRQ_TCERRINT, status);
			return status;
		}
	}

	return 0;

fail:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (err_irq[i])
			free_irq(err_irq[i], &pdev->dev);
		if (irq[i])
			free_irq(irq[i], &pdev->dev);
	}
fail1:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (r[i])
			release_mem_region(r[i]->start, len[i]);
		if (edmacc_regs_base[i])
			iounmap(edmacc_regs_base[i]);
		kfree(edma_cc[i]);
	}
	return status;
}


static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);