/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum size of a single DMA transfer is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>

#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

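/*
 * log2size_to_chcr() is the inverse of calc_xmit_shift(): it looks up the
 * given log2(transfer size) in the per-SoC ts_shift[] table and encodes the
 * matching index into the TS bit fields of CHCR.
 */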
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR must not be written while the channel is busy */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

/*
 * Assign a cookie to the transfer, mark all of its chunks DESC_SUBMITTED and
 * move them from ld_free to ld_queue; the client callback is attached to the
 * last chunk only.
 */
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor, with cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the new descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%zu/%zu)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * the first descriptor is what the user deals with in all API calls;
	 *	its cookie is initially set to -EBUSY and at tx-submit to a
	 *	positive number
	 * if more than one chunk is needed, further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in the form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%zu], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

/*
 * Slave transfers require a channel that was allocated with chan->private
 * pointing to a struct sh_dmae_slave whose slave_id matched an entry in the
 * platform data; the matching configuration supplies the device-side address.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	dmae_halt(sh_chan);

	spin_lock_bh(&sh_chan->desc_lock);
	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

/*
 * Process completed descriptors on ld_queue: update completed_cookie, run at
 * most one client callback per invocation and return it, so a non-NULL return
 * value tells the caller to call again. With all == true descriptors are
 * recycled unconditionally.
 */
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;

	if (all)
		/* Terminating - forgive uncompleted cookies */
		sh_chan->completed_cookie = sh_chan->common.cookie;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first descriptor not yet transferred */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}
	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif

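/*
 * Per-channel tasklet, scheduled from the transfer-end interrupt: it matches
 * the hardware SAR/DAR against the submitted descriptors to find the chunk
 * that has just finished, marks it DESC_COMPLETED, starts the next queued
 * chunk and then runs the descriptor cleanup.
 */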
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

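/*
 * Probe: map the channel register region (and the optional DMARS region),
 * reset the controller, fill in the dmaengine operations, sort out the error
 * and per-channel IRQs from the platform resources, create the channels and
 * register the DMA device.
 */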
static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional; if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Fields compulsory for DMA_SLAVE */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channels */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

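/*
 * No .probe member here: the driver is registered with
 * platform_driver_probe(), so sh_dmae_probe() can stay in the __init section
 * and the driver cannot be bound to devices that appear later.
 */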
static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");