1/* 2 * driver/dma/ste_dma40.c 3 * 4 * Copyright (C) ST-Ericsson 2007-2010 5 * License terms: GNU General Public License (GPL) version 2 6 * Author: Per Friden <per.friden@stericsson.com> 7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> 8 * 9 */ 10 11#include <linux/kernel.h> 12#include <linux/slab.h> 13#include <linux/dmaengine.h> 14#include <linux/platform_device.h> 15#include <linux/clk.h> 16#include <linux/delay.h> 17 18#include <plat/ste_dma40.h> 19 20#include "ste_dma40_ll.h" 21 22#define D40_NAME "dma40" 23 24#define D40_PHY_CHAN -1 25 26/* For masking out/in 2 bit channel positions */ 27#define D40_CHAN_POS(chan) (2 * (chan / 2)) 28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) 29 30/* Maximum iterations taken before giving up suspending a channel */ 31#define D40_SUSPEND_MAX_IT 500 32 33/* Hardware requirement on LCLA alignment */ 34#define LCLA_ALIGNMENT 0x40000 35/* Attempts before giving up to trying to get pages that are aligned */ 36#define MAX_LCLA_ALLOC_ATTEMPTS 256 37 38/* Bit markings for allocation map */ 39#define D40_ALLOC_FREE (1 << 31) 40#define D40_ALLOC_PHY (1 << 30) 41#define D40_ALLOC_LOG_FREE 0 42 43/* Hardware designer of the block */ 44#define D40_PERIPHID2_DESIGNER 0x8 45 46/** 47 * enum 40_command - The different commands and/or statuses. 48 * 49 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, 50 * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN. 51 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. 52 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. 53 */ 54enum d40_command { 55 D40_DMA_STOP = 0, 56 D40_DMA_RUN = 1, 57 D40_DMA_SUSPEND_REQ = 2, 58 D40_DMA_SUSPENDED = 3 59}; 60 61/** 62 * struct d40_lli_pool - Structure for keeping LLIs in memory 63 * 64 * @base: Pointer to memory area when the pre_alloc_lli's are not large 65 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if 66 * pre_alloc_lli is used. 67 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 68 * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 69 * one buffer to one buffer. 70 */ 71struct d40_lli_pool { 72 void *base; 73 int size; 74 /* Space for dst and src, plus an extra for padding */ 75 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 76}; 77 78/** 79 * struct d40_desc - A descriptor is one DMA job. 80 * 81 * @lli_phy: LLI settings for physical channel. Both src and dst= 82 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if 83 * lli_len equals one. 84 * @lli_log: Same as above but for logical channels. 85 * @lli_pool: The pool with two entries pre-allocated. 86 * @lli_len: Number of llis of current descriptor. 87 * @lli_count: Number of transfered llis. 88 * @lli_tx_len: Max number of LLIs per transfer, there can be 89 * many transfer for one descriptor. 90 * @txd: DMA engine struct. Used for among other things for communication 91 * during a transfer. 92 * @node: List entry. 93 * @dir: The transfer direction of this job. 94 * @is_in_client_list: true if the client owns this descriptor. 95 * 96 * This descriptor is used for both logical and physical transfers. 97 */ 98 99struct d40_desc { 100 /* LLI physical */ 101 struct d40_phy_lli_bidir lli_phy; 102 /* LLI logical */ 103 struct d40_log_lli_bidir lli_log; 104 105 struct d40_lli_pool lli_pool; 106 int lli_len; 107 int lli_count; 108 u32 lli_tx_len; 109 110 struct dma_async_tx_descriptor txd; 111 struct list_head node; 112 113 enum dma_data_direction dir; 114 bool is_in_client_list; 115}; 116 117/** 118 * struct d40_lcla_pool - LCLA pool settings and data. 119 * 120 * @base: The virtual address of LCLA. 18 bit aligned. 121 * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used. 122 * This pointer is only there for clean-up on error. 123 * @pages: The number of pages needed for all physical channels. 124 * Only used later for clean-up on error 125 * @lock: Lock to protect the content in this struct. 126 * @alloc_map: Bitmap mapping between physical channel and LCLA entries. 127 * @num_blocks: The number of entries of alloc_map. Equals to the 128 * number of physical channels. 129 */ 130struct d40_lcla_pool { 131 void *base; 132 void *base_unaligned; 133 int pages; 134 spinlock_t lock; 135 u32 *alloc_map; 136 int num_blocks; 137}; 138 139/** 140 * struct d40_phy_res - struct for handling eventlines mapped to physical 141 * channels. 142 * 143 * @lock: A lock protection this entity. 144 * @num: The physical channel number of this entity. 145 * @allocated_src: Bit mapped to show which src event line's are mapped to 146 * this physical channel. Can also be free or physically allocated. 147 * @allocated_dst: Same as for src but is dst. 148 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as 149 * event line number. Both allocated_src and allocated_dst can not be 150 * allocated to a physical channel, since the interrupt handler has then 151 * no way of figure out which one the interrupt belongs to. 152 */ 153struct d40_phy_res { 154 spinlock_t lock; 155 int num; 156 u32 allocated_src; 157 u32 allocated_dst; 158}; 159 160struct d40_base; 161 162/** 163 * struct d40_chan - Struct that describes a channel. 164 * 165 * @lock: A spinlock to protect this struct. 166 * @log_num: The logical number, if any of this channel. 167 * @completed: Starts with 1, after first interrupt it is set to dma engine's 168 * current cookie. 169 * @pending_tx: The number of pending transfers. Used between interrupt handler 170 * and tasklet. 171 * @busy: Set to true when transfer is ongoing on this channel. 172 * @phy_chan: Pointer to physical channel which this instance runs on. If this 173 * point is NULL, then the channel is not allocated. 174 * @chan: DMA engine handle. 175 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 176 * transfer and call client callback. 177 * @client: Cliented owned descriptor list. 178 * @active: Active descriptor. 179 * @queue: Queued jobs. 180 * @dma_cfg: The client configuration of this dma channel. 181 * @base: Pointer to the device instance struct. 182 * @src_def_cfg: Default cfg register setting for src. 183 * @dst_def_cfg: Default cfg register setting for dst. 184 * @log_def: Default logical channel settings. 185 * @lcla: Space for one dst src pair for logical channel transfers. 186 * @lcpa: Pointer to dst and src lcpa settings. 187 * 188 * This struct can either "be" a logical or a physical channel. 189 */ 190struct d40_chan { 191 spinlock_t lock; 192 int log_num; 193 /* ID of the most recent completed transfer */ 194 int completed; 195 int pending_tx; 196 bool busy; 197 struct d40_phy_res *phy_chan; 198 struct dma_chan chan; 199 struct tasklet_struct tasklet; 200 struct list_head client; 201 struct list_head active; 202 struct list_head queue; 203 struct stedma40_chan_cfg dma_cfg; 204 struct d40_base *base; 205 /* Default register configurations */ 206 u32 src_def_cfg; 207 u32 dst_def_cfg; 208 struct d40_def_lcsp log_def; 209 struct d40_lcla_elem lcla; 210 struct d40_log_lli_full *lcpa; 211 /* Runtime reconfiguration */ 212 dma_addr_t runtime_addr; 213 enum dma_data_direction runtime_direction; 214}; 215 216/** 217 * struct d40_base - The big global struct, one for each probe'd instance. 218 * 219 * @interrupt_lock: Lock used to make sure one interrupt is handle a time. 220 * @execmd_lock: Lock for execute command usage since several channels share 221 * the same physical register. 222 * @dev: The device structure. 223 * @virtbase: The virtual base address of the DMA's register. 224 * @rev: silicon revision detected. 225 * @clk: Pointer to the DMA clock structure. 226 * @phy_start: Physical memory start of the DMA registers. 227 * @phy_size: Size of the DMA register map. 228 * @irq: The IRQ number. 229 * @num_phy_chans: The number of physical channels. Read from HW. This 230 * is the number of available channels for this driver, not counting "Secure 231 * mode" allocated physical channels. 232 * @num_log_chans: The number of logical channels. Calculated from 233 * num_phy_chans. 234 * @dma_both: dma_device channels that can do both memcpy and slave transfers. 235 * @dma_slave: dma_device channels that can do only do slave transfers. 236 * @dma_memcpy: dma_device channels that can do only do memcpy transfers. 237 * @phy_chans: Room for all possible physical channels in system. 238 * @log_chans: Room for all possible logical channels in system. 239 * @lookup_log_chans: Used to map interrupt number to logical channel. Points 240 * to log_chans entries. 241 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points 242 * to phy_chans entries. 243 * @plat_data: Pointer to provided platform_data which is the driver 244 * configuration. 245 * @phy_res: Vector containing all physical channels. 246 * @lcla_pool: lcla pool settings and data. 247 * @lcpa_base: The virtual mapped address of LCPA. 248 * @phy_lcpa: The physical address of the LCPA. 249 * @lcpa_size: The size of the LCPA area. 250 * @desc_slab: cache for descriptors. 251 */ 252struct d40_base { 253 spinlock_t interrupt_lock; 254 spinlock_t execmd_lock; 255 struct device *dev; 256 void __iomem *virtbase; 257 u8 rev:4; 258 struct clk *clk; 259 phys_addr_t phy_start; 260 resource_size_t phy_size; 261 int irq; 262 int num_phy_chans; 263 int num_log_chans; 264 struct dma_device dma_both; 265 struct dma_device dma_slave; 266 struct dma_device dma_memcpy; 267 struct d40_chan *phy_chans; 268 struct d40_chan *log_chans; 269 struct d40_chan **lookup_log_chans; 270 struct d40_chan **lookup_phy_chans; 271 struct stedma40_platform_data *plat_data; 272 /* Physical half channels */ 273 struct d40_phy_res *phy_res; 274 struct d40_lcla_pool lcla_pool; 275 void *lcpa_base; 276 dma_addr_t phy_lcpa; 277 resource_size_t lcpa_size; 278 struct kmem_cache *desc_slab; 279}; 280 281/** 282 * struct d40_interrupt_lookup - lookup table for interrupt handler 283 * 284 * @src: Interrupt mask register. 285 * @clr: Interrupt clear register. 286 * @is_error: true if this is an error interrupt. 287 * @offset: start delta in the lookup_log_chans in d40_base. If equals to 288 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. 289 */ 290struct d40_interrupt_lookup { 291 u32 src; 292 u32 clr; 293 bool is_error; 294 int offset; 295}; 296 297/** 298 * struct d40_reg_val - simple lookup struct 299 * 300 * @reg: The register. 301 * @val: The value that belongs to the register in reg. 302 */ 303struct d40_reg_val { 304 unsigned int reg; 305 unsigned int val; 306}; 307 308static int d40_pool_lli_alloc(struct d40_desc *d40d, 309 int lli_len, bool is_log) 310{ 311 u32 align; 312 void *base; 313 314 if (is_log) 315 align = sizeof(struct d40_log_lli); 316 else 317 align = sizeof(struct d40_phy_lli); 318 319 if (lli_len == 1) { 320 base = d40d->lli_pool.pre_alloc_lli; 321 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 322 d40d->lli_pool.base = NULL; 323 } else { 324 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); 325 326 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 327 d40d->lli_pool.base = base; 328 329 if (d40d->lli_pool.base == NULL) 330 return -ENOMEM; 331 } 332 333 if (is_log) { 334 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, 335 align); 336 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, 337 align); 338 } else { 339 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, 340 align); 341 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, 342 align); 343 344 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); 345 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); 346 } 347 348 return 0; 349} 350 351static void d40_pool_lli_free(struct d40_desc *d40d) 352{ 353 kfree(d40d->lli_pool.base); 354 d40d->lli_pool.base = NULL; 355 d40d->lli_pool.size = 0; 356 d40d->lli_log.src = NULL; 357 d40d->lli_log.dst = NULL; 358 d40d->lli_phy.src = NULL; 359 d40d->lli_phy.dst = NULL; 360 d40d->lli_phy.src_addr = 0; 361 d40d->lli_phy.dst_addr = 0; 362} 363 364static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, 365 struct d40_desc *desc) 366{ 367 dma_cookie_t cookie = d40c->chan.cookie; 368 369 if (++cookie < 0) 370 cookie = 1; 371 372 d40c->chan.cookie = cookie; 373 desc->txd.cookie = cookie; 374 375 return cookie; 376} 377 378static void d40_desc_remove(struct d40_desc *d40d) 379{ 380 list_del(&d40d->node); 381} 382 383static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 384{ 385 struct d40_desc *d; 386 struct d40_desc *_d; 387 388 if (!list_empty(&d40c->client)) { 389 list_for_each_entry_safe(d, _d, &d40c->client, node) 390 if (async_tx_test_ack(&d->txd)) { 391 d40_pool_lli_free(d); 392 d40_desc_remove(d); 393 break; 394 } 395 } else { 396 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); 397 if (d != NULL) { 398 memset(d, 0, sizeof(struct d40_desc)); 399 INIT_LIST_HEAD(&d->node); 400 } 401 } 402 return d; 403} 404 405static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 406{ 407 kmem_cache_free(d40c->base->desc_slab, d40d); 408} 409 410static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) 411{ 412 list_add_tail(&desc->node, &d40c->active); 413} 414 415static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 416{ 417 struct d40_desc *d; 418 419 if (list_empty(&d40c->active)) 420 return NULL; 421 422 d = list_first_entry(&d40c->active, 423 struct d40_desc, 424 node); 425 return d; 426} 427 428static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 429{ 430 list_add_tail(&desc->node, &d40c->queue); 431} 432 433static struct d40_desc *d40_first_queued(struct d40_chan *d40c) 434{ 435 struct d40_desc *d; 436 437 if (list_empty(&d40c->queue)) 438 return NULL; 439 440 d = list_first_entry(&d40c->queue, 441 struct d40_desc, 442 node); 443 return d; 444} 445 446/* Support functions for logical channels */ 447 448static int d40_lcla_id_get(struct d40_chan *d40c) 449{ 450 int src_id = 0; 451 int dst_id = 0; 452 struct d40_log_lli *lcla_lidx_base = 453 d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; 454 int i; 455 int lli_per_log = d40c->base->plat_data->llis_per_log; 456 unsigned long flags; 457 458 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) 459 return 0; 460 461 if (d40c->base->lcla_pool.num_blocks > 32) 462 return -EINVAL; 463 464 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 465 466 for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { 467 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & 468 (0x1 << i))) { 469 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= 470 (0x1 << i); 471 break; 472 } 473 } 474 src_id = i; 475 if (src_id >= d40c->base->lcla_pool.num_blocks) 476 goto err; 477 478 for (; i < d40c->base->lcla_pool.num_blocks; i++) { 479 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & 480 (0x1 << i))) { 481 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= 482 (0x1 << i); 483 break; 484 } 485 } 486 487 dst_id = i; 488 if (dst_id == src_id) 489 goto err; 490 491 d40c->lcla.src_id = src_id; 492 d40c->lcla.dst_id = dst_id; 493 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; 494 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; 495 496 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 497 return 0; 498err: 499 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 500 return -EINVAL; 501} 502 503 504static int d40_channel_execute_command(struct d40_chan *d40c, 505 enum d40_command command) 506{ 507 int status, i; 508 void __iomem *active_reg; 509 int ret = 0; 510 unsigned long flags; 511 u32 wmask; 512 513 spin_lock_irqsave(&d40c->base->execmd_lock, flags); 514 515 if (d40c->phy_chan->num % 2 == 0) 516 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 517 else 518 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 519 520 if (command == D40_DMA_SUSPEND_REQ) { 521 status = (readl(active_reg) & 522 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 523 D40_CHAN_POS(d40c->phy_chan->num); 524 525 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 526 goto done; 527 } 528 529 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); 530 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), 531 active_reg); 532 533 if (command == D40_DMA_SUSPEND_REQ) { 534 535 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { 536 status = (readl(active_reg) & 537 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 538 D40_CHAN_POS(d40c->phy_chan->num); 539 540 cpu_relax(); 541 /* 542 * Reduce the number of bus accesses while 543 * waiting for the DMA to suspend. 544 */ 545 udelay(3); 546 547 if (status == D40_DMA_STOP || 548 status == D40_DMA_SUSPENDED) 549 break; 550 } 551 552 if (i == D40_SUSPEND_MAX_IT) { 553 dev_err(&d40c->chan.dev->device, 554 "[%s]: unable to suspend the chl %d (log: %d) status %x\n", 555 __func__, d40c->phy_chan->num, d40c->log_num, 556 status); 557 dump_stack(); 558 ret = -EBUSY; 559 } 560 561 } 562done: 563 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); 564 return ret; 565} 566 567static void d40_term_all(struct d40_chan *d40c) 568{ 569 struct d40_desc *d40d; 570 unsigned long flags; 571 572 /* Release active descriptors */ 573 while ((d40d = d40_first_active_get(d40c))) { 574 d40_desc_remove(d40d); 575 576 /* Return desc to free-list */ 577 d40_desc_free(d40c, d40d); 578 } 579 580 /* Release queued descriptors waiting for transfer */ 581 while ((d40d = d40_first_queued(d40c))) { 582 d40_desc_remove(d40d); 583 584 /* Return desc to free-list */ 585 d40_desc_free(d40c, d40d); 586 } 587 588 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 589 590 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= 591 (~(0x1 << d40c->lcla.dst_id)); 592 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= 593 (~(0x1 << d40c->lcla.src_id)); 594 595 d40c->lcla.src_id = -1; 596 d40c->lcla.dst_id = -1; 597 598 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 599 600 d40c->pending_tx = 0; 601 d40c->busy = false; 602} 603 604static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 605{ 606 u32 val; 607 unsigned long flags; 608 609 /* Notice, that disable requires the physical channel to be stopped */ 610 if (do_enable) 611 val = D40_ACTIVATE_EVENTLINE; 612 else 613 val = D40_DEACTIVATE_EVENTLINE; 614 615 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 616 617 /* Enable event line connected to device (or memcpy) */ 618 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 619 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 620 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 621 622 writel((val << D40_EVENTLINE_POS(event)) | 623 ~D40_EVENTLINE_MASK(event), 624 d40c->base->virtbase + D40_DREG_PCBASE + 625 d40c->phy_chan->num * D40_DREG_PCDELTA + 626 D40_CHAN_REG_SSLNK); 627 } 628 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 629 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 630 631 writel((val << D40_EVENTLINE_POS(event)) | 632 ~D40_EVENTLINE_MASK(event), 633 d40c->base->virtbase + D40_DREG_PCBASE + 634 d40c->phy_chan->num * D40_DREG_PCDELTA + 635 D40_CHAN_REG_SDLNK); 636 } 637 638 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 639} 640 641static u32 d40_chan_has_events(struct d40_chan *d40c) 642{ 643 u32 val = 0; 644 645 /* If SSLNK or SDLNK is zero all events are disabled */ 646 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 647 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 648 val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 649 d40c->phy_chan->num * D40_DREG_PCDELTA + 650 D40_CHAN_REG_SSLNK); 651 652 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) 653 val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 654 d40c->phy_chan->num * D40_DREG_PCDELTA + 655 D40_CHAN_REG_SDLNK); 656 return val; 657} 658 659static void d40_config_enable_lidx(struct d40_chan *d40c) 660{ 661 /* Set LIDX for lcla */ 662 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 663 D40_SREG_ELEM_LOG_LIDX_MASK, 664 d40c->base->virtbase + D40_DREG_PCBASE + 665 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); 666 667 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 668 D40_SREG_ELEM_LOG_LIDX_MASK, 669 d40c->base->virtbase + D40_DREG_PCBASE + 670 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); 671} 672 673static int d40_config_write(struct d40_chan *d40c) 674{ 675 u32 addr_base; 676 u32 var; 677 int res; 678 679 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 680 if (res) 681 return res; 682 683 /* Odd addresses are even addresses + 4 */ 684 addr_base = (d40c->phy_chan->num % 2) * 4; 685 /* Setup channel mode to logical or physical */ 686 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << 687 D40_CHAN_POS(d40c->phy_chan->num); 688 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 689 690 /* Setup operational mode option register */ 691 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & 692 0x3) << D40_CHAN_POS(d40c->phy_chan->num); 693 694 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 695 696 if (d40c->log_num != D40_PHY_CHAN) { 697 /* Set default config for CFG reg */ 698 writel(d40c->src_def_cfg, 699 d40c->base->virtbase + D40_DREG_PCBASE + 700 d40c->phy_chan->num * D40_DREG_PCDELTA + 701 D40_CHAN_REG_SSCFG); 702 writel(d40c->dst_def_cfg, 703 d40c->base->virtbase + D40_DREG_PCBASE + 704 d40c->phy_chan->num * D40_DREG_PCDELTA + 705 D40_CHAN_REG_SDCFG); 706 707 d40_config_enable_lidx(d40c); 708 } 709 return res; 710} 711 712static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 713{ 714 if (d40d->lli_phy.dst && d40d->lli_phy.src) { 715 d40_phy_lli_write(d40c->base->virtbase, 716 d40c->phy_chan->num, 717 d40d->lli_phy.dst, 718 d40d->lli_phy.src); 719 } else if (d40d->lli_log.dst && d40d->lli_log.src) { 720 struct d40_log_lli *src = d40d->lli_log.src; 721 struct d40_log_lli *dst = d40d->lli_log.dst; 722 int s; 723 724 src += d40d->lli_count; 725 dst += d40d->lli_count; 726 s = d40_log_lli_write(d40c->lcpa, 727 d40c->lcla.src, d40c->lcla.dst, 728 dst, src, 729 d40c->base->plat_data->llis_per_log); 730 731 /* If s equals to zero, the job is not linked */ 732 if (s > 0) { 733 (void) dma_map_single(d40c->base->dev, d40c->lcla.src, 734 s * sizeof(struct d40_log_lli), 735 DMA_TO_DEVICE); 736 (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, 737 s * sizeof(struct d40_log_lli), 738 DMA_TO_DEVICE); 739 } 740 } 741 d40d->lli_count += d40d->lli_tx_len; 742} 743 744static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 745{ 746 struct d40_chan *d40c = container_of(tx->chan, 747 struct d40_chan, 748 chan); 749 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 750 unsigned long flags; 751 752 spin_lock_irqsave(&d40c->lock, flags); 753 754 tx->cookie = d40_assign_cookie(d40c, d40d); 755 756 d40_desc_queue(d40c, d40d); 757 758 spin_unlock_irqrestore(&d40c->lock, flags); 759 760 return tx->cookie; 761} 762 763static int d40_start(struct d40_chan *d40c) 764{ 765 if (d40c->base->rev == 0) { 766 int err; 767 768 if (d40c->log_num != D40_PHY_CHAN) { 769 err = d40_channel_execute_command(d40c, 770 D40_DMA_SUSPEND_REQ); 771 if (err) 772 return err; 773 } 774 } 775 776 if (d40c->log_num != D40_PHY_CHAN) 777 d40_config_set_event(d40c, true); 778 779 return d40_channel_execute_command(d40c, D40_DMA_RUN); 780} 781 782static struct d40_desc *d40_queue_start(struct d40_chan *d40c) 783{ 784 struct d40_desc *d40d; 785 int err; 786 787 /* Start queued jobs, if any */ 788 d40d = d40_first_queued(d40c); 789 790 if (d40d != NULL) { 791 d40c->busy = true; 792 793 /* Remove from queue */ 794 d40_desc_remove(d40d); 795 796 /* Add to active queue */ 797 d40_desc_submit(d40c, d40d); 798 799 /* Initiate DMA job */ 800 d40_desc_load(d40c, d40d); 801 802 /* Start dma job */ 803 err = d40_start(d40c); 804 805 if (err) 806 return NULL; 807 } 808 809 return d40d; 810} 811 812/* called from interrupt context */ 813static void dma_tc_handle(struct d40_chan *d40c) 814{ 815 struct d40_desc *d40d; 816 817 if (!d40c->phy_chan) 818 return; 819 820 /* Get first active entry from list */ 821 d40d = d40_first_active_get(d40c); 822 823 if (d40d == NULL) 824 return; 825 826 if (d40d->lli_count < d40d->lli_len) { 827 828 d40_desc_load(d40c, d40d); 829 /* Start dma job */ 830 (void) d40_start(d40c); 831 return; 832 } 833 834 if (d40_queue_start(d40c) == NULL) 835 d40c->busy = false; 836 837 d40c->pending_tx++; 838 tasklet_schedule(&d40c->tasklet); 839 840} 841 842static void dma_tasklet(unsigned long data) 843{ 844 struct d40_chan *d40c = (struct d40_chan *) data; 845 struct d40_desc *d40d_fin; 846 unsigned long flags; 847 dma_async_tx_callback callback; 848 void *callback_param; 849 850 spin_lock_irqsave(&d40c->lock, flags); 851 852 /* Get first active entry from list */ 853 d40d_fin = d40_first_active_get(d40c); 854 855 if (d40d_fin == NULL) 856 goto err; 857 858 d40c->completed = d40d_fin->txd.cookie; 859 860 /* 861 * If terminating a channel pending_tx is set to zero. 862 * This prevents any finished active jobs to return to the client. 863 */ 864 if (d40c->pending_tx == 0) { 865 spin_unlock_irqrestore(&d40c->lock, flags); 866 return; 867 } 868 869 /* Callback to client */ 870 callback = d40d_fin->txd.callback; 871 callback_param = d40d_fin->txd.callback_param; 872 873 if (async_tx_test_ack(&d40d_fin->txd)) { 874 d40_pool_lli_free(d40d_fin); 875 d40_desc_remove(d40d_fin); 876 /* Return desc to free-list */ 877 d40_desc_free(d40c, d40d_fin); 878 } else { 879 if (!d40d_fin->is_in_client_list) { 880 d40_desc_remove(d40d_fin); 881 list_add_tail(&d40d_fin->node, &d40c->client); 882 d40d_fin->is_in_client_list = true; 883 } 884 } 885 886 d40c->pending_tx--; 887 888 if (d40c->pending_tx) 889 tasklet_schedule(&d40c->tasklet); 890 891 spin_unlock_irqrestore(&d40c->lock, flags); 892 893 if (callback) 894 callback(callback_param); 895 896 return; 897 898 err: 899 /* Rescue manouver if receiving double interrupts */ 900 if (d40c->pending_tx > 0) 901 d40c->pending_tx--; 902 spin_unlock_irqrestore(&d40c->lock, flags); 903} 904 905static irqreturn_t d40_handle_interrupt(int irq, void *data) 906{ 907 static const struct d40_interrupt_lookup il[] = { 908 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, 909 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, 910 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, 911 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, 912 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, 913 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, 914 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, 915 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, 916 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, 917 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, 918 }; 919 920 int i; 921 u32 regs[ARRAY_SIZE(il)]; 922 u32 tmp; 923 u32 idx; 924 u32 row; 925 long chan = -1; 926 struct d40_chan *d40c; 927 unsigned long flags; 928 struct d40_base *base = data; 929 930 spin_lock_irqsave(&base->interrupt_lock, flags); 931 932 /* Read interrupt status of both logical and physical channels */ 933 for (i = 0; i < ARRAY_SIZE(il); i++) 934 regs[i] = readl(base->virtbase + il[i].src); 935 936 for (;;) { 937 938 chan = find_next_bit((unsigned long *)regs, 939 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); 940 941 /* No more set bits found? */ 942 if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) 943 break; 944 945 row = chan / BITS_PER_LONG; 946 idx = chan & (BITS_PER_LONG - 1); 947 948 /* ACK interrupt */ 949 tmp = readl(base->virtbase + il[row].clr); 950 tmp |= 1 << idx; 951 writel(tmp, base->virtbase + il[row].clr); 952 953 if (il[row].offset == D40_PHY_CHAN) 954 d40c = base->lookup_phy_chans[idx]; 955 else 956 d40c = base->lookup_log_chans[il[row].offset + idx]; 957 spin_lock(&d40c->lock); 958 959 if (!il[row].is_error) 960 dma_tc_handle(d40c); 961 else 962 dev_err(base->dev, 963 "[%s] IRQ chan: %ld offset %d idx %d\n", 964 __func__, chan, il[row].offset, idx); 965 966 spin_unlock(&d40c->lock); 967 } 968 969 spin_unlock_irqrestore(&base->interrupt_lock, flags); 970 971 return IRQ_HANDLED; 972} 973 974 975static int d40_validate_conf(struct d40_chan *d40c, 976 struct stedma40_chan_cfg *conf) 977{ 978 int res = 0; 979 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); 980 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); 981 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) 982 == STEDMA40_CHANNEL_IN_LOG_MODE; 983 984 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && 985 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 986 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 987 __func__); 988 res = -EINVAL; 989 } 990 991 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && 992 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 993 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 994 __func__); 995 res = -EINVAL; 996 } 997 998 if (src_event_group == STEDMA40_DEV_SRC_MEMORY && 999 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { 1000 dev_err(&d40c->chan.dev->device, 1001 "[%s] No event line\n", __func__); 1002 res = -EINVAL; 1003 } 1004 1005 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && 1006 (src_event_group != dst_event_group)) { 1007 dev_err(&d40c->chan.dev->device, 1008 "[%s] Invalid event group\n", __func__); 1009 res = -EINVAL; 1010 } 1011 1012 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { 1013 /* 1014 * DMAC HW supports it. Will be added to this driver, 1015 * in case any dma client requires it. 1016 */ 1017 dev_err(&d40c->chan.dev->device, 1018 "[%s] periph to periph not supported\n", 1019 __func__); 1020 res = -EINVAL; 1021 } 1022 1023 return res; 1024} 1025 1026static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, 1027 int log_event_line, bool is_log) 1028{ 1029 unsigned long flags; 1030 spin_lock_irqsave(&phy->lock, flags); 1031 if (!is_log) { 1032 /* Physical interrupts are masked per physical full channel */ 1033 if (phy->allocated_src == D40_ALLOC_FREE && 1034 phy->allocated_dst == D40_ALLOC_FREE) { 1035 phy->allocated_dst = D40_ALLOC_PHY; 1036 phy->allocated_src = D40_ALLOC_PHY; 1037 goto found; 1038 } else 1039 goto not_found; 1040 } 1041 1042 /* Logical channel */ 1043 if (is_src) { 1044 if (phy->allocated_src == D40_ALLOC_PHY) 1045 goto not_found; 1046 1047 if (phy->allocated_src == D40_ALLOC_FREE) 1048 phy->allocated_src = D40_ALLOC_LOG_FREE; 1049 1050 if (!(phy->allocated_src & (1 << log_event_line))) { 1051 phy->allocated_src |= 1 << log_event_line; 1052 goto found; 1053 } else 1054 goto not_found; 1055 } else { 1056 if (phy->allocated_dst == D40_ALLOC_PHY) 1057 goto not_found; 1058 1059 if (phy->allocated_dst == D40_ALLOC_FREE) 1060 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1061 1062 if (!(phy->allocated_dst & (1 << log_event_line))) { 1063 phy->allocated_dst |= 1 << log_event_line; 1064 goto found; 1065 } else 1066 goto not_found; 1067 } 1068 1069not_found: 1070 spin_unlock_irqrestore(&phy->lock, flags); 1071 return false; 1072found: 1073 spin_unlock_irqrestore(&phy->lock, flags); 1074 return true; 1075} 1076 1077static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, 1078 int log_event_line) 1079{ 1080 unsigned long flags; 1081 bool is_free = false; 1082 1083 spin_lock_irqsave(&phy->lock, flags); 1084 if (!log_event_line) { 1085 /* Physical interrupts are masked per physical full channel */ 1086 phy->allocated_dst = D40_ALLOC_FREE; 1087 phy->allocated_src = D40_ALLOC_FREE; 1088 is_free = true; 1089 goto out; 1090 } 1091 1092 /* Logical channel */ 1093 if (is_src) { 1094 phy->allocated_src &= ~(1 << log_event_line); 1095 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1096 phy->allocated_src = D40_ALLOC_FREE; 1097 } else { 1098 phy->allocated_dst &= ~(1 << log_event_line); 1099 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1100 phy->allocated_dst = D40_ALLOC_FREE; 1101 } 1102 1103 is_free = ((phy->allocated_src | phy->allocated_dst) == 1104 D40_ALLOC_FREE); 1105 1106out: 1107 spin_unlock_irqrestore(&phy->lock, flags); 1108 1109 return is_free; 1110} 1111 1112static int d40_allocate_channel(struct d40_chan *d40c) 1113{ 1114 int dev_type; 1115 int event_group; 1116 int event_line; 1117 struct d40_phy_res *phys; 1118 int i; 1119 int j; 1120 int log_num; 1121 bool is_src; 1122 bool is_log = (d40c->dma_cfg.channel_type & 1123 STEDMA40_CHANNEL_IN_OPER_MODE) 1124 == STEDMA40_CHANNEL_IN_LOG_MODE; 1125 1126 1127 phys = d40c->base->phy_res; 1128 1129 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1130 dev_type = d40c->dma_cfg.src_dev_type; 1131 log_num = 2 * dev_type; 1132 is_src = true; 1133 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1134 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1135 /* dst event lines are used for logical memcpy */ 1136 dev_type = d40c->dma_cfg.dst_dev_type; 1137 log_num = 2 * dev_type + 1; 1138 is_src = false; 1139 } else 1140 return -EINVAL; 1141 1142 event_group = D40_TYPE_TO_GROUP(dev_type); 1143 event_line = D40_TYPE_TO_EVENT(dev_type); 1144 1145 if (!is_log) { 1146 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1147 /* Find physical half channel */ 1148 for (i = 0; i < d40c->base->num_phy_chans; i++) { 1149 1150 if (d40_alloc_mask_set(&phys[i], is_src, 1151 0, is_log)) 1152 goto found_phy; 1153 } 1154 } else 1155 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1156 int phy_num = j + event_group * 2; 1157 for (i = phy_num; i < phy_num + 2; i++) { 1158 if (d40_alloc_mask_set(&phys[i], 1159 is_src, 1160 0, 1161 is_log)) 1162 goto found_phy; 1163 } 1164 } 1165 return -EINVAL; 1166found_phy: 1167 d40c->phy_chan = &phys[i]; 1168 d40c->log_num = D40_PHY_CHAN; 1169 goto out; 1170 } 1171 if (dev_type == -1) 1172 return -EINVAL; 1173 1174 /* Find logical channel */ 1175 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1176 int phy_num = j + event_group * 2; 1177 /* 1178 * Spread logical channels across all available physical rather 1179 * than pack every logical channel at the first available phy 1180 * channels. 1181 */ 1182 if (is_src) { 1183 for (i = phy_num; i < phy_num + 2; i++) { 1184 if (d40_alloc_mask_set(&phys[i], is_src, 1185 event_line, is_log)) 1186 goto found_log; 1187 } 1188 } else { 1189 for (i = phy_num + 1; i >= phy_num; i--) { 1190 if (d40_alloc_mask_set(&phys[i], is_src, 1191 event_line, is_log)) 1192 goto found_log; 1193 } 1194 } 1195 } 1196 return -EINVAL; 1197 1198found_log: 1199 d40c->phy_chan = &phys[i]; 1200 d40c->log_num = log_num; 1201out: 1202 1203 if (is_log) 1204 d40c->base->lookup_log_chans[d40c->log_num] = d40c; 1205 else 1206 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; 1207 1208 return 0; 1209 1210} 1211 1212static int d40_config_memcpy(struct d40_chan *d40c) 1213{ 1214 dma_cap_mask_t cap = d40c->chan.device->cap_mask; 1215 1216 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 1217 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; 1218 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; 1219 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> 1220 memcpy[d40c->chan.chan_id]; 1221 1222 } else if (dma_has_cap(DMA_MEMCPY, cap) && 1223 dma_has_cap(DMA_SLAVE, cap)) { 1224 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 1225 } else { 1226 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", 1227 __func__); 1228 return -EINVAL; 1229 } 1230 1231 return 0; 1232} 1233 1234 1235static int d40_free_dma(struct d40_chan *d40c) 1236{ 1237 1238 int res = 0; 1239 u32 event; 1240 struct d40_phy_res *phy = d40c->phy_chan; 1241 bool is_src; 1242 struct d40_desc *d; 1243 struct d40_desc *_d; 1244 1245 1246 /* Terminate all queued and active transfers */ 1247 d40_term_all(d40c); 1248 1249 /* Release client owned descriptors */ 1250 if (!list_empty(&d40c->client)) 1251 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1252 d40_pool_lli_free(d); 1253 d40_desc_remove(d); 1254 /* Return desc to free-list */ 1255 d40_desc_free(d40c, d); 1256 } 1257 1258 if (phy == NULL) { 1259 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", 1260 __func__); 1261 return -EINVAL; 1262 } 1263 1264 if (phy->allocated_src == D40_ALLOC_FREE && 1265 phy->allocated_dst == D40_ALLOC_FREE) { 1266 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", 1267 __func__); 1268 return -EINVAL; 1269 } 1270 1271 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1272 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1273 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1274 is_src = false; 1275 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1276 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1277 is_src = true; 1278 } else { 1279 dev_err(&d40c->chan.dev->device, 1280 "[%s] Unknown direction\n", __func__); 1281 return -EINVAL; 1282 } 1283 1284 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1285 if (res) { 1286 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", 1287 __func__); 1288 return res; 1289 } 1290 1291 if (d40c->log_num != D40_PHY_CHAN) { 1292 /* Release logical channel, deactivate the event line */ 1293 1294 d40_config_set_event(d40c, false); 1295 d40c->base->lookup_log_chans[d40c->log_num] = NULL; 1296 1297 /* 1298 * Check if there are more logical allocation 1299 * on this phy channel. 1300 */ 1301 if (!d40_alloc_mask_free(phy, is_src, event)) { 1302 /* Resume the other logical channels if any */ 1303 if (d40_chan_has_events(d40c)) { 1304 res = d40_channel_execute_command(d40c, 1305 D40_DMA_RUN); 1306 if (res) { 1307 dev_err(&d40c->chan.dev->device, 1308 "[%s] Executing RUN command\n", 1309 __func__); 1310 return res; 1311 } 1312 } 1313 return 0; 1314 } 1315 } else { 1316 (void) d40_alloc_mask_free(phy, is_src, 0); 1317 } 1318 1319 /* Release physical channel */ 1320 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1321 if (res) { 1322 dev_err(&d40c->chan.dev->device, 1323 "[%s] Failed to stop channel\n", __func__); 1324 return res; 1325 } 1326 d40c->phy_chan = NULL; 1327 /* Invalidate channel type */ 1328 d40c->dma_cfg.channel_type = 0; 1329 d40c->base->lookup_phy_chans[phy->num] = NULL; 1330 1331 return 0; 1332} 1333 1334static int d40_pause(struct dma_chan *chan) 1335{ 1336 struct d40_chan *d40c = 1337 container_of(chan, struct d40_chan, chan); 1338 int res; 1339 unsigned long flags; 1340 1341 spin_lock_irqsave(&d40c->lock, flags); 1342 1343 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1344 if (res == 0) { 1345 if (d40c->log_num != D40_PHY_CHAN) { 1346 d40_config_set_event(d40c, false); 1347 /* Resume the other logical channels if any */ 1348 if (d40_chan_has_events(d40c)) 1349 res = d40_channel_execute_command(d40c, 1350 D40_DMA_RUN); 1351 } 1352 } 1353 1354 spin_unlock_irqrestore(&d40c->lock, flags); 1355 return res; 1356} 1357 1358static bool d40_is_paused(struct d40_chan *d40c) 1359{ 1360 bool is_paused = false; 1361 unsigned long flags; 1362 void __iomem *active_reg; 1363 u32 status; 1364 u32 event; 1365 1366 spin_lock_irqsave(&d40c->lock, flags); 1367 1368 if (d40c->log_num == D40_PHY_CHAN) { 1369 if (d40c->phy_chan->num % 2 == 0) 1370 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1371 else 1372 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1373 1374 status = (readl(active_reg) & 1375 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1376 D40_CHAN_POS(d40c->phy_chan->num); 1377 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 1378 is_paused = true; 1379 1380 goto _exit; 1381 } 1382 1383 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1384 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) 1385 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1386 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1387 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1388 else { 1389 dev_err(&d40c->chan.dev->device, 1390 "[%s] Unknown direction\n", __func__); 1391 goto _exit; 1392 } 1393 status = d40_chan_has_events(d40c); 1394 status = (status & D40_EVENTLINE_MASK(event)) >> 1395 D40_EVENTLINE_POS(event); 1396 1397 if (status != D40_DMA_RUN) 1398 is_paused = true; 1399_exit: 1400 spin_unlock_irqrestore(&d40c->lock, flags); 1401 return is_paused; 1402 1403} 1404 1405 1406static bool d40_tx_is_linked(struct d40_chan *d40c) 1407{ 1408 bool is_link; 1409 1410 if (d40c->log_num != D40_PHY_CHAN) 1411 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 1412 else 1413 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1414 d40c->phy_chan->num * D40_DREG_PCDELTA + 1415 D40_CHAN_REG_SDLNK) & 1416 D40_SREG_LNK_PHYS_LNK_MASK; 1417 return is_link; 1418} 1419 1420static u32 d40_residue(struct d40_chan *d40c) 1421{ 1422 u32 num_elt; 1423 1424 if (d40c->log_num != D40_PHY_CHAN) 1425 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 1426 >> D40_MEM_LCSP2_ECNT_POS; 1427 else 1428 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 1429 d40c->phy_chan->num * D40_DREG_PCDELTA + 1430 D40_CHAN_REG_SDELT) & 1431 D40_SREG_ELEM_PHY_ECNT_MASK) >> 1432 D40_SREG_ELEM_PHY_ECNT_POS; 1433 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 1434} 1435 1436static int d40_resume(struct dma_chan *chan) 1437{ 1438 struct d40_chan *d40c = 1439 container_of(chan, struct d40_chan, chan); 1440 int res = 0; 1441 unsigned long flags; 1442 1443 spin_lock_irqsave(&d40c->lock, flags); 1444 1445 if (d40c->base->rev == 0) 1446 if (d40c->log_num != D40_PHY_CHAN) { 1447 res = d40_channel_execute_command(d40c, 1448 D40_DMA_SUSPEND_REQ); 1449 goto no_suspend; 1450 } 1451 1452 /* If bytes left to transfer or linked tx resume job */ 1453 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1454 if (d40c->log_num != D40_PHY_CHAN) 1455 d40_config_set_event(d40c, true); 1456 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1457 } 1458 1459no_suspend: 1460 spin_unlock_irqrestore(&d40c->lock, flags); 1461 return res; 1462} 1463 1464static u32 stedma40_residue(struct dma_chan *chan) 1465{ 1466 struct d40_chan *d40c = 1467 container_of(chan, struct d40_chan, chan); 1468 u32 bytes_left; 1469 unsigned long flags; 1470 1471 spin_lock_irqsave(&d40c->lock, flags); 1472 bytes_left = d40_residue(d40c); 1473 spin_unlock_irqrestore(&d40c->lock, flags); 1474 1475 return bytes_left; 1476} 1477 1478/* Public DMA functions in addition to the DMA engine framework */ 1479 1480int stedma40_set_psize(struct dma_chan *chan, 1481 int src_psize, 1482 int dst_psize) 1483{ 1484 struct d40_chan *d40c = 1485 container_of(chan, struct d40_chan, chan); 1486 unsigned long flags; 1487 1488 spin_lock_irqsave(&d40c->lock, flags); 1489 1490 if (d40c->log_num != D40_PHY_CHAN) { 1491 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1492 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1493 d40c->log_def.lcsp1 |= src_psize << 1494 D40_MEM_LCSP1_SCFG_PSIZE_POS; 1495 d40c->log_def.lcsp3 |= dst_psize << 1496 D40_MEM_LCSP1_SCFG_PSIZE_POS; 1497 goto out; 1498 } 1499 1500 if (src_psize == STEDMA40_PSIZE_PHY_1) 1501 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1502 else { 1503 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1504 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1505 D40_SREG_CFG_PSIZE_POS); 1506 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; 1507 } 1508 1509 if (dst_psize == STEDMA40_PSIZE_PHY_1) 1510 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1511 else { 1512 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1513 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1514 D40_SREG_CFG_PSIZE_POS); 1515 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; 1516 } 1517out: 1518 spin_unlock_irqrestore(&d40c->lock, flags); 1519 return 0; 1520} 1521EXPORT_SYMBOL(stedma40_set_psize); 1522 1523struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1524 struct scatterlist *sgl_dst, 1525 struct scatterlist *sgl_src, 1526 unsigned int sgl_len, 1527 unsigned long dma_flags) 1528{ 1529 int res; 1530 struct d40_desc *d40d; 1531 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1532 chan); 1533 unsigned long flags; 1534 1535 if (d40c->phy_chan == NULL) { 1536 dev_err(&d40c->chan.dev->device, 1537 "[%s] Unallocated channel.\n", __func__); 1538 return ERR_PTR(-EINVAL); 1539 } 1540 1541 spin_lock_irqsave(&d40c->lock, flags); 1542 d40d = d40_desc_get(d40c); 1543 1544 if (d40d == NULL) 1545 goto err; 1546 1547 d40d->lli_len = sgl_len; 1548 d40d->lli_tx_len = d40d->lli_len; 1549 d40d->txd.flags = dma_flags; 1550 1551 if (d40c->log_num != D40_PHY_CHAN) { 1552 if (d40d->lli_len > d40c->base->plat_data->llis_per_log) 1553 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; 1554 1555 if (sgl_len > 1) 1556 /* 1557 * Check if there is space available in lcla. If not, 1558 * split list into 1-length and run only in lcpa 1559 * space. 1560 */ 1561 if (d40_lcla_id_get(d40c) != 0) 1562 d40d->lli_tx_len = 1; 1563 1564 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1565 dev_err(&d40c->chan.dev->device, 1566 "[%s] Out of memory\n", __func__); 1567 goto err; 1568 } 1569 1570 (void) d40_log_sg_to_lli(d40c->lcla.src_id, 1571 sgl_src, 1572 sgl_len, 1573 d40d->lli_log.src, 1574 d40c->log_def.lcsp1, 1575 d40c->dma_cfg.src_info.data_width, 1576 dma_flags & DMA_PREP_INTERRUPT, 1577 d40d->lli_tx_len, 1578 d40c->base->plat_data->llis_per_log); 1579 1580 (void) d40_log_sg_to_lli(d40c->lcla.dst_id, 1581 sgl_dst, 1582 sgl_len, 1583 d40d->lli_log.dst, 1584 d40c->log_def.lcsp3, 1585 d40c->dma_cfg.dst_info.data_width, 1586 dma_flags & DMA_PREP_INTERRUPT, 1587 d40d->lli_tx_len, 1588 d40c->base->plat_data->llis_per_log); 1589 1590 1591 } else { 1592 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1593 dev_err(&d40c->chan.dev->device, 1594 "[%s] Out of memory\n", __func__); 1595 goto err; 1596 } 1597 1598 res = d40_phy_sg_to_lli(sgl_src, 1599 sgl_len, 1600 0, 1601 d40d->lli_phy.src, 1602 d40d->lli_phy.src_addr, 1603 d40c->src_def_cfg, 1604 d40c->dma_cfg.src_info.data_width, 1605 d40c->dma_cfg.src_info.psize, 1606 true); 1607 1608 if (res < 0) 1609 goto err; 1610 1611 res = d40_phy_sg_to_lli(sgl_dst, 1612 sgl_len, 1613 0, 1614 d40d->lli_phy.dst, 1615 d40d->lli_phy.dst_addr, 1616 d40c->dst_def_cfg, 1617 d40c->dma_cfg.dst_info.data_width, 1618 d40c->dma_cfg.dst_info.psize, 1619 true); 1620 1621 if (res < 0) 1622 goto err; 1623 1624 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1625 d40d->lli_pool.size, DMA_TO_DEVICE); 1626 } 1627 1628 dma_async_tx_descriptor_init(&d40d->txd, chan); 1629 1630 d40d->txd.tx_submit = d40_tx_submit; 1631 1632 spin_unlock_irqrestore(&d40c->lock, flags); 1633 1634 return &d40d->txd; 1635err: 1636 spin_unlock_irqrestore(&d40c->lock, flags); 1637 return NULL; 1638} 1639EXPORT_SYMBOL(stedma40_memcpy_sg); 1640 1641bool stedma40_filter(struct dma_chan *chan, void *data) 1642{ 1643 struct stedma40_chan_cfg *info = data; 1644 struct d40_chan *d40c = 1645 container_of(chan, struct d40_chan, chan); 1646 int err; 1647 1648 if (data) { 1649 err = d40_validate_conf(d40c, info); 1650 if (!err) 1651 d40c->dma_cfg = *info; 1652 } else 1653 err = d40_config_memcpy(d40c); 1654 1655 return err == 0; 1656} 1657EXPORT_SYMBOL(stedma40_filter); 1658 1659/* DMA ENGINE functions */ 1660static int d40_alloc_chan_resources(struct dma_chan *chan) 1661{ 1662 int err; 1663 unsigned long flags; 1664 struct d40_chan *d40c = 1665 container_of(chan, struct d40_chan, chan); 1666 bool is_free_phy; 1667 spin_lock_irqsave(&d40c->lock, flags); 1668 1669 d40c->completed = chan->cookie = 1; 1670 1671 /* 1672 * If no dma configuration is set (channel_type == 0) 1673 * use default configuration (memcpy) 1674 */ 1675 if (d40c->dma_cfg.channel_type == 0) { 1676 err = d40_config_memcpy(d40c); 1677 if (err) { 1678 dev_err(&d40c->chan.dev->device, 1679 "[%s] Failed to configure memcpy channel\n", 1680 __func__); 1681 goto fail; 1682 } 1683 } 1684 is_free_phy = (d40c->phy_chan == NULL); 1685 1686 err = d40_allocate_channel(d40c); 1687 if (err) { 1688 dev_err(&d40c->chan.dev->device, 1689 "[%s] Failed to allocate channel\n", __func__); 1690 goto fail; 1691 } 1692 1693 /* Fill in basic CFG register values */ 1694 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 1695 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); 1696 1697 if (d40c->log_num != D40_PHY_CHAN) { 1698 d40_log_cfg(&d40c->dma_cfg, 1699 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1700 1701 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1702 d40c->lcpa = d40c->base->lcpa_base + 1703 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; 1704 else 1705 d40c->lcpa = d40c->base->lcpa_base + 1706 d40c->dma_cfg.dst_dev_type * 1707 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 1708 } 1709 1710 /* 1711 * Only write channel configuration to the DMA if the physical 1712 * resource is free. In case of multiple logical channels 1713 * on the same physical resource, only the first write is necessary. 1714 */ 1715 if (is_free_phy) { 1716 err = d40_config_write(d40c); 1717 if (err) { 1718 dev_err(&d40c->chan.dev->device, 1719 "[%s] Failed to configure channel\n", 1720 __func__); 1721 } 1722 } 1723fail: 1724 spin_unlock_irqrestore(&d40c->lock, flags); 1725 return err; 1726} 1727 1728static void d40_free_chan_resources(struct dma_chan *chan) 1729{ 1730 struct d40_chan *d40c = 1731 container_of(chan, struct d40_chan, chan); 1732 int err; 1733 unsigned long flags; 1734 1735 if (d40c->phy_chan == NULL) { 1736 dev_err(&d40c->chan.dev->device, 1737 "[%s] Cannot free unallocated channel\n", __func__); 1738 return; 1739 } 1740 1741 1742 spin_lock_irqsave(&d40c->lock, flags); 1743 1744 err = d40_free_dma(d40c); 1745 1746 if (err) 1747 dev_err(&d40c->chan.dev->device, 1748 "[%s] Failed to free channel\n", __func__); 1749 spin_unlock_irqrestore(&d40c->lock, flags); 1750} 1751 1752static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 1753 dma_addr_t dst, 1754 dma_addr_t src, 1755 size_t size, 1756 unsigned long dma_flags) 1757{ 1758 struct d40_desc *d40d; 1759 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1760 chan); 1761 unsigned long flags; 1762 int err = 0; 1763 1764 if (d40c->phy_chan == NULL) { 1765 dev_err(&d40c->chan.dev->device, 1766 "[%s] Channel is not allocated.\n", __func__); 1767 return ERR_PTR(-EINVAL); 1768 } 1769 1770 spin_lock_irqsave(&d40c->lock, flags); 1771 d40d = d40_desc_get(d40c); 1772 1773 if (d40d == NULL) { 1774 dev_err(&d40c->chan.dev->device, 1775 "[%s] Descriptor is NULL\n", __func__); 1776 goto err; 1777 } 1778 1779 d40d->txd.flags = dma_flags; 1780 1781 dma_async_tx_descriptor_init(&d40d->txd, chan); 1782 1783 d40d->txd.tx_submit = d40_tx_submit; 1784 1785 if (d40c->log_num != D40_PHY_CHAN) { 1786 1787 if (d40_pool_lli_alloc(d40d, 1, true) < 0) { 1788 dev_err(&d40c->chan.dev->device, 1789 "[%s] Out of memory\n", __func__); 1790 goto err; 1791 } 1792 d40d->lli_len = 1; 1793 d40d->lli_tx_len = 1; 1794 1795 d40_log_fill_lli(d40d->lli_log.src, 1796 src, 1797 size, 1798 0, 1799 d40c->log_def.lcsp1, 1800 d40c->dma_cfg.src_info.data_width, 1801 false, true); 1802 1803 d40_log_fill_lli(d40d->lli_log.dst, 1804 dst, 1805 size, 1806 0, 1807 d40c->log_def.lcsp3, 1808 d40c->dma_cfg.dst_info.data_width, 1809 true, true); 1810 1811 } else { 1812 1813 if (d40_pool_lli_alloc(d40d, 1, false) < 0) { 1814 dev_err(&d40c->chan.dev->device, 1815 "[%s] Out of memory\n", __func__); 1816 goto err; 1817 } 1818 1819 err = d40_phy_fill_lli(d40d->lli_phy.src, 1820 src, 1821 size, 1822 d40c->dma_cfg.src_info.psize, 1823 0, 1824 d40c->src_def_cfg, 1825 true, 1826 d40c->dma_cfg.src_info.data_width, 1827 false); 1828 if (err) 1829 goto err_fill_lli; 1830 1831 err = d40_phy_fill_lli(d40d->lli_phy.dst, 1832 dst, 1833 size, 1834 d40c->dma_cfg.dst_info.psize, 1835 0, 1836 d40c->dst_def_cfg, 1837 true, 1838 d40c->dma_cfg.dst_info.data_width, 1839 false); 1840 1841 if (err) 1842 goto err_fill_lli; 1843 1844 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1845 d40d->lli_pool.size, DMA_TO_DEVICE); 1846 } 1847 1848 spin_unlock_irqrestore(&d40c->lock, flags); 1849 return &d40d->txd; 1850 1851err_fill_lli: 1852 dev_err(&d40c->chan.dev->device, 1853 "[%s] Failed filling in PHY LLI\n", __func__); 1854 d40_pool_lli_free(d40d); 1855err: 1856 spin_unlock_irqrestore(&d40c->lock, flags); 1857 return NULL; 1858} 1859 1860static int d40_prep_slave_sg_log(struct d40_desc *d40d, 1861 struct d40_chan *d40c, 1862 struct scatterlist *sgl, 1863 unsigned int sg_len, 1864 enum dma_data_direction direction, 1865 unsigned long dma_flags) 1866{ 1867 dma_addr_t dev_addr = 0; 1868 int total_size; 1869 1870 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { 1871 dev_err(&d40c->chan.dev->device, 1872 "[%s] Out of memory\n", __func__); 1873 return -ENOMEM; 1874 } 1875 1876 d40d->lli_len = sg_len; 1877 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) 1878 d40d->lli_tx_len = d40d->lli_len; 1879 else 1880 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; 1881 1882 if (sg_len > 1) 1883 /* 1884 * Check if there is space available in lcla. 1885 * If not, split list into 1-length and run only 1886 * in lcpa space. 1887 */ 1888 if (d40_lcla_id_get(d40c) != 0) 1889 d40d->lli_tx_len = 1; 1890 1891 if (direction == DMA_FROM_DEVICE) 1892 if (d40c->runtime_addr) 1893 dev_addr = d40c->runtime_addr; 1894 else 1895 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 1896 else if (direction == DMA_TO_DEVICE) 1897 if (d40c->runtime_addr) 1898 dev_addr = d40c->runtime_addr; 1899 else 1900 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 1901 1902 else 1903 return -EINVAL; 1904 1905 total_size = d40_log_sg_to_dev(&d40c->lcla, 1906 sgl, sg_len, 1907 &d40d->lli_log, 1908 &d40c->log_def, 1909 d40c->dma_cfg.src_info.data_width, 1910 d40c->dma_cfg.dst_info.data_width, 1911 direction, 1912 dma_flags & DMA_PREP_INTERRUPT, 1913 dev_addr, d40d->lli_tx_len, 1914 d40c->base->plat_data->llis_per_log); 1915 1916 if (total_size < 0) 1917 return -EINVAL; 1918 1919 return 0; 1920} 1921 1922static int d40_prep_slave_sg_phy(struct d40_desc *d40d, 1923 struct d40_chan *d40c, 1924 struct scatterlist *sgl, 1925 unsigned int sgl_len, 1926 enum dma_data_direction direction, 1927 unsigned long dma_flags) 1928{ 1929 dma_addr_t src_dev_addr; 1930 dma_addr_t dst_dev_addr; 1931 int res; 1932 1933 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1934 dev_err(&d40c->chan.dev->device, 1935 "[%s] Out of memory\n", __func__); 1936 return -ENOMEM; 1937 } 1938 1939 d40d->lli_len = sgl_len; 1940 d40d->lli_tx_len = sgl_len; 1941 1942 if (direction == DMA_FROM_DEVICE) { 1943 dst_dev_addr = 0; 1944 if (d40c->runtime_addr) 1945 src_dev_addr = d40c->runtime_addr; 1946 else 1947 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 1948 } else if (direction == DMA_TO_DEVICE) { 1949 if (d40c->runtime_addr) 1950 dst_dev_addr = d40c->runtime_addr; 1951 else 1952 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 1953 src_dev_addr = 0; 1954 } else 1955 return -EINVAL; 1956 1957 res = d40_phy_sg_to_lli(sgl, 1958 sgl_len, 1959 src_dev_addr, 1960 d40d->lli_phy.src, 1961 d40d->lli_phy.src_addr, 1962 d40c->src_def_cfg, 1963 d40c->dma_cfg.src_info.data_width, 1964 d40c->dma_cfg.src_info.psize, 1965 true); 1966 if (res < 0) 1967 return res; 1968 1969 res = d40_phy_sg_to_lli(sgl, 1970 sgl_len, 1971 dst_dev_addr, 1972 d40d->lli_phy.dst, 1973 d40d->lli_phy.dst_addr, 1974 d40c->dst_def_cfg, 1975 d40c->dma_cfg.dst_info.data_width, 1976 d40c->dma_cfg.dst_info.psize, 1977 true); 1978 if (res < 0) 1979 return res; 1980 1981 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1982 d40d->lli_pool.size, DMA_TO_DEVICE); 1983 return 0; 1984} 1985 1986static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, 1987 struct scatterlist *sgl, 1988 unsigned int sg_len, 1989 enum dma_data_direction direction, 1990 unsigned long dma_flags) 1991{ 1992 struct d40_desc *d40d; 1993 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1994 chan); 1995 unsigned long flags; 1996 int err; 1997 1998 if (d40c->phy_chan == NULL) { 1999 dev_err(&d40c->chan.dev->device, 2000 "[%s] Cannot prepare unallocated channel\n", __func__); 2001 return ERR_PTR(-EINVAL); 2002 } 2003 2004 if (d40c->dma_cfg.pre_transfer) 2005 d40c->dma_cfg.pre_transfer(chan, 2006 d40c->dma_cfg.pre_transfer_data, 2007 sg_dma_len(sgl)); 2008 2009 spin_lock_irqsave(&d40c->lock, flags); 2010 d40d = d40_desc_get(d40c); 2011 spin_unlock_irqrestore(&d40c->lock, flags); 2012 2013 if (d40d == NULL) 2014 return NULL; 2015 2016 if (d40c->log_num != D40_PHY_CHAN) 2017 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 2018 direction, dma_flags); 2019 else 2020 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, 2021 direction, dma_flags); 2022 if (err) { 2023 dev_err(&d40c->chan.dev->device, 2024 "[%s] Failed to prepare %s slave sg job: %d\n", 2025 __func__, 2026 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); 2027 return NULL; 2028 } 2029 2030 d40d->txd.flags = dma_flags; 2031 2032 dma_async_tx_descriptor_init(&d40d->txd, chan); 2033 2034 d40d->txd.tx_submit = d40_tx_submit; 2035 2036 return &d40d->txd; 2037} 2038 2039static enum dma_status d40_tx_status(struct dma_chan *chan, 2040 dma_cookie_t cookie, 2041 struct dma_tx_state *txstate) 2042{ 2043 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2044 dma_cookie_t last_used; 2045 dma_cookie_t last_complete; 2046 int ret; 2047 2048 if (d40c->phy_chan == NULL) { 2049 dev_err(&d40c->chan.dev->device, 2050 "[%s] Cannot read status of unallocated channel\n", 2051 __func__); 2052 return -EINVAL; 2053 } 2054 2055 last_complete = d40c->completed; 2056 last_used = chan->cookie; 2057 2058 if (d40_is_paused(d40c)) 2059 ret = DMA_PAUSED; 2060 else 2061 ret = dma_async_is_complete(cookie, last_complete, last_used); 2062 2063 dma_set_tx_state(txstate, last_complete, last_used, 2064 stedma40_residue(chan)); 2065 2066 return ret; 2067} 2068 2069static void d40_issue_pending(struct dma_chan *chan) 2070{ 2071 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2072 unsigned long flags; 2073 2074 if (d40c->phy_chan == NULL) { 2075 dev_err(&d40c->chan.dev->device, 2076 "[%s] Channel is not allocated!\n", __func__); 2077 return; 2078 } 2079 2080 spin_lock_irqsave(&d40c->lock, flags); 2081 2082 /* Busy means that pending jobs are already being processed */ 2083 if (!d40c->busy) 2084 (void) d40_queue_start(d40c); 2085 2086 spin_unlock_irqrestore(&d40c->lock, flags); 2087} 2088 2089/* Runtime reconfiguration extension */ 2090static void d40_set_runtime_config(struct dma_chan *chan, 2091 struct dma_slave_config *config) 2092{ 2093 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2094 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; 2095 enum dma_slave_buswidth config_addr_width; 2096 dma_addr_t config_addr; 2097 u32 config_maxburst; 2098 enum stedma40_periph_data_width addr_width; 2099 int psize; 2100 2101 if (config->direction == DMA_FROM_DEVICE) { 2102 dma_addr_t dev_addr_rx = 2103 d40c->base->plat_data->dev_rx[cfg->src_dev_type]; 2104 2105 config_addr = config->src_addr; 2106 if (dev_addr_rx) 2107 dev_dbg(d40c->base->dev, 2108 "channel has a pre-wired RX address %08x " 2109 "overriding with %08x\n", 2110 dev_addr_rx, config_addr); 2111 if (cfg->dir != STEDMA40_PERIPH_TO_MEM) 2112 dev_dbg(d40c->base->dev, 2113 "channel was not configured for peripheral " 2114 "to memory transfer (%d) overriding\n", 2115 cfg->dir); 2116 cfg->dir = STEDMA40_PERIPH_TO_MEM; 2117 2118 config_addr_width = config->src_addr_width; 2119 config_maxburst = config->src_maxburst; 2120 2121 } else if (config->direction == DMA_TO_DEVICE) { 2122 dma_addr_t dev_addr_tx = 2123 d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; 2124 2125 config_addr = config->dst_addr; 2126 if (dev_addr_tx) 2127 dev_dbg(d40c->base->dev, 2128 "channel has a pre-wired TX address %08x " 2129 "overriding with %08x\n", 2130 dev_addr_tx, config_addr); 2131 if (cfg->dir != STEDMA40_MEM_TO_PERIPH) 2132 dev_dbg(d40c->base->dev, 2133 "channel was not configured for memory " 2134 "to peripheral transfer (%d) overriding\n", 2135 cfg->dir); 2136 cfg->dir = STEDMA40_MEM_TO_PERIPH; 2137 2138 config_addr_width = config->dst_addr_width; 2139 config_maxburst = config->dst_maxburst; 2140 2141 } else { 2142 dev_err(d40c->base->dev, 2143 "unrecognized channel direction %d\n", 2144 config->direction); 2145 return; 2146 } 2147 2148 switch (config_addr_width) { 2149 case DMA_SLAVE_BUSWIDTH_1_BYTE: 2150 addr_width = STEDMA40_BYTE_WIDTH; 2151 break; 2152 case DMA_SLAVE_BUSWIDTH_2_BYTES: 2153 addr_width = STEDMA40_HALFWORD_WIDTH; 2154 break; 2155 case DMA_SLAVE_BUSWIDTH_4_BYTES: 2156 addr_width = STEDMA40_WORD_WIDTH; 2157 break; 2158 case DMA_SLAVE_BUSWIDTH_8_BYTES: 2159 addr_width = STEDMA40_DOUBLEWORD_WIDTH; 2160 break; 2161 default: 2162 dev_err(d40c->base->dev, 2163 "illegal peripheral address width " 2164 "requested (%d)\n", 2165 config->src_addr_width); 2166 return; 2167 } 2168 2169 if (config_maxburst >= 16) 2170 psize = STEDMA40_PSIZE_LOG_16; 2171 else if (config_maxburst >= 8) 2172 psize = STEDMA40_PSIZE_LOG_8; 2173 else if (config_maxburst >= 4) 2174 psize = STEDMA40_PSIZE_LOG_4; 2175 else 2176 psize = STEDMA40_PSIZE_LOG_1; 2177 2178 /* Set up all the endpoint configs */ 2179 cfg->src_info.data_width = addr_width; 2180 cfg->src_info.psize = psize; 2181 cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN; 2182 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2183 cfg->dst_info.data_width = addr_width; 2184 cfg->dst_info.psize = psize; 2185 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; 2186 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2187 2188 /* These settings will take precedence later */ 2189 d40c->runtime_addr = config_addr; 2190 d40c->runtime_direction = config->direction; 2191 dev_dbg(d40c->base->dev, 2192 "configured channel %s for %s, data width %d, " 2193 "maxburst %d bytes, LE, no flow control\n", 2194 dma_chan_name(chan), 2195 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 2196 config_addr_width, 2197 config_maxburst); 2198} 2199 2200static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2201 unsigned long arg) 2202{ 2203 unsigned long flags; 2204 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2205 2206 if (d40c->phy_chan == NULL) { 2207 dev_err(&d40c->chan.dev->device, 2208 "[%s] Channel is not allocated!\n", __func__); 2209 return -EINVAL; 2210 } 2211 2212 switch (cmd) { 2213 case DMA_TERMINATE_ALL: 2214 spin_lock_irqsave(&d40c->lock, flags); 2215 d40_term_all(d40c); 2216 spin_unlock_irqrestore(&d40c->lock, flags); 2217 return 0; 2218 case DMA_PAUSE: 2219 return d40_pause(chan); 2220 case DMA_RESUME: 2221 return d40_resume(chan); 2222 case DMA_SLAVE_CONFIG: 2223 d40_set_runtime_config(chan, 2224 (struct dma_slave_config *) arg); 2225 return 0; 2226 default: 2227 break; 2228 } 2229 2230 /* Other commands are unimplemented */ 2231 return -ENXIO; 2232} 2233 2234/* Initialization functions */ 2235 2236static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2237 struct d40_chan *chans, int offset, 2238 int num_chans) 2239{ 2240 int i = 0; 2241 struct d40_chan *d40c; 2242 2243 INIT_LIST_HEAD(&dma->channels); 2244 2245 for (i = offset; i < offset + num_chans; i++) { 2246 d40c = &chans[i]; 2247 d40c->base = base; 2248 d40c->chan.device = dma; 2249 2250 /* Invalidate lcla element */ 2251 d40c->lcla.src_id = -1; 2252 d40c->lcla.dst_id = -1; 2253 2254 spin_lock_init(&d40c->lock); 2255 2256 d40c->log_num = D40_PHY_CHAN; 2257 2258 INIT_LIST_HEAD(&d40c->active); 2259 INIT_LIST_HEAD(&d40c->queue); 2260 INIT_LIST_HEAD(&d40c->client); 2261 2262 tasklet_init(&d40c->tasklet, dma_tasklet, 2263 (unsigned long) d40c); 2264 2265 list_add_tail(&d40c->chan.device_node, 2266 &dma->channels); 2267 } 2268} 2269 2270static int __init d40_dmaengine_init(struct d40_base *base, 2271 int num_reserved_chans) 2272{ 2273 int err ; 2274 2275 d40_chan_init(base, &base->dma_slave, base->log_chans, 2276 0, base->num_log_chans); 2277 2278 dma_cap_zero(base->dma_slave.cap_mask); 2279 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2280 2281 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2282 base->dma_slave.device_free_chan_resources = d40_free_chan_resources; 2283 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; 2284 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; 2285 base->dma_slave.device_tx_status = d40_tx_status; 2286 base->dma_slave.device_issue_pending = d40_issue_pending; 2287 base->dma_slave.device_control = d40_control; 2288 base->dma_slave.dev = base->dev; 2289 2290 err = dma_async_device_register(&base->dma_slave); 2291 2292 if (err) { 2293 dev_err(base->dev, 2294 "[%s] Failed to register slave channels\n", 2295 __func__); 2296 goto failure1; 2297 } 2298 2299 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2300 base->num_log_chans, base->plat_data->memcpy_len); 2301 2302 dma_cap_zero(base->dma_memcpy.cap_mask); 2303 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2304 2305 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2306 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; 2307 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; 2308 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; 2309 base->dma_memcpy.device_tx_status = d40_tx_status; 2310 base->dma_memcpy.device_issue_pending = d40_issue_pending; 2311 base->dma_memcpy.device_control = d40_control; 2312 base->dma_memcpy.dev = base->dev; 2313 /* 2314 * This controller can only access address at even 2315 * 32bit boundaries, i.e. 2^2 2316 */ 2317 base->dma_memcpy.copy_align = 2; 2318 2319 err = dma_async_device_register(&base->dma_memcpy); 2320 2321 if (err) { 2322 dev_err(base->dev, 2323 "[%s] Failed to regsiter memcpy only channels\n", 2324 __func__); 2325 goto failure2; 2326 } 2327 2328 d40_chan_init(base, &base->dma_both, base->phy_chans, 2329 0, num_reserved_chans); 2330 2331 dma_cap_zero(base->dma_both.cap_mask); 2332 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2333 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2334 2335 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2336 base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2337 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; 2338 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; 2339 base->dma_both.device_tx_status = d40_tx_status; 2340 base->dma_both.device_issue_pending = d40_issue_pending; 2341 base->dma_both.device_control = d40_control; 2342 base->dma_both.dev = base->dev; 2343 base->dma_both.copy_align = 2; 2344 err = dma_async_device_register(&base->dma_both); 2345 2346 if (err) { 2347 dev_err(base->dev, 2348 "[%s] Failed to register logical and physical capable channels\n", 2349 __func__); 2350 goto failure3; 2351 } 2352 return 0; 2353failure3: 2354 dma_async_device_unregister(&base->dma_memcpy); 2355failure2: 2356 dma_async_device_unregister(&base->dma_slave); 2357failure1: 2358 return err; 2359} 2360 2361/* Initialization functions. */ 2362 2363static int __init d40_phy_res_init(struct d40_base *base) 2364{ 2365 int i; 2366 int num_phy_chans_avail = 0; 2367 u32 val[2]; 2368 int odd_even_bit = -2; 2369 2370 val[0] = readl(base->virtbase + D40_DREG_PRSME); 2371 val[1] = readl(base->virtbase + D40_DREG_PRSMO); 2372 2373 for (i = 0; i < base->num_phy_chans; i++) { 2374 base->phy_res[i].num = i; 2375 odd_even_bit += 2 * ((i % 2) == 0); 2376 if (((val[i % 2] >> odd_even_bit) & 3) == 1) { 2377 /* Mark security only channels as occupied */ 2378 base->phy_res[i].allocated_src = D40_ALLOC_PHY; 2379 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 2380 } else { 2381 base->phy_res[i].allocated_src = D40_ALLOC_FREE; 2382 base->phy_res[i].allocated_dst = D40_ALLOC_FREE; 2383 num_phy_chans_avail++; 2384 } 2385 spin_lock_init(&base->phy_res[i].lock); 2386 } 2387 2388 /* Mark disabled channels as occupied */ 2389 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { 2390 base->phy_res[i].allocated_src = D40_ALLOC_PHY; 2391 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 2392 num_phy_chans_avail--; 2393 } 2394 2395 dev_info(base->dev, "%d of %d physical DMA channels available\n", 2396 num_phy_chans_avail, base->num_phy_chans); 2397 2398 /* Verify settings extended vs standard */ 2399 val[0] = readl(base->virtbase + D40_DREG_PRTYP); 2400 2401 for (i = 0; i < base->num_phy_chans; i++) { 2402 2403 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && 2404 (val[0] & 0x3) != 1) 2405 dev_info(base->dev, 2406 "[%s] INFO: channel %d is misconfigured (%d)\n", 2407 __func__, i, val[0] & 0x3); 2408 2409 val[0] = val[0] >> 2; 2410 } 2411 2412 return num_phy_chans_avail; 2413} 2414 2415static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 2416{ 2417 static const struct d40_reg_val dma_id_regs[] = { 2418 /* Peripheral Id */ 2419 { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, 2420 { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, 2421 /* 2422 * D40_DREG_PERIPHID2 Depends on HW revision: 2423 * MOP500/HREF ED has 0x0008, 2424 * ? has 0x0018, 2425 * HREF V1 has 0x0028 2426 */ 2427 { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, 2428 2429 /* PCell Id */ 2430 { .reg = D40_DREG_CELLID0, .val = 0x000d}, 2431 { .reg = D40_DREG_CELLID1, .val = 0x00f0}, 2432 { .reg = D40_DREG_CELLID2, .val = 0x0005}, 2433 { .reg = D40_DREG_CELLID3, .val = 0x00b1} 2434 }; 2435 struct stedma40_platform_data *plat_data; 2436 struct clk *clk = NULL; 2437 void __iomem *virtbase = NULL; 2438 struct resource *res = NULL; 2439 struct d40_base *base = NULL; 2440 int num_log_chans = 0; 2441 int num_phy_chans; 2442 int i; 2443 u32 val; 2444 2445 clk = clk_get(&pdev->dev, NULL); 2446 2447 if (IS_ERR(clk)) { 2448 dev_err(&pdev->dev, "[%s] No matching clock found\n", 2449 __func__); 2450 goto failure; 2451 } 2452 2453 clk_enable(clk); 2454 2455 /* Get IO for DMAC base address */ 2456 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 2457 if (!res) 2458 goto failure; 2459 2460 if (request_mem_region(res->start, resource_size(res), 2461 D40_NAME " I/O base") == NULL) 2462 goto failure; 2463 2464 virtbase = ioremap(res->start, resource_size(res)); 2465 if (!virtbase) 2466 goto failure; 2467 2468 /* HW version check */ 2469 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2470 if (dma_id_regs[i].val != 2471 readl(virtbase + dma_id_regs[i].reg)) { 2472 dev_err(&pdev->dev, 2473 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2474 __func__, 2475 dma_id_regs[i].val, 2476 dma_id_regs[i].reg, 2477 readl(virtbase + dma_id_regs[i].reg)); 2478 goto failure; 2479 } 2480 } 2481 2482 /* Get silicon revision */ 2483 val = readl(virtbase + D40_DREG_PERIPHID2); 2484 2485 if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { 2486 dev_err(&pdev->dev, 2487 "[%s] Unknown designer! Got %x wanted %x\n", 2488 __func__, val & 0xf, D40_PERIPHID2_DESIGNER); 2489 goto failure; 2490 } 2491 2492 /* The number of physical channels on this HW */ 2493 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 2494 2495 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", 2496 (val >> 4) & 0xf, res->start); 2497 2498 plat_data = pdev->dev.platform_data; 2499 2500 /* Count the number of logical channels in use */ 2501 for (i = 0; i < plat_data->dev_len; i++) 2502 if (plat_data->dev_rx[i] != 0) 2503 num_log_chans++; 2504 2505 for (i = 0; i < plat_data->dev_len; i++) 2506 if (plat_data->dev_tx[i] != 0) 2507 num_log_chans++; 2508 2509 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 2510 (num_phy_chans + num_log_chans + plat_data->memcpy_len) * 2511 sizeof(struct d40_chan), GFP_KERNEL); 2512 2513 if (base == NULL) { 2514 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); 2515 goto failure; 2516 } 2517 2518 base->rev = (val >> 4) & 0xf; 2519 base->clk = clk; 2520 base->num_phy_chans = num_phy_chans; 2521 base->num_log_chans = num_log_chans; 2522 base->phy_start = res->start; 2523 base->phy_size = resource_size(res); 2524 base->virtbase = virtbase; 2525 base->plat_data = plat_data; 2526 base->dev = &pdev->dev; 2527 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); 2528 base->log_chans = &base->phy_chans[num_phy_chans]; 2529 2530 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), 2531 GFP_KERNEL); 2532 if (!base->phy_res) 2533 goto failure; 2534 2535 base->lookup_phy_chans = kzalloc(num_phy_chans * 2536 sizeof(struct d40_chan *), 2537 GFP_KERNEL); 2538 if (!base->lookup_phy_chans) 2539 goto failure; 2540 2541 if (num_log_chans + plat_data->memcpy_len) { 2542 /* 2543 * The max number of logical channels are event lines for all 2544 * src devices and dst devices 2545 */ 2546 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * 2547 sizeof(struct d40_chan *), 2548 GFP_KERNEL); 2549 if (!base->lookup_log_chans) 2550 goto failure; 2551 } 2552 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), 2553 GFP_KERNEL); 2554 if (!base->lcla_pool.alloc_map) 2555 goto failure; 2556 2557 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 2558 0, SLAB_HWCACHE_ALIGN, 2559 NULL); 2560 if (base->desc_slab == NULL) 2561 goto failure; 2562 2563 return base; 2564 2565failure: 2566 if (clk) { 2567 clk_disable(clk); 2568 clk_put(clk); 2569 } 2570 if (virtbase) 2571 iounmap(virtbase); 2572 if (res) 2573 release_mem_region(res->start, 2574 resource_size(res)); 2575 if (virtbase) 2576 iounmap(virtbase); 2577 2578 if (base) { 2579 kfree(base->lcla_pool.alloc_map); 2580 kfree(base->lookup_log_chans); 2581 kfree(base->lookup_phy_chans); 2582 kfree(base->phy_res); 2583 kfree(base); 2584 } 2585 2586 return NULL; 2587} 2588 2589static void __init d40_hw_init(struct d40_base *base) 2590{ 2591 2592 static const struct d40_reg_val dma_init_reg[] = { 2593 /* Clock every part of the DMA block from start */ 2594 { .reg = D40_DREG_GCC, .val = 0x0000ff01}, 2595 2596 /* Interrupts on all logical channels */ 2597 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, 2598 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, 2599 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, 2600 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, 2601 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, 2602 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, 2603 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, 2604 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, 2605 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, 2606 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, 2607 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, 2608 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} 2609 }; 2610 int i; 2611 u32 prmseo[2] = {0, 0}; 2612 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; 2613 u32 pcmis = 0; 2614 u32 pcicr = 0; 2615 2616 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) 2617 writel(dma_init_reg[i].val, 2618 base->virtbase + dma_init_reg[i].reg); 2619 2620 /* Configure all our dma channels to default settings */ 2621 for (i = 0; i < base->num_phy_chans; i++) { 2622 2623 activeo[i % 2] = activeo[i % 2] << 2; 2624 2625 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src 2626 == D40_ALLOC_PHY) { 2627 activeo[i % 2] |= 3; 2628 continue; 2629 } 2630 2631 /* Enable interrupt # */ 2632 pcmis = (pcmis << 1) | 1; 2633 2634 /* Clear interrupt # */ 2635 pcicr = (pcicr << 1) | 1; 2636 2637 /* Set channel to physical mode */ 2638 prmseo[i % 2] = prmseo[i % 2] << 2; 2639 prmseo[i % 2] |= 1; 2640 2641 } 2642 2643 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); 2644 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); 2645 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); 2646 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); 2647 2648 /* Write which interrupt to enable */ 2649 writel(pcmis, base->virtbase + D40_DREG_PCMIS); 2650 2651 /* Write which interrupt to clear */ 2652 writel(pcicr, base->virtbase + D40_DREG_PCICR); 2653 2654} 2655 2656static int __init d40_lcla_allocate(struct d40_base *base) 2657{ 2658 unsigned long *page_list; 2659 int i, j; 2660 int ret = 0; 2661 2662 /* 2663 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned, 2664 * To full fill this hardware requirement without wasting 256 kb 2665 * we allocate pages until we get an aligned one. 2666 */ 2667 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, 2668 GFP_KERNEL); 2669 2670 if (!page_list) { 2671 ret = -ENOMEM; 2672 goto failure; 2673 } 2674 2675 /* Calculating how many pages that are required */ 2676 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; 2677 2678 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { 2679 page_list[i] = __get_free_pages(GFP_KERNEL, 2680 base->lcla_pool.pages); 2681 if (!page_list[i]) { 2682 2683 dev_err(base->dev, 2684 "[%s] Failed to allocate %d pages.\n", 2685 __func__, base->lcla_pool.pages); 2686 2687 for (j = 0; j < i; j++) 2688 free_pages(page_list[j], base->lcla_pool.pages); 2689 goto failure; 2690 } 2691 2692 if ((virt_to_phys((void *)page_list[i]) & 2693 (LCLA_ALIGNMENT - 1)) == 0) 2694 break; 2695 } 2696 2697 for (j = 0; j < i; j++) 2698 free_pages(page_list[j], base->lcla_pool.pages); 2699 2700 if (i < MAX_LCLA_ALLOC_ATTEMPTS) { 2701 base->lcla_pool.base = (void *)page_list[i]; 2702 } else { 2703 /* After many attempts, no succees with finding the correct 2704 * alignment try with allocating a big buffer */ 2705 dev_warn(base->dev, 2706 "[%s] Failed to get %d pages @ 18 bit align.\n", 2707 __func__, base->lcla_pool.pages); 2708 base->lcla_pool.base_unaligned = kmalloc(SZ_1K * 2709 base->num_phy_chans + 2710 LCLA_ALIGNMENT, 2711 GFP_KERNEL); 2712 if (!base->lcla_pool.base_unaligned) { 2713 ret = -ENOMEM; 2714 goto failure; 2715 } 2716 2717 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, 2718 LCLA_ALIGNMENT); 2719 } 2720 2721 writel(virt_to_phys(base->lcla_pool.base), 2722 base->virtbase + D40_DREG_LCLA); 2723failure: 2724 kfree(page_list); 2725 return ret; 2726} 2727 2728static int __init d40_probe(struct platform_device *pdev) 2729{ 2730 int err; 2731 int ret = -ENOENT; 2732 struct d40_base *base; 2733 struct resource *res = NULL; 2734 int num_reserved_chans; 2735 u32 val; 2736 2737 base = d40_hw_detect_init(pdev); 2738 2739 if (!base) 2740 goto failure; 2741 2742 num_reserved_chans = d40_phy_res_init(base); 2743 2744 platform_set_drvdata(pdev, base); 2745 2746 spin_lock_init(&base->interrupt_lock); 2747 spin_lock_init(&base->execmd_lock); 2748 2749 /* Get IO for logical channel parameter address */ 2750 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 2751 if (!res) { 2752 ret = -ENOENT; 2753 dev_err(&pdev->dev, 2754 "[%s] No \"lcpa\" memory resource\n", 2755 __func__); 2756 goto failure; 2757 } 2758 base->lcpa_size = resource_size(res); 2759 base->phy_lcpa = res->start; 2760 2761 if (request_mem_region(res->start, resource_size(res), 2762 D40_NAME " I/O lcpa") == NULL) { 2763 ret = -EBUSY; 2764 dev_err(&pdev->dev, 2765 "[%s] Failed to request LCPA region 0x%x-0x%x\n", 2766 __func__, res->start, res->end); 2767 goto failure; 2768 } 2769 2770 /* We make use of ESRAM memory for this. */ 2771 val = readl(base->virtbase + D40_DREG_LCPA); 2772 if (res->start != val && val != 0) { 2773 dev_warn(&pdev->dev, 2774 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", 2775 __func__, val, res->start); 2776 } else 2777 writel(res->start, base->virtbase + D40_DREG_LCPA); 2778 2779 base->lcpa_base = ioremap(res->start, resource_size(res)); 2780 if (!base->lcpa_base) { 2781 ret = -ENOMEM; 2782 dev_err(&pdev->dev, 2783 "[%s] Failed to ioremap LCPA region\n", 2784 __func__); 2785 goto failure; 2786 } 2787 2788 ret = d40_lcla_allocate(base); 2789 if (ret) { 2790 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", 2791 __func__); 2792 goto failure; 2793 } 2794 2795 spin_lock_init(&base->lcla_pool.lock); 2796 2797 base->lcla_pool.num_blocks = base->num_phy_chans; 2798 2799 base->irq = platform_get_irq(pdev, 0); 2800 2801 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 2802 2803 if (ret) { 2804 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); 2805 goto failure; 2806 } 2807 2808 err = d40_dmaengine_init(base, num_reserved_chans); 2809 if (err) 2810 goto failure; 2811 2812 d40_hw_init(base); 2813 2814 dev_info(base->dev, "initialized\n"); 2815 return 0; 2816 2817failure: 2818 if (base) { 2819 if (base->desc_slab) 2820 kmem_cache_destroy(base->desc_slab); 2821 if (base->virtbase) 2822 iounmap(base->virtbase); 2823 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 2824 free_pages((unsigned long)base->lcla_pool.base, 2825 base->lcla_pool.pages); 2826 if (base->lcla_pool.base_unaligned) 2827 kfree(base->lcla_pool.base_unaligned); 2828 if (base->phy_lcpa) 2829 release_mem_region(base->phy_lcpa, 2830 base->lcpa_size); 2831 if (base->phy_start) 2832 release_mem_region(base->phy_start, 2833 base->phy_size); 2834 if (base->clk) { 2835 clk_disable(base->clk); 2836 clk_put(base->clk); 2837 } 2838 2839 kfree(base->lcla_pool.alloc_map); 2840 kfree(base->lookup_log_chans); 2841 kfree(base->lookup_phy_chans); 2842 kfree(base->phy_res); 2843 kfree(base); 2844 } 2845 2846 dev_err(&pdev->dev, "[%s] probe failed\n", __func__); 2847 return ret; 2848} 2849 2850static struct platform_driver d40_driver = { 2851 .driver = { 2852 .owner = THIS_MODULE, 2853 .name = D40_NAME, 2854 }, 2855}; 2856 2857int __init stedma40_init(void) 2858{ 2859 return platform_driver_probe(&d40_driver, d40_probe); 2860} 2861arch_initcall(stedma40_init); 2862