Lines Matching refs:d40c, only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/dma/

The matches below come from the ST-Ericsson DMA40 (ste_dma40) driver in that directory; d40c is its per-channel state, a struct d40_chan (see line 364 onward). The numbers at the start of each match are line numbers in the matched source file.

364 static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
367 dma_cookie_t cookie = d40c->chan.cookie;
372 d40c->chan.cookie = cookie;
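
The matches above (lines 364-372) are the pre-2.6.38 dmaengine cookie idiom: each channel hands out monotonically increasing cookies and skips non-positive values, which the core reserves for error returns. A minimal sketch of the whole helper, with the wraparound check and the write into the descriptor filled in as assumptions (only the chan.cookie lines appear in the listing):

    static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
                                          struct d40_desc *d40d)
    {
            dma_cookie_t cookie = d40c->chan.cookie;

            /* Assumed wraparound check: cookies <= 0 are reserved. */
            if (++cookie < 0)
                    cookie = 1;

            d40c->chan.cookie = cookie;
            d40d->txd.cookie = cookie;      /* assumed: tag the descriptor too */

            return cookie;
    }
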
383 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
388 if (!list_empty(&d40c->client)) {
389 list_for_each_entry_safe(d, _d, &d40c->client, node)
396 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
405 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
407 kmem_cache_free(d40c->base->desc_slab, d40d);
410 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
412 list_add_tail(&desc->node, &d40c->active);
415 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
419 if (list_empty(&d40c->active))
422 d = list_first_entry(&d40c->active,
428 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
430 list_add_tail(&desc->node, &d40c->queue);
433 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
437 if (list_empty(&d40c->queue))
440 d = list_first_entry(&d40c->queue,
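
Lines 383-440 are the descriptor lifecycle helpers: d40_desc_get() first tries to recycle a descriptor parked on the channel's client list and only then falls back to the slab cache with GFP_NOWAIT (it can be called in atomic context), while d40_desc_submit()/d40_desc_queue() append to the active and queue lists and the first_* helpers peek at the list heads. A sketch of the recycle-or-allocate pattern; the unlink and the reuse test inside the loop are assumptions, the rest follows the matched lines:

    static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
    {
            struct d40_desc *d, *_d;

            /* Prefer recycling a descriptor a client has finished with. */
            if (!list_empty(&d40c->client)) {
                    list_for_each_entry_safe(d, _d, &d40c->client, node) {
                            /* Assumed: the driver may test the descriptor
                             * (e.g. an acked txd) before reusing it. */
                            list_del(&d->node);
                            return d;
                    }
            }

            /* Possibly running under a spinlock, hence GFP_NOWAIT. */
            return kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
    }
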
448 static int d40_lcla_id_get(struct d40_chan *d40c)
453 d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
455 int lli_per_log = d40c->base->plat_data->llis_per_log;
458 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
461 if (d40c->base->lcla_pool.num_blocks > 32)
464 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
466 for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
467 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
469 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
475 if (src_id >= d40c->base->lcla_pool.num_blocks)
478 for (; i < d40c->base->lcla_pool.num_blocks; i++) {
479 if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
481 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
491 d40c->lcla.src_id = src_id;
492 d40c->lcla.dst_id = dst_id;
493 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
494 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
496 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
499 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
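
Lines 448-499 are the LCLA (logical channel link address) block allocator. Under the pool lock it scans the physical channel's allocation bitmap twice: the first free block becomes the source id and, continuing from the same index, a second free block becomes the destination id; the src/dst LLI pointers are then derived from those ids (line 453 shows each physical channel owning a 1024-byte LCLA slice). A condensed sketch of the double scan, with sentinel initializers and the unwind label assumed:

    spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

    /* First pass: claim the first free block for the source side. */
    for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
            if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
                  (0x1 << i))) {
                    d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
                            0x1 << i;
                    src_id = i;
                    break;
            }
    }
    /* src_id is assumed to start >= num_blocks so this fails closed. */
    if (src_id >= d40c->base->lcla_pool.num_blocks)
            goto err;

    /* Second pass continues from the same index for the destination. */
    for (; i < d40c->base->lcla_pool.num_blocks; i++) {
            if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
                  (0x1 << i))) {
                    d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
                            0x1 << i;
                    dst_id = i;
                    break;
            }
    }

    d40c->lcla.src_id = src_id;
    d40c->lcla.dst_id = dst_id;
    /* The +1 matches lines 493-494; it skips the first LLI slot of
     * each block (the reason is not visible in the listing). */
    d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
    d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

    spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
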
504 static int d40_channel_execute_command(struct d40_chan *d40c,
513 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
515 if (d40c->phy_chan->num % 2 == 0)
516 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
518 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
522 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
523 D40_CHAN_POS(d40c->phy_chan->num);
529 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
530 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
537 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
538 D40_CHAN_POS(d40c->phy_chan->num);
553 dev_err(&d40c->chan.dev->device,
555 __func__, d40c->phy_chan->num, d40c->log_num,
563 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
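
Lines 504-563 show how commands reach the hardware in d40_channel_execute_command(): per-channel status is packed two bits per channel across a register pair, D40_DREG_ACTIVE for even-numbered physical channels and D40_DREG_ACTIVO for odd ones, so issuing a command is a masked write at the channel's bit position under the exec lock, followed by re-reading the field until the expected state appears. A sketch of the select-mask-write core; the polling loop, timeout, and the dev_err() on failure (lines 553-555) are elided:

    spin_lock_irqsave(&d40c->base->execmd_lock, flags);

    /* Even channels live in ACTIVE, odd channels in ACTIVO. */
    if (d40c->phy_chan->num % 2 == 0)
            active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
    else
            active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

    /* Clear this channel's 2-bit field, then write the command into it. */
    wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
    writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
           active_reg);

    /* The driver then polls the same field,
     *   (readl(active_reg) & D40_CHAN_POS_MASK(num)) >> D40_CHAN_POS(num),
     * until it matches the requested command (lines 522-538). */

    spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
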
567 static void d40_term_all(struct d40_chan *d40c)
573 while ((d40d = d40_first_active_get(d40c))) {
577 d40_desc_free(d40c, d40d);
581 while ((d40d = d40_first_queued(d40c))) {
585 d40_desc_free(d40c, d40d);
588 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
590 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
591 (~(0x1 << d40c->lcla.dst_id));
592 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
593 (~(0x1 << d40c->lcla.src_id));
595 d40c->lcla.src_id = -1;
596 d40c->lcla.dst_id = -1;
598 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
600 d40c->pending_tx = 0;
601 d40c->busy = false;
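
Lines 567-601 are the terminate-all helper: both software lists are drained through the first_* accessors with every descriptor freed, the channel's two LCLA bitmap bits are released under the pool lock, and the bookkeeping flags are reset. A sketch of the drain-and-release sequence; the unlink inside each loop and the validity guard on the LCLA ids are assumptions:

    /* Drain whatever was started and whatever was still queued. */
    while ((d40d = d40_first_active_get(d40c))) {
            list_del(&d40d->node);          /* assumed unlink */
            d40_desc_free(d40c, d40d);
    }
    while ((d40d = d40_first_queued(d40c))) {
            list_del(&d40d->node);          /* assumed unlink */
            d40_desc_free(d40c, d40d);
    }

    /* Give back this channel's LCLA blocks (lines 588-598). */
    spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
    if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) {  /* assumed guard */
            d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
                    ~(0x1 << d40c->lcla.dst_id);
            d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
                    ~(0x1 << d40c->lcla.src_id);
            d40c->lcla.src_id = -1;
            d40c->lcla.dst_id = -1;
    }
    spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

    d40c->pending_tx = 0;
    d40c->busy = false;
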
604 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
615 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
618 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
619 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
620 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
624 d40c->base->virtbase + D40_DREG_PCBASE +
625 d40c->phy_chan->num * D40_DREG_PCDELTA +
628 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
629 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
633 d40c->base->virtbase + D40_DREG_PCBASE +
634 d40c->phy_chan->num * D40_DREG_PCDELTA +
638 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
641 static u32 d40_chan_has_events(struct d40_chan *d40c)
646 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
647 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
648 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
649 d40c->phy_chan->num * D40_DREG_PCDELTA +
652 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
653 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
654 d40c->phy_chan->num * D40_DREG_PCDELTA +
659 static void d40_config_enable_lidx(struct d40_chan *d40c)
662 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
664 d40c->base->virtbase + D40_DREG_PCBASE +
665 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
667 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
669 d40c->base->virtbase + D40_DREG_PCBASE +
670 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
673 static int d40_config_write(struct d40_chan *d40c)
679 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
684 addr_base = (d40c->phy_chan->num % 2) * 4;
686 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
687 D40_CHAN_POS(d40c->phy_chan->num);
688 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
691 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
692 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
694 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
696 if (d40c->log_num != D40_PHY_CHAN) {
698 writel(d40c->src_def_cfg,
699 d40c->base->virtbase + D40_DREG_PCBASE +
700 d40c->phy_chan->num * D40_DREG_PCDELTA +
702 writel(d40c->dst_def_cfg,
703 d40c->base->virtbase + D40_DREG_PCBASE +
704 d40c->phy_chan->num * D40_DREG_PCDELTA +
707 d40_config_enable_lidx(d40c);
712 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
715 d40_phy_lli_write(d40c->base->virtbase,
716 d40c->phy_chan->num,
726 s = d40_log_lli_write(d40c->lcpa,
727 d40c->lcla.src, d40c->lcla.dst,
729 d40c->base->plat_data->llis_per_log);
733 (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
736 (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
746 struct d40_chan *d40c = container_of(tx->chan,
752 spin_lock_irqsave(&d40c->lock, flags);
754 tx->cookie = d40_assign_cookie(d40c, d40d);
756 d40_desc_queue(d40c, d40d);
758 spin_unlock_irqrestore(&d40c->lock, flags);
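
Lines 746-758 are the driver's tx_submit hook: container_of() recovers the channel from the generic dma_chan embedded in it, and under the channel lock the descriptor receives its cookie and is appended to the software queue; nothing touches the hardware here, that happens later in issue_pending. The shape of the hook, with the descriptor recovery line assumed (only the channel recovery appears in the listing):

    static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
    {
            struct d40_chan *d40c = container_of(tx->chan,
                                                 struct d40_chan, chan);
            /* Assumed: recover the wrapping descriptor the same way. */
            struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
            unsigned long flags;

            spin_lock_irqsave(&d40c->lock, flags);

            tx->cookie = d40_assign_cookie(d40c, d40d);
            d40_desc_queue(d40c, d40d);     /* queued, not yet started */

            spin_unlock_irqrestore(&d40c->lock, flags);

            return tx->cookie;
    }
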
763 static int d40_start(struct d40_chan *d40c)
765 if (d40c->base->rev == 0) {
768 if (d40c->log_num != D40_PHY_CHAN) {
769 err = d40_channel_execute_command(d40c,
776 if (d40c->log_num != D40_PHY_CHAN)
777 d40_config_set_event(d40c, true);
779 return d40_channel_execute_command(d40c, D40_DMA_RUN);
782 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
788 d40d = d40_first_queued(d40c);
791 d40c->busy = true;
797 d40_desc_submit(d40c, d40d);
800 d40_desc_load(d40c, d40d);
803 err = d40_start(d40c);
813 static void dma_tc_handle(struct d40_chan *d40c)
817 if (!d40c->phy_chan)
821 d40d = d40_first_active_get(d40c);
828 d40_desc_load(d40c, d40d);
830 (void) d40_start(d40c);
834 if (d40_queue_start(d40c) == NULL)
835 d40c->busy = false;
837 d40c->pending_tx++;
838 tasklet_schedule(&d40c->tasklet);
844 struct d40_chan *d40c = (struct d40_chan *) data;
850 spin_lock_irqsave(&d40c->lock, flags);
853 d40d_fin = d40_first_active_get(d40c);
858 d40c->completed = d40d_fin->txd.cookie;
864 if (d40c->pending_tx == 0) {
865 spin_unlock_irqrestore(&d40c->lock, flags);
877 d40_desc_free(d40c, d40d_fin);
881 list_add_tail(&d40d_fin->node, &d40c->client);
886 d40c->pending_tx--;
888 if (d40c->pending_tx)
889 tasklet_schedule(&d40c->tasklet);
891 spin_unlock_irqrestore(&d40c->lock, flags);
900 if (d40c->pending_tx > 0)
901 d40c->pending_tx--;
902 spin_unlock_irqrestore(&d40c->lock, flags);
926 struct d40_chan *d40c;
954 d40c = base->lookup_phy_chans[idx];
956 d40c = base->lookup_log_chans[il[row].offset + idx];
957 spin_lock(&d40c->lock);
960 dma_tc_handle(d40c);
966 spin_unlock(&d40c->lock);
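
Lines 813-966 show the completion path split across interrupt and tasklet context. In the IRQ handler the channel is looked up (physical or logical table), its lock taken, and dma_tc_handle() either reloads the next LLI chunk of a long transfer, starts the next queued descriptor, or marks the channel idle; in every case it bumps pending_tx and schedules the tasklet, which records the completed cookie and frees or parks the descriptor. A condensed sketch of the handoff; the unlink, the reuse test and the elided client-callback invocation are assumptions:

    /* Interrupt side (lines 813-838), condensed: */
    static void dma_tc_handle(struct d40_chan *d40c)
    {
            /* (reload/restart of a partially transferred descriptor,
             * lines 821-830, omitted) */
            if (d40_queue_start(d40c) == NULL)
                    d40c->busy = false;     /* nothing left to run */

            d40c->pending_tx++;
            tasklet_schedule(&d40c->tasklet);
    }

    /* Tasklet side (lines 844-902), condensed: */
    static void dma_tasklet(unsigned long data)
    {
            struct d40_chan *d40c = (struct d40_chan *) data;
            struct d40_desc *d40d_fin;
            unsigned long flags;

            spin_lock_irqsave(&d40c->lock, flags);

            d40d_fin = d40_first_active_get(d40c);
            if (d40d_fin == NULL)
                    goto err;               /* cf. the bail-out at line 900 */

            /* Mark progress for later tx_status queries. */
            d40c->completed = d40d_fin->txd.cookie;

            list_del(&d40d_fin->node);      /* assumed unlink from active */

            /* Assumed reuse test: free now, or park for d40_desc_get(). */
            if (async_tx_test_ack(&d40d_fin->txd))
                    d40_desc_free(d40c, d40d_fin);
            else
                    list_add_tail(&d40d_fin->node, &d40c->client);

            d40c->pending_tx--;
            if (d40c->pending_tx)
                    tasklet_schedule(&d40c->tasklet);

            spin_unlock_irqrestore(&d40c->lock, flags);
            return;                         /* callback invocation elided */

    err:
            if (d40c->pending_tx > 0)
                    d40c->pending_tx--;
            spin_unlock_irqrestore(&d40c->lock, flags);
    }
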
975 static int d40_validate_conf(struct d40_chan *d40c,
984 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
986 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
991 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
993 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
1000 dev_err(&d40c->chan.dev->device,
1007 dev_err(&d40c->chan.dev->device,
1017 dev_err(&d40c->chan.dev->device,
1112 static int d40_allocate_channel(struct d40_chan *d40c)
1122 bool is_log = (d40c->dma_cfg.channel_type &
1127 phys = d40c->base->phy_res;
1129 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1130 dev_type = d40c->dma_cfg.src_dev_type;
1133 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1134 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1136 dev_type = d40c->dma_cfg.dst_dev_type;
1146 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1148 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1155 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1167 d40c->phy_chan = &phys[i];
1168 d40c->log_num = D40_PHY_CHAN;
1175 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1199 d40c->phy_chan = &phys[i];
1200 d40c->log_num = log_num;
1204 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1206 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1212 static int d40_config_memcpy(struct d40_chan *d40c)
1214 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1217 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1218 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1219 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1220 memcpy[d40c->chan.chan_id];
1224 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1226 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1235 static int d40_free_dma(struct d40_chan *d40c)
1240 struct d40_phy_res *phy = d40c->phy_chan;
1247 d40_term_all(d40c);
1250 if (!list_empty(&d40c->client))
1251 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1255 d40_desc_free(d40c, d);
1259 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1266 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1271 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1272 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1273 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1275 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1276 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1279 dev_err(&d40c->chan.dev->device,
1284 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1286 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1291 if (d40c->log_num != D40_PHY_CHAN) {
1294 d40_config_set_event(d40c, false);
1295 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1303 if (d40_chan_has_events(d40c)) {
1304 res = d40_channel_execute_command(d40c,
1307 dev_err(&d40c->chan.dev->device,
1320 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1322 dev_err(&d40c->chan.dev->device,
1326 d40c->phy_chan = NULL;
1328 d40c->dma_cfg.channel_type = 0;
1329 d40c->base->lookup_phy_chans[phy->num] = NULL;
1336 struct d40_chan *d40c =
1341 spin_lock_irqsave(&d40c->lock, flags);
1343 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1345 if (d40c->log_num != D40_PHY_CHAN) {
1346 d40_config_set_event(d40c, false);
1348 if (d40_chan_has_events(d40c))
1349 res = d40_channel_execute_command(d40c,
1354 spin_unlock_irqrestore(&d40c->lock, flags);
1358 static bool d40_is_paused(struct d40_chan *d40c)
1366 spin_lock_irqsave(&d40c->lock, flags);
1368 if (d40c->log_num == D40_PHY_CHAN) {
1369 if (d40c->phy_chan->num % 2 == 0)
1370 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1372 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1375 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1376 D40_CHAN_POS(d40c->phy_chan->num);
1383 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1384 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1385 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1386 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1387 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1389 dev_err(&d40c->chan.dev->device,
1393 status = d40_chan_has_events(d40c);
1400 spin_unlock_irqrestore(&d40c->lock, flags);
1406 static bool d40_tx_is_linked(struct d40_chan *d40c)
1410 if (d40c->log_num != D40_PHY_CHAN)
1411 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1413 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1414 d40c->phy_chan->num * D40_DREG_PCDELTA +
1420 static u32 d40_residue(struct d40_chan *d40c)
1424 if (d40c->log_num != D40_PHY_CHAN)
1425 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1428 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1429 d40c->phy_chan->num * D40_DREG_PCDELTA +
1433 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
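
Lines 1420-1433 compute the transfer residue: the count of elements still outstanding is read either from the logical channel's lcsp2 word in LCPA memory or from the physical channel's register block, then scaled by the configured data width, which is stored as a log2 so the byte count is simply the element count shifted up. A sketch of the helper; the mask/shift macro names on the read-backs go beyond what the listing shows and are assumptions:

    static u32 d40_residue(struct d40_chan *d40c)
    {
            u32 num_elt;

            if (d40c->log_num != D40_PHY_CHAN)
                    /* Logical channel: element count lives in lcsp2. */
                    num_elt = (readl(&d40c->lcpa->lcsp2) &
                               D40_MEM_LCSP2_ECNT_MASK)
                              >> D40_MEM_LCSP2_ECNT_POS;   /* POS name assumed */
            else
                    /* Physical channel: read back from the per-channel
                     * register block (lines 1428-1429); the register and
                     * mask/shift names here are assumptions. */
                    num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
                                     d40c->phy_chan->num * D40_DREG_PCDELTA +
                                     D40_CHAN_REG_SDELT) &
                               D40_SREG_ELEM_PHY_ECNT_MASK)
                              >> D40_SREG_ELEM_PHY_ECNT_POS;

            /* data_width is log2(bytes): 0 = byte, 1 = halfword, 2 = word. */
            return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
    }
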
1438 struct d40_chan *d40c =
1443 spin_lock_irqsave(&d40c->lock, flags);
1445 if (d40c->base->rev == 0)
1446 if (d40c->log_num != D40_PHY_CHAN) {
1447 res = d40_channel_execute_command(d40c,
1453 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1454 if (d40c->log_num != D40_PHY_CHAN)
1455 d40_config_set_event(d40c, true);
1456 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1460 spin_unlock_irqrestore(&d40c->lock, flags);
1466 struct d40_chan *d40c =
1471 spin_lock_irqsave(&d40c->lock, flags);
1472 bytes_left = d40_residue(d40c);
1473 spin_unlock_irqrestore(&d40c->lock, flags);
1484 struct d40_chan *d40c =
1488 spin_lock_irqsave(&d40c->lock, flags);
1490 if (d40c->log_num != D40_PHY_CHAN) {
1491 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1492 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1493 d40c->log_def.lcsp1 |= src_psize <<
1495 d40c->log_def.lcsp3 |= dst_psize <<
1501 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1503 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1504 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1506 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1510 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1512 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1513 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1515 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1518 spin_unlock_irqrestore(&d40c->lock, flags);
1531 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1535 if (d40c->phy_chan == NULL) {
1536 dev_err(&d40c->chan.dev->device,
1541 spin_lock_irqsave(&d40c->lock, flags);
1542 d40d = d40_desc_get(d40c);
1551 if (d40c->log_num != D40_PHY_CHAN) {
1552 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1553 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1561 if (d40_lcla_id_get(d40c) != 0)
1565 dev_err(&d40c->chan.dev->device,
1570 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1574 d40c->log_def.lcsp1,
1575 d40c->dma_cfg.src_info.data_width,
1578 d40c->base->plat_data->llis_per_log);
1580 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1584 d40c->log_def.lcsp3,
1585 d40c->dma_cfg.dst_info.data_width,
1588 d40c->base->plat_data->llis_per_log);
1593 dev_err(&d40c->chan.dev->device,
1603 d40c->src_def_cfg,
1604 d40c->dma_cfg.src_info.data_width,
1605 d40c->dma_cfg.src_info.psize,
1616 d40c->dst_def_cfg,
1617 d40c->dma_cfg.dst_info.data_width,
1618 d40c->dma_cfg.dst_info.psize,
1624 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1632 spin_unlock_irqrestore(&d40c->lock, flags);
1636 spin_unlock_irqrestore(&d40c->lock, flags);
1644 struct d40_chan *d40c =
1649 err = d40_validate_conf(d40c, info);
1651 d40c->dma_cfg = *info;
1653 err = d40_config_memcpy(d40c);
1664 struct d40_chan *d40c =
1667 spin_lock_irqsave(&d40c->lock, flags);
1669 d40c->completed = chan->cookie = 1;
1675 if (d40c->dma_cfg.channel_type == 0) {
1676 err = d40_config_memcpy(d40c);
1678 dev_err(&d40c->chan.dev->device,
1684 is_free_phy = (d40c->phy_chan == NULL);
1686 err = d40_allocate_channel(d40c);
1688 dev_err(&d40c->chan.dev->device,
1694 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1695 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1697 if (d40c->log_num != D40_PHY_CHAN) {
1698 d40_log_cfg(&d40c->dma_cfg,
1699 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1701 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1702 d40c->lcpa = d40c->base->lcpa_base +
1703 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1705 d40c->lcpa = d40c->base->lcpa_base +
1706 d40c->dma_cfg.dst_dev_type *
1716 err = d40_config_write(d40c);
1718 dev_err(&d40c->chan.dev->device,
1724 spin_unlock_irqrestore(&d40c->lock, flags);
1730 struct d40_chan *d40c =
1735 if (d40c->phy_chan == NULL) {
1736 dev_err(&d40c->chan.dev->device,
1742 spin_lock_irqsave(&d40c->lock, flags);
1744 err = d40_free_dma(d40c);
1747 dev_err(&d40c->chan.dev->device,
1749 spin_unlock_irqrestore(&d40c->lock, flags);
1759 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1764 if (d40c->phy_chan == NULL) {
1765 dev_err(&d40c->chan.dev->device,
1770 spin_lock_irqsave(&d40c->lock, flags);
1771 d40d = d40_desc_get(d40c);
1774 dev_err(&d40c->chan.dev->device,
1785 if (d40c->log_num != D40_PHY_CHAN) {
1788 dev_err(&d40c->chan.dev->device,
1799 d40c->log_def.lcsp1,
1800 d40c->dma_cfg.src_info.data_width,
1807 d40c->log_def.lcsp3,
1808 d40c->dma_cfg.dst_info.data_width,
1814 dev_err(&d40c->chan.dev->device,
1822 d40c->dma_cfg.src_info.psize,
1824 d40c->src_def_cfg,
1826 d40c->dma_cfg.src_info.data_width,
1834 d40c->dma_cfg.dst_info.psize,
1836 d40c->dst_def_cfg,
1838 d40c->dma_cfg.dst_info.data_width,
1844 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1848 spin_unlock_irqrestore(&d40c->lock, flags);
1852 dev_err(&d40c->chan.dev->device,
1856 spin_unlock_irqrestore(&d40c->lock, flags);
1861 struct d40_chan *d40c,
1871 dev_err(&d40c->chan.dev->device,
1877 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1880 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1888 if (d40_lcla_id_get(d40c) != 0)
1892 if (d40c->runtime_addr)
1893 dev_addr = d40c->runtime_addr;
1895 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1897 if (d40c->runtime_addr)
1898 dev_addr = d40c->runtime_addr;
1900 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1905 total_size = d40_log_sg_to_dev(&d40c->lcla,
1908 &d40c->log_def,
1909 d40c->dma_cfg.src_info.data_width,
1910 d40c->dma_cfg.dst_info.data_width,
1914 d40c->base->plat_data->llis_per_log);
1923 struct d40_chan *d40c,
1934 dev_err(&d40c->chan.dev->device,
1944 if (d40c->runtime_addr)
1945 src_dev_addr = d40c->runtime_addr;
1947 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1949 if (d40c->runtime_addr)
1950 dst_dev_addr = d40c->runtime_addr;
1952 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1962 d40c->src_def_cfg,
1963 d40c->dma_cfg.src_info.data_width,
1964 d40c->dma_cfg.src_info.psize,
1974 d40c->dst_def_cfg,
1975 d40c->dma_cfg.dst_info.data_width,
1976 d40c->dma_cfg.dst_info.psize,
1981 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1993 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1998 if (d40c->phy_chan == NULL) {
1999 dev_err(&d40c->chan.dev->device,
2004 if (d40c->dma_cfg.pre_transfer)
2005 d40c->dma_cfg.pre_transfer(chan,
2006 d40c->dma_cfg.pre_transfer_data,
2009 spin_lock_irqsave(&d40c->lock, flags);
2010 d40d = d40_desc_get(d40c);
2011 spin_unlock_irqrestore(&d40c->lock, flags);
2016 if (d40c->log_num != D40_PHY_CHAN)
2017 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2020 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2023 dev_err(&d40c->chan.dev->device,
2026 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2043 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2048 if (d40c->phy_chan == NULL) {
2049 dev_err(&d40c->chan.dev->device,
2055 last_complete = d40c->completed;
2058 if (d40_is_paused(d40c))
2071 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2074 if (d40c->phy_chan == NULL) {
2075 dev_err(&d40c->chan.dev->device,
2080 spin_lock_irqsave(&d40c->lock, flags);
2083 if (!d40c->busy)
2084 (void) d40_queue_start(d40c);
2086 spin_unlock_irqrestore(&d40c->lock, flags);
2093 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2094 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2103 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2107 dev_dbg(d40c->base->dev,
2112 dev_dbg(d40c->base->dev,
2123 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2127 dev_dbg(d40c->base->dev,
2132 dev_dbg(d40c->base->dev,
2142 dev_err(d40c->base->dev,
2162 dev_err(d40c->base->dev,
2189 d40c->runtime_addr = config_addr;
2190 d40c->runtime_direction = config->direction;
2191 dev_dbg(d40c->base->dev,
2204 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2206 if (d40c->phy_chan == NULL) {
2207 dev_err(&d40c->chan.dev->device,
2214 spin_lock_irqsave(&d40c->lock, flags);
2215 d40_term_all(d40c);
2216 spin_unlock_irqrestore(&d40c->lock, flags);
2241 struct d40_chan *d40c;
2246 d40c = &chans[i];
2247 d40c->base = base;
2248 d40c->chan.device = dma;
2251 d40c->lcla.src_id = -1;
2252 d40c->lcla.dst_id = -1;
2254 spin_lock_init(&d40c->lock);
2256 d40c->log_num = D40_PHY_CHAN;
2258 INIT_LIST_HEAD(&d40c->active);
2259 INIT_LIST_HEAD(&d40c->queue);
2260 INIT_LIST_HEAD(&d40c->client);
2262 tasklet_init(&d40c->tasklet, dma_tasklet,
2263 (unsigned long) d40c);
2265 list_add_tail(&d40c->chan.device_node,
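
The final cluster (lines 2241-2265) is the per-channel setup loop run at probe time: each d40_chan gets its back pointers, invalid LCLA ids, a lock, empty descriptor lists and a completion tasklet before being linked onto the dmaengine device's channel list. A sketch of one iteration; the enclosing function, the loop bound and the target list head are not visible in the listing and are assumed:

    for (i = 0; i < num_chans; i++) {       /* bound assumed */
            d40c = &chans[i];
            d40c->base = base;
            d40c->chan.device = dma;

            /* No LCLA blocks held yet; -1 marks them unallocated. */
            d40c->lcla.src_id = -1;
            d40c->lcla.dst_id = -1;

            spin_lock_init(&d40c->lock);

            /* Default until channel allocation picks a logical one. */
            d40c->log_num = D40_PHY_CHAN;

            INIT_LIST_HEAD(&d40c->active);
            INIT_LIST_HEAD(&d40c->queue);
            INIT_LIST_HEAD(&d40c->client);

            tasklet_init(&d40c->tasklet, dma_tasklet,
                         (unsigned long) d40c);

            list_add_tail(&d40c->chan.device_node,
                          &dma->channels);  /* list head assumed */
    }
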