Lines Matching defs:mxs_dma (drivers/dma/mxs-dma.c)

38 #define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
39 #define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)
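
These two predicates (lines 38-39) drive every register-layout decision below: APBH vs. APBX blocks, and the older i.MX23 APBH vs. later revisions. They combine with the STMP_OFFSET_REG_SET/_CLR offsets seen throughout the matches, which address the set/clear shadow registers common to STMP-style peripherals. A minimal sketch of that convention, with hypothetical helper names:

    /*
     * Sketch of the SET/CLR shadow-register convention used throughout
     * this file. STMP_OFFSET_REG_SET/_CLR come from <linux/stmp_device.h>;
     * writing a mask to reg + SET sets exactly those bits, and reg + CLR
     * clears them, with no read-modify-write. Helper names are ours.
     */
    #include <linux/io.h>
    #include <linux/stmp_device.h>

    static inline void mxs_set_bits(void __iomem *base, u32 reg, u32 mask)
    {
            writel(mask, base + reg + STMP_OFFSET_REG_SET);
    }

    static inline void mxs_clear_bits(void __iomem *base, u32 reg, u32 mask)
    {
            writel(mask, base + reg + STMP_OFFSET_REG_CLR);
    }
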
109 struct mxs_dma_engine *mxs_dma;
186 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
199 } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
201 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
205 void __iomem *reg_dbg1 = mxs_dma->base +
206 HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);
221 dev_err(&mxs_chan->mxs_dma->pdev->dev,
226 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
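
Lines 186-226 come from the channel-reset path: old APBH resets a channel through HW_APBHX_CTRL0, everything else through HW_APBHX_CHANNEL_CTRL, and APBX first waits for the channel's state machine (HW_APBX_CHn_DEBUG1) to go idle, logging the error at line 221 if it never does. A condensed sketch; the BP_* bit positions and the 0xf state mask are assumptions about the driver's defines, and the bounded-timeout bookkeeping is elided:

    /* Condensed reset flow behind lines 199-226 (timeout handling elided). */
    static void mxs_dma_reset_sketch(struct mxs_dma_chan *mxs_chan)
    {
            struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
            int chan_id = mxs_chan->chan.chan_id;

            if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
                    /* old APBH: per-channel reset bits live in CTRL0 */
                    writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
                           mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
            } else {
                    /* APBX may only be reset once the channel is idle */
                    void __iomem *reg_dbg1 = mxs_dma->base +
                            HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);

                    while (readl(reg_dbg1) & 0xf)   /* state machine busy */
                            udelay(100);            /* <linux/delay.h> */

                    writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
                           mxs_dma->base + HW_APBHX_CHANNEL_CTRL +
                           STMP_OFFSET_REG_SET);
            }
    }
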
235 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
240 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
248 writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
250 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
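
Lines 235-250 show the whole channel-start handshake: point the channel at the first command descriptor via HW_APBHX_CHn_NXTCMDAR, then bump the channel semaphore (2 keeps an old-style cyclic chain one descriptor ahead, 1 is the normal kick). A sketch, assuming ccw_phys is the DMA address of the descriptor chain allocated at line 401:

    /* Channel start, per lines 240-250. */
    static void mxs_dma_kick_sketch(struct mxs_dma_chan *mxs_chan, bool cyclic)
    {
            struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
            int chan_id = mxs_chan->chan.chan_id;

            /* hardware fetches commands starting at this bus address */
            writel(mxs_chan->ccw_phys,
                   mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

            /* a non-zero semaphore makes the engine start fetching */
            writel(cyclic ? 2 : 1,
                   mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
    }
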
265 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
269 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
271 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
274 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
283 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
287 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
289 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
292 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
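
Lines 265-292 are the pause/resume pair, and they are exact mirrors: both write the channel's freeze bit to the same register (HW_APBHX_CTRL0 on old APBH, HW_APBHX_CHANNEL_CTRL otherwise); pause goes through the SET shadow, resume through CLR. Folded into one sketch:

    /* Pause/resume folded together, per lines 269-292. */
    static void mxs_dma_freeze_sketch(struct mxs_dma_chan *mxs_chan, bool freeze)
    {
            struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
            int chan_id = mxs_chan->chan.chan_id;
            u32 reg = (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) ?
                            HW_APBHX_CTRL0 : HW_APBHX_CHANNEL_CTRL;

            writel(1 << chan_id, mxs_dma->base + reg +
                   (freeze ? STMP_OFFSET_REG_SET : STMP_OFFSET_REG_CLR));
    }
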
310 static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
314 for (i = 0; i != mxs_dma->nr_channels; ++i)
315 if (mxs_dma->mxs_chans[i].chan_irq == irq)
323 struct mxs_dma_engine *mxs_dma = dev_id;
327 int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
333 completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
338 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
341 err = readl(mxs_dma->base + HW_APBHX_CTRL2);
353 mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
364 mxs_chan = &mxs_dma->mxs_chans[chan];
367 dev_dbg(mxs_dma->dma_device.dev,
376 writel(1, mxs_dma->base +
377 HW_APBHX_CHn_SEMA(mxs_dma, chan));
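
Lines 310-377 cover interrupt handling. Each channel has its own IRQ line, so mxs_dma_irq_to_chan (lines 310-315) maps the shared handler's irq argument back to a channel index by linear search. The handler then reads completion bits from HW_APBHX_CTRL1 and error bits from HW_APBHX_CTRL2, acknowledges both through the CLR shadows, and re-arms semaphore-driven cyclic channels by writing 1 to HW_APBHX_CHn_SEMA (lines 376-377). A skeleton; the exact CTRL1/CTRL2 bit layouts are not visible in these matches, so the masking below is illustrative:

    /* Interrupt-path skeleton, per lines 323-377 (descriptor-completion
     * callbacks and the cyclic/error bookkeeping are elided).
     */
    static irqreturn_t mxs_dma_int_sketch(int irq, void *dev_id)
    {
            struct mxs_dma_engine *mxs_dma = dev_id;
            int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
            u32 completed, err;

            if (chan < 0)
                    return IRQ_NONE;

            /* read, then acknowledge, this channel's completion bit */
            completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
            writel(completed & (1 << chan),
                   mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

            /* same read-then-clear dance for the error status */
            err = readl(mxs_dma->base + HW_APBHX_CTRL2);
            writel(err & (1 << chan),
                   mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

            /* semaphore-driven cyclic channels are re-armed (lines 376-377) */
            writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan));

            return IRQ_HANDLED;
    }
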
398 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
401 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
410 0, "mxs-dma", mxs_dma);
414 ret = clk_prepare_enable(mxs_dma->clk);
429 free_irq(mxs_chan->chan_irq, mxs_dma);
431 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
440 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
444 free_irq(mxs_chan->chan_irq, mxs_dma);
446 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
449 clk_disable_unprepare(mxs_dma->clk);
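
Lines 398-449 pair up as the per-channel resource lifecycle: allocation grabs a coherent block for the command chain, the channel IRQ (with the engine as dev_id, line 410), and a clock reference; teardown at lines 440-449 releases them in reverse. A condensed sketch, with the unwind order taken from the error path at lines 429-431 and the GFP flag assumed:

    /* Per-channel setup, per lines 401-431. CCW_BLOCK_SIZE, ccw/ccw_phys
     * and mxs_dma_int_handler are the driver's own names.
     */
    static int mxs_dma_alloc_sketch(struct mxs_dma_chan *mxs_chan)
    {
            struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
            int ret;

            mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
                            CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, GFP_KERNEL);
            if (!mxs_chan->ccw)
                    return -ENOMEM;

            ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
                              0, "mxs-dma", mxs_dma);
            if (ret)
                    goto err_irq;

            ret = clk_prepare_enable(mxs_dma->clk);
            if (ret)
                    goto err_clk;

            return 0;

    err_clk:
            free_irq(mxs_chan->chan_irq, mxs_dma);
    err_irq:
            dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
                              mxs_chan->ccw, mxs_chan->ccw_phys);
            return ret;
    }
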
480 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
491 dev_err(mxs_dma->dma_device.dev,
536 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
579 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
591 dev_err(mxs_dma->dma_device.dev,
598 dev_err(mxs_dma->dma_device.dev,
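
Lines 480-598 come from the two prep callbacks (slave_sg and cyclic); what the matches expose is parameter validation. Line 536, for instance, rejects a scatterlist entry larger than the per-descriptor limit, consistent with the MAX_XFER_BYTES segment cap advertised at line 801. The check, reconstructed:

    /* Per-sg-entry size check implied by line 536. */
    #include <linux/scatterlist.h>

    static int mxs_dma_check_sg_sketch(struct mxs_dma_engine *mxs_dma,
                                       struct scatterlist *sg)
    {
            if (sg_dma_len(sg) > MAX_XFER_BYTES) {
                    dev_err(mxs_dma->dma_device.dev,
                            "maximum bytes for sg entry exceeded: %d > %d\n",
                            sg_dma_len(sg), MAX_XFER_BYTES);
                    return -EINVAL;
            }
            return 0;
    }
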
650 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
661 bar = readl(mxs_dma->base +
662 HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
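
Lines 650-662 are from device_tx_status: HW_APBHX_CHn_BAR holds the bus address the channel is currently transferring, so residue falls out of pointer arithmetic against the descriptor bounds. A sketch; the buf_start/buf_len bookkeeping is assumed, since the matches only show the register read:

    /* Residue from the channel's current bus address, per lines 661-662. */
    static u32 mxs_dma_residue_sketch(struct mxs_dma_chan *mxs_chan,
                                      dma_addr_t buf_start, u32 buf_len)
    {
            struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
            u32 bar = readl(mxs_dma->base +
                            HW_APBHX_CHn_BAR(mxs_dma, mxs_chan->chan.chan_id));

            return buf_len - (bar - (u32)buf_start);
    }
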
672 static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
676 ret = clk_prepare_enable(mxs_dma->clk);
680 ret = stmp_reset_block(mxs_dma->base);
685 if (dma_is_apbh(mxs_dma)) {
687 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
689 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
694 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
697 clk_disable_unprepare(mxs_dma->clk);
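
Lines 672-697 are the one-time engine bring-up: enable the clock, soft-reset the block with the stmp helper, enable APBH burst modes, unmask the per-channel completion interrupts in HW_APBHX_CTRL1, then drop the clock reference. Note that line 697 runs on success too; each channel re-takes the clock in alloc_chan_resources (line 414). A sketch, with the BM_*/mask names assumed from the driver's defines:

    /* Engine init, per lines 676-697. */
    static int mxs_dma_init_sketch(struct mxs_dma_engine *mxs_dma)
    {
            int ret;

            ret = clk_prepare_enable(mxs_dma->clk);
            if (ret)
                    return ret;

            ret = stmp_reset_block(mxs_dma->base);  /* <linux/stmp_device.h> */
            if (ret)
                    goto err_out;

            if (dma_is_apbh(mxs_dma)) {
                    /* APBH only: enable AHB burst transfers */
                    writel(BM_APBH_CTRL0_APB_BURST_EN,
                           mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
                    writel(BM_APBH_CTRL0_APB_BURST8_EN,
                           mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
            }

            /* unmask the completion interrupt for every channel */
            writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
                   mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

    err_out:
            /* dropped even on success; channels re-enable it (line 414) */
            clk_disable_unprepare(mxs_dma->clk);
            return ret;
    }
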
709 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
715 chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
727 struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
728 dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
736 if (param.chan_id >= mxs_dma->nr_channels)
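
Lines 709-736 implement device-tree channel lookup: the filter function fetches the channel's IRQ with platform_get_irq (line 715), and the xlate callback validates the single DT cell as a channel id before handing the engine's capability mask to a filtered channel request. A sketch of the translation, assuming the driver's one-cell binding:

    /* DT translation, per lines 727-736. struct mxs_dma_filter_param and
     * mxs_dma_filter_fn are the driver's; dma_request_channel() stands in
     * for whichever request variant the driver actually calls.
     */
    #include <linux/of_dma.h>

    static struct dma_chan *mxs_dma_xlate_sketch(struct of_phandle_args *dma_spec,
                                                 struct of_dma *ofdma)
    {
            struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
            dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
            struct mxs_dma_filter_param param;

            if (dma_spec->args_count != 1)
                    return NULL;

            param.chan_id = dma_spec->args[0];
            if (param.chan_id >= mxs_dma->nr_channels)
                    return NULL;

            return dma_request_channel(mask, mxs_dma_filter_fn, &param);
    }
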
747 struct mxs_dma_engine *mxs_dma;
750 mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
751 if (!mxs_dma)
754 ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
761 mxs_dma->type = dma_type->type;
762 mxs_dma->dev_id = dma_type->id;
764 mxs_dma->base = devm_platform_ioremap_resource(pdev, 0);
765 if (IS_ERR(mxs_dma->base))
766 return PTR_ERR(mxs_dma->base);
768 mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
769 if (IS_ERR(mxs_dma->clk))
770 return PTR_ERR(mxs_dma->clk);
772 dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
773 dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
775 INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
779 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
781 mxs_chan->mxs_dma = mxs_dma;
782 mxs_chan->chan.device = &mxs_dma->dma_device;
790 &mxs_dma->dma_device.channels);
793 ret = mxs_dma_init(mxs_dma);
797 mxs_dma->pdev = pdev;
798 mxs_dma->dma_device.dev = &pdev->dev;
800 /* mxs_dma gets 65535 bytes maximum sg size */
801 dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
803 mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
804 mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
805 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
806 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
807 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
808 mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
809 mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
810 mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
811 mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
812 mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
813 mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
814 mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
815 mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
817 ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
819 dev_err(mxs_dma->dma_device.dev, "unable to register\n");
823 ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
825 dev_err(mxs_dma->dma_device.dev,
829 dev_info(mxs_dma->dma_device.dev, "initialized\n");
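
Lines 747-829 walk through probe: allocate the engine, read dma-channels from the DT, map the MMIO region, get the clock, set up per-channel state (lines 779-790), run mxs_dma_init, fill in the dmaengine ops and capabilities, cap the segment size at MAX_XFER_BYTES, register with the managed dmaenginem_async_device_register (which unregisters automatically on detach, hence no remove-side match), and finally publish the OF translation. A condensed sketch; the SoC-type match (lines 761-762), the channel loop, and the ops table are elided:

    /* Condensed probe flow, per lines 747-829. */
    static int mxs_dma_probe_sketch(struct platform_device *pdev)
    {
            struct device_node *np = pdev->dev.of_node;
            struct mxs_dma_engine *mxs_dma;
            int ret;

            mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
            if (!mxs_dma)
                    return -ENOMEM;

            ret = of_property_read_u32(np, "dma-channels",
                                       &mxs_dma->nr_channels);
            if (ret)
                    return ret;

            mxs_dma->base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(mxs_dma->base))
                    return PTR_ERR(mxs_dma->base);

            mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(mxs_dma->clk))
                    return PTR_ERR(mxs_dma->clk);

            ret = mxs_dma_init(mxs_dma);
            if (ret)
                    return ret;

            /* segment size is capped by the CCW transfer-count field */
            dma_set_max_seg_size(&pdev->dev, MAX_XFER_BYTES);

            ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
            if (ret)
                    return ret;

            return of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
    }
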