Lines Matching refs:fsl_edma

35 struct fsl_edma_engine *fsl_edma = dev_id;
37 struct edma_regs *regs = &fsl_edma->regs;
39 intr = edma_readl(fsl_edma, regs->intl);
43 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
45 edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
46 fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
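
Read together, the fragments at lines 35-46 outline the transmit interrupt handler: latch the interrupt status register, then walk every channel, clear each pending bit and hand the channel to the completion handler. A hedged reconstruction of that handler, with the IRQ_NONE early exit and the per-bit test filled in by assumption:

static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	struct edma_regs *regs = &fsl_edma->regs;
	unsigned int intr, ch;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;	/* nothing pending for this controller */

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & BIT(ch)) {
			/* acknowledge the channel interrupt, then complete its descriptor */
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
			fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
}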
70 struct fsl_edma_engine *fsl_edma = dev_id;
72 struct edma_regs *regs = &fsl_edma->regs;
74 err = edma_readl(fsl_edma, regs->errl);
78 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
80 fsl_edma_disable_request(&fsl_edma->chans[ch]);
81 edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
82 fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
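
The error handler at lines 70-82 mirrors the same scan, only against the error status register, with one extra step per faulting channel: the hardware request is disabled before the error flag is cleared, so a misconfigured channel cannot immediately retrigger. A sketch of the loop, with the per-bit test again assumed:

	err = edma_readl(fsl_edma, regs->errl);
	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);		/* stop further hardware requests */
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);	/* clear the per-channel error flag */
			fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
		}
	}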
99 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
102 u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
103 unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;
108 guard(mutex)(&fsl_edma->fsl_edma_mutex);
110 list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
138 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
147 b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
149 mutex_lock(&fsl_edma->fsl_edma_mutex);
150 list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
157 i = fsl_chan - fsl_edma->chans;
173 mutex_unlock(&fsl_edma->fsl_edma_mutex);
180 mutex_unlock(&fsl_edma->fsl_edma_mutex);
184 mutex_unlock(&fsl_edma->fsl_edma_mutex);
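
Lines 99-110 and 138-184 are the two of_dma translate callbacks: the first serves controllers routed through an external DMAMUX (chans_per_mux channels per mux), the second serves controllers with a per-channel mux (FSL_EDMA_DRV_HAS_CHMUX); both walk dma_dev.channels under fsl_edma_mutex to find a free channel for the device-tree specifier. On the client side this lookup is reached through the standard dmaengine request API; a minimal usage sketch, where the channel name "rx" and the dmas binding are made-up examples:

	/* client driver: dmas = <&edma0 ...>; dma-names = "rx"; in its DT node */
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER if the eDMA is not up yet */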
189 fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
193 edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
195 fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
196 if (fsl_edma->txirq < 0)
197 return fsl_edma->txirq;
199 fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
200 if (fsl_edma->errirq < 0)
201 return fsl_edma->errirq;
203 if (fsl_edma->txirq == fsl_edma->errirq) {
204 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
205 fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
211 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
212 fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
218 ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
219 fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
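
fsl_edma_irq_init (lines 189-219) clears pending interrupt flags by writing ~0 to regs.intl, looks up the "edma-tx" and "edma-err" interrupts by name, and then requests either one combined handler or two dedicated ones depending on whether both names resolve to the same line. The combined fsl_edma_irq_handler is not among the matches above; presumably it just chains the two dedicated handlers, along these lines:

static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	/* try completion first; fall through to the error path otherwise */
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}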
229 static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
234 for (i = 0; i < fsl_edma->n_chans; i++) {
236 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
238 if (fsl_edma->chan_masked & BIT(i))
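
fsl_edma3_irq_init (lines 229-238) is the eDMA v3 variant: there are no shared tx/err lines, so every channel not excluded by chan_masked gets its own interrupt. A hedged sketch of the loop body; the txirq field, the fsl_edma3_tx_handler name and the IRQF_SHARED flag are assumptions:

	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		if (fsl_edma->chan_masked & BIT(i))
			continue;			/* channel not usable on this SoC */

		fsl_chan->txirq = platform_get_irq(pdev, i);	/* one interrupt per channel */
		if (fsl_chan->txirq < 0)
			return fsl_chan->txirq;

		ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
				       fsl_edma3_tx_handler, IRQF_SHARED,
				       fsl_chan->chan_name, fsl_chan);
		if (ret)
			return ret;
	}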
260 struct fsl_edma_engine *fsl_edma)
265 edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
288 0, "eDMA2-ERR", fsl_edma);
292 fsl_edma->chans[i].chan_name,
293 fsl_edma);
302 struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
304 if (fsl_edma->txirq == fsl_edma->errirq) {
305 devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
307 devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
308 devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
312 static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
317 clk_disable_unprepare(fsl_edma->muxclk[i]);
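
fsl_disable_clocks (lines 312-317) unwinds however many DMAMUX clocks were successfully enabled, which is why the probe error path at line 508 passes the loop index rather than the full mux count:

static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);
}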
400 static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
410 for (i = 0; i < fsl_edma->n_chans; i++) {
411 if (fsl_edma->chan_masked & BIT(i))
414 fsl_chan = &fsl_edma->chans[i];
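
fsl_edma3_attach_pd (lines 400-414) hooks every unmasked channel up to its own power domain before the channels are initialised. A minimal sketch of the per-channel step, assuming the generic PM-domain helpers are used in the usual way (the exact device-link flags are an assumption):

		/* one power domain per channel, indexed like the channel */
		pd_chan = dev_pm_domain_attach_by_id(&pdev->dev, i);
		if (IS_ERR_OR_NULL(pd_chan))
			return -EINVAL;

		link = device_link_add(&pdev->dev, pd_chan,
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!link)
			return -EINVAL;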
443 struct fsl_edma_engine *fsl_edma;
463 fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
465 if (!fsl_edma)
468 fsl_edma->drvdata = drvdata;
469 fsl_edma->n_chans = chans;
470 mutex_init(&fsl_edma->fsl_edma_mutex);
472 fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
473 if (IS_ERR(fsl_edma->membase))
474 return PTR_ERR(fsl_edma->membase);
477 fsl_edma_setup_regs(fsl_edma);
478 regs = &fsl_edma->regs;
482 fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
483 if (IS_ERR(fsl_edma->dmaclk)) {
485 return PTR_ERR(fsl_edma->dmaclk);
492 fsl_edma->chan_masked = chan_mask[1];
493 fsl_edma->chan_masked <<= 32;
494 fsl_edma->chan_masked |= chan_mask[0];
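
Lines 492-494 assemble a 64-bit channel mask from two 32-bit device-tree cells, so channels reserved for other bus masters can be skipped later with chan_masked & BIT(i). A minimal sketch, assuming the values come from a two-cell "dma-channel-mask" property:

	u32 chan_mask[2] = {0, 0};

	of_property_read_u32_array(np, "dma-channel-mask", chan_mask, 2);	/* property name assumed */

	fsl_edma->chan_masked = chan_mask[1];	/* channels 32-63 */
	fsl_edma->chan_masked <<= 32;
	fsl_edma->chan_masked |= chan_mask[0];	/* channels 0-31 */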
497 for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
504 fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
506 if (IS_ERR(fsl_edma->muxbase[i])) {
508 fsl_disable_clocks(fsl_edma, i);
509 return PTR_ERR(fsl_edma->muxbase[i]);
513 fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
514 if (IS_ERR(fsl_edma->muxclk[i])) {
517 return PTR_ERR(fsl_edma->muxclk[i]);
521 fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
524 ret = fsl_edma3_attach_pd(pdev, fsl_edma);
532 INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
533 for (i = 0; i < fsl_edma->n_chans; i++) {
534 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
537 if (fsl_edma->chan_masked & BIT(i))
543 fsl_chan->edma = fsl_edma;
552 fsl_chan->tcd = fsl_edma->membase
554 fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
565 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
573 ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
577 dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
578 dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
579 dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
580 dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);
582 fsl_edma->dma_dev.dev = &pdev->dev;
583 fsl_edma->dma_dev.device_alloc_chan_resources
585 fsl_edma->dma_dev.device_free_chan_resources
587 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
588 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
589 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
590 fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
591 fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
592 fsl_edma->dma_dev.device_pause = fsl_edma_pause;
593 fsl_edma->dma_dev.device_resume = fsl_edma_resume;
594 fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
595 fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
596 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
598 fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
599 fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
602 fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
603 fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
606 fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
608 fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);
610 fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
615 dma_set_max_seg_size(fsl_edma->dma_dev.dev,
618 fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
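
Lines 577-618 fill in the struct dma_device capability mask, callback table, bus widths, directions and residue granularity that the dmaengine core exposes to clients once the device is registered. A hedged client-side sketch showing how those callbacks are reached through the generic API (chan, buf_dma, len and fifo_phys are placeholders):

	struct dma_slave_config cfg = {
		.dst_addr = fifo_phys,				/* peripheral FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,	/* must be within FSL_EDMA_BUSWIDTHS */
		.dst_maxburst = 4,
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);		/* ends up in fsl_edma_slave_config */
	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);			/* ends up in fsl_edma_issue_pending */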
620 platform_set_drvdata(pdev, fsl_edma);
622 ret = dma_async_device_register(&fsl_edma->dma_dev);
631 fsl_edma);
635 dma_async_device_unregister(&fsl_edma->dma_dev);
641 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
649 struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
651 fsl_edma_irq_exit(pdev, fsl_edma);
652 fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
654 dma_async_device_unregister(&fsl_edma->dma_dev);
655 fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
660 struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
665 for (i = 0; i < fsl_edma->n_chans; i++) {
666 fsl_chan = &fsl_edma->chans[i];
667 if (fsl_edma->chan_masked & BIT(i))
686 struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
688 struct edma_regs *regs = &fsl_edma->regs;
691 for (i = 0; i < fsl_edma->n_chans; i++) {
692 fsl_chan = &fsl_edma->chans[i];
693 if (fsl_edma->chan_masked & BIT(i))
701 if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
702 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
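
The suspend and resume callbacks (lines 660-702) walk the same unmasked-channel set; on resume, each channel's request routing is restored and, on controllers without split registers, the round-robin arbitration bits are written back to the control register. A heavily hedged sketch of the resume loop (the srcid field name and the fsl_edma_chan_mux re-programming step are assumptions):

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		if (fsl_edma->chan_masked & BIT(i))
			continue;
		/* restore the request-source routing lost across suspend */
		fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true);
	}

	if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);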