Lines matching refs: od — all references to the driver-private struct owl_dma *od pointer. From the identifiers in the fragments (struct owl_dma, OWL_DMA_IRQ_PD0, S700_DMA, the owl_dma_* callbacks) this is evidently the Actions Semi Owl DMA controller driver, drivers/dma/owl-dma.c in the Linux kernel; the leading number on each entry is the line in that source file.

266 static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
270 regval = readl(od->base + reg);
277 writel(regval, od->base + reg);
280 static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
282 writel(data, od->base + reg);
285 static u32 dma_readl(struct owl_dma *od, u32 reg)
287 return readl(od->base + reg);
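
These three accessors wrap all MMIO traffic to the controller's register block at od->base: dma_writel()/dma_readl() are thin writel()/readl() wrappers (lines 282 and 287 show this directly), and dma_update() is a read-modify-write helper. A minimal sketch of the latter, reconstructed from the fragments at lines 266-277; the set/clear meaning of state is an assumption suggested by the bool parameter:

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
        u32 regval;

        regval = readl(od->base + reg);

        if (state)
                regval |= val;          /* assumed: true sets the bits in val */
        else
                regval &= ~val;         /* assumed: false clears them */

        writel(regval, od->base + reg);
}
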
342 static void owl_dma_free_lli(struct owl_dma *od,
346 dma_pool_free(od->lli_pool, lli, lli->phys);
349 static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
354 lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
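
owl_dma_alloc_lli() draws hardware descriptors (linked-list items) from the dma_pool created at line 1198; dma_pool_alloc() returns both a CPU pointer and a DMA address, and line 346 shows that address kept in lli->phys so owl_dma_free_lli() can hand the entry back. A plausible sketch, assuming a list-linkage field named node of the kind the paired free at line 684 implies:

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
        struct owl_dma_lli *lli;
        dma_addr_t phys;

        lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
        if (!lli)
                return NULL;

        memset(lli, 0, sizeof(*lli));
        INIT_LIST_HEAD(&lli->node);     /* assumed: list linkage field */
        lli->phys = phys;               /* remembered for dma_pool_free() */

        return lli;
}
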
388 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
445 if (od->devid == S700_DMA) {
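
Lines 388 and 445 sit in the descriptor-setup path, where the driver branches on the SoC variant recorded from of_device_get_match_data() at line 1120. The sketch below only illustrates the dispatch pattern; what the S700 branch actually programs is not recoverable from these fragments, so its body is left as a placeholder:

/* devid is set at probe: od->devid = (uintptr_t)of_device_get_match_data(dev); */
if (od->devid == S700_DMA) {
        /* S700-specific descriptor encoding goes here (not visible in
         * the matched lines). */
}
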
468 static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
475 for (i = 0; i < od->nr_pchans; i++) {
476 pchan = &od->pchans[i];
478 spin_lock_irqsave(&od->lock, flags);
481 spin_unlock_irqrestore(&od->lock, flags);
485 spin_unlock_irqrestore(&od->lock, flags);
491 static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
495 val = dma_readl(od, OWL_DMA_IDLE_STAT);
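
Physical channels are claimed by scanning od->pchans under od->lock, and owl_dma_pchan_busy() consults the hardware idle-status register. A sketch consistent with the lock/unlock pattern at lines 478-485; the vchan ownership field and the idle-bit polarity are assumptions:

static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
                                               struct owl_dma_vchan *vchan)
{
        struct owl_dma_pchan *pchan;
        unsigned long flags;
        int i;

        for (i = 0; i < od->nr_pchans; i++) {
                pchan = &od->pchans[i];

                spin_lock_irqsave(&od->lock, flags);
                if (!pchan->vchan) {            /* assumed: NULL means free */
                        pchan->vchan = vchan;   /* claim it under the lock */
                        spin_unlock_irqrestore(&od->lock, flags);
                        return pchan;
                }
                spin_unlock_irqrestore(&od->lock, flags);
        }

        return NULL;    /* every physical channel is in use */
}

static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
        u32 val = dma_readl(od, OWL_DMA_IDLE_STAT);

        return !(val & (1 << pchan->id));       /* assumed: bit set == idle */
}
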
500 static void owl_dma_terminate_pchan(struct owl_dma *od,
509 spin_lock_irqsave(&od->lock, flags);
510 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);
512 irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
514 dev_warn(od->dma.dev,
517 dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
522 spin_unlock_irqrestore(&od->lock, flags);
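
Terminating a physical channel masks its interrupt in OWL_DMA_IRQ_EN0, then checks OWL_DMA_IRQ_PD0 for a pending bit that raced with the shutdown and acks it, all under od->lock. A sketch following lines 509-522; the warning text and the ownership-release step are assumptions:

static void owl_dma_terminate_pchan(struct owl_dma *od,
                                    struct owl_dma_pchan *pchan)
{
        unsigned long flags;
        u32 irq_pd;

        spin_lock_irqsave(&od->lock, flags);

        /* Mask this channel's interrupt. */
        dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

        /* Ack an interrupt that fired before we masked it. */
        irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
        if (irq_pd & (1 << pchan->id)) {
                dev_warn(od->dma.dev,
                         "pchan %d still has a pending irq\n", pchan->id);
                dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
        }

        pchan->vchan = NULL;    /* assumed: release ownership */

        spin_unlock_irqrestore(&od->lock, flags);
}
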
537 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
550 while (owl_dma_pchan_busy(od, pchan))
570 spin_lock_irqsave(&od->lock, flags);
572 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);
574 spin_unlock_irqrestore(&od->lock, flags);
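
Lines 537-574 belong to the transfer start path: the driver spins until the engine reports idle, programs the channel (not visible in these fragments), and unmasks the channel interrupt under od->lock. A minimal sketch of that ordering:

/* Wait for the engine to drain before reprogramming the channel. */
while (owl_dma_pchan_busy(od, pchan))
        cpu_relax();

/* ... load the first LLI into the channel registers (not shown) ... */

spin_lock_irqsave(&od->lock, flags);
dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);        /* unmask */
spin_unlock_irqrestore(&od->lock, flags);

/* ... set the channel start bit (not in the matched lines) ... */
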
584 static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
587 owl_dma_terminate_pchan(od, vchan->pchan);
594 struct owl_dma *od = dev_id;
601 spin_lock(&od->lock);
603 pending = dma_readl(od, OWL_DMA_IRQ_PD0);
606 for_each_set_bit(i, &pending, od->nr_pchans) {
607 pchan = &od->pchans[i];
612 dma_writel(od, OWL_DMA_IRQ_PD0, pending);
615 for (i = 0; i < od->nr_pchans; i++) {
616 pchan = &od->pchans[i];
621 dma_readl(od, OWL_DMA_IRQ_PD0);
623 global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);
626 dev_dbg(od->dma.dev,
638 spin_unlock(&od->lock);
640 for_each_set_bit(i, &pending, od->nr_pchans) {
643 pchan = &od->pchans[i];
647 dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
667 owl_dma_phy_free(od, vchan);
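
The interrupt handler (lines 594-667) reads the global pending mask once under od->lock, acks it, and completes descriptors outside that lock. A simplified sketch; the full handler (lines 615-638) additionally cross-checks each channel's own status against a re-read of OWL_DMA_IRQ_PD0 to catch interrupts that landed between the read and the ack, and frees the pchan (line 667) when a channel has no further work:

static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
        struct owl_dma *od = dev_id;
        struct owl_dma_pchan *pchan;
        unsigned long pending;
        int i;

        spin_lock(&od->lock);

        pending = dma_readl(od, OWL_DMA_IRQ_PD0);

        /* Ack each signalled channel at the per-channel level first
         * (per-channel register helpers are assumed, not in the fragments). */
        for_each_set_bit(i, &pending, od->nr_pchans) {
                pchan = &od->pchans[i];
                /* ... clear this pchan's own status register ... */
        }

        /* Then clear the global pending bits we observed. */
        dma_writel(od, OWL_DMA_IRQ_PD0, pending);

        spin_unlock(&od->lock);

        /* Complete descriptors outside od->lock, per signalled channel. */
        for_each_set_bit(i, &pending, od->nr_pchans) {
                pchan = &od->pchans[i];
                if (!pchan->vchan) {
                        dev_warn(od->dma.dev,
                                 "no vchan attached on pchan %d\n", i);
                        continue;
                }
                /* ... vchan_cookie_complete() on the finished txd ... */
        }

        return IRQ_HANDLED;
}
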
676 static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
684 owl_dma_free_lli(od, lli);
691 struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
694 owl_dma_free_txd(od, txd);
699 struct owl_dma *od = to_owl_dma(chan->device);
707 owl_dma_phy_free(od, vchan);
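
Descriptor teardown walks the txd's LLI list and returns each entry to the pool; a list_for_each_entry_safe-style iteration is implied by the per-LLI free at line 684, and owl_dma_free_chan_resources() (line 699) additionally drops any held physical channel via owl_dma_phy_free(). A sketch, with the lli_list and node field names assumed:

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
        struct owl_dma_lli *lli, *tmp;

        if (unlikely(!txd))
                return;

        list_for_each_entry_safe(lli, tmp, &txd->lli_list, node)
                owl_dma_free_lli(od, lli);      /* back to od->lli_pool */

        kfree(txd);
}
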
840 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
843 pchan = owl_dma_get_pchan(od, vchan);
847 dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);
871 struct owl_dma *od = to_owl_dma(chan->device);
889 lli = owl_dma_alloc_lli(od);
911 owl_dma_free_txd(od, txd);
922 struct owl_dma *od = to_owl_dma(chan->device);
943 dev_err(od->dma.dev,
948 lli = owl_dma_alloc_lli(od);
975 owl_dma_free_txd(od, txd);
987 struct owl_dma *od = to_owl_dma(chan->device);
1004 lli = owl_dma_alloc_lli(od);
1037 owl_dma_free_txd(od, txd);
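
The three prep callbacks (memcpy at line 871, slave_sg at 922, cyclic at 987) share one skeleton: allocate a txd, build an LLI per segment with owl_dma_alloc_lli(), and unwind through owl_dma_free_txd() on any allocation failure (lines 911, 975, 1037). A condensed sketch of that skeleton in its slave_sg form; the to_owl_vchan() helper, the embedded virt_dma_desc member vd, and the chaining step are assumptions based on the virt-dma conventions the driver follows:

static struct dma_async_tx_descriptor *
owl_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                      unsigned int sg_len, enum dma_transfer_direction dir,
                      unsigned long flags, void *context)
{
        struct owl_dma *od = to_owl_dma(chan->device);
        struct owl_dma_vchan *vchan = to_owl_vchan(chan);  /* assumed helper */
        struct owl_dma_txd *txd;
        struct owl_dma_lli *lli, *prev = NULL;
        struct scatterlist *sg;
        int i;

        txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
        if (!txd)
                return NULL;

        INIT_LIST_HEAD(&txd->lli_list);         /* assumed, see free_txd */

        for_each_sg(sgl, sg, sg_len, i) {
                lli = owl_dma_alloc_lli(od);
                if (!lli) {
                        dev_warn(od->dma.dev, "failed to allocate lli\n");
                        goto err_txd_free;
                }
                /* ... fill lli from sg and chain it after prev ... */
                prev = lli;
        }

        return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
        owl_dma_free_txd(od, txd);
        return NULL;
}
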
1050 static inline void owl_dma_free(struct owl_dma *od)
1056 next, &od->dma.channels, vc.chan.device_node) {
1065 struct owl_dma *od = ofdma->of_dma_data;
1070 if (drq > od->nr_vchans)
1073 chan = dma_get_any_slave_channel(&od->dma);
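
The OF translation callback validates the request line against nr_vchans and then grabs any free virtual channel from the dmaengine device. A sketch built around lines 1065-1073; the drq field that records the request line is an assumption:

static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
                                         struct of_dma *ofdma)
{
        struct owl_dma *od = ofdma->of_dma_data;
        struct owl_dma_vchan *vchan;
        struct dma_chan *chan;
        u8 drq = dma_spec->args[0];

        if (drq > od->nr_vchans)
                return NULL;

        chan = dma_get_any_slave_channel(&od->dma);
        if (!chan)
                return NULL;

        vchan = to_owl_vchan(chan);     /* assumed container_of helper */
        vchan->drq = drq;               /* assumed: stores the request line */

        return chan;
}
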
1094 struct owl_dma *od;
1097 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1098 if (!od)
1101 od->base = devm_platform_ioremap_resource(pdev, 0);
1102 if (IS_ERR(od->base))
1103 return PTR_ERR(od->base);
1120 od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
1122 od->nr_pchans = nr_channels;
1123 od->nr_vchans = nr_requests;
1127 platform_set_drvdata(pdev, od);
1128 spin_lock_init(&od->lock);
1130 dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
1131 dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
1132 dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
1134 od->dma.dev = &pdev->dev;
1135 od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
1136 od->dma.device_tx_status = owl_dma_tx_status;
1137 od->dma.device_issue_pending = owl_dma_issue_pending;
1138 od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
1139 od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
1140 od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
1141 od->dma.device_config = owl_dma_config;
1142 od->dma.device_pause = owl_dma_pause;
1143 od->dma.device_resume = owl_dma_resume;
1144 od->dma.device_terminate_all = owl_dma_terminate_all;
1145 od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1146 od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1147 od->dma.directions = BIT(DMA_MEM_TO_MEM);
1148 od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1150 INIT_LIST_HEAD(&od->dma.channels);
1152 od->clk = devm_clk_get(&pdev->dev, NULL);
1153 if (IS_ERR(od->clk)) {
1155 return PTR_ERR(od->clk);
1163 od->irq = platform_get_irq(pdev, 0);
1164 ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
1165 dev_name(&pdev->dev), od);
1172 od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
1174 if (!od->pchans)
1177 for (i = 0; i < od->nr_pchans; i++) {
1178 struct owl_dma_pchan *pchan = &od->pchans[i];
1181 pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
1185 od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
1187 if (!od->vchans)
1190 for (i = 0; i < od->nr_vchans; i++) {
1191 struct owl_dma_vchan *vchan = &od->vchans[i];
1194 vchan_init(&vchan->vc, &od->dma);
1198 od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
1202 if (!od->lli_pool) {
1207 clk_prepare_enable(od->clk);
1209 ret = dma_async_device_register(&od->dma);
1217 owl_dma_of_xlate, od);
1226 dma_async_device_unregister(&od->dma);
1228 clk_disable_unprepare(od->clk);
1229 dma_pool_destroy(od->lli_pool);
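
Lines 1094-1229 trace the probe order: map the registers, read the match data and channel counts, initialise the lock and dmaengine capabilities, request the IRQ, allocate the pchan/vchan arrays, create the LLI pool, then enable the clock, register the dmaengine device, and finally register the OF translator. A sketch of the probe tail and its reverse-order unwind (the label names are assumptions):

clk_prepare_enable(od->clk);

ret = dma_async_device_register(&od->dma);
if (ret)
        goto err_pool_free;

/* Device tree consumers reach the controller through owl_dma_of_xlate(). */
ret = of_dma_controller_register(pdev->dev.of_node,
                                 owl_dma_of_xlate, od);
if (ret)
        goto err_unregister;

return 0;

err_unregister:
        dma_async_device_unregister(&od->dma);
err_pool_free:
        clk_disable_unprepare(od->clk);
        dma_pool_destroy(od->lli_pool);
        return ret;
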
1236 struct owl_dma *od = platform_get_drvdata(pdev);
1239 dma_async_device_unregister(&od->dma);
1242 dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);
1245 devm_free_irq(od->dma.dev, od->irq, od);
1247 owl_dma_free(od);
1249 clk_disable_unprepare(od->clk);
1250 dma_pool_destroy(od->lli_pool);
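
Remove quiesces the hardware before tearing down software state: unregister the dmaengine device, mask every channel interrupt in OWL_DMA_IRQ_EN0, free the IRQ so the handler can no longer race with the teardown, release the channels, then drop the clock and the descriptor pool. A sketch assembled from lines 1236-1250; the of_dma_controller_free() call is an assumption, pairing the registration at line 1217:

static int owl_dma_remove(struct platform_device *pdev)
{
        struct owl_dma *od = platform_get_drvdata(pdev);

        of_dma_controller_free(pdev->dev.of_node);      /* assumed pairing */
        dma_async_device_unregister(&od->dma);

        /* Mask all channel interrupts at the controller. */
        dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

        /* Free the IRQ now so the handler cannot run during teardown. */
        devm_free_irq(od->dma.dev, od->irq, od);

        owl_dma_free(od);

        clk_disable_unprepare(od->clk);
        dma_pool_destroy(od->lli_pool);

        return 0;
}
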