Lines matching defs: dc

Every line in the TXx9 SoC DMA controller driver (drivers/dma/txx9dmac.c) that mentions the channel pointer dc, a struct txx9dmac_chan *. Each entry below is prefixed with its line number in that source file; lines that do not mention dc are omitted, so function bodies appear only partially.

24 static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
26 return dc->ch_regs;
30 const struct txx9dmac_chan *dc)
32 return dc->ch_regs;
35 #define channel64_readq(dc, name) \
36 __raw_readq(&(__dma_regs(dc)->name))
37 #define channel64_writeq(dc, name, val) \
38 __raw_writeq((val), &(__dma_regs(dc)->name))
39 #define channel64_readl(dc, name) \
40 __raw_readl(&(__dma_regs(dc)->name))
41 #define channel64_writel(dc, name, val) \
42 __raw_writel((val), &(__dma_regs(dc)->name))
44 #define channel32_readl(dc, name) \
45 __raw_readl(&(__dma_regs32(dc)->name))
46 #define channel32_writel(dc, name, val) \
47 __raw_writel((val), &(__dma_regs32(dc)->name))
49 #define channel_readq(dc, name) channel64_readq(dc, name)
50 #define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
51 #define channel_readl(dc, name) \
52 (is_dmac64(dc) ? \
53 channel64_readl(dc, name) : channel32_readl(dc, name))
54 #define channel_writel(dc, name, val) \
55 (is_dmac64(dc) ? \
56 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
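
The accessor macros above (source lines 35-56) resolve every channel register access against one of two register layouts: __dma_regs(dc) for the 64-bit controller and __dma_regs32(dc) for the 32-bit one, chosen at run time by is_dmac64(dc). Below is a minimal, self-contained user-space sketch of the same dispatch pattern; the cregs64/cregs32 layouts, the is64 flag and the plain loads and stores are stand-ins of my own for the driver's __iomem register blocks and __raw_readl()/__raw_writel(), not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* stand-in register layouts; only a few fields, for illustration */
struct cregs64 { uint64_t CHAR; uint32_t CSR; uint32_t CCR; };
struct cregs32 { uint32_t CHAR; uint32_t CSR; uint32_t CCR; };

struct chan {
        int is64;               /* stand-in for is_dmac64(dc) */
        void *ch_regs;          /* points at one of the layouts above */
};

#define __dma_regs(dc)   ((struct cregs64 *)(dc)->ch_regs)
#define __dma_regs32(dc) ((struct cregs32 *)(dc)->ch_regs)

/* plain loads/stores instead of __raw_readl()/__raw_writel() on __iomem */
#define channel64_readl(dc, name)        (__dma_regs(dc)->name)
#define channel64_writel(dc, name, val)  (__dma_regs(dc)->name = (val))
#define channel32_readl(dc, name)        (__dma_regs32(dc)->name)
#define channel32_writel(dc, name, val)  (__dma_regs32(dc)->name = (val))

/* one front end, two register layouts, picked per channel at run time */
#define channel_readl(dc, name) \
        ((dc)->is64 ? channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
        ((dc)->is64 ? channel64_writel(dc, name, val) \
                    : channel32_writel(dc, name, val))

int main(void)
{
        struct cregs64 r64 = { 0 };
        struct cregs32 r32 = { 0 };
        struct chan a = { .is64 = 1, .ch_regs = &r64 };
        struct chan b = { .is64 = 0, .ch_regs = &r32 };

        channel_writel(&a, CCR, 0x100);
        channel_writel(&b, CCR, 0x200);
        printf("a.CCR=%#x b.CCR=%#x\n",
               (unsigned)channel_readl(&a, CCR),
               (unsigned)channel_readl(&b, CCR));
        return 0;
}

The driver additionally keeps channel_readq/channel_writeq as 64-bit-only aliases (source lines 49-50), since only the 64-bit controller has 64-bit registers.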
58 static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
60 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
61 return channel64_readq(dc, CHAR);
63 return channel64_readl(dc, CHAR);
66 static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
68 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
69 channel64_writeq(dc, CHAR, val);
71 channel64_writel(dc, CHAR, val);
74 static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
77 channel64_writel(dc, CHAR, 0);
78 channel64_writel(dc, __pad_CHAR, 0);
80 channel64_writeq(dc, CHAR, 0);
84 static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
86 if (is_dmac64(dc))
87 return channel64_read_CHAR(dc);
89 return channel32_readl(dc, CHAR);
92 static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
94 if (is_dmac64(dc))
95 channel64_write_CHAR(dc, val);
97 channel32_writel(dc, CHAR, val);
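
CHAR is the chain address register: it holds the DMA address of the next hardware descriptor. On the 64-bit controller CHAR may be declared either as one 64-bit field or as a 32-bit field plus explicit padding (__pad_CHAR), so the helpers at source lines 58-97 pick the access width with a compile-time sizeof() test, channel64_clear_CHAR() also clears the padding word in the narrow layout, and channel_read_CHAR()/channel_write_CHAR() fall back to the plain 32-bit accessors on the 32-bit controller. The sketch below is reconstructed from those matched lines only; the else branches and line breaks are filled in by me, and it is not compilable on its own since it leans on the driver's own types and macros.

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
        /* sizeof() is resolved at compile time: 64-bit or 32-bit access */
        if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
                return channel64_readq(dc, CHAR);
        return channel64_readl(dc, CHAR);
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
        if (is_dmac64(dc))
                return channel64_read_CHAR(dc);
        return channel32_readl(dc, CHAR);       /* 32-bit controller */
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
        if (is_dmac64(dc))
                channel64_write_CHAR(dc, val);
        else
                channel32_writel(dc, CHAR, val);
}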
144 static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
147 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
150 static void desc_write_CHAR(const struct txx9dmac_chan *dc,
153 if (is_dmac64(dc))
163 static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
165 return list_entry(dc->active_list.next,
169 static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
171 return list_entry(dc->active_list.prev,
175 static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
177 return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
189 static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
192 struct txx9dmac_dev *ddev = dc->ddev;
199 dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
203 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
208 static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
214 spin_lock_bh(&dc->lock);
215 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
221 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
224 spin_unlock_bh(&dc->lock);
226 dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
229 ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
231 spin_lock_bh(&dc->lock);
232 dc->descs_allocated++;
233 spin_unlock_bh(&dc->lock);
235 dev_err(chan2dev(&dc->chan),
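
txx9dmac_desc_get() (source lines 208-238) takes the first acknowledged descriptor off dc->free_list under spin_lock_bh(&dc->lock), skipping descriptors the client has not ACKed yet, and falls back to txx9dmac_desc_alloc(dc, GFP_ATOMIC), bumping dc->descs_allocated on success. Below is a self-contained user-space sketch of that get-from-freelist-or-allocate pattern; the singly linked freelist, the acked flag and calloc() are stand-ins of my own for the driver's list_head, async_tx_test_ack() and DMA-mapped descriptor allocation, and the locking is left as a comment.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
        struct desc *next;      /* stand-in for the free_list linkage */
        bool acked;             /* stand-in for async_tx_test_ack(&desc->txd) */
};

struct chan {
        struct desc *free_list;
        unsigned int descs_allocated;
};

/* Take the first ACKed descriptor off the freelist, or allocate a new one.
 * In the driver this runs under spin_lock_bh(&dc->lock) and the fallback
 * allocation uses GFP_ATOMIC because it can be called from atomic context. */
static struct desc *desc_get(struct chan *dc)
{
        struct desc **pp;
        struct desc *d;

        for (pp = &dc->free_list; (d = *pp) != NULL; pp = &d->next) {
                if (d->acked) {
                        *pp = d->next;          /* unlink from the freelist */
                        d->next = NULL;
                        return d;
                }
                /* not ACKed yet: leave it in place and keep scanning */
        }

        d = calloc(1, sizeof(*d));              /* fallback allocation */
        if (d)
                dc->descs_allocated++;
        return d;
}

int main(void)
{
        struct chan dc = { 0 };
        struct desc *d = desc_get(&dc);

        printf("allocated=%u desc=%p\n", dc.descs_allocated, (void *)d);
        free(d);
        return 0;
}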
241 static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
244 struct txx9dmac_dev *ddev = dc->ddev;
248 dma_sync_single_for_cpu(chan2parent(&dc->chan),
251 dma_sync_single_for_cpu(chan2parent(&dc->chan),
260 static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
266 txx9dmac_sync_desc_for_cpu(dc, desc);
268 spin_lock_bh(&dc->lock);
270 dev_vdbg(chan2dev(&dc->chan),
273 list_splice_init(&desc->tx_list, &dc->free_list);
274 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
276 list_add(&desc->desc_node, &dc->free_list);
277 spin_unlock_bh(&dc->lock);
283 static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
285 if (is_dmac64(dc))
286 dev_err(chan2dev(&dc->chan),
289 (u64)channel64_read_CHAR(dc),
290 channel64_readq(dc, SAR),
291 channel64_readq(dc, DAR),
292 channel64_readl(dc, CNTR),
293 channel64_readl(dc, SAIR),
294 channel64_readl(dc, DAIR),
295 channel64_readl(dc, CCR),
296 channel64_readl(dc, CSR));
298 dev_err(chan2dev(&dc->chan),
301 channel32_readl(dc, CHAR),
302 channel32_readl(dc, SAR),
303 channel32_readl(dc, DAR),
304 channel32_readl(dc, CNTR),
305 channel32_readl(dc, SAIR),
306 channel32_readl(dc, DAIR),
307 channel32_readl(dc, CCR),
308 channel32_readl(dc, CSR));
311 static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
313 channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
314 if (is_dmac64(dc)) {
315 channel64_clear_CHAR(dc);
316 channel_writeq(dc, SAR, 0);
317 channel_writeq(dc, DAR, 0);
319 channel_writel(dc, CHAR, 0);
320 channel_writel(dc, SAR, 0);
321 channel_writel(dc, DAR, 0);
323 channel_writel(dc, CNTR, 0);
324 channel_writel(dc, SAIR, 0);
325 channel_writel(dc, DAIR, 0);
326 channel_writel(dc, CCR, 0);
329 /* Called with dc->lock held and bh disabled */
330 static void txx9dmac_dostart(struct txx9dmac_chan *dc,
333 struct txx9dmac_slave *ds = dc->chan.private;
336 dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
339 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
340 dev_err(chan2dev(&dc->chan),
342 txx9dmac_dump_regs(dc);
347 if (is_dmac64(dc)) {
348 channel64_writel(dc, CNTR, 0);
349 channel64_writel(dc, CSR, 0xffffffff);
362 channel64_writel(dc, SAIR, sai);
363 channel64_writel(dc, DAIR, dai);
365 channel64_writel(dc, CCR, dc->ccr);
367 channel64_write_CHAR(dc, first->txd.phys);
369 channel32_writel(dc, CNTR, 0);
370 channel32_writel(dc, CSR, 0xffffffff);
383 channel32_writel(dc, SAIR, sai);
384 channel32_writel(dc, DAIR, dai);
386 channel32_writel(dc, CCR, dc->ccr);
388 channel32_writel(dc, CHAR, first->txd.phys);
390 channel32_writel(dc, CHAR, first->txd.phys);
391 channel32_writel(dc, CCR, dc->ccr);
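
txx9dmac_dostart() (source lines 330-391) refuses to start while CSR still reports an active transfer (TXX9_DMA_CSR_XFACT), then clears CNTR, acknowledges all CSR status bits, programs the address-increment registers and CCR, and finally points CHAR at the first descriptor's DMA address, which starts the chain. The condensed sketch below shows the 64-bit path only and is reconstructed from the matched lines: the sai/dai values and the error message are placeholders of mine (in the driver they come from the slave configuration in dc->chan.private), and on the 32-bit path the relative order of the CCR and CHAR writes depends on whether simple-chain (SMPCHN) mode is compiled in (source lines 386-391).

/* Called with dc->lock held and bh disabled (comment at source line 329). */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
                             struct txx9dmac_desc *first)
{
        u32 sai = 8, dai = 8;   /* placeholder increments; see lead-in */

        /* never restart a channel that still reports an active transfer */
        if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
                dev_err(chan2dev(&dc->chan), "channel not idle\n");
                txx9dmac_dump_regs(dc);
                return;
        }

        channel64_writel(dc, CNTR, 0);          /* counts live in descriptors */
        channel64_writel(dc, CSR, 0xffffffff);  /* ack stale status bits */
        channel64_writel(dc, SAIR, sai);
        channel64_writel(dc, DAIR, dai);
        channel64_writel(dc, CCR, dc->ccr);
        /* pointing CHAR at the first descriptor starts chain loading */
        channel64_write_CHAR(dc, first->txd.phys);
}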
399 txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
405 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
411 txx9dmac_sync_desc_for_cpu(dc, desc);
412 list_splice_init(&desc->tx_list, &dc->free_list);
413 list_move(&desc->desc_node, &dc->free_list);
424 static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
426 struct txx9dmac_dev *ddev = dc->ddev;
432 desc = txx9dmac_first_queued(dc);
434 desc_write_CHAR(dc, prev, desc->txd.phys);
435 dma_sync_single_for_device(chan2parent(&dc->chan),
443 !txx9dmac_chan_INTENT(dc))
445 } while (!list_empty(&dc->queue));
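
txx9dmac_dequeue() (source lines 424-445) walks dc->queue and links each descriptor into the hardware chain by writing its DMA address (desc->txd.phys) into the previous descriptor's CHAR field via desc_write_CHAR(), then syncing that previous descriptor back to the device; the prep routines do the same linking and terminate the last descriptor with CHAR = 0 (source lines 778-792 and 872-887). Below is a self-contained sketch of that linking step; the hwdesc layout and the phys addresses are simplified stand-ins of mine, and the dma_sync_single_for_device() call is represented by a comment.

#include <stdint.h>
#include <stdio.h>

/* Simplified hardware descriptor: the device follows CHAR to the next one. */
struct hwdesc {
        uint64_t CHAR;          /* DMA address of the next descriptor, 0 = end */
        uint64_t SAR, DAR;
        uint32_t CNTR;
};

struct desc {
        struct hwdesc hw;
        uint64_t phys;          /* stand-in for desc->txd.phys */
};

/* Link descs[0..n-1] into one chain the way the driver does: each
 * predecessor's CHAR gets the successor's DMA address, and the last
 * descriptor's CHAR is zeroed to terminate the chain. */
static void chain_link(struct desc *descs, int n)
{
        int i;

        for (i = 1; i < n; i++) {
                descs[i - 1].hw.CHAR = descs[i].phys;
                /* driver: dma_sync_single_for_device() on descs[i - 1] here,
                 * so the device sees the updated CHAR field */
        }
        descs[n - 1].hw.CHAR = 0;
}

int main(void)
{
        struct desc d[3] = {
                { .phys = 0x1000 }, { .phys = 0x1040 }, { .phys = 0x1080 },
        };

        chain_link(d, 3);
        printf("d0->%#llx d1->%#llx d2->%#llx\n",
               (unsigned long long)d[0].hw.CHAR,
               (unsigned long long)d[1].hw.CHAR,
               (unsigned long long)d[2].hw.CHAR);
        return 0;
}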
448 static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
457 list_splice_init(&dc->active_list, &list);
458 if (!list_empty(&dc->queue)) {
459 txx9dmac_dequeue(dc, &dc->active_list);
460 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
464 txx9dmac_descriptor_complete(dc, desc);
467 static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
470 if (is_dmac64(dc)) {
472 dev_crit(chan2dev(&dc->chan),
476 dev_crit(chan2dev(&dc->chan),
485 dev_crit(chan2dev(&dc->chan),
489 dev_crit(chan2dev(&dc->chan),
498 static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
509 dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
510 txx9dmac_dump_regs(dc);
512 bad_desc = txx9dmac_first_active(dc);
519 channel_writel(dc, CSR, errors);
521 if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
522 txx9dmac_dequeue(dc, &dc->active_list);
523 if (!list_empty(&dc->active_list))
524 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
526 dev_crit(chan2dev(&dc->chan),
529 txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
531 txx9dmac_dump_desc(dc, &child->hwdesc);
533 txx9dmac_descriptor_complete(dc, bad_desc);
536 static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
543 if (is_dmac64(dc)) {
544 chain = channel64_read_CHAR(dc);
545 csr = channel64_readl(dc, CSR);
546 channel64_writel(dc, CSR, csr);
548 chain = channel32_readl(dc, CHAR);
549 csr = channel32_readl(dc, CSR);
550 channel32_writel(dc, CSR, csr);
555 txx9dmac_complete_all(dc);
561 dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
564 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
565 if (desc_read_CHAR(dc, desc) == chain) {
573 if (desc_read_CHAR(dc, child) == chain) {
584 txx9dmac_descriptor_complete(dc, desc);
588 txx9dmac_handle_error(dc, csr);
592 dev_err(chan2dev(&dc->chan),
596 txx9dmac_reset_chan(dc);
598 if (!list_empty(&dc->queue)) {
599 txx9dmac_dequeue(dc, &dc->active_list);
600 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
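
txx9dmac_scan_descriptors() (source lines 536-600) snapshots the hardware chain pointer (CHAR) and status (CSR), acknowledges the status bits, and then walks dc->active_list: a descriptor whose own CHAR (its next-descriptor pointer) matches the hardware's current value is still in progress, while everything before it has been consumed and is completed; errors fall through to txx9dmac_handle_error(), and an idle channel with a non-empty queue is restarted via txx9dmac_dequeue()/txx9dmac_dostart(). Below is a self-contained sketch of that completion scan; the descriptor layout is the same stand-in as in the previous example, not the driver's.

#include <stdint.h>
#include <stdio.h>

struct hwdesc { uint64_t CHAR; };       /* next-descriptor pointer */
struct desc   { struct hwdesc hw; uint64_t phys; int done; };

/* Complete every active descriptor the hardware has moved past.  'chain' is
 * the value just read from the channel's CHAR register.  A descriptor whose
 * own hw.CHAR equals 'chain' is the one currently in progress, so the scan
 * stops there; everything before it is finished. */
static int scan_active(struct desc *active, int n, uint64_t chain)
{
        int i, completed = 0;

        for (i = 0; i < n; i++) {
                if (active[i].hw.CHAR == chain)
                        break;          /* hardware is still on this one */
                active[i].done = 1;     /* driver: txx9dmac_descriptor_complete() */
                completed++;
        }
        return completed;
}

int main(void)
{
        /* three descriptors chained 0x1000 -> 0x1040 -> 0x1080 -> end */
        struct desc d[3] = {
                { { 0x1040 }, 0x1000, 0 },
                { { 0x1080 }, 0x1040, 0 },
                { { 0 },      0x1080, 0 },
        };

        /* hardware CHAR currently reads 0x1080: d[0] is done, d[1] is live */
        printf("completed %d of 3\n", scan_active(d, 3, 0x1080));
        return 0;
}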
608 struct txx9dmac_chan *dc;
610 dc = from_tasklet(dc, t, tasklet);
611 csr = channel_readl(dc, CSR);
612 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
614 spin_lock(&dc->lock);
617 txx9dmac_scan_descriptors(dc);
618 spin_unlock(&dc->lock);
619 irq = dc->irq;
626 struct txx9dmac_chan *dc = dev_id;
628 dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
629 channel_readl(dc, CSR));
631 tasklet_schedule(&dc->tasklet);
645 struct txx9dmac_chan *dc;
655 dc = ddev->chan[i];
656 csr = channel_readl(dc, CSR);
657 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
659 spin_lock(&dc->lock);
662 txx9dmac_scan_descriptors(dc);
663 spin_unlock(&dc->lock);
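
Interrupt handling is split in two (source lines 608-663): the hard IRQ handler only logs the channel status and schedules a tasklet, and the tasklet then takes dc->lock, re-reads CSR and runs txx9dmac_scan_descriptors() when a completion or error bit is set; the per-controller variant (source lines 645-663) repeats this for every populated ddev->chan[i]. The condensed sketch below reconstructs the per-channel pair from the matched lines; the disabling and re-enabling of the IRQ and the exact CSR bit mask are filled in from the driver's usual pattern and may not be verbatim.

/* top half: defer everything to the tasklet, with the IRQ masked meanwhile */
static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
        struct txx9dmac_chan *dc = dev_id;

        dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
                 channel_readl(dc, CSR));
        tasklet_schedule(&dc->tasklet);
        disable_irq_nosync(irq);        /* re-enabled by the tasklet */
        return IRQ_HANDLED;
}

/* bottom half: scan the active list under the channel lock */
static void txx9dmac_chan_tasklet(struct tasklet_struct *t)
{
        struct txx9dmac_chan *dc = from_tasklet(dc, t, tasklet);
        u32 csr = channel_readl(dc, CSR);
        int irq;

        dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

        spin_lock(&dc->lock);
        if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
                   TXX9_DMA_CSR_NTRNFC))        /* chain/transfer done or error */
                txx9dmac_scan_descriptors(dc);
        spin_unlock(&dc->lock);

        irq = dc->irq;
        enable_irq(irq);
}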
693 struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
696 spin_lock_bh(&dc->lock);
702 list_add_tail(&desc->desc_node, &dc->queue);
703 spin_unlock_bh(&dc->lock);
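
txx9dmac_tx_submit() (source lines 693-703) only queues work: under dc->lock it assigns the descriptor its cookie and appends it to dc->queue, so nothing touches the hardware until txx9dmac_issue_pending() later moves queued descriptors onto dc->active_list. The condensed sketch below is reconstructed from the matched lines; the cookie assignment via dma_cookie_assign() is the standard dmaengine helper, and the container_of() lookup is assumed by me rather than taken from the listing.

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct txx9dmac_desc *desc = container_of(tx, struct txx9dmac_desc, txd);
        struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&dc->lock);
        cookie = dma_cookie_assign(tx);         /* standard dmaengine helper */

        /* park it on the software queue; issue_pending starts the hardware */
        list_add_tail(&desc->desc_node, &dc->queue);
        spin_unlock_bh(&dc->lock);

        return cookie;
}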
712 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
713 struct txx9dmac_dev *ddev = dc->ddev;
748 desc = txx9dmac_desc_get(dc);
750 txx9dmac_desc_put(dc, first);
759 dc->ccr | TXX9_DMA_CCR_XFACT);
765 dc->ccr | TXX9_DMA_CCR_XFACT);
770 * the dc->queue list or dc->active_list after a
778 desc_write_CHAR(dc, prev, desc->txd.phys);
779 dma_sync_single_for_device(chan2parent(&dc->chan),
791 desc_write_CHAR(dc, prev, 0);
792 dma_sync_single_for_device(chan2parent(&dc->chan),
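
txx9dmac_prep_dma_memcpy() (source lines 712-792) splits the copy into controller-sized chunks, takes one descriptor per chunk from txx9dmac_desc_get(), fills in SAR/DAR/CNTR and a per-descriptor CCR of dc->ccr | TXX9_DMA_CCR_XFACT (source lines 759 and 765), links each new descriptor to the previous one exactly as txx9dmac_dequeue() does, keeps the extra descriptors on first->tx_list, and finally terminates the chain with CHAR = 0 before returning &first->txd. The condensed sketch below is reconstructed from those matched lines; the chunk-size constant, the 64-bit hwdesc variant, flag handling and the hw CCR write are abbreviated or assumed by me.

        struct txx9dmac_desc *first = NULL, *prev = NULL, *desc;
        size_t offset, xfer_count;

        for (offset = 0; offset < len; offset += xfer_count) {
                xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);

                desc = txx9dmac_desc_get(dc);
                if (!desc) {
                        txx9dmac_desc_put(dc, first);   /* give back what we took */
                        return NULL;
                }

                /* 32-bit hwdesc shown; the 64-bit variant fills hwdesc instead */
                desc->hwdesc32.SAR = src + offset;
                desc->hwdesc32.DAR = dest + offset;
                desc->hwdesc32.CNTR = xfer_count;
                /* per-descriptor CCR gets dc->ccr | TXX9_DMA_CCR_XFACT */

                if (!first) {
                        first = desc;
                } else {
                        /* link prev -> desc and push prev out to the device */
                        desc_write_CHAR(dc, prev, desc->txd.phys);
                        dma_sync_single_for_device(chan2parent(&dc->chan),
                                        prev->txd.phys, ddev->descsize,
                                        DMA_TO_DEVICE);
                        list_add_tail(&desc->desc_node, &first->tx_list);
                }
                prev = desc;
        }

        desc_write_CHAR(dc, prev, 0);           /* terminate the chain */
        dma_sync_single_for_device(chan2parent(&dc->chan),
                        prev->txd.phys, ddev->descsize, DMA_TO_DEVICE);
        return &first->txd;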
807 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
808 struct txx9dmac_dev *ddev = dc->ddev;
832 desc = txx9dmac_desc_get(dc);
834 txx9dmac_desc_put(dc, first);
867 dc->ccr | TXX9_DMA_CCR_XFACT);
872 desc_write_CHAR(dc, prev, desc->txd.phys);
873 dma_sync_single_for_device(chan2parent(&dc->chan),
886 desc_write_CHAR(dc, prev, 0);
887 dma_sync_single_for_device(chan2parent(&dc->chan),
899 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
904 spin_lock_bh(&dc->lock);
906 txx9dmac_reset_chan(dc);
909 list_splice_init(&dc->queue, &list);
910 list_splice_init(&dc->active_list, &list);
912 spin_unlock_bh(&dc->lock);
916 txx9dmac_descriptor_complete(dc, desc);
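
txx9dmac_terminate_all() (source lines 899-916) stops the channel by resetting it under dc->lock, splices both dc->queue and dc->active_list onto a private list, and only after dropping the lock completes every collected descriptor, so the completion path never runs with the channel lock held. The condensed sketch below is reconstructed from the matched lines; the function signature and return value follow the dmaengine device_terminate_all convention the driver registers at source line 1098.

static int txx9dmac_terminate_all(struct dma_chan *chan)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_desc *desc, *_desc;
        LIST_HEAD(list);

        spin_lock_bh(&dc->lock);

        txx9dmac_reset_chan(dc);                /* stop the hardware first */

        /* collect everything that was queued or in flight */
        list_splice_init(&dc->queue, &list);
        list_splice_init(&dc->active_list, &list);

        spin_unlock_bh(&dc->lock);

        /* complete (and recycle) the descriptors outside the lock */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                txx9dmac_descriptor_complete(dc, desc);

        return 0;
}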
925 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
932 spin_lock_bh(&dc->lock);
933 txx9dmac_scan_descriptors(dc);
934 spin_unlock_bh(&dc->lock);
939 static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
942 struct txx9dmac_dev *ddev = dc->ddev;
947 txx9dmac_dequeue(dc, &list);
949 desc_write_CHAR(dc, prev, desc->txd.phys);
950 dma_sync_single_for_device(chan2parent(&dc->chan),
953 if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
954 channel_read_CHAR(dc) == prev->txd.phys)
956 channel_write_CHAR(dc, desc->txd.phys);
957 list_splice_tail(&list, &dc->active_list);
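
txx9dmac_chain_dynamic() (source lines 939-957) grafts newly queued work onto a chain the hardware is already following: it dequeues the next batch onto a local list, writes the new head's DMA address into the previous descriptor's CHAR and syncs it for the device, then checks whether the hardware had already stopped at prev (chain-enable clear in CSR while the channel CHAR still equals prev->txd.phys) and, if so, restarts the chain by writing the channel CHAR register directly before splicing the batch onto dc->active_list. The condensed sketch below is reconstructed from the matched lines; the list_first_entry() lookup and small details around prev are filled in by me.

static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
                                   struct txx9dmac_desc *prev)
{
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_desc *desc;
        LIST_HEAD(list);

        txx9dmac_dequeue(dc, &list);
        desc = list_first_entry(&list, struct txx9dmac_desc, desc_node);

        /* graft the new head onto the tail the hardware already knows about */
        desc_write_CHAR(dc, prev, desc->txd.phys);
        dma_sync_single_for_device(chan2parent(&dc->chan),
                                   prev->txd.phys, ddev->descsize,
                                   DMA_TO_DEVICE);

        /* If the hardware finished prev and stopped chaining before it could
         * see the new link, restart the chain by hand. */
        if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
            channel_read_CHAR(dc) == prev->txd.phys)
                channel_write_CHAR(dc, desc->txd.phys);

        list_splice_tail(&list, &dc->active_list);
}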
962 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
964 spin_lock_bh(&dc->lock);
966 if (!list_empty(&dc->active_list))
967 txx9dmac_scan_descriptors(dc);
968 if (!list_empty(&dc->queue)) {
969 if (list_empty(&dc->active_list)) {
970 txx9dmac_dequeue(dc, &dc->active_list);
971 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
973 struct txx9dmac_desc *prev = txx9dmac_last_active(dc);
976 txx9dmac_chan_INTENT(dc))
977 txx9dmac_chain_dynamic(dc, prev);
981 spin_unlock_bh(&dc->lock);
986 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
994 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
1001 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1002 txx9dmac_chan_set_SMPCHN(dc);
1003 if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
1004 dc->ccr |= TXX9_DMA_CCR_INTENC;
1008 dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
1013 dc->ccr |= TXX9_DMA_CCR_EXTRQ |
1015 txx9dmac_chan_set_INTENT(dc);
1018 spin_lock_bh(&dc->lock);
1019 i = dc->descs_allocated;
1020 while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
1021 spin_unlock_bh(&dc->lock);
1023 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
1027 spin_lock_bh(&dc->lock);
1030 txx9dmac_desc_put(dc, desc);
1032 spin_lock_bh(&dc->lock);
1033 i = ++dc->descs_allocated;
1035 spin_unlock_bh(&dc->lock);
1045 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1046 struct txx9dmac_dev *ddev = dc->ddev;
1051 dc->descs_allocated);
1054 BUG_ON(!list_empty(&dc->active_list));
1055 BUG_ON(!list_empty(&dc->queue));
1056 BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);
1058 spin_lock_bh(&dc->lock);
1059 list_splice_init(&dc->free_list, &list);
1060 dc->descs_allocated = 0;
1061 spin_unlock_bh(&dc->lock);
1086 struct txx9dmac_chan *dc;
1091 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
1092 if (!dc)
1095 dc->dma.dev = &pdev->dev;
1096 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1097 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1098 dc->dma.device_terminate_all = txx9dmac_terminate_all;
1099 dc->dma.device_tx_status = txx9dmac_tx_status;
1100 dc->dma.device_issue_pending = txx9dmac_issue_pending;
1102 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
1103 dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
1105 dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
1106 dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
1107 dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
1110 INIT_LIST_HEAD(&dc->dma.channels);
1111 dc->ddev = platform_get_drvdata(dmac_dev);
1112 if (dc->ddev->irq < 0) {
1116 tasklet_setup(&dc->tasklet, txx9dmac_chan_tasklet);
1117 dc->irq = irq;
1118 err = devm_request_irq(&pdev->dev, dc->irq,
1119 txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
1123 dc->irq = -1;
1124 dc->ddev->chan[ch] = dc;
1125 dc->chan.device = &dc->dma;
1126 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1127 dma_cookie_init(&dc->chan);
1129 if (is_dmac64(dc))
1130 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
1132 dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
1133 spin_lock_init(&dc->lock);
1135 INIT_LIST_HEAD(&dc->active_list);
1136 INIT_LIST_HEAD(&dc->queue);
1137 INIT_LIST_HEAD(&dc->free_list);
1139 txx9dmac_reset_chan(dc);
1141 platform_set_drvdata(pdev, dc);
1143 err = dma_async_device_register(&dc->dma);
1147 dc->dma.dev_id,
1148 dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
1149 dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");
1156 struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
1159 dma_async_device_unregister(&dc->dma);
1160 if (dc->irq >= 0) {
1161 devm_free_irq(&pdev->dev, dc->irq, dc);
1162 tasklet_kill(&dc->tasklet);
1164 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;