Lines matching defs:mv_chan (cross-reference hits for mv_chan in the Marvell XOR DMA engine driver, drivers/dma/mv_xor.c; the number at the start of each entry is its line number in that file)

176  * Caller must hold &mv_chan->lock while calling this function
178 static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
181 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
185 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
187 mv_chan->pending++;
188 mv_xor_issue_pending(&mv_chan->dmachan);
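
The mv_chan_start_new_chain() hits above include the comment spelling out its locking contract: the helper touches the descriptor chain and the pending counter, so every caller (the submit path and the cleanup path) must already hold mv_chan->lock. A minimal userspace sketch of that "caller holds the lock" convention, using a pthread mutex and invented names (chan_start_locked, chan_submit); build with -pthread:

#include <pthread.h>
#include <stdio.h>

struct chan {
	pthread_mutex_t lock;
	int pending;
};

/* Mirrors the mv_chan_start_new_chain() contract: the caller must already
 * hold chan->lock, so this helper never takes it itself. */
static void chan_start_locked(struct chan *c, const char *why)
{
	c->pending++;				/* safe only because the lock is held */
	printf("start new chain (%s), pending=%d\n", why, c->pending);
}

static void chan_submit(struct chan *c)
{
	pthread_mutex_lock(&c->lock);		/* submit path takes the lock ... */
	chan_start_locked(c, "submit");		/* ... then calls the locked helper */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chan c = { .lock = PTHREAD_MUTEX_INITIALIZER, .pending = 0 };

	chan_submit(&c);
	return 0;
}
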
193 struct mv_xor_chan *mv_chan,
215 mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
219 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
220 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
224 list_move_tail(&iter->node, &mv_chan->free_slots);
227 &mv_chan->free_slots);
236 struct mv_xor_chan *mv_chan)
238 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
246 list_move_tail(&desc->node, &mv_chan->completed_slots);
249 &mv_chan->completed_slots);
252 list_move_tail(&desc->node, &mv_chan->free_slots);
255 &mv_chan->free_slots);
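
The mv_chan_clean_completed_slots() and mv_desc_clean_slot() hits show the slot lifecycle: finished descriptors leave the active chain for mv_chan->completed_slots, and once the client has acknowledged them they move on to mv_chan->free_slots for reuse, always via list_move_tail() under the channel lock. Below is a small self-contained model of that recycling, with a hand-rolled circular list standing in for list_head and a plain "acked" flag standing in for async_tx_test_ack(); all names are invented for illustration:

#include <stdio.h>

/* Tiny circular doubly-linked list, standing in for the kernel's list_head. */
struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}
static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}
static void list_move_tail(struct node *n, struct node *h)
{
	list_del(n);
	list_add_tail(n, h);
}

struct slot { struct node node; int id; int acked; };	/* node must stay first */

int main(void)
{
	struct node completed, free_slots;
	struct slot s[2] = { { .id = 0, .acked = 1 }, { .id = 1, .acked = 1 } };
	struct node *n, *next;

	list_init(&completed);
	list_init(&free_slots);
	list_add_tail(&s[0].node, &completed);
	list_add_tail(&s[1].node, &completed);

	/* Like mv_chan_clean_completed_slots(): walk with a saved next pointer
	 * (the _safe iteration) and recycle every acknowledged descriptor. */
	for (n = completed.next; n != &completed; n = next) {
		next = n->next;
		if (((struct slot *)n)->acked)
			list_move_tail(n, &free_slots);
	}

	for (n = free_slots.next; n != &free_slots; n = n->next)
		printf("slot %d is free for reuse\n", ((struct slot *)n)->id);
	return 0;
}
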
263 static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
267 int busy = mv_chan_is_busy(mv_chan);
268 u32 current_desc = mv_chan_get_current_desc(mv_chan);
272 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
273 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
274 mv_chan_clean_completed_slots(mv_chan);
280 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
286 cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
290 mv_desc_clean_slot(iter, mv_chan);
305 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
311 iter = list_entry(mv_chan->chain.next,
314 mv_chan_start_new_chain(mv_chan, iter);
316 if (!list_is_last(&iter->node, &mv_chan->chain)) {
324 mv_chan_start_new_chain(mv_chan, iter);
330 tasklet_schedule(&mv_chan->irq_tasklet);
336 mv_chan->dmachan.completed_cookie = cookie;
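
mv_chan_slot_cleanup() reads the descriptor the engine is currently processing (mv_chan_get_current_desc()), walks mv_chan->chain, and treats everything queued before that point as complete; if the engine went idle with work still queued, it restarts the chain, and the newest completed cookie ends up in dmachan.completed_cookie. A simplified model of that "complete everything behind the hardware pointer" walk, with made-up stand-ins (hw_current, done[]) for the hardware read and the completion actions:

#include <stdio.h>

#define NDESC 4

static int cleanup(int busy, int hw_current, int done[NDESC])
{
	int completed_cookie = -1;
	int i;

	for (i = 0; i < NDESC; i++) {
		if (i == hw_current) {
			if (busy)
				break;		/* engine still owns this descriptor */
			done[i] = 1;		/* idle: the current one finished too */
			completed_cookie = i;
			break;
		}
		done[i] = 1;			/* run tx-complete actions */
		completed_cookie = i;
	}
	/* The driver additionally restarts the chain here (mv_chan_start_new_chain)
	 * if the engine went idle while descriptors are still queued. */
	return completed_cookie;		/* stored in dmachan.completed_cookie */
}

int main(void)
{
	int done[NDESC] = { 0 };
	int cookie = cleanup(1 /* busy */, 2 /* engine on descriptor 2 */, done);

	printf("completed_cookie=%d (descriptors 0..%d finished)\n", cookie, cookie);
	return 0;
}
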
349 mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
353 spin_lock_bh(&mv_chan->lock);
355 if (!list_empty(&mv_chan->free_slots)) {
356 iter = list_first_entry(&mv_chan->free_slots,
360 list_move_tail(&iter->node, &mv_chan->allocated_slots);
362 spin_unlock_bh(&mv_chan->lock);
372 spin_unlock_bh(&mv_chan->lock);
375 tasklet_schedule(&mv_chan->irq_tasklet);
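
mv_chan_alloc_slot() takes mv_chan->lock, moves the first entry of free_slots onto allocated_slots, and on an empty pool schedules the IRQ tasklet so deferred cleanup can replenish the free list before the caller retries. A compact sketch of that "allocate or request cleanup" pattern under a pthread mutex, with a simple array standing in for the free list (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int free_slots[8];
static int nfree;
static int cleanup_requested;		/* stands in for tasklet_schedule() */

/* Returns a slot id, or -1 when the pool is exhausted; in that case the
 * deferred cleanup is asked to recycle completed slots. */
static int alloc_slot(void)
{
	int slot = -1;

	pthread_mutex_lock(&lock);
	if (nfree > 0)
		slot = free_slots[--nfree];	/* "move" free -> allocated */
	pthread_mutex_unlock(&lock);

	if (slot < 0)
		cleanup_requested = 1;
	return slot;
}

int main(void)
{
	free_slots[nfree++] = 0;		/* one slot in the pool */

	int a = alloc_slot();			/* succeeds */
	int b = alloc_slot();			/* fails, requests cleanup */

	printf("got %d then %d, cleanup_requested=%d\n", a, b, cleanup_requested);
	return 0;
}
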
385 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
390 dev_dbg(mv_chan_to_devp(mv_chan),
394 spin_lock_bh(&mv_chan->lock);
397 if (list_empty(&mv_chan->chain))
398 list_move_tail(&sw_desc->node, &mv_chan->chain);
402 old_chain_tail = list_entry(mv_chan->chain.prev,
405 list_move_tail(&sw_desc->node, &mv_chan->chain);
407 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
414 if (!mv_chan_is_busy(mv_chan)) {
415 u32 current_desc = mv_chan_get_current_desc(mv_chan);
426 mv_chan_start_new_chain(mv_chan, sw_desc);
428 spin_unlock_bh(&mv_chan->lock);
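
The mv_xor_tx_submit() fragments show the submit path: with mv_chan->lock held, the new software descriptor is appended to mv_chan->chain; when the chain was not empty, the old tail's hardware descriptor is also pointed at the new one by its DMA address, and the engine is only restarted if it is idle. A minimal sketch of that append-and-link-by-physical-address idea, assuming invented field names (phys, next_phys) rather than the driver's descriptor layout:

#include <stdio.h>

/* Hypothetical hardware descriptor: the engine walks next_phys pointers. */
struct hw_desc {
	unsigned int phys;		/* bus address the engine uses to reach it */
	unsigned int next_phys;		/* 0 terminates the chain */
};

/* Append new_d after tail so the software list and the hardware chain agree,
 * as mv_xor_tx_submit() does for the old chain tail. */
static void chain_append(struct hw_desc *tail, struct hw_desc *new_d)
{
	new_d->next_phys = 0;
	if (tail)
		tail->next_phys = new_d->phys;
}

int main(void)
{
	struct hw_desc a = { .phys = 0x1000 }, b = { .phys = 0x2000 };

	chain_append(NULL, &a);		/* first descriptor becomes the chain head */
	chain_append(&a, &b);		/* later submit links by DMA address */
	printf("a.next_phys=0x%x b.next_phys=0x%x\n", a.next_phys, b.next_phys);
	return 0;
}
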
439 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
444 idx = mv_chan->slots_allocated;
448 dev_info(mv_chan_to_devp(mv_chan),
453 virt_desc = mv_chan->dma_desc_pool_virt;
460 dma_desc = mv_chan->dma_desc_pool;
464 spin_lock_bh(&mv_chan->lock);
465 mv_chan->slots_allocated = idx;
466 list_add_tail(&slot->node, &mv_chan->free_slots);
467 spin_unlock_bh(&mv_chan->lock);
470 dev_dbg(mv_chan_to_devp(mv_chan),
472 mv_chan->slots_allocated);
474 return mv_chan->slots_allocated ? : -ENOMEM;
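
mv_xor_alloc_chan_resources() carves the coherent descriptor pool into fixed-size slots: slot idx gets the CPU address dma_desc_pool_virt + idx * slot size and the matching bus address dma_desc_pool + idx * slot size, is added to free_slots, and the function returns the slot count (or -ENOMEM if none could be set up). A sketch of that carving arithmetic, with calloc() standing in for dma_alloc_wc() and illustrative sizes rather than the driver's MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE	4096		/* illustrative pool size */
#define SLOT_SIZE	64		/* illustrative per-descriptor slot size */

struct slot {
	void	*hw_virt;		/* CPU view of the hardware descriptor */
	uint64_t hw_dma;		/* address the engine would be given */
};

int main(void)
{
	void *pool_virt = calloc(1, POOL_SIZE);		/* models dma_alloc_wc() */
	uint64_t pool_dma = 0x10000000;			/* pretend bus address */
	struct slot slots[POOL_SIZE / SLOT_SIZE];
	int idx;

	if (!pool_virt)
		return 1;

	for (idx = 0; idx < POOL_SIZE / SLOT_SIZE; idx++) {
		slots[idx].hw_virt = (char *)pool_virt + idx * SLOT_SIZE;
		slots[idx].hw_dma  = pool_dma + idx * SLOT_SIZE;
	}
	printf("carved %d slots; slot 1 dma=0x%llx\n", idx,
	       (unsigned long long)slots[1].hw_dma);
	free(pool_virt);
	return 0;
}
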
483 static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
485 struct mv_xor_device *xordev = mv_chan->xordev;
486 void __iomem *base = mv_chan->mmr_high_base;
558 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
567 dev_dbg(mv_chan_to_devp(mv_chan),
572 ret = mv_xor_add_io_win(mv_chan, dest);
576 sw_desc = mv_chan_alloc_slot(mv_chan);
581 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
585 ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
592 dev_dbg(mv_chan_to_devp(mv_chan),
612 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
616 src = mv_chan->dummy_src_addr;
617 dest = mv_chan->dummy_dst_addr;
629 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
633 spin_lock_bh(&mv_chan->lock);
635 mv_chan_slot_cleanup(mv_chan);
637 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
640 list_move_tail(&iter->node, &mv_chan->free_slots);
642 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
645 list_move_tail(&iter->node, &mv_chan->free_slots);
647 list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
650 list_move_tail(&iter->node, &mv_chan->free_slots);
653 iter, _iter, &mv_chan->free_slots, node) {
656 mv_chan->slots_allocated--;
659 dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
660 __func__, mv_chan->slots_allocated);
661 spin_unlock_bh(&mv_chan->lock);
664 dev_err(mv_chan_to_devp(mv_chan),
678 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
685 spin_lock_bh(&mv_chan->lock);
686 mv_chan_slot_cleanup(mv_chan);
687 spin_unlock_bh(&mv_chan->lock);
749 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
751 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
752 mv_chan->pending = 0;
753 mv_chan_activate(mv_chan);
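
mv_xor_issue_pending() batches doorbell writes: the channel is only reactivated via mv_chan_activate() once mv_chan->pending reaches MV_XOR_THRESHOLD, so a burst of submissions costs one hardware poke rather than one per descriptor. A tiny model of that threshold batching (the THRESHOLD value here is illustrative, not the driver's constant):

#include <stdio.h>

#define THRESHOLD 4			/* illustrative, not MV_XOR_THRESHOLD */

static int pending;
static int doorbell_writes;		/* counts "channel activate" operations */

static void issue_pending(void)
{
	if (pending >= THRESHOLD) {
		pending = 0;
		doorbell_writes++;	/* mv_chan_activate() in the driver */
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		pending++;		/* one submit */
		issue_pending();
	}
	printf("10 submits -> %d doorbell writes\n", doorbell_writes);
	return 0;
}
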
761 static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
786 dma_chan = &mv_chan->dmachan;
872 mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
919 dma_chan = &mv_chan->dmachan;
1009 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1012 struct device *dev = mv_chan->dmadev.dev;
1014 dma_async_device_unregister(&mv_chan->dmadev);
1017 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1018 dma_unmap_single(dev, mv_chan->dummy_src_addr,
1020 dma_unmap_single(dev, mv_chan->dummy_dst_addr,
1023 list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1028 free_irq(mv_chan->irq, mv_chan);
1039 struct mv_xor_chan *mv_chan;
1042 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1043 if (!mv_chan)
1046 mv_chan->idx = idx;
1047 mv_chan->irq = irq;
1049 mv_chan->op_in_desc = XOR_MODE_IN_REG;
1051 mv_chan->op_in_desc = XOR_MODE_IN_DESC;
1053 dma_dev = &mv_chan->dmadev;
1055 mv_chan->xordev = xordev;
1062 mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
1063 mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1064 mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
1065 mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1071 mv_chan->dma_desc_pool_virt =
1072 dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1074 if (!mv_chan->dma_desc_pool_virt)
1098 mv_chan->mmr_base = xordev->xor_base;
1099 mv_chan->mmr_high_base = xordev->xor_high_base;
1100 tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);
1103 mv_chan_clear_err_status(mv_chan);
1105 ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1106 0, dev_name(&pdev->dev), mv_chan);
1110 mv_chan_unmask_interrupts(mv_chan);
1112 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
1113 mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
1115 mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1117 spin_lock_init(&mv_chan->lock);
1118 INIT_LIST_HEAD(&mv_chan->chain);
1119 INIT_LIST_HEAD(&mv_chan->completed_slots);
1120 INIT_LIST_HEAD(&mv_chan->free_slots);
1121 INIT_LIST_HEAD(&mv_chan->allocated_slots);
1122 mv_chan->dmachan.device = dma_dev;
1123 dma_cookie_init(&mv_chan->dmachan);
1125 list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1128 ret = mv_chan_memcpy_self_test(mv_chan);
1135 ret = mv_chan_xor_self_test(mv_chan);
1142 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1151 return mv_chan;
1154 free_irq(mv_chan->irq, mv_chan);
1157 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1237 struct mv_xor_chan *mv_chan = xordev->channels[i];
1239 if (!mv_chan)
1242 mv_chan->saved_config_reg =
1243 readl_relaxed(XOR_CONFIG(mv_chan));
1244 mv_chan->saved_int_mask_reg =
1245 readl_relaxed(XOR_INTR_MASK(mv_chan));
1258 struct mv_xor_chan *mv_chan = xordev->channels[i];
1260 if (!mv_chan)
1263 writel_relaxed(mv_chan->saved_config_reg,
1264 XOR_CONFIG(mv_chan));
1265 writel_relaxed(mv_chan->saved_int_mask_reg,
1266 XOR_INTR_MASK(mv_chan));
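
The suspend/resume hits show each channel saving its XOR_CONFIG and XOR_INTR_MASK registers with readl_relaxed() and writing them back with writel_relaxed() on resume, since the block loses its setup across power-down. A minimal model of that save/restore pattern, with a plain volatile array standing in for the MMIO window and invented names (chan_suspend, chan_resume):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the channel's register window; volatile mimics device MMIO. */
static volatile uint32_t regs[2];	/* [0] = config, [1] = interrupt mask */

struct chan_pm_state { uint32_t saved_config, saved_int_mask; };

static void chan_suspend(struct chan_pm_state *s)
{
	s->saved_config   = regs[0];	/* like readl_relaxed(XOR_CONFIG(chan)) */
	s->saved_int_mask = regs[1];	/* like readl_relaxed(XOR_INTR_MASK(chan)) */
}

static void chan_resume(const struct chan_pm_state *s)
{
	regs[0] = s->saved_config;	/* like writel_relaxed(..., XOR_CONFIG(chan)) */
	regs[1] = s->saved_int_mask;	/* like writel_relaxed(..., XOR_INTR_MASK(chan)) */
}

int main(void)
{
	struct chan_pm_state s;

	regs[0] = 0xb0;			/* pretend the driver configured the block */
	regs[1] = 0x3;
	chan_suspend(&s);
	regs[0] = regs[1] = 0;		/* power loss wipes the registers */
	chan_resume(&s);
	printf("config=0x%x mask=0x%x restored\n",
	       (unsigned)regs[0], (unsigned)regs[1]);
	return 0;
}
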