Directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/dma/

Lines Matching refs:mv_chan

246  * Caller must hold &mv_chan->lock while calling this function
248 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
251 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
261 * Caller must hold &mv_chan->lock while calling this function
263 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
266 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
268 if (sw_desc->type != mv_chan->current_type)
269 mv_set_mode(mv_chan, sw_desc->type);
276 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
277 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
278 mv_chan_set_value(mv_chan, sw_desc->value);
281 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
283 mv_chan->pending += sw_desc->slot_cnt;
284 mv_xor_issue_pending(&mv_chan->common);
289 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
309 &mv_chan->device->pdev->dev;
349 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
353 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
354 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
359 mv_xor_free_slots(mv_chan, iter);
367 struct mv_xor_chan *mv_chan)
369 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
377 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
381 mv_xor_free_slots(mv_chan, desc);
385 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
389 int busy = mv_chan_is_busy(mv_chan);
390 u32 current_desc = mv_chan_get_current_desc(mv_chan);
393 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
394 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
395 mv_xor_clean_completed_slots(mv_chan);
401 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
422 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
424 if (mv_xor_clean_slot(iter, mv_chan))
428 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
430 chain_head = list_entry(mv_chan->chain.next,
434 mv_xor_start_new_chain(mv_chan, chain_head);
438 mv_chan->completed_cookie = cookie;
442 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
444 spin_lock_bh(&mv_chan->lock);
445 __mv_xor_slot_cleanup(mv_chan);
446 spin_unlock_bh(&mv_chan->lock);
456 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
470 iter = mv_chan->last_used;
472 iter = list_entry(&mv_chan->all_slots,
477 iter, _iter, &mv_chan->all_slots, slot_node) {
522 mv_chan->last_used = last_used;
532 tasklet_schedule(&mv_chan->irq_tasklet);
538 mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
541 dma_cookie_t cookie = mv_chan->common.cookie;
545 mv_chan->common.cookie = desc->async_tx.cookie = cookie;
554 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
559 dev_dbg(mv_chan->device->common.dev,
565 spin_lock_bh(&mv_chan->lock);
566 cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
568 if (list_empty(&mv_chan->chain))
569 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
573 old_chain_tail = list_entry(mv_chan->chain.prev,
582 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
589 if (!mv_chan_is_busy(mv_chan)) {
590 u32 current_desc = mv_chan_get_current_desc(mv_chan);
601 mv_xor_start_new_chain(mv_chan, grp_start);
604 spin_unlock_bh(&mv_chan->lock);
614 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
617 mv_chan->device->pdev->dev.platform_data;
621 idx = mv_chan->slots_allocated;
629 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
637 hw_desc = (char *) mv_chan->device->dma_desc_pool;
642 spin_lock_bh(&mv_chan->lock);
643 mv_chan->slots_allocated = idx;
644 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
645 spin_unlock_bh(&mv_chan->lock);
648 if (mv_chan->slots_allocated && !mv_chan->last_used)
649 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
653 dev_dbg(mv_chan->device->common.dev,
655 mv_chan->slots_allocated, mv_chan->last_used);
657 return mv_chan->slots_allocated ? : -ENOMEM;
664 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
668 dev_dbg(mv_chan->device->common.dev,
676 spin_lock_bh(&mv_chan->lock);
678 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
690 spin_unlock_bh(&mv_chan->lock);
692 dev_dbg(mv_chan->device->common.dev,
703 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
707 dev_dbg(mv_chan->device->common.dev,
715 spin_lock_bh(&mv_chan->lock);
717 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
729 spin_unlock_bh(&mv_chan->lock);
730 dev_dbg(mv_chan->device->common.dev,
740 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
749 dev_dbg(mv_chan->device->common.dev,
753 spin_lock_bh(&mv_chan->lock);
755 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
769 spin_unlock_bh(&mv_chan->lock);
770 dev_dbg(mv_chan->device->common.dev,
778 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
782 mv_xor_slot_cleanup(mv_chan);
784 spin_lock_bh(&mv_chan->lock);
785 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
790 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
796 iter, _iter, &mv_chan->all_slots, slot_node) {
799 mv_chan->slots_allocated--;
801 mv_chan->last_used = NULL;
803 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
804 __func__, mv_chan->slots_allocated);
805 spin_unlock_bh(&mv_chan->lock);
808 dev_err(mv_chan->device->common.dev,
822 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
828 last_complete = mv_chan->completed_cookie;
829 mv_chan->is_complete_cookie = cookie;
834 mv_xor_clean_completed_slots(mv_chan);
837 mv_xor_slot_cleanup(mv_chan);
840 last_complete = mv_chan->completed_cookie;
911 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
913 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
914 mv_chan->pending = 0;
915 mv_chan_activate(mv_chan);
933 struct mv_xor_chan *mv_chan;
979 mv_chan = to_mv_xor_chan(dma_chan);
980 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1012 struct mv_xor_chan *mv_chan;
1077 mv_chan = to_mv_xor_chan(dma_chan);
1078 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1106 struct mv_xor_chan *mv_chan;
1116 mv_chan = to_mv_xor_chan(chan);
1128 struct mv_xor_chan *mv_chan;
1178 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1179 if (!mv_chan) {
1183 mv_chan->device = adev;
1184 mv_chan->idx = plat_data->hw_id;
1185 mv_chan->mmr_base = adev->shared->xor_base;
1187 if (!mv_chan->mmr_base) {
1191 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1192 mv_chan);
1195 mv_xor_device_clear_err_status(mv_chan);
1204 0, dev_name(&pdev->dev), mv_chan);
1208 mv_chan_unmask_interrupts(mv_chan);
1210 mv_set_mode(mv_chan, DMA_MEMCPY);
1212 spin_lock_init(&mv_chan->lock);
1213 INIT_LIST_HEAD(&mv_chan->chain);
1214 INIT_LIST_HEAD(&mv_chan->completed_slots);
1215 INIT_LIST_HEAD(&mv_chan->all_slots);
1216 mv_chan->common.device = dma_dev;
1218 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
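
Note on the locking rule visible in the hits above: the comments at source lines 246 and 261 state that the caller must hold &mv_chan->lock, and mv_xor_slot_cleanup() at source lines 442-446 is the driver's own wrapper that satisfies this before calling __mv_xor_slot_cleanup(). The snippet below is only a minimal sketch of that same pattern for readers skimming this listing; it is not code from the driver, and the function name example_cleanup_under_lock is hypothetical.

    /*
     * Hypothetical caller; mirrors mv_xor_slot_cleanup() at source
     * lines 442-446 of this file.
     */
    static void example_cleanup_under_lock(struct mv_xor_chan *mv_chan)
    {
            /* Same _bh lock variant the driver uses at lines 444-446. */
            spin_lock_bh(&mv_chan->lock);
            __mv_xor_slot_cleanup(mv_chan);  /* requires mv_chan->lock held */
            spin_unlock_bh(&mv_chan->lock);
    }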