Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/dma/

Lines Matching defs:iop_chan

48  * Caller must hold &iop_chan->lock while calling this function
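
The requirement at line 48 is this file's recurring locking convention: helpers that touch the descriptor chain or the slot lists assume the caller already holds iop_chan->lock. A minimal sketch of an illustrative caller follows; the callee name iop_adma_free_slots() is an assumption (the helper this kerneldoc line belongs to is not itself in the listing), and the _bh variant matters because the cleanup tasklet at lines 335-344 takes the same lock.

	#include <linux/spinlock.h>

	/* Illustrative caller only; iop_adma_free_slots() is assumed to be
	 * the slot-recycling helper the line-48 comment documents. */
	static void example_release_slot(struct iop_adma_chan *iop_chan,
					 struct iop_adma_desc_slot *slot)
	{
		spin_lock_bh(&iop_chan->lock);	/* _bh: a tasklet contends */
		iop_adma_free_slots(slot);	/* needs iop_chan->lock held */
		spin_unlock_bh(&iop_chan->lock);
	}
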
63 iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
67 struct device *dev = &iop_chan->device->pdev->dev;
75 dest = iop_desc_get_dest_addr(unmap, iop_chan);
89 addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
99 iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
103 struct device *dev = &iop_chan->device->pdev->dev;
107 dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
108 dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
123 addr = iop_desc_get_src_addr(unmap, iop_chan, i);
138 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
158 iop_desc_unmap_pq(iop_chan, desc);
160 iop_desc_unmap(iop_chan, desc);
172 struct iop_adma_chan *iop_chan)
183 if (desc->chain_node.next == &iop_chan->chain)
186 dev_dbg(iop_chan->device->common.dev,
196 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
200 u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
201 int busy = iop_chan_is_busy(iop_chan);
204 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
208 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
265 &iop_chan->chain, chain_node) {
283 &iop_chan->chain, chain_node) {
285 grp_iter, iop_chan, cookie);
289 iop_chan);
313 iter, iop_chan, cookie);
315 if (iop_adma_clean_slot(iter, iop_chan))
320 iop_chan->completed_cookie = cookie;
326 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
328 spin_lock_bh(&iop_chan->lock);
329 __iop_adma_slot_cleanup(iop_chan);
330 spin_unlock_bh(&iop_chan->lock);
335 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
342 spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
343 __iop_adma_slot_cleanup(iop_chan);
344 spin_unlock(&iop_chan->lock);
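
Lines 326-344 show the two entry points into the same cleanup routine. The process-context wrapper uses plain spin_lock_bh(); the tasklet, already running in bottom-half context, uses spin_lock_nested() with SINGLE_DEPTH_NESTING, the standard lockdep annotation for legitimately taking another lock of the same class (here, another channel's lock during dependency submission). A sketch restating both paths, using only what the listing shows:

	#include <linux/spinlock.h>
	#include <linux/interrupt.h>

	/* Process-context entry (lines 328-330). */
	static void cleanup_from_process_context(struct iop_adma_chan *iop_chan)
	{
		spin_lock_bh(&iop_chan->lock);
		__iop_adma_slot_cleanup(iop_chan);
		spin_unlock_bh(&iop_chan->lock);
	}

	/* Tasklet entry (lines 335-344): no _bh needed in bottom-half
	 * context; SINGLE_DEPTH_NESTING keeps lockdep quiet about
	 * same-class locks taken during dependency submission. */
	static void cleanup_from_tasklet(unsigned long data)
	{
		struct iop_adma_chan *iop_chan = (struct iop_adma_chan *)data;

		spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
		__iop_adma_slot_cleanup(iop_chan);
		spin_unlock(&iop_chan->lock);
	}
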
348 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
362 iter = iop_chan->last_used;
364 iter = list_entry(&iop_chan->all_slots,
369 iter, _iter, &iop_chan->all_slots, slot_node) {
399 dev_dbg(iop_chan->device->common.dev,
426 iop_chan->last_used = last_used;
436 __iop_adma_slot_cleanup(iop_chan);
442 iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
445 dma_cookie_t cookie = iop_chan->common.cookie;
449 iop_chan->common.cookie = desc->async_tx.cookie = cookie;
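
Lines 442-449 assign the dmaengine completion cookie. Cookies increase monotonically and must skip non-positive values when the signed 32-bit counter wraps, because zero and negative cookies are reserved states in the dmaengine core. A sketch of the whole helper, reconstructed around the two lines shown; the increment and wrap check are assumptions consistent with that cookie contract:

	static dma_cookie_t example_assign_cookie(struct iop_adma_chan *iop_chan,
						  struct iop_adma_desc_slot *desc)
	{
		dma_cookie_t cookie = iop_chan->common.cookie;

		cookie++;
		if (cookie < 0)
			cookie = 1;	/* 0 and negatives are reserved */
		iop_chan->common.cookie = desc->async_tx.cookie = cookie;

		return cookie;
	}
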
453 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
455 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
456 iop_chan->pending);
458 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
459 iop_chan->pending = 0;
460 iop_chan_append(iop_chan);
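
Lines 453-460 implement a batching policy: each submission bumps iop_chan->pending, and the hardware chain is only appended to once IOP_ADMA_THRESHOLD submissions have queued. The ->device_issue_pending() hook (lines 977-981 further down) performs the same flush whenever anything at all is pending. A hedged sketch unifying both paths; the wrapper name and the force flag are illustrative:

	#include <linux/types.h>

	/* force models device_issue_pending(); !force models the
	 * per-submit threshold check. */
	static void example_flush_policy(struct iop_adma_chan *iop_chan,
					 bool force)
	{
		int watermark = force ? 1 : IOP_ADMA_THRESHOLD;

		if (iop_chan->pending >= watermark) {
			iop_chan->pending = 0;
			iop_chan_append(iop_chan);	/* hand work to hw */
		}
	}
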
468 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
479 spin_lock_bh(&iop_chan->lock);
480 cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
482 old_chain_tail = list_entry(iop_chan->chain.prev,
500 iop_chan->pending += slot_cnt;
501 iop_adma_check_threshold(iop_chan);
502 spin_unlock_bh(&iop_chan->lock);
504 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
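
Lines 468-504 outline the submit path: under the channel lock, the transaction gets a cookie, its descriptors are spliced onto the end of the software chain, and the pending counter may trigger an append to hardware. A simplified sketch of that sequence; the real function also fixes up the hardware next-descriptor pointer and adds the actual slot count (not 1) to ->pending:

	#include <linux/list.h>
	#include <linux/dmaengine.h>

	static dma_cookie_t example_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
		struct iop_adma_desc_slot *sw_desc =
			container_of(tx, struct iop_adma_desc_slot, async_tx);
		struct iop_adma_desc_slot *old_chain_tail;
		dma_cookie_t cookie;

		spin_lock_bh(&iop_chan->lock);
		/* 1. stamp the transaction with a monotonic cookie */
		cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
		/* 2. splice the new descriptors in after the chain tail */
		old_chain_tail = list_entry(iop_chan->chain.prev,
					    struct iop_adma_desc_slot,
					    chain_node);
		list_splice_init(&sw_desc->tx_list,
				 &old_chain_tail->chain_node);
		/* 3. account for it; maybe kick the engine */
		iop_chan->pending++;
		iop_adma_check_threshold(iop_chan);
		spin_unlock_bh(&iop_chan->lock);

		return cookie;
	}
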
510 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
511 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
518 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
527 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
529 int init = iop_chan->slots_allocated ? 0 : 1;
531 iop_chan->device->pdev->dev.platform_data;
536 idx = iop_chan->slots_allocated;
546 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
554 hw_desc = (char *) iop_chan->device->dma_desc_pool;
559 spin_lock_bh(&iop_chan->lock);
560 iop_chan->slots_allocated++;
561 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
562 spin_unlock_bh(&iop_chan->lock);
563 } while (iop_chan->slots_allocated < num_descs_in_pool);
565 if (idx && !iop_chan->last_used)
566 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
570 dev_dbg(iop_chan->device->common.dev,
572 iop_chan->slots_allocated, iop_chan->last_used);
577 iop_chan->device->common.cap_mask))
578 iop_chan_start_null_memcpy(iop_chan);
580 iop_chan->device->common.cap_mask))
581 iop_chan_start_null_xor(iop_chan);
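
Lines 527-581 carve the channel's coherent descriptor pool into fixed-size slots. Lines 546 and 554 show the key duality: each slot records a CPU virtual address from dma_desc_pool_virt and the matching bus address from dma_desc_pool at the same offset, so software edits exactly the descriptor the hardware will later fetch. A sketch of one loop iteration, assuming IOP_ADMA_SLOT_SIZE as the granularity the offsets imply; the wrapper function is illustrative:

	static void example_carve_slot(struct iop_adma_chan *iop_chan,
				       struct iop_adma_desc_slot *slot)
	{
		int idx = iop_chan->slots_allocated;
		char *pool_virt =
			(char *)iop_chan->device->dma_desc_pool_virt;

		/* CPU view and bus view of the same slot-sized piece */
		slot->hw_desc = &pool_virt[idx * IOP_ADMA_SLOT_SIZE];
		slot->async_tx.phys = iop_chan->device->dma_desc_pool +
				      idx * IOP_ADMA_SLOT_SIZE;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	}
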
592 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
596 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
598 spin_lock_bh(&iop_chan->lock);
599 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
600 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
603 iop_desc_init_interrupt(grp_start, iop_chan);
607 spin_unlock_bh(&iop_chan->lock);
616 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
624 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
627 spin_lock_bh(&iop_chan->lock);
629 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
633 iop_desc_set_byte_count(grp_start, iop_chan, len);
634 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
640 spin_unlock_bh(&iop_chan->lock);
649 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
657 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
660 spin_lock_bh(&iop_chan->lock);
662 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
666 iop_desc_set_byte_count(grp_start, iop_chan, len);
668 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
673 spin_unlock_bh(&iop_chan->lock);
683 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
691 dev_dbg(iop_chan->device->common.dev,
695 spin_lock_bh(&iop_chan->lock);
697 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
701 iop_desc_set_byte_count(grp_start, iop_chan, len);
702 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
710 spin_unlock_bh(&iop_chan->lock);
720 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
727 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
730 spin_lock_bh(&iop_chan->lock);
732 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
747 spin_unlock_bh(&iop_chan->lock);
757 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
766 dev_dbg(iop_chan->device->common.dev,
777 spin_lock_bh(&iop_chan->lock);
779 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
784 iop_desc_set_byte_count(g, iop_chan, len);
813 spin_unlock_bh(&iop_chan->lock);
824 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
832 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
835 spin_lock_bh(&iop_chan->lock);
837 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
859 spin_unlock_bh(&iop_chan->lock);
866 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
870 iop_adma_slot_cleanup(iop_chan);
872 spin_lock_bh(&iop_chan->lock);
873 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
879 iter, _iter, &iop_chan->all_slots, slot_node) {
882 iop_chan->slots_allocated--;
884 iop_chan->last_used = NULL;
886 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
887 __func__, iop_chan->slots_allocated);
888 spin_unlock_bh(&iop_chan->lock);
906 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
912 last_complete = iop_chan->completed_cookie;
918 iop_adma_slot_cleanup(iop_chan);
921 last_complete = iop_chan->completed_cookie;
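
Lines 906-921 show cookie-based completion testing: compare the queried cookie against the channel's last-completed and last-used cookies, and on a miss run one cleanup pass before re-checking. A hedged sketch using the 2.6.36-era dmaengine names (DMA_SUCCESS, dma_async_is_complete()); dma_tx_state reporting is elided:

	#include <linux/dmaengine.h>

	static enum dma_status example_status(struct dma_chan *chan,
					      dma_cookie_t cookie)
	{
		struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
		dma_cookie_t last_used = chan->cookie;
		dma_cookie_t last_complete = iop_chan->completed_cookie;

		if (dma_async_is_complete(cookie, last_complete, last_used) ==
		    DMA_SUCCESS)
			return DMA_SUCCESS;

		/* reap finished descriptors, then re-evaluate */
		iop_adma_slot_cleanup(iop_chan);
		last_complete = iop_chan->completed_cookie;

		return dma_async_is_complete(cookie, last_complete,
					     chan->cookie);
	}
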
977 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
979 if (iop_chan->pending) {
980 iop_chan->pending = 0;
981 iop_chan_append(iop_chan);
999 struct iop_adma_chan *iop_chan;
1045 iop_chan = to_iop_adma_chan(dma_chan);
1046 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1080 struct iop_adma_chan *iop_chan;
1145 iop_chan = to_iop_adma_chan(dma_chan);
1146 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1157 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
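
The self-tests at lines 999-1157 bracket every CPU read of a DMA destination with ownership transfers: dma_sync_single_for_cpu() before inspecting the buffer, dma_sync_single_for_device() to hand it back. A minimal sketch of that verification step; the buffer names and the memcmp check are illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	static bool example_verify(struct device *dev, dma_addr_t dest_dma,
				   const void *dest_virt, const void *expect,
				   size_t len)
	{
		bool ok;

		dma_sync_single_for_cpu(dev, dest_dma, len, DMA_FROM_DEVICE);
		ok = memcmp(dest_virt, expect, len) == 0;
		dma_sync_single_for_device(dev, dest_dma, len,
					   DMA_FROM_DEVICE);

		return ok;
	}
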
1432 struct iop_adma_chan *iop_chan;
1442 iop_chan = to_iop_adma_chan(chan);
1444 kfree(iop_chan);
1456 struct iop_adma_chan *iop_chan;
1529 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1530 if (!iop_chan) {
1534 iop_chan->device = adev;
1536 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1538 if (!iop_chan->mmr_base) {
1542 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1543 iop_chan);
1546 iop_adma_device_clear_err_status(iop_chan);
1558 handler[i], 0, pdev->name, iop_chan);
1564 spin_lock_init(&iop_chan->lock);
1565 INIT_LIST_HEAD(&iop_chan->chain);
1566 INIT_LIST_HEAD(&iop_chan->all_slots);
1567 iop_chan->common.device = dma_dev;
1568 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1614 kfree(iop_chan);
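
The probe fragments at lines 1529-1568 fix the per-channel bring-up order: allocate and link the channel, map its registers, arm the tasklet before the IRQ is requested, and clear stale error state so the first interrupt is meaningful. A hedged reconstruction of that order; error handling, resource lookup, and the per-capability handler table are simplified, and the wrapper function is illustrative:

	#include <linux/slab.h>
	#include <linux/io.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static int example_channel_setup(struct platform_device *pdev,
					 struct iop_adma_device *adev,
					 struct dma_device *dma_dev,
					 struct resource *res, int irq,
					 irq_handler_t handler)
	{
		struct iop_adma_chan *iop_chan;

		iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
		if (!iop_chan)
			return -ENOMEM;
		iop_chan->device = adev;

		iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
						  resource_size(res));
		if (!iop_chan->mmr_base)
			goto err_free;

		/* pre-v4.x tasklet API: context travels as unsigned long */
		tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet,
			     (unsigned long)iop_chan);

		/* reset stale error bits before an IRQ can fire */
		iop_adma_device_clear_err_status(iop_chan);

		if (devm_request_irq(&pdev->dev, irq, handler, 0, pdev->name,
				     iop_chan))
			goto err_free;

		spin_lock_init(&iop_chan->lock);
		INIT_LIST_HEAD(&iop_chan->chain);	/* active descriptors */
		INIT_LIST_HEAD(&iop_chan->all_slots);	/* slot-pool list */
		iop_chan->common.device = dma_dev;
		list_add_tail(&iop_chan->common.device_node,
			      &dma_dev->channels);

		return 0;

	err_free:
		kfree(iop_chan);
		return -ENOMEM;
	}
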
1624 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1630 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1632 spin_lock_bh(&iop_chan->lock);
1634 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1638 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1641 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1642 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1645 cookie = iop_chan->common.cookie;
1653 iop_chan->completed_cookie = cookie - 1;
1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1657 BUG_ON(iop_chan_is_busy(iop_chan));
1660 iop_adma_device_clear_err_status(iop_chan);
1663 iop_chan_disable(iop_chan);
1666 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1674 iop_chan_enable(iop_chan);
1676 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1678 spin_unlock_bh(&iop_chan->lock);
1681 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1687 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1689 spin_lock_bh(&iop_chan->lock);
1691 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1694 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1697 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1698 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1702 cookie = iop_chan->common.cookie;
1710 iop_chan->completed_cookie = cookie - 1;
1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1714 BUG_ON(iop_chan_is_busy(iop_chan));
1717 iop_adma_device_clear_err_status(iop_chan);
1720 iop_chan_disable(iop_chan);
1723 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1731 iop_chan_enable(iop_chan);
1733 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1735 spin_unlock_bh(&iop_chan->lock);
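
Both null-operation starters end with the same hardware hand-off (lines 1657-1674 and 1714-1731): assert the channel is idle, clear error status, stop the engine, point it at the priming descriptor's bus address, and restart it, so that later submissions can simply append to a live chain. A sketch of that shared tail, using only calls shown in the listing:

	static void example_kick_start(struct iop_adma_chan *iop_chan,
				       struct iop_adma_desc_slot *sw_desc)
	{
		BUG_ON(iop_chan_is_busy(iop_chan));	/* must be idle */

		iop_adma_device_clear_err_status(iop_chan);

		iop_chan_disable(iop_chan);	/* quiesce before repointing */
		iop_chan_set_next_descriptor(iop_chan,
					     sw_desc->async_tx.phys);
		iop_chan_enable(iop_chan);	/* engine fetches the null op */
	}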