Lines matching refs:sw_desc (only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/dma/)

467 	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
475 grp_start = sw_desc->group_head;
480 cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
484 list_splice_init(&sw_desc->tx_list,
493 iop_paranoia(iop_desc_get_next_desc(sw_desc));
505 __func__, sw_desc->async_tx.cookie, sw_desc->idx);
593 struct iop_adma_desc_slot *sw_desc, *grp_start;
600 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
601 if (sw_desc) {
602 grp_start = sw_desc->group_head;
605 sw_desc->async_tx.flags = flags;
609 return sw_desc ? &sw_desc->async_tx : NULL;
617 struct iop_adma_desc_slot *sw_desc, *grp_start;
629 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
630 if (sw_desc) {
631 grp_start = sw_desc->group_head;
636 sw_desc->unmap_src_cnt = 1;
637 sw_desc->unmap_len = len;
638 sw_desc->async_tx.flags = flags;
642 return sw_desc ? &sw_desc->async_tx : NULL;
650 struct iop_adma_desc_slot *sw_desc, *grp_start;
662 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
663 if (sw_desc) {
664 grp_start = sw_desc->group_head;
669 sw_desc->unmap_src_cnt = 1;
670 sw_desc->unmap_len = len;
671 sw_desc->async_tx.flags = flags;
675 return sw_desc ? &sw_desc->async_tx : NULL;
684 struct iop_adma_desc_slot *sw_desc, *grp_start;
697 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
698 if (sw_desc) {
699 grp_start = sw_desc->group_head;
703 sw_desc->unmap_src_cnt = src_cnt;
704 sw_desc->unmap_len = len;
705 sw_desc->async_tx.flags = flags;
712 return sw_desc ? &sw_desc->async_tx : NULL;
721 struct iop_adma_desc_slot *sw_desc, *grp_start;
732 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
733 if (sw_desc) {
734 grp_start = sw_desc->group_head;
740 sw_desc->unmap_src_cnt = src_cnt;
741 sw_desc->unmap_len = len;
742 sw_desc->async_tx.flags = flags;
749 return sw_desc ? &sw_desc->async_tx : NULL;
758 struct iop_adma_desc_slot *sw_desc, *g;
779 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
780 if (sw_desc) {
783 g = sw_desc->group_head;
794 sw_desc->unmap_src_cnt = src_cnt;
795 sw_desc->unmap_len = len;
796 sw_desc->async_tx.flags = flags;
815 return sw_desc ? &sw_desc->async_tx : NULL;
825 struct iop_adma_desc_slot *sw_desc, *g;
837 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
838 if (sw_desc) {
844 g = sw_desc->group_head;
850 sw_desc->unmap_src_cnt = src_cnt+2;
851 sw_desc->unmap_len = len;
852 sw_desc->async_tx.flags = flags;
861 return sw_desc ? &sw_desc->async_tx : NULL;
1626 struct iop_adma_desc_slot *sw_desc, *grp_start;
1634 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1635 if (sw_desc) {
1636 grp_start = sw_desc->group_head;
1638 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1639 async_tx_ack(&sw_desc->async_tx);
1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1666 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1671 BUG_ON(iop_desc_get_next_desc(sw_desc));
1683 struct iop_adma_desc_slot *sw_desc, *grp_start;
1691 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1692 if (sw_desc) {
1693 grp_start = sw_desc->group_head;
1694 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1695 async_tx_ack(&sw_desc->async_tx);
1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1723 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1728 BUG_ON(iop_desc_get_next_desc(sw_desc));
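
Most of the hits between lines 593 and 861 follow the same descriptor-preparation pattern used by the iop_adma_prep_dma_* callbacks in the IOP ADMA driver: allocate a group of descriptor slots, grab the group head, record the unmap bookkeeping (unmap_src_cnt, unmap_len), stash the async_tx flags, and hand back &sw_desc->async_tx, or NULL if no slots were free. The sketch below is a standalone, user-space model of that flow only; the demo_* names are hypothetical stand-ins, not the kernel's struct iop_adma_desc_slot or struct dma_async_tx_descriptor.

    /* Simplified model of the slot-allocation pattern seen in the hits above.
     * All demo_* types are hypothetical stand-ins for the driver's structures. */
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_async_tx {
            unsigned long flags;                /* mirrors async_tx.flags */
    };

    struct demo_desc_slot {
            struct demo_desc_slot *group_head;  /* first slot of the group */
            unsigned int unmap_src_cnt;         /* sources to unmap on completion */
            size_t unmap_len;                   /* bytes to unmap on completion */
            struct demo_async_tx async_tx;      /* descriptor handed to the client */
    };

    /* Stand-in for iop_adma_alloc_slots(): may fail and return NULL. */
    static struct demo_desc_slot *demo_alloc_slots(int slot_cnt)
    {
            struct demo_desc_slot *s = calloc(1, sizeof(*s));

            if (s)
                    s->group_head = s;          /* single-slot group for the demo */
            (void)slot_cnt;
            return s;
    }

    /* Mirrors the shape of the prep_dma_* callbacks in the listing:
     * allocate, program the group head, record unmap info, set flags,
     * then return &sw_desc->async_tx or NULL. */
    static struct demo_async_tx *demo_prep_copy(size_t len, unsigned long flags)
    {
            struct demo_desc_slot *sw_desc, *grp_start;

            sw_desc = demo_alloc_slots(1);
            if (sw_desc) {
                    grp_start = sw_desc->group_head;
                    /* hardware programming of grp_start would happen here */
                    sw_desc->unmap_src_cnt = 1;
                    sw_desc->unmap_len = len;
                    sw_desc->async_tx.flags = flags;
                    (void)grp_start;
            }

            return sw_desc ? &sw_desc->async_tx : NULL;
    }

    int main(void)
    {
            struct demo_async_tx *tx = demo_prep_copy(4096, 0x1);

            printf("prep %s, flags=%#lx\n", tx ? "ok" : "failed",
                   tx ? tx->flags : 0UL);
            return 0;
    }

The xor, pq and validate variants in the listing differ only in how many sources they record (src_cnt, or src_cnt+2 for pq_val) and in how the group head is programmed; the allocate/fill/return shape is identical.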
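The remaining hits (around lines 467-505 and 1626-1728) are the submit side and the channel start-up helpers: a cookie is assigned, the descriptor's tx_list is spliced onto the channel's chain, and the hardware is pointed at the new descriptor. Below is a minimal model of that bookkeeping, again with hypothetical demo_* types rather than the kernel's list and DMA machinery.

    /* Minimal model of the submit-side bookkeeping visible around
     * iop_adma_tx_submit(): assign a monotonically increasing cookie, then
     * move the descriptor's pending tx_list onto the channel's chain. */
    #include <stdio.h>

    #define MAX_CHAIN 8

    struct demo_chan {
            int last_cookie;            /* mirrors iop_chan->common.cookie */
            int chain[MAX_CHAIN];       /* stand-in for the channel's descriptor chain */
            int chain_len;
    };

    struct demo_desc {
            int cookie;                 /* mirrors sw_desc->async_tx.cookie */
            int tx_list[MAX_CHAIN];     /* stand-in for sw_desc->tx_list */
            int tx_list_len;
    };

    /* Roughly what iop_desc_assign_cookie() plus list_splice_init() accomplish:
     * give the transaction an ordering cookie and append its descriptors to the
     * channel's chain, leaving the per-descriptor list empty. */
    static int demo_submit(struct demo_chan *chan, struct demo_desc *desc)
    {
            int i;

            desc->cookie = ++chan->last_cookie;

            for (i = 0; i < desc->tx_list_len && chan->chain_len < MAX_CHAIN; i++)
                    chan->chain[chan->chain_len++] = desc->tx_list[i];
            desc->tx_list_len = 0;      /* list_splice_init() empties the source list */

            return desc->cookie;
    }

    int main(void)
    {
            struct demo_chan chan = { .last_cookie = 0 };
            struct demo_desc desc = { .tx_list = { 101, 102 }, .tx_list_len = 2 };
            int cookie = demo_submit(&chan, &desc);

            printf("cookie=%d, chain_len=%d\n", cookie, chan.chain_len);
            return 0;
    }

In the driver itself the start-up helpers (the hits at 1626-1728) additionally point the hardware at the first descriptor via iop_chan_set_next_descriptor() and sanity-check the chain end with BUG_ON/iop_paranoia; that hardware interaction is outside the scope of this sketch.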