Lines Matching refs:sh_chan (only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/dma/)

49 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
98 static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
100 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
108 static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
110 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
122 static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
124 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
140 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
142 sh_dmae_writel(sh_chan, hw->sar, SAR);
143 sh_dmae_writel(sh_chan, hw->dar, DAR);
144 sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
147 static void dmae_start(struct sh_dmae_chan *sh_chan)
149 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
152 sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
155 static void dmae_halt(struct sh_dmae_chan *sh_chan)
157 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
160 sh_dmae_writel(sh_chan, chcr, CHCR);
163 static void dmae_init(struct sh_dmae_chan *sh_chan)
169 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
171 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
172 sh_dmae_writel(sh_chan, chcr, CHCR);
175 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
178 if (dmae_is_busy(sh_chan))
181 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
182 sh_dmae_writel(sh_chan, val, CHCR);
187 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
189 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
192 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
196 if (dmae_is_busy(sh_chan))
208 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
212 spin_lock_bh(&sh_chan->desc_lock);
214 cookie = sh_chan->common.cookie;
219 sh_chan->common.cookie = cookie;
231 &chunk->node == &sh_chan->ld_free))
237 list_move_tail(&chunk->node, &sh_chan->ld_queue);
244 dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
245 tx->cookie, &last->async_tx, sh_chan->id,
248 spin_unlock_bh(&sh_chan->desc_lock);
254 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
258 list_for_each_entry(desc, &sh_chan->ld_free, node)
269 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
271 struct dma_device *dma_dev = sh_chan->common.device;
289 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
294 pm_runtime_get_sync(sh_chan->dev);
303 cfg = sh_dmae_find_slave(sh_chan, param);
316 dmae_set_dmars(sh_chan, cfg->mid_rid);
317 dmae_set_chcr(sh_chan, cfg->chcr);
318 } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
319 dmae_init(sh_chan);
322 spin_lock_bh(&sh_chan->desc_lock);
323 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
324 spin_unlock_bh(&sh_chan->desc_lock);
327 spin_lock_bh(&sh_chan->desc_lock);
331 &sh_chan->common);
335 spin_lock_bh(&sh_chan->desc_lock);
336 list_add(&desc->node, &sh_chan->ld_free);
337 sh_chan->descs_allocated++;
339 spin_unlock_bh(&sh_chan->desc_lock);
341 if (!sh_chan->descs_allocated) {
346 return sh_chan->descs_allocated;
353 pm_runtime_put(sh_chan->dev);
362 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
365 int descs = sh_chan->descs_allocated;
367 dmae_halt(sh_chan);
370 if (!list_empty(&sh_chan->ld_queue))
371 sh_dmae_chan_ld_cleanup(sh_chan, true);
379 spin_lock_bh(&sh_chan->desc_lock);
381 list_splice_init(&sh_chan->ld_free, &list);
382 sh_chan->descs_allocated = 0;
384 spin_unlock_bh(&sh_chan->desc_lock);
387 pm_runtime_put(sh_chan->dev);
395 * @sh_chan: DMA channel
408 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
419 new = sh_dmae_get_desc(sh_chan);
421 dev_err(sh_chan->dev, "No free link descriptor available\n");
440 dev_dbg(sh_chan->dev,
443 new->async_tx.cookie, sh_chan->xmit_shift);
468 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
486 spin_lock_bh(&sh_chan->desc_lock);
507 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
511 new = sh_dmae_add_desc(sh_chan, flags,
515 new = sh_dmae_add_desc(sh_chan, flags,
530 list_splice_tail(&tx_list, &sh_chan->ld_free);
532 spin_unlock_bh(&sh_chan->desc_lock);
539 list_splice(&tx_list, &sh_chan->ld_free);
541 spin_unlock_bh(&sh_chan->desc_lock);
550 struct sh_dmae_chan *sh_chan;
558 sh_chan = to_sh_chan(chan);
566 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
575 struct sh_dmae_chan *sh_chan;
581 sh_chan = to_sh_chan(chan);
586 dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
597 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
604 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
613 dmae_halt(sh_chan);
615 spin_lock_bh(&sh_chan->desc_lock);
616 if (!list_empty(&sh_chan->ld_queue)) {
618 struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
620 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
621 sh_chan->xmit_shift;
624 spin_unlock_bh(&sh_chan->desc_lock);
626 sh_dmae_chan_ld_cleanup(sh_chan, true);
631 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
640 spin_lock_bh(&sh_chan->desc_lock);
641 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
662 if (sh_chan->completed_cookie != desc->cookie - 1)
663 dev_dbg(sh_chan->dev,
666 sh_chan->completed_cookie + 1);
667 sh_chan->completed_cookie = desc->cookie;
675 dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
676 tx->cookie, tx, sh_chan->id);
698 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
706 list_move(&desc->node, &sh_chan->ld_free);
709 spin_unlock_bh(&sh_chan->desc_lock);
722 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
724 while (__ld_cleanup(sh_chan, all))
729 sh_chan->completed_cookie = sh_chan->common.cookie;
732 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
736 spin_lock_bh(&sh_chan->desc_lock);
738 if (dmae_is_busy(sh_chan)) {
739 spin_unlock_bh(&sh_chan->desc_lock);
744 list_for_each_entry(desc, &sh_chan->ld_queue, node)
746 dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
747 desc->async_tx.cookie, sh_chan->id,
750 dmae_set_reg(sh_chan, &desc->hw);
751 dmae_start(sh_chan);
755 spin_unlock_bh(&sh_chan->desc_lock);
760 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
761 sh_chan_xfer_ld_queue(sh_chan);
768 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
773 sh_dmae_chan_ld_cleanup(sh_chan, false);
776 last_complete = sh_chan->completed_cookie;
780 spin_lock_bh(&sh_chan->desc_lock);
791 list_for_each_entry(desc, &sh_chan->ld_queue, node)
798 spin_unlock_bh(&sh_chan->desc_lock);
806 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
807 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
811 dmae_halt(sh_chan);
814 tasklet_schedule(&sh_chan->tasklet);
831 struct sh_dmae_chan *sh_chan = shdev->chan[i];
832 if (sh_chan) {
835 dmae_halt(sh_chan);
837 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
843 list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
854 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
856 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
857 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
859 spin_lock(&sh_chan->desc_lock);
860 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
865 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
872 spin_unlock(&sh_chan->desc_lock);
875 sh_chan_xfer_ld_queue(sh_chan);
876 sh_dmae_chan_ld_cleanup(sh_chan, false);
953 struct sh_dmae_chan *sh_chan = shdev->chan[i];
955 free_irq(sh_chan->irq, sh_chan);
957 list_del(&sh_chan->common.device_node);
958 kfree(sh_chan);
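
Taken together, the matches above show the driver's recurring pattern for driving a channel: CHCR is always handled as a read-modify-write through sh_dmae_readl()/sh_dmae_writel() (see dmae_start(), dmae_halt(), dmae_set_chcr()), and the ld_free/ld_queue descriptor lists are only walked under spin_lock_bh(&sh_chan->desc_lock). The standalone sketch below models only the CHCR read-modify-write step; the CHCR_DE/CHCR_TE/CHCR_IE bit values and the chcr_read()/chcr_write() helpers are illustrative assumptions, not definitions taken from this tree.

#include <stdint.h>
#include <stdio.h>

/* Assumed CHCR bit layout, for illustration only (the tree's own shdma.h
 * holds the real definitions): DE = DMA enable, TE = transfer end,
 * IE = interrupt enable. */
#define CHCR_DE 0x01u
#define CHCR_TE 0x02u
#define CHCR_IE 0x04u

/* Stand-in for the per-channel CHCR register that the driver accesses
 * via sh_dmae_readl()/sh_dmae_writel(). */
static uint32_t chcr_reg;

static uint32_t chcr_read(void)        { return chcr_reg; }
static void     chcr_write(uint32_t v) { chcr_reg = v; }

/* Same shape as the start path in the listing: read CHCR, set the enable
 * and interrupt bits, and write it back with the stale transfer-end flag
 * cleared. */
static void chan_start(void)
{
    uint32_t chcr = chcr_read();

    chcr |= CHCR_DE | CHCR_IE;
    chcr_write(chcr & ~CHCR_TE);
}

/* Same shape as the halt path: read CHCR, drop enable, transfer-end and
 * interrupt bits, and write it back. */
static void chan_halt(void)
{
    uint32_t chcr = chcr_read();

    chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
    chcr_write(chcr);
}

int main(void)
{
    chan_start();
    printf("after start: CHCR = 0x%02x\n", (unsigned)chcr_read());
    chan_halt();
    printf("after halt:  CHCR = 0x%02x\n", (unsigned)chcr_read());
    return 0;
}

Keeping channel state in that one register is also why, in the matches above, dmae_set_chcr() and dmae_set_dmars() can gate reconfiguration on a single dmae_is_busy() check before touching the hardware.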