Lines Matching defs:chan

32 struct device *chan2dev(struct dw_edma_chan *chan)
34 return &chan->vc.chan.dev->device;
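The vc.chan path in chan2dev() suggests the driver channel embeds a struct virt_dma_chan named vc. A minimal sketch of the matching back-conversion helpers, assuming that embedding (the helper names mirror the dchan2dw_edma_chan() calls that appear later in this listing; this is not quoted driver code):

static inline struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc)
{
	/* Recover the driver channel that embeds this virt_dma_chan. */
	return container_of(vc, struct dw_edma_chan, vc);
}

static inline struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
{
	/* to_virt_chan() is the stock virt-dma conversion helper. */
	return vc2dw_edma_chan(to_virt_chan(dchan));
}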
44 u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
46 struct dw_edma_chip *chip = chan->dw->chip;
78 struct dw_edma_chip *chip = desc->chan->dw->chip;
79 struct dw_edma_chan *chan = desc->chan;
87 chunk->chan = chan;
95 if (chan->dir == EDMA_DIR_WRITE) {
96 chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
97 chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
99 chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
100 chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
121 static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
129 desc->chan = chan;
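A filled-in reading of the dw_edma_alloc_desc() fragment above. Only desc->chan = chan comes from the listing; the GFP_NOWAIT allocation (prep paths may run in atomic context) is an assumption, and the chunk setup seen in the earlier fragment is left out:

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	/* No sleeping allocations in descriptor-prep context. */
	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->chan = chan;

	return desc;
}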
185 static int dw_edma_start_transfer(struct dw_edma_chan *chan)
187 struct dw_edma *dw = chan->dw;
192 vd = vchan_next_desc(&chan->vc);
218 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
220 if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
221 if (chan->dir == EDMA_DIR_READ)
226 if (chan->dir == EDMA_DIR_WRITE)
236 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
238 memcpy(&chan->config, config, sizeof(*config));
239 chan->configured = true;
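These two lines are the device_config callback: the slave configuration is copied verbatim and the channel is marked as configured. For context, a consumer reaches it through the generic dmaengine helper; a usage sketch with purely illustrative addresses:

struct dma_slave_config cfg = {
	.src_addr = 0x20000000,		/* hypothetical endpoint bus address */
	.dst_addr = 0x20000000,
};
int ret;

ret = dmaengine_slave_config(dchan, &cfg);
if (ret)
	return ret;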
246 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
249 if (!chan->configured)
251 else if (chan->status != EDMA_ST_BUSY)
253 else if (chan->request != EDMA_REQ_NONE)
256 chan->request = EDMA_REQ_PAUSE;
263 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
266 if (!chan->configured) {
268 } else if (chan->status != EDMA_ST_PAUSE) {
270 } else if (chan->request != EDMA_REQ_NONE) {
273 chan->status = EDMA_ST_BUSY;
274 dw_edma_start_transfer(chan);
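Reading the resume fragment as one unit: the state checks and the restart come from the listing, while the function name and error codes are assumptions consistent with it:

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;		/* never configured, nothing to resume */
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;		/* only a paused channel can resume */
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EBUSY;		/* a pause/stop request is still pending */
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}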
282 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
285 if (!chan->configured) {
287 } else if (chan->status == EDMA_ST_PAUSE) {
288 chan->status = EDMA_ST_IDLE;
289 chan->configured = false;
290 } else if (chan->status == EDMA_ST_IDLE) {
291 chan->configured = false;
292 } else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) {
297 chan->status = EDMA_ST_IDLE;
298 chan->configured = false;
299 } else if (chan->request > EDMA_REQ_PAUSE) {
302 chan->request = EDMA_REQ_STOP;
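The branches above belong to device_terminate_all: an unconfigured or idle channel is simply deconfigured, a completed channel is retired, and anything still running gets an EDMA_REQ_STOP request. On the consumer side this callback is driven by the standard dmaengine helpers:

/* From non-atomic context: request termination and wait for it. */
dmaengine_terminate_sync(dchan);

/* Or split into the async request plus an explicit synchronize. */
dmaengine_terminate_async(dchan);
dmaengine_synchronize(dchan);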
310 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
313 if (!chan->configured)
316 spin_lock_irqsave(&chan->vc.lock, flags);
317 if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
318 chan->status == EDMA_ST_IDLE) {
319 chan->status = EDMA_ST_BUSY;
320 dw_edma_start_transfer(chan);
322 spin_unlock_irqrestore(&chan->vc.lock, flags);
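The issue_pending fragment is nearly complete in the listing; filled in here (the function name, the flags declaration, and the early return are assumptions consistent with the fragment):

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	if (!chan->configured)
		return;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE) {
		/* Nothing running and nothing requested: start the hardware. */
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}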
329 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
340 if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
346 spin_lock_irqsave(&chan->vc.lock, flags);
347 vd = vchan_find_desc(&chan->vc, cookie);
353 spin_unlock_irqrestore(&chan->vc.lock, flags);
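A sketch of the tx_status flow these lines belong to, assuming the usual dmaengine pattern (dma_cookie_status() for the baseline answer, dma_set_residue() for the remaining byte count). The DMA_PAUSED mapping and the vchan_find_desc() lookup are from the listing; the residue formula and the vd2dw_edma_desc() conversion are assumptions:

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/* A paused channel still reports in-flight cookies; refine that. */
	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		/* Assumption: remaining = allocated size - bytes transferred. */
		struct dw_edma_desc *desc = vd2dw_edma_desc(vd);

		residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	dma_set_residue(txstate, residue);
	return ret;
}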
364 struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
375 if (!chan->configured)
404 if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
405 if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
406 (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
409 if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
410 (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
429 desc = dw_edma_alloc_desc(chan);
441 src_addr = chan->config.src_addr;
442 dst_addr = chan->config.dst_addr;
446 src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
448 dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);
464 if (chunk->bursts_alloc == chan->ll_max) {
536 return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
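vchan_tx_prep() is what hands the finished descriptor back to the dmaengine core. From a client's point of view the same transfer looks like this (sgl, sg_len, the DMA_DEV_TO_MEM direction and the callback are illustrative):

struct dma_async_tx_descriptor *txd;
dma_cookie_t cookie;

txd = dmaengine_prep_slave_sg(dchan, sgl, sg_len, DMA_DEV_TO_MEM,
			      DMA_PREP_INTERRUPT);
if (!txd)
	return -ENOMEM;

txd->callback = xfer_done;		/* hypothetical completion callback */
txd->callback_param = ctx;

cookie = dmaengine_submit(txd);
dma_async_issue_pending(dchan);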
598 static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
604 spin_lock_irqsave(&chan->vc.lock, flags);
605 vd = vchan_next_desc(&chan->vc);
607 switch (chan->request) {
617 chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
623 chan->request = EDMA_REQ_NONE;
624 chan->status = EDMA_ST_IDLE;
628 chan->request = EDMA_REQ_NONE;
629 chan->status = EDMA_ST_PAUSE;
636 spin_unlock_irqrestore(&chan->vc.lock, flags);
639 static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
644 spin_lock_irqsave(&chan->vc.lock, flags);
645 vd = vchan_next_desc(&chan->vc);
650 spin_unlock_irqrestore(&chan->vc.lock, flags);
651 chan->request = EDMA_REQ_NONE;
652 chan->status = EDMA_ST_IDLE;
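The abort-interrupt fragment, filled in under the assumption that the lines missing from the listing retire the pending descriptor with the usual virt-dma idiom (list_del() plus vchan_cookie_complete()):

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		/* Assumption: complete the descriptor the abort interrupted. */
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}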
685 struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
687 if (chan->status != EDMA_ST_IDLE)
714 struct dw_edma_chan *chan;
726 chan = &dw->chan[i];
728 chan->dw = dw;
731 chan->id = i;
732 chan->dir = EDMA_DIR_WRITE;
734 chan->id = i - dw->wr_ch_cnt;
735 chan->dir = EDMA_DIR_READ;
738 chan->configured = false;
739 chan->request = EDMA_REQ_NONE;
740 chan->status = EDMA_ST_IDLE;
742 if (chan->dir == EDMA_DIR_WRITE)
743 chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
745 chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
746 chan->ll_max -= 1;
749 chan->dir == EDMA_DIR_WRITE ? "write" : "read",
750 chan->id, chan->ll_max);
754 else if (chan->dir == EDMA_DIR_WRITE)
755 pos = chan->id % wr_alloc;
757 pos = wr_alloc + chan->id % rd_alloc;
761 if (chan->dir == EDMA_DIR_WRITE)
762 irq->wr_mask |= BIT(chan->id);
764 irq->rd_mask |= BIT(chan->id);
767 memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
770 chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
771 chan->msi.address_hi, chan->msi.address_lo,
772 chan->msi.data);
774 chan->vc.desc_free = vchan_free_desc;
775 chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
776 &dw->chip->dt_region_wr[chan->id] :
777 &dw->chip->dt_region_rd[chan->id];
779 vchan_init(&chan->vc, dma);
781 dw_edma_core_ch_config(chan);
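The ll_max lines above size each channel by how many linked-list elements fit in its region, keeping one slot back (presumably for the link element that chains chunks). A worked example with hypothetical numbers, assuming a 24-byte element size for EDMA_LL_SZ:

/* Hypothetical: a 2 KiB linked-list region per channel. */
chan->ll_max  = 2048 / 24;	/* 85 elements fit in the region */
chan->ll_max -= 1;		/* 84 usable bursts per chunk    */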
948 dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
949 sizeof(*dw->chan), GFP_KERNEL);
950 if (!dw->chan)
986 struct dw_edma_chan *chan, *_chan;
1004 list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
1005 vc.chan.device_node) {
1006 tasklet_kill(&chan->vc.task);
1007 list_del(&chan->vc.chan.device_node);