Lines matching refs:desc (uses of the identifier `desc` in the PL330 DMA driver, drivers/dma/pl330.c; each hit is prefixed with its line number in that file)

360 	struct dma_pl330_desc *desc;
420 /* Schedule desc completion */
540 /* The channel which currently holds this desc */
552 struct dma_pl330_desc *desc;
561 return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
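The hits at 552-561 show each pl330 thread carrying exactly two request slots, thrd->req[0] and thrd->req[1]; _queue_full() at 561 simply reports whether both hold a descriptor. A minimal userspace model of that double-buffering (illustrative names, not the driver's code):

#include <stdio.h>
#include <stddef.h>

struct desc { int id; };

struct thread {
        struct desc *req[2];    /* two hardware request slots */
        int lstenq;             /* index of the last-enqueued slot */
};

static int queue_full(const struct thread *t)
{
        /* Mirrors _queue_full(): both slots must hold a descriptor. */
        return t->req[0] != NULL && t->req[1] != NULL;
}

static int enqueue(struct thread *t, struct desc *d)
{
        int idx;

        if (queue_full(t))
                return -1;      /* caller must retry later */
        idx = (t->req[0] == NULL) ? 0 : 1;  /* first free slot wins */
        t->req[idx] = d;
        t->lstenq = idx;
        return idx;
}

int main(void)
{
        struct thread t = { { NULL, NULL }, 0 };
        struct desc a = { 1 }, b = { 2 };

        printf("a -> slot %d\n", enqueue(&t, &a));
        printf("b -> slot %d\n", enqueue(&t, &b));
        printf("full: %d\n", queue_full(&t));
        return 0;
}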
1005 struct dma_pl330_desc *desc;
1016 if (thrd->req[idx].desc != NULL) {
1020 if (thrd->req[idx].desc != NULL)
1034 desc = req->desc;
1036 ns = desc->rqcfg.nonsecure ? 1 : 0;
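The fragment around 1005-1036 is from the trigger path: of the two slots, the one enqueued earlier (1 - lstenq) is preferred so requests run in FIFO order, and the descriptor's nonsecure flag is folded into the go command that follows. A sketch of that selection, under the same two-slot model as above:

#include <stdio.h>
#include <stddef.h>

struct desc { int id; int nonsecure; };

struct thread {
        struct desc *req[2];
        int lstenq;
};

static struct desc *pick_next(struct thread *t, int *slot)
{
        int idx = 1 - t->lstenq;        /* older request first */

        if (t->req[idx] == NULL)
                idx = t->lstenq;        /* fall back to the newer one */
        if (t->req[idx] == NULL)
                return NULL;            /* nothing pending */
        *slot = idx;
        return t->req[idx];
}

int main(void)
{
        struct desc a = { 1, 0 }, b = { 2, 1 };
        struct thread t = { { &a, &b }, 1 };    /* b was enqueued last */
        int slot;
        struct desc *d = pick_next(&t, &slot);

        if (d)
                printf("run desc %d from slot %d, ns=%d\n",
                       d->id, slot, d->nonsecure ? 1 : 0);
        return 0;
}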
1099 struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;
1197 off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
1199 off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
1200 off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
1201 pxs->desc->peri);
1202 off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
1203 pxs->desc->peri);
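1197-1203 are the microcode emitters for peripheral transfers (DMAFLUSHP, DMAWFP, then the load/store pair). Every _emit_*() helper follows the same two-pass convention: with dry_run set it only returns the instruction's size so the caller can measure the program, and a second pass with dry_run clear actually writes the bytes. A standalone sketch of the pattern, with an illustrative opcode and a hypothetical build() in place of the driver's setup routines:

#include <stdio.h>

static int emit_flushp(int dry_run, unsigned char *buf, int peri)
{
        if (!dry_run) {
                buf[0] = 0x35;                  /* illustrative opcode */
                buf[1] = (unsigned char)(peri << 3);
        }
        return 2;                               /* size returned either way */
}

static int emit_end(int dry_run, unsigned char *buf)
{
        if (!dry_run)
                buf[0] = 0x00;
        return 1;
}

static int build(int dry_run, unsigned char *buf, int peri)
{
        int off = 0;

        off += emit_flushp(dry_run, &buf[off], peri);
        off += emit_end(dry_run, &buf[off]);
        return off;
}

int main(void)
{
        unsigned char buf[16];
        int need = build(1, buf, 3);            /* pass 1: size only */

        printf("need %d bytes\n", need);
        build(0, buf, 3);                       /* pass 2: really emit */
        return 0;
}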
1218 switch (pxs->desc->rqtype) {
1267 switch (pxs->desc->rqtype) {
1375 struct pl330_xfer *x = &pxs->desc->px;
1396 struct pl330_xfer *x = &pxs->desc->px;
1475 struct dma_pl330_desc *desc)
1484 switch (desc->rqtype) {
1506 if (desc->rqtype != DMA_MEM_TO_MEM &&
1507 desc->peri >= pl330->pcfg.num_peri) {
1510 __func__, __LINE__, desc->peri);
1523 desc->rqcfg.nonsecure = 0;
1525 desc->rqcfg.nonsecure = 1;
1527 ccr = _prepare_ccr(&desc->rqcfg);
1529 idx = thrd->req[0].desc == NULL ? 0 : 1;
1532 xs.desc = desc;
1546 thrd->req[idx].desc = desc;
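1475-1546 trace pl330_submit_req(): the peripheral id is validated against pcfg.num_peri, the nonsecure bit is chosen from the DMAC's operating mode, a CCR image is packed by _prepare_ccr(), and the descriptor lands in whichever req slot is free. The sketch below models only the CCR packing; the bit positions follow the PL330 channel control register layout (src_inc at bit 0, source burst size/length above it, the dst mirror starting at bit 14), but treat them as illustrative and defer to the driver's CC_* macros and the ARM TRM:

#include <stdio.h>
#include <stdint.h>

struct reqcfg {
        int src_inc, dst_inc;
        unsigned brst_size;     /* log2 of the beat size in bytes */
        unsigned brst_len;      /* 1..16 beats per burst */
};

static uint32_t prepare_ccr(const struct reqcfg *rqc)
{
        uint32_t ccr = 0;

        if (rqc->src_inc)
                ccr |= 1u << 0;                         /* SRC_INC */
        if (rqc->dst_inc)
                ccr |= 1u << 14;                        /* DST_INC */

        ccr |= (rqc->brst_size & 0x7) << 1;             /* SRC_BURST_SIZE */
        ccr |= ((rqc->brst_len - 1) & 0xf) << 4;        /* SRC_BURST_LEN */
        ccr |= (rqc->brst_size & 0x7) << 15;            /* DST_BURST_SIZE */
        ccr |= ((rqc->brst_len - 1) & 0xf) << 18;       /* DST_BURST_LEN */

        return ccr;
}

int main(void)
{
        struct reqcfg rqc = { 1, 1, 2, 16 };    /* 4-byte beats, 16 per burst */

        printf("ccr = 0x%08x\n", (unsigned)prepare_ccr(&rqc));
        return 0;
}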
1557 static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
1562 if (!desc)
1565 pch = desc->pchan;
1567 /* If desc aborted */
1573 desc->status = DONE;
1620 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
1621 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
1624 thrd->req[0].desc = NULL;
1625 thrd->req[1].desc = NULL;
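1557-1625 are the completion/teardown path: dma_pl330_rqcb() marks a descriptor DONE (after a NULL check, since a slot may be empty), and on an error both slots are flushed, older request first, before being cleared. A compact model with illustrative types:

#include <stdio.h>
#include <stddef.h>

enum op_err { ERR_NONE, ERR_ABORT };
enum desc_status { PREP, BUSY, DONE };

struct desc { enum desc_status status; };

struct thread {
        struct desc *req[2];
        int lstenq;
};

static void rqcb(struct desc *d, enum op_err err)
{
        if (!d)
                return;         /* mirrors the NULL check in dma_pl330_rqcb() */
        d->status = DONE;       /* completion, or abort, marks it DONE */
        printf("desc done, err=%d\n", (int)err);
}

static void flush_thread(struct thread *t)
{
        rqcb(t->req[1 - t->lstenq], ERR_ABORT);     /* older request first */
        rqcb(t->req[t->lstenq], ERR_ABORT);
        t->req[0] = NULL;
        t->req[1] = NULL;
}

int main(void)
{
        struct desc a = { BUSY };
        struct thread t = { { &a, NULL }, 0 };

        flush_thread(&t);
        return 0;
}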
1705 descdone = thrd->req[active].desc;
1706 thrd->req[active].desc = NULL;
1782 thrd->req[0].desc = NULL;
1783 thrd->req[1].desc = NULL;
1812 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
1813 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
1872 thrd->req[0].desc = NULL;
1878 thrd->req[1].desc = NULL;
2044 struct dma_pl330_desc *desc;
2047 list_for_each_entry(desc, &pch->work_list, node) {
2050 if (desc->status == BUSY || desc->status == PAUSED)
2053 ret = pl330_submit_req(pch->thread, desc);
2055 desc->status = BUSY;
2061 desc->status = DONE;
2063 __func__, __LINE__, desc->txd.cookie);
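2044-2063 are fill_queue(): walk the channel's work list, skip anything already BUSY or PAUSED, and hand the rest to pl330_submit_req(); success makes the descriptor BUSY, a full queue stops the walk, and a hard failure marks the descriptor DONE so the tasklet can complete it with an error. Modeled below with an array standing in for the kernel list and a stand-in submit():

#include <stdio.h>

enum desc_status { FREE, PREP, BUSY, PAUSED, DONE };

struct desc { enum desc_status status; };

/* Hypothetical stand-in for pl330_submit_req(): 0 on success,
 * -1 for "queue full, try later", -2 for a hard error. */
static int submit(struct desc *d, int *slots_left)
{
        (void)d;
        if (*slots_left == 0)
                return -1;
        (*slots_left)--;
        return 0;
}

static void fill_queue(struct desc *work, int n, int slots_left)
{
        for (int i = 0; i < n; i++) {
                struct desc *d = &work[i];
                int ret;

                if (d->status == BUSY || d->status == PAUSED)
                        continue;       /* already submitted */
                ret = submit(d, &slots_left);
                if (ret == 0)
                        d->status = BUSY;
                else if (ret == -1)
                        break;          /* both slots taken; retry later */
                else
                        d->status = DONE;   /* unacceptable request */
        }
}

int main(void)
{
        struct desc work[3] = { { PREP }, { PREP }, { PREP } };

        fill_queue(work, 3, 2);         /* only two hardware slots */
        for (int i = 0; i < 3; i++)
                printf("desc %d: %d\n", i, (int)work[i].status);
        return 0;
}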
2072 struct dma_pl330_desc *desc, *_dt;
2079 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2080 if (desc->status == DONE) {
2082 dma_cookie_complete(&desc->txd);
2083 list_move_tail(&desc->node, &pch->completed_list);
2105 desc = list_first_entry(&pch->completed_list,
2108 dmaengine_desc_get_callback(&desc->txd, &cb);
2111 desc->status = PREP;
2112 list_move_tail(&desc->node, &pch->work_list);
2121 desc->status = FREE;
2122 list_move_tail(&desc->node, &pch->dmac->desc_pool);
2125 dma_descriptor_unmap(&desc->txd);
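2072-2125 are the tasklet that drives the descriptor state machine: DONE entries migrate from work_list to completed_list, their cookies complete, and then each one is either reset to PREP and recycled onto the work list (cyclic channels) or set FREE and returned to the DMAC's pool. The transitions, compressed into a sketch:

#include <stdio.h>

enum desc_status { FREE, PREP, BUSY, PAUSED, DONE };

static const char *name(enum desc_status s)
{
        static const char *n[] = { "FREE", "PREP", "BUSY", "PAUSED", "DONE" };
        return n[s];
}

/* What the tasklet does with a descriptor on completed_list. */
static enum desc_status complete(int cyclic)
{
        return cyclic ? PREP    /* recycled onto the work list */
                      : FREE;   /* returned to the descriptor pool */
}

int main(void)
{
        printf("one-shot: DONE -> %s\n", name(complete(0)));
        printf("cyclic:   DONE -> %s\n", name(complete(1)));
        return 0;
}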
2283 struct dma_pl330_desc *desc;
2293 pch->thread->req[0].desc = NULL;
2294 pch->thread->req[1].desc = NULL;
2301 /* Mark all desc done */
2302 list_for_each_entry(desc, &pch->submitted_list, node) {
2303 desc->status = FREE;
2304 dma_cookie_complete(&desc->txd);
2307 list_for_each_entry(desc, &pch->work_list, node) {
2308 desc->status = FREE;
2309 dma_cookie_complete(&desc->txd);
2335 struct dma_pl330_desc *desc;
2345 list_for_each_entry(desc, &pch->work_list, node) {
2346 if (desc->status == BUSY)
2347 desc->status = PAUSED;
2380 struct dma_pl330_desc *desc)
2389 if (desc->rqcfg.src_inc) {
2391 addr = desc->px.src_addr;
2394 addr = desc->px.dst_addr;
2412 struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
2428 running = pch->thread->req[pch->thread->req_running].desc;
2430 last_enq = pch->thread->req[pch->thread->lstenq].desc;
2433 list_for_each_entry(desc, &pch->work_list, node) {
2434 if (desc->status == DONE)
2435 transferred = desc->bytes_requested;
2436 else if (running && desc == running)
2438 pl330_get_current_xferred_count(pch, desc);
2439 else if (desc->status == BUSY || desc->status == PAUSED)
2444 if (desc == last_enq)
2447 transferred = desc->bytes_requested;
2450 residual += desc->bytes_requested - transferred;
2451 if (desc->txd.cookie == cookie) {
2452 switch (desc->status) {
2468 if (desc->last)
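2380-2468 compute residue for tx_status: for every descriptor on the work list, decide how much of bytes_requested has actually moved (all of it if DONE; the hardware counter if it is the running one; nothing if it is the last-enqueued-but-unstarted one) and accumulate the remainder. A model with the counter read faked:

#include <stdio.h>

enum desc_status { PREP, BUSY, DONE };

struct desc {
        enum desc_status status;
        unsigned bytes_requested;
};

static unsigned residue(struct desc *work, int n,
                        struct desc *running, unsigned hw_count,
                        struct desc *last_enq)
{
        unsigned residual = 0;

        for (int i = 0; i < n; i++) {
                struct desc *d = &work[i];
                unsigned transferred;

                if (d->status == DONE)
                        transferred = d->bytes_requested;
                else if (d == running)
                        transferred = hw_count;     /* read from the DMAC */
                else if (d->status == BUSY)
                        /* enqueued but not started, or finished but
                         * not yet marked DONE */
                        transferred = (d == last_enq) ? 0 : d->bytes_requested;
                else
                        transferred = 0;            /* still PREP */

                residual += d->bytes_requested - transferred;
        }
        return residual;
}

int main(void)
{
        struct desc work[3] = {
                { DONE, 4096 }, { BUSY, 4096 }, { PREP, 4096 },
        };

        /* work[1] is running and has moved 1024 bytes so far */
        printf("residue = %u\n", residue(work, 3, &work[1], 1024, &work[1]));
        return 0;
}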
2509 struct dma_pl330_desc *desc, *last = to_desc(tx);
2518 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2520 desc->txd.callback = last->txd.callback;
2521 desc->txd.callback_param = last->txd.callback_param;
2523 desc->last = false;
2525 dma_cookie_assign(&desc->txd);
2527 list_move_tail(&desc->node, &pch->submitted_list);
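2509-2527 are pl330_tx_submit(): a multi-segment transfer arrives as a chain of descriptors linked off the last one, every node gets its own cookie in order, and only the tail keeps last = true so residue reporting knows where the transaction ends. Flattened into an array for the sketch:

#include <stdio.h>
#include <stdbool.h>

struct desc {
        int cookie;
        bool last;
};

static int submit_chain(struct desc *chain, int n, int next_cookie)
{
        for (int i = 0; i < n; i++) {
                chain[i].cookie = next_cookie++;
                chain[i].last = (i == n - 1);   /* only the tail is "last" */
        }
        return chain[n - 1].cookie;             /* cookie handed to the caller */
}

int main(void)
{
        struct desc chain[3] = {
                { 0, false }, { 0, false }, { 0, false },
        };
        int cookie = submit_chain(chain, 3, 100);

        printf("returned cookie %d, tail last=%d\n", cookie, (int)chain[2].last);
        return 0;
}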
2538 static inline void _init_desc(struct dma_pl330_desc *desc)
2540 desc->rqcfg.swap = SWAP_NO;
2541 desc->rqcfg.scctl = CCTRL0;
2542 desc->rqcfg.dcctl = CCTRL0;
2543 desc->txd.tx_submit = pl330_tx_submit;
2545 INIT_LIST_HEAD(&desc->node);
2552 struct dma_pl330_desc *desc;
2556 desc = kcalloc(count, sizeof(*desc), flg);
2557 if (!desc)
2563 _init_desc(&desc[i]);
2564 list_add_tail(&desc[i].node, pool);
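2538-2564 build the descriptor pool: add_desc() kcalloc()s a block of count descriptors, runs _init_desc() on each, and threads them onto the pool list. A malloc-based equivalent, with a singly linked free list standing in for the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct desc {
        struct desc *next;      /* stands in for the list_head */
        int status;
};

static int add_desc(struct desc **pool, int count)
{
        struct desc *block = calloc(count, sizeof(*block));

        if (!block)
                return -1;
        for (int i = 0; i < count; i++) {
                /* _init_desc() equivalent: set defaults, link into pool */
                block[i].status = 0;
                block[i].next = *pool;
                *pool = &block[i];
        }
        return 0;
}

int main(void)
{
        struct desc *pool = NULL;
        int n = 0;

        if (add_desc(&pool, 8))
                return 1;
        for (struct desc *d = pool; d; d = d->next)
                n++;
        printf("%d descriptors in the pool\n", n);
        return 0;
}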
2575 struct dma_pl330_desc *desc = NULL;
2581 desc = list_entry(pool->next,
2584 list_del_init(&desc->node);
2586 desc->status = PREP;
2587 desc->txd.callback = NULL;
2588 desc->txd.callback_result = NULL;
2593 return desc;
2600 struct dma_pl330_desc *desc;
2602 /* Pluck one desc from the pool of DMAC */
2603 desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);
2606 if (!desc) {
2613 desc = pluck_desc(&pool, &lock);
2614 WARN_ON(!desc || !list_empty(&pool));
2618 desc->pchan = pch;
2619 desc->txd.cookie = 0;
2620 async_tx_ack(&desc->txd);
2622 desc->peri = peri_id ? pch->chan.chan_id : 0;
2623 desc->rqcfg.pcfg = &pch->dmac->pcfg;
2625 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
2627 return desc;
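2575-2627 are the allocation fast path: pluck_desc() pops the first pool entry under the pool lock, and pl330_get_desc() falls back to allocating a fresh descriptor on demand when the pool runs dry. Sketched with the same free-list shape as above:

#include <stdio.h>
#include <stdlib.h>

struct desc { struct desc *next; };

static struct desc *pluck(struct desc **pool)
{
        struct desc *d = *pool;

        if (d)
                *pool = d->next;        /* list_del_init() equivalent */
        return d;
}

static struct desc *get_desc(struct desc **pool)
{
        struct desc *d = pluck(pool);

        if (!d)                         /* pool dry: make one on demand */
                d = calloc(1, sizeof(*d));
        return d;
}

int main(void)
{
        struct desc *pool = NULL;       /* deliberately empty */
        struct desc *d = get_desc(&pool);

        printf("got %s\n", d ? "a descriptor" : "nothing");
        free(d);
        return 0;
}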
2642 struct dma_pl330_desc *desc = pl330_get_desc(pch);
2644 if (!desc) {
2645 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
2660 fill_px(&desc->px, dst, src, len);
2662 return desc;
2666 static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
2668 struct dma_pl330_chan *pch = desc->pchan;
2674 burst_len >>= desc->rqcfg.brst_size;
2688 struct dma_pl330_desc *desc = NULL, *first = NULL;
2710 desc = pl330_get_desc(pch);
2711 if (!desc) {
2714 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
2723 desc = list_entry(first->node.next,
2725 list_move_tail(&desc->node, &pl330->desc_pool);
2737 desc->rqcfg.src_inc = 1;
2738 desc->rqcfg.dst_inc = 0;
2743 desc->rqcfg.src_inc = 0;
2744 desc->rqcfg.dst_inc = 1;
2752 desc->rqtype = direction;
2753 desc->rqcfg.brst_size = pch->burst_sz;
2754 desc->rqcfg.brst_len = pch->burst_len;
2755 desc->bytes_requested = period_len;
2756 fill_px(&desc->px, dst, src, period_len);
2759 first = desc;
2761 list_add_tail(&desc->node, &first->node);
2766 if (!desc)
2771 return &desc->txd;
2778 struct dma_pl330_desc *desc;
2788 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
2789 if (!desc)
2792 desc->rqcfg.src_inc = 1;
2793 desc->rqcfg.dst_inc = 1;
2794 desc->rqtype = DMA_MEM_TO_MEM;
2807 desc->rqcfg.brst_size = 0;
2808 while (burst != (1 << desc->rqcfg.brst_size))
2809 desc->rqcfg.brst_size++;
2811 desc->rqcfg.brst_len = get_burst_len(desc, len);
2817 desc->rqcfg.brst_len = 1;
2819 desc->bytes_requested = len;
2821 return &desc->txd;
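2778-2821 (prep_dma_memcpy) pick the burst size: the bus width in bytes is halved until source, destination and length are all aligned to it, and the loop at 2807-2809 then just computes log2 of the result, since brst_size is stored as a power-of-two exponent. Standalone:

#include <stdio.h>

static unsigned pick_brst_size(unsigned long dst, unsigned long src,
                               unsigned long len, unsigned bus_bytes)
{
        unsigned burst = bus_bytes;
        unsigned brst_size = 0;

        /* Shrink the burst until src, dst and len are all aligned to it. */
        while (burst > 1 && ((src | dst | len) & (burst - 1)))
                burst /= 2;

        while (burst != (1u << brst_size))      /* log2(burst) */
                brst_size++;
        return brst_size;
}

int main(void)
{
        /* 8-byte bus but 4-byte-aligned buffers: expect brst_size = 2 */
        printf("brst_size = %u\n", pick_brst_size(0x1004, 0x2004, 64, 8));
        return 0;
}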
2828 struct dma_pl330_desc *desc;
2836 desc = list_entry(first->node.next,
2838 list_move_tail(&desc->node, &pl330->desc_pool);
2851 struct dma_pl330_desc *first, *desc = NULL;
2868 desc = pl330_get_desc(pch);
2869 if (!desc) {
2873 "%s:%d Unable to fetch desc\n",
2881 first = desc;
2883 list_add_tail(&desc->node, &first->node);
2886 desc->rqcfg.src_inc = 1;
2887 desc->rqcfg.dst_inc = 0;
2888 fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
2891 desc->rqcfg.src_inc = 0;
2892 desc->rqcfg.dst_inc = 1;
2893 fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
2897 desc->rqcfg.brst_size = pch->burst_sz;
2898 desc->rqcfg.brst_len = pch->burst_len;
2899 desc->rqtype = direction;
2900 desc->bytes_requested = sg_dma_len(sg);
2903 /* Return the last desc in the chain */
2904 return &desc->txd;
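2851-2904 (prep_slave_sg) configure each segment by direction: MEM_TO_DEV increments the source across the buffer while the destination stays pinned on the peripheral FIFO (pch->fifo_dma), and DEV_TO_MEM is the mirror image; note fill_px() takes the destination address first. A sketch of that selection with made-up types:

#include <stdio.h>

enum dir { MEM_TO_DEV, DEV_TO_MEM };

struct px { unsigned long src, dst; };

struct cfg { int src_inc, dst_inc; };

static void fill_for_direction(enum dir d, struct cfg *c, struct px *px,
                               unsigned long mem, unsigned long fifo_dma)
{
        if (d == MEM_TO_DEV) {
                c->src_inc = 1;         /* walk the buffer */
                c->dst_inc = 0;         /* hammer the FIFO register */
                px->src = mem;
                px->dst = fifo_dma;
        } else {
                c->src_inc = 0;
                c->dst_inc = 1;
                px->src = fifo_dma;
                px->dst = mem;
        }
}

int main(void)
{
        struct cfg c;
        struct px px;

        fill_for_direction(MEM_TO_DEV, &c, &px, 0x80000000ul, 0x10010000ul);
        printf("src_inc=%d dst_inc=%d src=%#lx dst=%#lx\n",
               c.src_inc, c.dst_inc, px.src, px.dst);
        return 0;
}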
3092 dev_warn(&adev->dev, "unable to allocate desc\n");