Lines Matching defs:pq

20  * extended descriptor of an xor or pq descriptor
56 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
60 pq->coef[idx] = coef;
66 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
74 pq->coef[idx] = coef;
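
The two helpers matched above (lines 56-74) are from the Linux ioatdma driver's PQ (RAID6 P/Q) descriptor setup: a raw descriptor array is reinterpreted as a struct ioat_pq_descriptor so that one source address and its GF(256) coefficient can be written into slot idx. Below is a minimal standalone sketch of that cast-and-fill pattern; the struct layout and names are illustrative stand-ins, not the driver's real hw.h layout.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for a hardware PQ descriptor; the real
     * layout lives in drivers/dma/ioat/hw.h. Only the pattern of
     * viewing raw descriptor memory through a typed struct matters. */
    struct pq_desc {
        uint64_t src_addr[8];   /* bus addresses of the source buffers */
        uint8_t  coef[8];       /* per-source GF(256) multiplier for Q */
    };

    struct raw_desc {
        uint64_t field[9];      /* untyped view of the same memory */
    };

    /* Mirror of the matched pattern: reinterpret descs[0] as the PQ
     * view, then program source slot 'idx'. */
    static void pq_set_src_sketch(struct raw_desc *descs[], uint64_t addr,
                                  uint32_t offset, uint8_t coef, int idx)
    {
        struct pq_desc *pq = (struct pq_desc *)descs[0];

        pq->src_addr[idx] = addr + offset;
        pq->coef[idx] = coef;
    }

    int main(void)
    {
        struct pq_desc d = { { 0 }, { 0 } };
        struct raw_desc *descs[] = { (struct raw_desc *)&d };

        pq_set_src_sketch(descs, 0x1000, 0x200, 0x1d, 0);
        printf("src[0]=%#llx coef=%#x\n",
               (unsigned long long)d.src_addr[0], d.coef[0]);
        return 0;
    }
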
280 struct ioat_pq_descriptor *pq = desc->pq;
282 struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
283 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
287 " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
290 (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
291 desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
292 pq->ctl_f.int_en, pq->ctl_f.compl_write,
293 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
294 pq->ctl_f.src_cnt);
297 (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
298 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
299 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
300 dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
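
One detail worth noting in the dump above: the '%s%s' pair in the format string (line 287) prints which results the descriptor actually produces, because each *_disable bit suppresses its letter (line 293). A trivial standalone illustration:

    #include <stdio.h>

    /* Prints "pq", "p", "q" or "" depending on which results are
     * disabled, exactly as the ternaries at line 293 do. */
    static void show_pq_flags(int p_disable, int q_disable)
    {
        printf("pq: '%s%s'\n", p_disable ? "" : "p", q_disable ? "" : "q");
    }

    int main(void)
    {
        show_pq_flags(0, 0);    /* pq: 'pq' -- both results written */
        show_pq_flags(0, 1);    /* pq: 'p'  -- Q result disabled */
        show_pq_flags(1, 1);    /* pq: ''   -- neither result written */
        return 0;
    }
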
307 struct ioat_pq_descriptor *pq = desc->pq;
308 struct ioat_raw_descriptor *descs[] = { (void *)pq,
309 (void *)pq,
310 (void *)pq };
311 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
320 " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
323 (unsigned long long) pq->next,
324 desc->txd.flags, pq->size, pq->ctl,
325 pq->ctl_f.op, pq->ctl_f.int_en,
326 pq->ctl_f.compl_write,
327 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
328 pq->ctl_f.src_cnt);
332 pq->coef[i]);
334 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
335 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
350 struct ioat_pq_descriptor *pq;
393 pq = desc->pq;
402 descs[0] = (struct ioat_raw_descriptor *) pq;
416 pq->size = xfer_size;
417 pq->p_addr = dst[0] + offset;
418 pq->q_addr = dst[1] + offset;
419 pq->ctl = 0;
420 pq->ctl_f.op = op;
423 pq->ctl_f.wb_en = result ? 1 : 0;
424 pq->ctl_f.src_cnt = src_cnt_to_hw(s);
425 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
426 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
432 /* last pq descriptor carries the unmap parameters and fence bit */
437 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
441 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
442 pq->ctl_f.compl_write = 1;
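
Lines 419-426 show the driver's control-word discipline: pq->ctl = 0 clears the whole ctl union in one store, then individual ctl_f bitfields are set, and every flag test is collapsed with !! because assigning a raw mask result to a one-bit bitfield would silently truncate it to its low bit. A minimal sketch of both idioms, with a made-up flag value standing in for constants like DMA_PREP_FENCE:

    #include <assert.h>
    #include <stdio.h>

    #define PREP_FENCE (1u << 5)    /* made-up value for illustration */

    struct ctl_bits {
        unsigned int op:8;
        unsigned int fence:1;       /* one bit wide, like ctl_f.fence */
    };

    union ctl {
        unsigned int raw;
        struct ctl_bits f;
    };

    int main(void)
    {
        union ctl c;
        unsigned long flags = PREP_FENCE;

        c.raw = 0;                  /* clear everything in one store */
        c.f.op = 0x89;              /* then set fields individually */

        c.f.fence = flags & PREP_FENCE;     /* WRONG: 0x20 truncates to 0 */
        assert(c.f.fence == 0);

        c.f.fence = !!(flags & PREP_FENCE); /* collapses to exactly 1 */
        assert(c.f.fence == 1);

        printf("fence=%u op=%#x\n", c.f.fence, c.f.op);
        return 0;
    }
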
472 struct ioat_pq_descriptor *pq;
485 * 16 source pq is only available on cb3.3 and has no completion
501 pq = desc->pq;
503 descs[0] = (struct ioat_raw_descriptor *) pq;
512 pq->sed_addr = desc->sed->dma;
530 pq->size = xfer_size;
531 pq->p_addr = dst[0] + offset;
532 pq->q_addr = dst[1] + offset;
533 pq->ctl = 0;
534 pq->ctl_f.op = op;
535 pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
538 pq->ctl_f.wb_en = result ? 1 : 0;
539 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
540 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
546 /* last pq descriptor carries the unmap parameters and fence bit */
551 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
554 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
555 pq->ctl_f.compl_write = 1;
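
The 16-source variant (lines 501-555) differs from the 8-source path in one structural way, visible at line 512: sources beyond those that fit in the base descriptor live in a separately DMA-mapped "super extended descriptor" (sed), and the base descriptor carries only its bus address in sed_addr, which the engine dereferences on its own. A rough sketch of that indirection; the names and layout here are assumptions for illustration, not the driver's actual sed definition:

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative only: a block holding the overflow sources and
     * coefficients for the 9-16 source case. */
    struct sed_block {
        uint64_t src_addr[8];
        uint8_t  coef[8];
    };

    struct sed_ent {
        struct sed_block *hw;   /* CPU mapping of the block */
        uint64_t dma;           /* bus address the hardware will fetch */
    };

    struct pq_desc16 {
        uint64_t sed_addr;      /* link the engine follows for extra srcs */
    };

    int main(void)
    {
        struct sed_ent sed = { .hw = calloc(1, sizeof(*sed.hw)),
                               .dma = 0xdead000 };  /* fake bus address */
        struct pq_desc16 pq = { 0 };

        pq.sed_addr = sed.dma;  /* mirrors 'pq->sed_addr = desc->sed->dma' */
        free(sed.hw);
        return (int)(pq.sed_addr != 0xdead000);
    }
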
619 ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
630 pq[0] = pq[1];
632 pq[1] = pq[0];
640 __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
642 __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
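
Lines 630 and 632 handle disabled results: the engine still needs a dereferenceable bus address in both result slots, so the disabled slot is pointed at the enabled one rather than being left unset. Standalone illustration; the flag values are stand-ins for the real DMA_PREP_PQ_DISABLE_* constants:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;

    #define DISABLE_P (1u << 0)     /* stand-in for DMA_PREP_PQ_DISABLE_P */
    #define DISABLE_Q (1u << 1)     /* stand-in for DMA_PREP_PQ_DISABLE_Q */

    static void alias_disabled_result(dma_addr_t pq[2], unsigned long flags)
    {
        if (flags & DISABLE_P)
            pq[0] = pq[1];  /* P unused: borrow Q's valid address */
        if (flags & DISABLE_Q)
            pq[1] = pq[0];  /* Q unused: borrow P's valid address */
    }

    int main(void)
    {
        dma_addr_t pq[2] = { 0 /* P never set by caller */, 0x2000 };

        alias_disabled_result(pq, DISABLE_P);
        assert(pq[0] == 0x2000);    /* hardware never sees a bad address */
        return 0;
    }
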
651 dma_addr_t pq[2];
661 pq[0] = dst;
663 pq[1] = dst; /* specify valid address for disabled result */
666 __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
668 __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
678 dma_addr_t pq[2];
693 pq[0] = src[0];
695 pq[1] = pq[0]; /* specify valid address for disabled result */
698 __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
700 __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
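
The last two groups (lines 651-668 and 678-700) show XOR being expressed through the PQ engine: P is the plain, unweighted XOR of the sources, so generating or checking an XOR needs only the P result, with Q disabled and the coefficient array zeroed as a don't-care. In the validate case, src[0] doubles as the expected parity (pq[0] = src[0] at line 693) and only the remaining src_cnt - 1 buffers are fed to the operation. A sketch of the validate setup; prep_pq_val() is a hypothetical stand-in for the driver's __ioat_prep_pq_lock() path and DISABLE_Q for DMA_PREP_PQ_DISABLE_Q:

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t dma_addr_t;

    #define DISABLE_Q (1u << 1)     /* stand-in flag value */

    /* Hypothetical stub standing in for the real prep routine. */
    static void prep_pq_val(dma_addr_t pq[2], const dma_addr_t *src,
                            unsigned int src_cnt, const unsigned char *scf,
                            unsigned long flags)
    {
        (void)pq; (void)src; (void)src_cnt; (void)scf; (void)flags;
    }

    static void xor_val_as_pq(const dma_addr_t *src, unsigned int src_cnt,
                              unsigned long flags)
    {
        unsigned char scf[8];
        dma_addr_t pq[2];

        memset(scf, 0, sizeof(scf));    /* Q coefficients: don't care */
        pq[0] = src[0];                 /* expected XOR parity to check */
        flags |= DISABLE_Q;             /* only the P (xor) result runs */
        pq[1] = pq[0];                  /* valid address for disabled Q */

        /* validate src[1..n-1] against the parity in src[0] */
        prep_pq_val(pq, &src[1], src_cnt - 1, scf, flags);
    }

    int main(void)
    {
        dma_addr_t src[3] = { 0x1000, 0x2000, 0x3000 };

        xor_val_as_pq(src, 3, 0);
        return 0;
    }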