Lines Matching refs:ring

38 	 * assume each interface/ring to be in its own memory domain
511 /* ring base address */
512 writel(lower_32_bits(priv->ring[i].cdr.base_dma),
514 writel(upper_32_bits(priv->ring[i].cdr.base_dma),
559 /* ring base address */
560 writel(lower_32_bits(priv->ring[i].rdr.base_dma),
562 writel(upper_32_bits(priv->ring[i].rdr.base_dma),
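The CDR/RDR setup at lines 511-562 programs each ring's 64-bit descriptor base as two 32-bit MMIO writes. A minimal sketch of that pattern, assuming hypothetical register offsets in place of the driver's EIP197_HIA_xDR_* definitions:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical offsets, standing in for the EIP197_HIA_xDR_* macros. */
#define RING_BASE_ADDR_LO	0x00
#define RING_BASE_ADDR_HI	0x04

/* Program a ring's descriptor base: low word first, then high word. */
static void ring_write_base(void __iomem *ring_regs, dma_addr_t base_dma)
{
	writel(lower_32_bits(base_dma), ring_regs + RING_BASE_ADDR_LO);
	writel(upper_32_bits(base_dma), ring_regs + RING_BASE_ADDR_HI);
}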
585 /* enable ring interrupt */
599 dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
721 /* Clear interrupts for this ring */
805 /* Called with ring's lock taken */
807 int ring)
809 int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
817 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
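Lines 805-817 (safexcel_try_push_requests, called with the ring's lock held) clamp the number of queued requests to EIP197_MAX_BATCH_SZ and write it as the result ring's threshold, so one interrupt can cover a whole batch. A sketch of that idea; the batch constant and register offset below are placeholders, not the driver's values:

#include <linux/io.h>
#include <linux/minmax.h>

#define MAX_BATCH_SZ	64	/* illustrative stand-in for EIP197_MAX_BATCH_SZ */
#define RDR_THRESH	0x08	/* placeholder for the xDR threshold offset */

/* Ask for one interrupt per batch of up to MAX_BATCH_SZ results. */
static void ring_set_coalesce(void __iomem *rdr_regs, int pending)
{
	int coal = min_t(int, pending, MAX_BATCH_SZ);

	writel(coal, rdr_regs + RDR_THRESH);
}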
820 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
829 req = priv->ring[ring].req;
830 backlog = priv->ring[ring].backlog;
835 spin_lock_bh(&priv->ring[ring].queue_lock);
836 backlog = crypto_get_backlog(&priv->ring[ring].queue);
837 req = crypto_dequeue_request(&priv->ring[ring].queue);
838 spin_unlock_bh(&priv->ring[ring].queue_lock);
841 priv->ring[ring].req = NULL;
842 priv->ring[ring].backlog = NULL;
848 ret = ctx->send(req, ring, &commands, &results);
869 * the request and the backlog for the next dequeue call (per-ring).
871 priv->ring[ring].req = req;
872 priv->ring[ring].backlog = backlog;
878 spin_lock_bh(&priv->ring[ring].lock);
880 priv->ring[ring].requests += nreq;
882 if (!priv->ring[ring].busy) {
883 safexcel_try_push_requests(priv, ring);
884 priv->ring[ring].busy = true;
887 spin_unlock_bh(&priv->ring[ring].lock);
891 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
895 EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
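The safexcel_dequeue() fragments at lines 820-895 show the per-ring submission pattern: the software crypto_queue is drained under queue_lock, each request is handed to the context's send() callback, and the request/busy accounting is updated under the ring's second lock before the prepared-descriptor counts are written to hardware. A stripped-down sketch of that flow, with simplified state and a hypothetical dispatch() standing in for ctx->send():

#include <linux/spinlock.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

/* Reduced per-ring state, mirroring the fields seen in the listing. */
struct ring_state {
	spinlock_t queue_lock;		/* protects queue */
	struct crypto_queue queue;
	spinlock_t lock;		/* protects requests/busy */
	int requests;
	bool busy;
};

/* Pull one request off the ring's software queue and submit it. */
static void ring_dequeue_one(struct ring_state *r,
			     int (*dispatch)(struct crypto_async_request *))
{
	struct crypto_async_request *req, *backlog;

	spin_lock_bh(&r->queue_lock);
	backlog = crypto_get_backlog(&r->queue);
	req = crypto_dequeue_request(&r->queue);
	spin_unlock_bh(&r->queue_lock);

	if (!req)
		return;

	if (backlog)	/* recent-kernel API; older code called ->complete() directly */
		crypto_request_complete(backlog, -EINPROGRESS);

	dispatch(req);

	spin_lock_bh(&r->lock);
	r->requests++;		/* the driver counts commands/results per request */
	if (!r->busy) {
		/* ring was idle: this is where the threshold gets armed */
		r->busy = true;
	}
	spin_unlock_bh(&r->lock);
}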
941 int ring,
945 int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
947 priv->ring[ring].rdr_req[i] = req;
951 safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
953 int i = safexcel_ring_first_rdr_index(priv, ring);
955 return priv->ring[ring].rdr_req[i];
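Lines 941-955 map result descriptors back to their originating requests: a per-ring array of request pointers is written at the result-descriptor slot when a request is queued, and read back from the oldest slot on completion. A simplified stand-in for the safexcel_ring_*_index() helpers:

#include <linux/crypto.h>

/* Parallel array: one request pointer per result-descriptor slot. */
struct rdr_map {
	struct crypto_async_request **req;
	int first;	/* oldest in-flight slot, advanced on completion */
};

static void rdr_req_set(struct rdr_map *m, int slot,
			struct crypto_async_request *req)
{
	m->req[slot] = req;
}

static struct crypto_async_request *rdr_req_get(struct rdr_map *m)
{
	return m->req[m->first];
}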
958 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
964 cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
975 dma_addr_t ctxr_dma, int ring)
983 cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
994 rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
1001 safexcel_rdr_req_set(priv, ring, rdesc, async);
1006 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
1012 int ring)
1022 nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
1029 req = safexcel_rdr_req_get(priv, ring);
1032 ndesc = ctx->handle_result(priv, ring, req,
1054 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
1063 spin_lock_bh(&priv->ring[ring].lock);
1065 priv->ring[ring].requests -= handled;
1066 safexcel_try_push_requests(priv, ring);
1068 if (!priv->ring[ring].requests)
1069 priv->ring[ring].busy = false;
1071 spin_unlock_bh(&priv->ring[ring].lock);
1079 safexcel_dequeue(data->priv, data->ring);
1084 int ring;
1091 int ring = irq_data->ring, rc = IRQ_NONE;
1094 status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
1099 if (status & EIP197_RDR_IRQ(ring)) {
1100 stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
1115 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
1119 writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
1128 int ring = irq_data->ring;
1130 safexcel_handle_result_descriptor(priv, ring);
1132 queue_work(priv->ring[ring].workqueue,
1133 &priv->ring[ring].work_data.work);
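Lines 1091-1133 split interrupt handling into two stages: the hard IRQ handler reads and acknowledges the ring's status (returning IRQ_NONE if this ring raised nothing), and the threaded handler drains the result descriptors before queueing the per-ring work item so process context can push more requests. A sketch of that split; apart from the names taken from the listing, everything here is illustrative:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct ring_irq_data {
	struct workqueue_struct *wq;	/* the per-ring workqueue */
	struct work_struct work;	/* runs the dequeue path */
	/* per-ring register/base pointers would live here */
};

static irqreturn_t ring_irq(int irq, void *data)
{
	/* read the ring's enabled-status register, ack it, and return
	 * IRQ_NONE if nothing is pending (omitted here) */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t ring_irq_thread(int irq, void *data)
{
	struct ring_irq_data *rd = data;

	/* drain completed result descriptors (handle_result_descriptor) */

	/* then let process context refill the ring */
	queue_work(rd->wq, &rd->work);
	return IRQ_HANDLED;
}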
1162 snprintf(irq_name, 6, "ring%d", irqid);
1330 /* Cannot currently support more rings than we have ring AICs! */
1563 /* Scan for ring AICs */
1571 /* Low-end EIP196 may not have any ring AICs ...
1573 dev_err(priv->dev, "No ring interrupt controller present!\n");
1595 * Request MSI vectors for global + 1 per ring -
1610 /* Register the ring IRQ handlers and configure the rings */
1611 priv->ring = devm_kcalloc(dev, priv->config.rings,
1612 sizeof(*priv->ring),
1614 if (!priv->ring)
1623 &priv->ring[i].cdr,
1624 &priv->ring[i].rdr);
1630 priv->ring[i].rdr_req = devm_kcalloc(dev,
1632 sizeof(*priv->ring[i].rdr_req),
1634 if (!priv->ring[i].rdr_req) {
1646 ring_irq->ring = i;
1656 dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
1661 priv->ring[i].irq = irq;
1662 priv->ring[i].work_data.priv = priv;
1663 priv->ring[i].work_data.ring = i;
1664 INIT_WORK(&priv->ring[i].work_data.work,
1668 priv->ring[i].workqueue =
1670 if (!priv->ring[i].workqueue) {
1675 priv->ring[i].requests = 0;
1676 priv->ring[i].busy = false;
1678 crypto_init_queue(&priv->ring[i].queue,
1681 spin_lock_init(&priv->ring[i].lock);
1682 spin_lock_init(&priv->ring[i].queue_lock);
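The probe-time loop at lines 1611-1682 allocates the ring array and gives each ring its own workqueue, software crypto queue and pair of locks. A sketch of that per-ring setup, assuming simplified state and an illustrative queue depth and workqueue type:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <crypto/algapi.h>

struct ring_ctx {
	struct workqueue_struct *workqueue;
	struct crypto_queue queue;
	spinlock_t lock;
	spinlock_t queue_lock;
	int requests;
	bool busy;
};

static int rings_init(struct device *dev, struct ring_ctx **out, int nrings)
{
	struct ring_ctx *r;
	int i;

	r = devm_kcalloc(dev, nrings, sizeof(*r), GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	for (i = 0; i < nrings; i++) {
		/* one ordered queue per ring keeps its completions serialized */
		r[i].workqueue = alloc_ordered_workqueue("ring%d_wq", 0, i);
		if (!r[i].workqueue)
			return -ENOMEM;	/* unwinding of earlier queues omitted */

		r[i].requests = 0;
		r[i].busy = false;
		crypto_init_queue(&r[i].queue, 32 /* illustrative depth */);
		spin_lock_init(&r[i].lock);
		spin_lock_init(&r[i].queue_lock);
	}

	*out = r;
	return 0;
}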
1703 if (priv->ring[i].irq)
1704 irq_set_affinity_hint(priv->ring[i].irq, NULL);
1705 if (priv->ring[i].workqueue)
1706 destroy_workqueue(priv->ring[i].workqueue);
1812 irq_set_affinity_hint(priv->ring[i].irq, NULL);
1813 destroy_workqueue(priv->ring[i].workqueue);
1976 destroy_workqueue(priv->ring[i].workqueue);