Lines Matching defs:ieq

337 							cq->vsi->ieq;
876 ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
882 "PUDA: error ieq qp destroy done\n");
912 rsrc = vsi->ieq;
914 vsi->ieq = NULL;
996 * irdma_puda_create_rsrc - create resource (ilq or ieq)
1040 vsi->ieq = vmem->va;
1052 /* Initialize all ieq lists */
1252 * @ieq: ieq resource
1253 * @rxlist: ieq's received buffer list
1258 static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
1299 irdma_puda_ret_bufpool(ieq, buf);
1311 irdma_puda_ret_bufpool(ieq, buf);
1317 * @rxlist: resource list for received ieq buffers
1360 * @ieq: ieq resource
1365 static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
1377 ieq->partials_handled++;
1386 txbuf = irdma_puda_get_bufpool(ieq);
1393 irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
1398 if (ieq->check_crc) {
1399 status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
1402 ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
1409 if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1412 irdma_puda_send_buf(ieq, txbuf);
1422 irdma_puda_ret_bufpool(ieq, txbuf);
1428 * irdma_ieq_process_buf - process buffer rcvd for ieq
1429 * @ieq: ieq resource
1433 static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
1455 ibdev_dbg(to_ibdev(ieq->dev),
1467 if (ieq->check_crc)
1468 ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
1472 ibdev_dbg(to_ibdev(ieq->dev),
1478 ieq->fpdu_processed++;
1485 txbuf = irdma_puda_get_bufpool(ieq);
1494 if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1509 irdma_puda_send_buf(ieq, txbuf);
1513 irdma_puda_ret_bufpool(ieq, buf);
1522 return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
1530 * @ieq: ieq resource
1533 struct irdma_puda_rsrc *ieq)
1545 ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
1555 status = irdma_ieq_process_buf(ieq, pfpdu, buf);
1560 irdma_puda_ret_bufpool(ieq, buf);
1562 ieq->crc_err++;
1565 irdma_ieq_mpa_crc_ae(ieq->dev, qp);
1588 * @ieq: ieq resource
1592 static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
1616 irdma_ieq_cleanup_qp(ieq, qp);
1617 ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
1629 (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
1630 (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
1632 ieq->pmode_count++;
1639 ieq->bad_seq_num++;
1661 irdma_ieq_process_fpdus(qp, ieq);
1663 irdma_ieq_process_fpdus(qp, ieq);
1670 irdma_puda_ret_bufpool(ieq, buf);
1682 struct irdma_puda_rsrc *ieq = vsi->ieq;
1684 u32 wqe_idx = ieq->compl_rxwqe_idx;
1688 ieq->stats_bad_qp_id++;
1689 irdma_puda_ret_bufpool(ieq, buf);
1691 irdma_ieq_handle_exception(ieq, qp, buf);
1694 * ieq->rx_wqe_idx is used by irdma_puda_replenish_rq()
1697 if (!ieq->rxq_invalid_cnt)
1698 ieq->rx_wqe_idx = wqe_idx;
1699 ieq->rxq_invalid_cnt++;
1709 struct irdma_puda_rsrc *ieq = vsi->ieq;
1712 irdma_puda_ret_bufpool(ieq, buf);
1717 * @ieq: ieq resource
1720 void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
1727 irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
1737 irdma_puda_ret_bufpool(ieq, buf);
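
Read together, the matches above show the IEQ buffer-handling pattern: borrow a transmit buffer with irdma_puda_get_bufpool(), optionally verify the MPA CRC via irdma_ieq_check_mpacrc() when ieq->check_crc is set, then either pass the buffer to irdma_puda_send_buf() or return it with irdma_puda_ret_bufpool() on failure. The sketch below is illustrative only: irdma_ieq_forward_fpdu() is a hypothetical helper name, and the argument list of irdma_ieq_check_mpacrc() beyond the first two parameters is assumed, not taken from the driver.

	/* Illustrative sketch of the get/check/send-or-return pattern seen in the
	 * matches above. Not driver code: the helper name, the mpa_crc parameter,
	 * and the CRC-check argument order past hash_desc/data are assumptions.
	 */
	static int irdma_ieq_forward_fpdu(struct irdma_puda_rsrc *ieq,
					  void *fpdu, u16 fpdu_len, u32 mpa_crc)
	{
		struct irdma_puda_buf *txbuf;
		int status;

		/* borrow a transmit buffer from the ieq buffer pool */
		txbuf = irdma_puda_get_bufpool(ieq);
		if (!txbuf)
			return -ENOBUFS;

		/* copy the reassembled FPDU into the transmit buffer */
		memcpy(txbuf->data, fpdu, fpdu_len);

		/* optional software MPA CRC verification */
		if (ieq->check_crc) {
			status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
							fpdu_len, mpa_crc);
			if (status) {
				/* bad CRC: give the buffer back to the pool */
				irdma_puda_ret_bufpool(ieq, txbuf);
				return status;
			}
		}

		/* hand the verified FPDU back to the hardware */
		irdma_puda_send_buf(ieq, txbuf);
		return 0;
	}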