Lines matching refs:qp in the qedr GSI (QP1) code

58 void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
64 dev->gsi_qp = qp;
76 struct qedr_qp *qp = dev->gsi_qp;
81 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
88 spin_lock_irqsave(&qp->q_lock, flags);
89 qedr_inc_sw_gsi_cons(&qp->sq);
90 spin_unlock_irqrestore(&qp->q_lock, flags);
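
The fragments at lines 58-90 show the transmit side of the GSI path: the GSI QP pointer is cached on the device (dev->gsi_qp = qp) so the LL2 callbacks can find it, and the TX-completion handler advances a software consumer index on the send queue under the QP lock. Below is a minimal userspace sketch of that bookkeeping; the struct layout and the modulo-max_wr wrap in inc_sw_gsi_cons() are assumptions reconstructed from the field names in the listing, not the driver's own definitions.

/* Minimal model of the GSI send-queue consumer bookkeeping.
 * Field names (prod, cons, gsi_cons, max_wr) follow the listing;
 * the real qedr structures and locking are more involved. */
#include <stdio.h>

struct gsi_queue {
	unsigned int prod;	/* entries posted by post_send/post_recv */
	unsigned int cons;	/* entries handed back by poll_cq */
	unsigned int gsi_cons;	/* entries completed by the LL2 callbacks */
	unsigned int max_wr;	/* ring size */
};

/* Assumed shape of qedr_inc_sw_gsi_cons(): advance with wrap-around. */
void inc_sw_gsi_cons(struct gsi_queue *q)
{
	q->gsi_cons = (q->gsi_cons + 1) % q->max_wr;
}

int main(void)
{
	struct gsi_queue sq = { .max_wr = 4 };

	/* In the driver this runs in the TX-completion callback with
	 * qp->q_lock held (spin_lock_irqsave at line 88). */
	inc_sw_gsi_cons(&sq);
	inc_sw_gsi_cons(&sq);

	printf("sq.gsi_cons=%u, sq.cons=%u (poll_cq drains until they match)\n",
	       sq.gsi_cons, sq.cons);
	return 0;
}
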
101 struct qedr_qp *qp = dev->gsi_qp;
104 spin_lock_irqsave(&qp->q_lock, flags);
106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
110 qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
112 *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
114 *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
117 qedr_inc_sw_gsi_cons(&qp->rq);
119 spin_unlock_irqrestore(&qp->q_lock, flags);
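
Lines 101-119 are the receive-completion side: per-packet metadata (error indication, VLAN tag, length, source MAC) is stashed in qp->rqe_wr_id[qp->rq.gsi_cons] before the consumer index advances, so that poll_cq can later turn the slot into a work completion. The sketch below models that capture; the slot and descriptor types are placeholders inferred from the listing, and the -EINVAL used for a length error is an assumption (the listing truncates the actual value).

/* Simplified model of the RX-completion bookkeeping: metadata for the
 * received packet is written into the slot at rq.gsi_cons.  All types
 * here are placeholders inferred from the listing. */
#include <errno.h>
#include <stdint.h>
#include <string.h>

struct sge { uint32_t length; };

struct rqe_meta {
	uint64_t wr_id;
	int rc;			/* 0 on success, an error code otherwise */
	uint16_t vlan;
	struct sge sg_list[1];
	uint8_t smac[6];
};

struct rx_info {		/* stand-in for the LL2 rx descriptor */
	int data_length_error;
	uint16_t vlan;
	uint32_t packet_length;
	uint8_t smac[6];
};

void complete_rx(struct rqe_meta *slot, const struct rx_info *d)
{
	/* In the driver this runs with qp->q_lock held (line 104), and the
	 * MAC is stored as a u32 + u16 pair (lines 112-114); a memcpy is
	 * the equivalent here. */
	slot->rc = d->data_length_error ? -EINVAL : 0;	/* error value assumed */
	slot->vlan = d->vlan;
	slot->sg_list[0].length = d->packet_length;
	memcpy(slot->smac, d->smac, sizeof(slot->smac));
	/* the caller then advances rq.gsi_cons, as at line 117 */
}
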
158 " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
165 " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
172 " create gsi qp: failed. max_send_wr is too large %d>%d\n",
263 struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
323 struct qedr_qp *qp)
331 rc = qedr_ll2_start(dev, attrs, qp);
333 DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
338 qp->ibqp.qp_num = 1;
339 qp->rq.max_wr = attrs->cap.max_recv_wr;
340 qp->sq.max_wr = attrs->cap.max_send_wr;
342 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
344 if (!qp->rqe_wr_id)
346 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
348 if (!qp->wqe_wr_id)
351 qedr_store_gsi_qp_cq(dev, qp, attrs);
358 DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
363 kfree(qp->rqe_wr_id);
367 DP_ERR(dev, "create gsi qp: failed destroy on create\n");
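
Lines 263-367 cover GSI QP creation: LL2 is started first, the QP is assigned the well-known GSI QP number 1, the requested WR counts are recorded, and two arrays are allocated to track outstanding receive and send work requests; on allocation failure the receive-side array is freed and the error path logs a failed teardown. A compact userspace sketch of that sequence, with calloc/free standing in for kcalloc/kfree and ll2_start()/ll2_stop() as placeholders for the driver's LL2 setup and teardown:

/* Sketch of the GSI QP create sequence reconstructed from lines
 * 331-367: start LL2, set qp_num = 1, allocate per-WR tracking
 * arrays, and unwind on failure.  The error-path ordering is an
 * assumption. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct wr_slot { uint64_t wr_id; };

struct gsi_qp {
	int qp_num;
	unsigned int rq_max_wr, sq_max_wr;
	struct wr_slot *rqe_wr_id;
	struct wr_slot *wqe_wr_id;
};

static int ll2_start(void) { return 0; }	/* placeholder */
static void ll2_stop(void) { }			/* placeholder */

int create_gsi_qp(struct gsi_qp *qp, unsigned int recv_wr, unsigned int send_wr)
{
	if (ll2_start())
		return -EIO;		/* "failed on ll2 start" */

	qp->qp_num = 1;			/* the GSI QP is always QP1 */
	qp->rq_max_wr = recv_wr;
	qp->sq_max_wr = send_wr;

	qp->rqe_wr_id = calloc(qp->rq_max_wr, sizeof(*qp->rqe_wr_id));
	if (!qp->rqe_wr_id)
		goto err_stop;

	qp->wqe_wr_id = calloc(qp->sq_max_wr, sizeof(*qp->wqe_wr_id));
	if (!qp->wqe_wr_id)
		goto err_free_rq;

	return 0;			/* "created GSI QP" */

err_free_rq:
	free(qp->rqe_wr_id);		/* mirrors kfree(qp->rqe_wr_id) */
err_stop:
	ll2_stop();			/* "failed destroy on create" if this fails */
	return -ENOMEM;
}
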
380 struct qedr_qp *qp,
449 udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
491 struct qedr_qp *qp,
504 rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
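
Lines 380-504 build the UD header for each GSI send; line 449 shows the BTH PSN being taken from a per-QP counter and masked to 24 bits before the big-endian store. A tiny standalone demonstration of that masking (only the mask and byte order are taken from the listing; the surrounding UD header handling is not reproduced):

/* The BTH PSN field is 24 bits wide, so the per-QP counter is masked
 * with ((1 << 24) - 1) before being stored big-endian, as in line 449. */
#include <arpa/inet.h>	/* htonl() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sq_psn = 0xfffffe;	/* two increments away from the wrap */

	for (int i = 0; i < 3; i++) {
		uint32_t psn = (sq_psn++) & ((1 << 24) - 1);
		printf("psn=0x%06x  bth.psn (big-endian)=0x%08x\n",
		       psn, htonl(psn));
	}
	return 0;
}

The third iteration prints psn=0x000000, i.e. the counter wraps at 2^24 exactly as the mask intends.
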
545 struct qedr_qp *qp = get_qedr_qp(ibqp);
546 struct qedr_dev *dev = qp->dev;
550 if (qp->state != QED_ROCE_QP_STATE_RTS) {
554 qp->state);
573 spin_lock_irqsave(&qp->q_lock, flags);
575 rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
577 spin_unlock_irqrestore(&qp->q_lock, flags);
584 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
585 qedr_inc_sw_prod(&qp->sq);
586 DP_DEBUG(qp->dev, QEDR_MSG_GSI,
595 spin_unlock_irqrestore(&qp->q_lock, flags);
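
Lines 545-595 are the send path: the post is rejected unless the QP is in RTS, and under the QP lock the raw packet is built, the caller's wr_id is recorded at the send-queue producer slot, and the producer index advances. A condensed sketch; the state names mirror the QED_ROCE_QP_STATE_* values in the listing, build_packet() stands in for qedr_gsi_build_packet(), and the modulo wrap for qedr_inc_sw_prod() is an assumption.

/* Condensed model of the GSI post_send flow from lines 545-595. */
#include <errno.h>
#include <stdint.h>

enum qp_state { QP_STATE_RESET, QP_STATE_RTR, QP_STATE_RTS };

struct send_wr { uint64_t wr_id; };

struct gsi_sq {
	unsigned int prod, max_wr;
	struct { uint64_t wr_id; } *wqe_wr_id;
};

static int build_packet(const struct send_wr *wr)	/* placeholder */
{
	(void)wr;
	return 0;
}

int gsi_post_send(enum qp_state state, struct gsi_sq *sq, const struct send_wr *wr)
{
	if (state != QP_STATE_RTS)
		return -EINVAL;			/* line 550: RTS only */

	/* qp->q_lock is held from here in the driver (line 573) */
	if (build_packet(wr))
		return -EINVAL;			/* unlock and bail, as at line 577 */

	sq->wqe_wr_id[sq->prod].wr_id = wr->wr_id;	/* line 584 */
	sq->prod = (sq->prod + 1) % sq->max_wr;		/* qedr_inc_sw_prod, assumed wrap */
	/* unlock qp->q_lock (line 595) */
	return 0;
}
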
615 struct qedr_qp *qp = get_qedr_qp(ibqp);
619 if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
620 (qp->state != QED_ROCE_QP_STATE_RTS)) {
624 qp->state);
628 spin_lock_irqsave(&qp->q_lock, flags);
651 memset(&qp->rqe_wr_id[qp->rq.prod], 0,
652 sizeof(qp->rqe_wr_id[qp->rq.prod]));
653 qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
654 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
656 qedr_inc_sw_prod(&qp->rq);
661 spin_unlock_irqrestore(&qp->q_lock, flags);
665 spin_unlock_irqrestore(&qp->q_lock, flags);
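
Lines 615-665 are the receive path, which mirrors post_send: the QP must be in RTR or RTS, and under the lock the slot at the receive-queue producer index is zeroed, the first SGE and the wr_id are copied in, and the producer advances. A short sketch under the same placeholder types as the RX-completion model above:

/* Condensed model of the GSI post_recv flow from lines 615-665. */
#include <errno.h>
#include <stdint.h>
#include <string.h>

enum qp_state { QP_STATE_RESET, QP_STATE_RTR, QP_STATE_RTS };

struct sge { uint32_t length; };
struct recv_wr { uint64_t wr_id; struct sge sg_list[1]; };
struct rqe_meta {
	uint64_t wr_id;
	int rc;
	uint16_t vlan;
	struct sge sg_list[1];
	uint8_t smac[6];
};

struct gsi_rq {
	unsigned int prod, max_wr;
	struct rqe_meta *rqe_wr_id;
};

int gsi_post_recv(enum qp_state state, struct gsi_rq *rq, const struct recv_wr *wr)
{
	if (state != QP_STATE_RTR && state != QP_STATE_RTS)
		return -EINVAL;				/* lines 619-624 */

	/* qp->q_lock is held from here in the driver (line 628) */
	memset(&rq->rqe_wr_id[rq->prod], 0, sizeof(rq->rqe_wr_id[rq->prod]));
	rq->rqe_wr_id[rq->prod].sg_list[0] = wr->sg_list[0];	/* line 653 */
	rq->rqe_wr_id[rq->prod].wr_id = wr->wr_id;		/* line 654 */
	rq->prod = (rq->prod + 1) % rq->max_wr;	/* qedr_inc_sw_prod, assumed wrap */
	/* unlock qp->q_lock (line 661) */
	return 0;
}
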
674 struct qedr_qp *qp = dev->gsi_qp;
681 while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
684 wc[i].qp = &qp->ibqp;
685 wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
688 wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
691 wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
693 ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
696 vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
701 wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
705 qedr_inc_sw_cons(&qp->rq);
709 while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
712 wc[i].qp = &qp->ibqp;
713 wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
717 qedr_inc_sw_cons(&qp->sq);
724 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
725 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
726 qp->sq.gsi_cons, qp->ibqp.qp_num);
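
Lines 674-726 are the GSI poll_cq: it first drains receive completions while rq.cons lags behind rq.gsi_cons, turning the stashed metadata into work completions (status from rc, byte length from the first SGE, source MAC, VLAN ID, and the service level taken from the VLAN priority bits), then drains send completions the same way against sq.gsi_cons, and finally logs the debug line quoted above. A simplified sketch of the receive half; the wc layout, the 0x0fff/0xe000 masks, and the status mapping approximate the kernel's ib_wc and VLAN definitions rather than reproduce them.

/* Simplified model of the receive half of the GSI poll_cq loop
 * (lines 674-705). */
#include <stdint.h>
#include <string.h>

struct sge { uint32_t length; };
struct rqe_meta {
	uint64_t wr_id;
	int rc;
	uint16_t vlan;
	struct sge sg_list[1];
	uint8_t smac[6];
};

struct gsi_rq {
	unsigned int cons, gsi_cons, max_wr;
	struct rqe_meta *rqe_wr_id;
};

struct wc {			/* stripped-down stand-in for struct ib_wc */
	uint64_t wr_id;
	int status;		/* 0 = success, nonzero = general error */
	uint32_t byte_len;
	uint8_t smac[6];
	uint16_t vlan_id;
	uint8_t sl;
};

int poll_rq(struct gsi_rq *rq, struct wc *wc, int num_entries)
{
	int i = 0;

	/* qp->q_lock is held around this loop in the driver */
	while (i < num_entries && rq->cons != rq->gsi_cons) {
		const struct rqe_meta *m = &rq->rqe_wr_id[rq->cons];

		memset(&wc[i], 0, sizeof(wc[i]));
		wc[i].wr_id = m->wr_id;				/* line 685 */
		wc[i].status = m->rc ? 1 : 0;			/* line 688 */
		wc[i].byte_len = m->sg_list[0].length;		/* line 691 */
		memcpy(wc[i].smac, m->smac, sizeof(wc[i].smac));/* line 693 */
		wc[i].vlan_id = m->vlan & 0x0fff;		/* line 696, VID bits */
		wc[i].sl = (m->vlan & 0xe000) >> 13;		/* line 701, PCP bits */

		rq->cons = (rq->cons + 1) % rq->max_wr;	/* qedr_inc_sw_cons, assumed wrap */
		i++;
	}
	return i;	/* the send queue is drained the same way */
}
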