Lines Matching refs:ibqp (only in /freebsd-13-stable/sys/dev/mthca/)

260 event.element.qp  = &qp->ibqp;
261 if (qp->ibqp.event_handler)
262 qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
424 int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
427 struct mthca_dev *dev = to_mdev(ibqp->device);
428 struct mthca_qp *qp = to_mqp(ibqp);
543 static int __mthca_modify_qp(struct ib_qp *ibqp,
547 struct mthca_dev *dev = to_mdev(ibqp->device);
548 struct mthca_qp *qp = to_mqp(ibqp);
609 if (qp->ibqp.uobject)
611 cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
651 if (ibqp->qp_type == IB_QPT_RC &&
653 u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;
693 qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
719 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
743 if (ibqp->srq)
758 qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);
768 if (ibqp->srq)
770 to_msrq(ibqp->srq)->srqn);
818 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
819 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
820 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
821 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
822 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
842 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
845 struct mthca_dev *dev = to_mdev(ibqp->device);
846 struct mthca_qp *qp = to_mqp(ibqp);
863 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
904 err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1425 send_cq = to_mcq(qp->ibqp.send_cq);
1426 recv_cq = to_mcq(qp->ibqp.recv_cq);
1453 if (!qp->ibqp.uobject) {
1455 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1466 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
1493 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1514 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
1518 if (!sqp->qp.ibqp.qp_num)
1529 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
1536 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
1600 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1603 struct mthca_dev *dev = to_mdev(ibqp->device);
1604 struct mthca_qp *qp = to_mqp(ibqp);
1631 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1803 int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1806 struct mthca_dev *dev = to_mdev(ibqp->device);
1807 struct mthca_qp *qp = to_mqp(ibqp);
1832 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1914 int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1917 struct mthca_dev *dev = to_mdev(ibqp->device);
1918 struct mthca_qp *qp = to_mqp(ibqp);
1972 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
2154 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2157 struct mthca_dev *dev = to_mdev(ibqp->device);
2158 struct mthca_qp *qp = to_mqp(ibqp);
2173 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2234 if (qp->ibqp.srq && !is_send) {
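Every hit above revolves around the same embedding: mthca keeps a struct ib_qp (the ibqp field) inside its driver-private struct mthca_qp, takes its address when reporting events (&qp->ibqp), and converts back from the core pointer with helpers such as to_mqp() and to_mdev() in the verbs entry points (mthca_modify_qp, mthca_tavor_post_send, and so on). Below is a minimal sketch of that container_of pattern; the struct members shown are simplified stand-ins assumed for illustration, not the real definitions from mthca_dev.h or the ib_verbs headers.

```c
#include <stddef.h>

/* Simplified stand-ins for the real structures (fields assumed for
 * illustration only). */
struct ib_qp {
	void *qp_context;
	int   qp_type;
};

struct mthca_qp {
	int          qpn;
	struct ib_qp ibqp;   /* core QP embedded in the driver QP */
};

/* container_of-style conversion used by to_mqp(): given a pointer to the
 * embedded ibqp member, step back to the enclosing mthca_qp. */
static struct mthca_qp *
to_mqp(struct ib_qp *ibqp)
{
	return (struct mthca_qp *)((char *)ibqp -
	    offsetof(struct mthca_qp, ibqp));
}
```

This is why the listing alternates between the two directions: event delivery and CQ cleanup start from the driver object and pass &qp->ibqp (or read qp->ibqp.send_cq, qp->ibqp.srq, qp->ibqp.uobject), while the entry points that receive a struct ib_qp * from the verbs layer first call to_mqp(ibqp) (and to_mdev(ibqp->device)) before touching driver-private state such as qp->sq and qp->rq.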