Lines Matching refs:qp

190 static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
194 struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
202 if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
208 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
238 dseg = hns_roce_get_extend_sge(qp,
239 idx & (qp->sge.sge_cnt - 1));
249 static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
256 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
268 static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
270 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
271 int mtu = ib_mtu_enum_to_int(qp->path_mtu);
273 if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
276 len, qp->max_inline_data, mtu);
283 static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
287 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
300 if (!check_inl_data_len(qp, msg_len))
316 ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
335 struct hns_roce_qp *qp = to_hr_qp(ibqp);
340 (*sge_ind) & (qp->sge.sge_cnt - 1));
345 return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
363 set_extend_sge(qp, wr->sg_list + i, sge_ind,
462 static inline int set_ud_wqe(struct hns_roce_qp *qp,
487 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
490 curr_idx & (qp->sge.sge_cnt - 1));
493 qp->qkey : ud_wr(wr)->remote_qkey);
500 qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
502 set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
510 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
564 static inline int set_rc_wqe(struct hns_roce_qp *qp,
569 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
597 ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
606 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
616 struct hns_roce_qp *qp)
618 if (unlikely(qp->state == IB_QPS_ERR)) {
619 flush_cqe(hr_dev, qp);
623 hr_reg_write(&sq_db, DB_TAG, qp->qpn);
625 hr_reg_write(&sq_db, DB_PI, qp->sq.head);
626 hr_reg_write(&sq_db, DB_SL, qp->sl);
628 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
633 struct hns_roce_qp *qp)
635 if (unlikely(qp->state == IB_QPS_ERR)) {
636 flush_cqe(hr_dev, qp);
638 if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
639 *qp->rdb.db_record =
640 qp->rq.head & V2_DB_PRODUCER_IDX_M;
644 hr_reg_write(&rq_db, DB_TAG, qp->qpn);
646 hr_reg_write(&rq_db, DB_PI, qp->rq.head);
649 qp->rq.db_reg);
668 static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
676 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
678 qp->sl >> HNS_ROCE_SL_SHIFT);
679 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
681 hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
690 struct hns_roce_qp *qp = to_hr_qp(ibqp);
699 spin_lock_irqsave(&qp->sq.lock, flags);
701 ret = check_send_valid(hr_dev, qp);
708 sge_idx = qp->next_sge;
711 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
717 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
719 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
720 ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
721 wr->num_sge, qp->sq.max_gs);
727 wqe = hns_roce_get_send_wqe(qp, wqe_idx);
728 qp->sq.wrid[wqe_idx] = wr->wr_id;
730 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
734 ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
736 ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
746 qp->sq.head += nreq;
747 qp->next_sge = sge_idx;
750 (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
751 write_dwqe(hr_dev, qp, wqe);
753 update_sq_db(hr_dev, qp);
756 spin_unlock_irqrestore(&qp->sq.lock, flags);
2619 struct ib_qp *qp;
2626 qp = &hr_qp->ibqp;
2627 qp->device = ibdev;
2629 ret = hns_roce_create_qp(qp, init_attr, NULL);
2631 ibdev_err(ibdev, "failed to create qp for free mr.\n");
2647 struct ib_qp *qp;
2652 qp = &free_mr->rsv_qp[i]->ibqp;
2653 hns_roce_v2_destroy_qp(qp, NULL);
2749 ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
2772 ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
2786 ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
3433 "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
3642 wc->qp = &hr_qp->ibqp;
3676 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3730 flush_cqe(hr_dev, qp);
3890 struct hns_roce_qp *qp = *cur_qp;
3906 ret = get_cur_qp(hr_cq, cqe, &qp);
3910 wc->qp = &qp->ibqp;
3917 wq = &qp->sq;
3922 if (qp->sq_signal_bits)
3931 if (qp->ibqp.srq) {
3932 srq = to_hr_srq(qp->ibqp.srq);
3936 wq = &qp->rq;
3944 get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
4253 * when modifying qp. If software need modify some fields in context,
4309 * when modifying qp. If software need modify some fields in context,
4342 /* Search qp buf's mtts */
4359 * when modifying qp. If software need modify some fields in context,
4417 /* search qp buf's mtts */
4437 * when modifying qp. If software need modify some fields in context,
4506 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4514 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4914 * If free_mr_en of qp is set, it means that this qp comes from
4915 * free mr. This qp will perform the loopback operation.
5260 * when modifying qp. If software need modify some fields in context,
5529 /* Modify qp to reset before destroying qp */
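A note on the send-path arithmetic visible in the matches above: lines such as
"wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)" and the owner-bit
expression "~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1)" use the
standard power-of-two ring pattern, where the slot index is the low bits of a
free-running producer count and the owner bit flips on every wrap. The sketch
below is a minimal standalone illustration of that arithmetic only; it is not
driver code, and names like wqe_cnt, sq_head and ilog2_u32 are stand-ins
introduced for the example, assuming the queue depth is a power of two as it
is in the driver.

	#include <stdio.h>

	/* Integer log2 of a power-of-two value, standing in for the kernel's ilog2(). */
	static unsigned int ilog2_u32(unsigned int v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		const unsigned int wqe_cnt = 8;	/* SQ depth, always a power of two */
		unsigned int sq_head = 0;	/* producer index, grows without wrapping */

		for (unsigned int nreq = 0; nreq < 20; nreq++) {
			/* Slot in the ring: low bits of the producer index. */
			unsigned int wqe_idx = (sq_head + nreq) & (wqe_cnt - 1);

			/* Owner bit: flips each time the producer wraps the ring,
			 * so a consumer can tell a freshly written entry from a
			 * stale one left over from the previous pass.
			 */
			unsigned int owner_bit =
				~((sq_head + nreq) >> ilog2_u32(wqe_cnt)) & 0x1;

			printf("req %2u -> wqe_idx %u, owner_bit %u\n",
			       nreq, wqe_idx, owner_bit);
		}

		return 0;
	}

Running the sketch shows wqe_idx cycling 0..7 while owner_bit stays 1 for the
first pass through the ring and drops to 0 after the first wrap, which is the
behaviour the masked index and owner-bit lines in the listing depend on.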