Lines Matching refs:qp

7 #include <linux/mlx5/qp.h>
54 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
88 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
91 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size,
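
handle_post_send_edge() and mlx5r_memcpy_send_wqe() recur throughout the matches below: the send queue is laid out in fragments, and a WQE under construction can reach the edge of the current fragment mid-write. A standalone sketch of that copy-across-a-boundary idea (the function and buffer names here are illustrative, not the driver's fragment bookkeeping):

#include <stdio.h>
#include <string.h>

/* Illustrative only: copy len bytes into a buffer that is split at "edge";
 * once the write reaches the edge, continue in the next chunk. This mirrors
 * the role mlx5r_memcpy_send_wqe()/handle_post_send_edge() play above,
 * without the driver's real fragment bookkeeping.
 */
static void copy_across_edge(char *dst, char *edge, char *next_chunk,
			     const char *src, size_t len)
{
	size_t first = (size_t)(edge - dst);

	if (len <= first) {
		memcpy(dst, src, len);
		return;
	}
	memcpy(dst, src, first);                      /* fill up to the edge */
	memcpy(next_chunk, src + first, len - first); /* continue in next chunk */
}

int main(void)
{
	char chunk0[8] = { 0 }, chunk1[8] = { 0 };

	copy_across_edge(chunk0 + 5, chunk0 + 8, chunk1, "abcdefg", 7);
	printf("%.3s|%.4s\n", chunk0 + 5, chunk1);    /* abc|defg */
	return 0;
}
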
260 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
278 if (unlikely(inl > qp->max_inline_data))
285 handle_post_send_edge(&qp->sq, wqe,
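
Line 278 rejects a request whose inline payload exceeds the QP's limit. A minimal standalone sketch of that bounds check (summing the scatter/gather lengths, the helper name, and the -ENOMEM return are assumptions for this sketch, not taken from the listing):

#include <errno.h>
#include <stdio.h>

/* Illustrative only: total the scatter/gather lengths and refuse the request
 * when the sum exceeds the inline limit negotiated at QP creation, mirroring
 * the "inl > qp->max_inline_data" test on line 278.
 */
static int check_inline_len(const unsigned int *sge_len, int num_sge,
			    unsigned int max_inline_data)
{
	unsigned int inl = 0;

	for (int i = 0; i < num_sge; i++)
		inl += sge_len[i];

	return inl > max_inline_data ? -ENOMEM : 0;
}

int main(void)
{
	unsigned int lens[] = { 64, 64 };

	printf("%d\n", check_inline_len(lens, 2, 128));  /* 0: fits           */
	printf("%d\n", check_inline_len(lens, 2, 100));  /* -ENOMEM: too long */
	return 0;
}
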
419 struct mlx5_ib_qp *qp, void **seg, int *size,
514 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
523 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
558 struct mlx5_ib_qp *qp, void **seg, int *size,
565 u32 pdn = to_mpd(qp->ibqp.pd)->pdn;
571 unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
591 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
597 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
599 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
635 static int set_reg_wr(struct mlx5_ib_qp *qp,
641 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
654 to_mdev(qp->ibqp.device),
660 mlx5_ib_warn(to_mdev(qp->ibqp.device),
673 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
678 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
681 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
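
From the set_reg_wr() matches alone, the visible shape of a memory-registration WQE build is: look up the PD, warn and fail on invalid input, write two segments with an edge check before each, then copy the MR's descriptor list into the WQE. A comment outline of just those visible steps (segment names are not shown in this listing and are omitted):

/*
 * set_reg_wr(), as visible in the matched lines (outline only):
 *
 *   pd = to_mpd(qp->ibqp.pd);                        // line 641
 *   validate the request, mlx5_ib_warn() on failure; // lines 654, 660
 *   handle_post_send_edge(&qp->sq, ...);             // line 673
 *   ... write one fixed-size segment ...
 *   handle_post_send_edge(&qp->sq, ...);             // line 678
 *   ... write a second fixed-size segment ...
 *   mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size,
 *                         mr->descs, ...);           // line 681
 */
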
692 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
698 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
702 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
705 static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
713 p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
716 idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
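
dump_wqe() walks the WQE and wraps its index with a mask because the queue depth is a power of two. A standalone illustration of the wrap on line 716 (values are hypothetical):

#include <stdio.h>

/* Standalone illustration of the power-of-two index wrap on line 716:
 * wqe_cnt is a power of two, so "(idx + 1) & (wqe_cnt - 1)" steps through
 * the queue and wraps to 0 without a division.
 */
int main(void)
{
	unsigned int wqe_cnt = 8;      /* must be a power of two */
	unsigned int idx = 6;

	for (int i = 0; i < 4; i++) {
		printf("idx = %u\n", idx);        /* 6, 7, 0, 1 */
		idx = (idx + 1) & (wqe_cnt - 1);
	}
	return 0;
}
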
724 int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
729 if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
732 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
733 *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
737 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
743 *cur_edge = qp->sq.cur_edge;
748 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
753 return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
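
begin_wqe()/mlx5r_begin_wqe() first check for send-queue overflow and then derive the slot for the new WQE from cur_post (lines 729-733). A simplified standalone sketch of those two steps (the structure and field names below are illustrative; the real mlx5r_wq_overflow() also consults the send CQ, which is not visible in this listing):

#include <stdbool.h>
#include <stdio.h>

/* Simplified sketch: would the queue overflow if nreq more WQEs were posted,
 * and which slot does the next WQE land in. "posted"/"completed" are demo
 * names, not the driver's fields.
 */
struct sq_demo {
	unsigned int cur_post;    /* producer counter, never masked     */
	unsigned int completed;   /* consumer-side counter (simplified) */
	unsigned int wqe_cnt;     /* queue depth, power of two          */
};

static bool sq_would_overflow(const struct sq_demo *sq, unsigned int nreq)
{
	return sq->cur_post - sq->completed + nreq > sq->wqe_cnt;
}

static unsigned int sq_slot(const struct sq_demo *sq)
{
	return sq->cur_post & (sq->wqe_cnt - 1);   /* as on line 732 */
}

int main(void)
{
	struct sq_demo sq = { .cur_post = 14, .completed = 8, .wqe_cnt = 8 };

	printf("overflow=%d slot=%u\n", sq_would_overflow(&sq, 4), sq_slot(&sq));
	return 0;
}
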
758 void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
764 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
766 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
768 if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
771 qp->sq.wrid[idx] = wr_id;
772 qp->sq.w_list[idx].opcode = mlx5_opcode;
773 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
774 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
775 qp->sq.w_list[idx].next = qp->sq.cur_post;
781 qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
782 get_sq_edge(&qp->sq, qp->sq.cur_post &
783 (qp->sq.wqe_cnt - 1)) :
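
mlx5r_finish_wqe() advances the producer counter by whole basic blocks (line 774): the accumulated size is in 16-byte units and a send-WQE basic block is 64 bytes. A standalone illustration of that rounding (macros redefined here only for the demo):

#include <stdio.h>

#define MLX5_SEND_WQE_BB 64
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Standalone illustration of line 774: a 96-byte WQE (size = 6 units of
 * 16 bytes) spans two 64-byte basic blocks, so cur_post advances by 2.
 */
int main(void)
{
	unsigned int cur_post = 10;
	int size = 6;                 /* hypothetical WQE size in 16-byte units */

	cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	printf("cur_post = %u\n", cur_post);   /* 12 */
	return 0;
}
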
794 static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
798 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
800 set_linv_wr(qp, seg, size, cur_edge);
803 static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
807 qp->sq.wr_data[idx] = IB_WR_REG_MR;
809 return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
812 static int handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
824 err = mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
836 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
844 struct mlx5_ib_qp *qp,
858 qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;
873 err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false);
877 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx,
880 err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
904 err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
909 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
913 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
919 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
925 qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
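
The handle_reg_mr_integrity() matches show that a single IB_WR_REG_MR_INTEGRITY request expands into several consecutive WQEs, finished one after another, before a small initiator fence is latched for the next post. A comment outline built only from the matched lines (what each handle_psv() call targets is not identifiable from this listing):

/*
 * handle_reg_mr_integrity(), as visible in the matched lines (outline only):
 *
 *   qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;        // line 858
 *   set_reg_wr(qp, &reg_pi_wr, ...); mlx5r_finish_wqe();  // lines 873, 877
 *   begin_wqe(qp, ...);                                    // line 880
 *   set_pi_umr_wr(wr, qp, ...);      mlx5r_finish_wqe();  // lines 904, 909
 *   handle_psv(dev, qp, ...);                              // line 913
 *   handle_psv(dev, qp, ...);                              // line 919
 *   qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;      // line 925
 */
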
931 static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
954 handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
959 err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
966 err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
994 static void handle_qpt_hw_gsi(struct mlx5_ib_qp *qp,
1001 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
1004 static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
1010 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
1012 /* handle qp that supports ud offload */
1013 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
1020 set_eth_seg(wr, qp, seg, size, cur_edge);
1021 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
1025 void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
1028 struct mlx5_bf *bf = &qp->bf;
1030 qp->sq.head += nreq;
1037 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
1057 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1077 if (qp->type == IB_QPT_GSI)
1080 spin_lock_irqsave(&qp->sq.lock, flags);
1091 if (unlikely(num_sge > qp->sq.max_gs)) {
1098 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
1113 if (qp->next_fence)
1118 fence = qp->next_fence;
1122 switch (qp->type) {
1129 err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
1144 if (unlikely(!dev->port_caps[qp->port - 1].has_smi)) {
1152 handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
1155 handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
1163 err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
1171 handle_post_send_edge(&qp->sq, &seg, size,
1184 qp->next_fence = next_fence;
1185 mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id,
1189 dump_wqe(qp, idx, size);
1194 mlx5r_ring_db(qp, nreq, ctrl);
1196 spin_unlock_irqrestore(&qp->sq.lock, flags);
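
Taken together, the mlx5_ib_post_send() matches outline the posting loop: take the SQ lock, build and finish one WQE per work request, then ring the doorbell once. The comment outline below is reconstructed only from the lines listed here; error paths and memory-barrier placement are omitted:

/*
 * mlx5_ib_post_send() flow, as visible in the matched lines (outline only):
 *
 *   if (qp->type == IB_QPT_GSI) ...                         // line 1077
 *   spin_lock_irqsave(&qp->sq.lock, flags);                 // line 1080
 *   for each work request:
 *       reject num_sge > qp->sq.max_gs;                     // line 1091
 *       begin_wqe(qp, &seg, &ctrl, wr, &idx, &size,
 *                 &cur_edge, nreq);                         // line 1098
 *       use qp->next_fence when it is set;                  // lines 1113, 1118
 *       build segments per qp->type (RC, GSI, UD, ...),
 *       inline data via set_data_inl_seg(), calling
 *       handle_post_send_edge() as segments cross edges;    // lines 1122-1171
 *       qp->next_fence = next_fence;                        // line 1184
 *       mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge,
 *                        idx, wr->wr_id, ...);              // line 1185
 *       dump_wqe(qp, idx, size);                            // line 1189
 *   mlx5r_ring_db(qp, nreq, ctrl);                          // line 1194
 *   spin_unlock_irqrestore(&qp->sq.lock, flags);            // line 1196
 */
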
1209 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1226 if (qp->type == IB_QPT_GSI)
1229 spin_lock_irqsave(&qp->rq.lock, flags);
1231 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
1234 if (mlx5r_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1240 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1246 scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
1247 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
1253 if (i < qp->rq.max_gs) {
1259 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
1261 set_sig_seg(sig, qp->rq.max_gs);
1264 qp->rq.wrid[ind] = wr->wr_id;
1266 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
1271 qp->rq.head += nreq;
1278 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
1281 spin_unlock_irqrestore(&qp->rq.lock, flags);
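
The mlx5_ib_post_recv() matches show the simpler receive side: under the RQ lock, one scatter list is written per work request at the masked head index, then the doorbell record is updated once with the low 16 bits of the new head. A comment outline from the matched lines only:

/*
 * mlx5_ib_post_recv() flow, as visible in the matched lines (outline only):
 *
 *   spin_lock_irqsave(&qp->rq.lock, flags);                  // line 1229
 *   ind = qp->rq.head & (qp->rq.wqe_cnt - 1);                // line 1231
 *   for each work request:
 *       check mlx5r_wq_overflow(&qp->rq, nreq, ...);         // line 1234
 *       reject wr->num_sge > qp->rq.max_gs;                  // line 1240
 *       scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);      // line 1246
 *       fill scatter entries; with MLX5_QP_FLAG_SIGNATURE,
 *       also set_sig_seg(sig, qp->rq.max_gs);                // lines 1247-1261
 *       qp->rq.wrid[ind] = wr->wr_id;                        // line 1264
 *       ind = (ind + 1) & (qp->rq.wqe_cnt - 1);              // line 1266
 *   qp->rq.head += nreq;                                     // line 1271
 *   *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);          // line 1278
 *   spin_unlock_irqrestore(&qp->rq.lock, flags);             // line 1281
 */
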