Lines Matching refs:qp

44 #include "qp.h"
80 struct mlx5_core_qp *qp;
146 static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
155 wqe_index = wqe_index & qp->sq.fbc.sz_m1;
158 p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
174 wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
175 p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
181 static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
184 struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
186 struct mlx5_ib_wq *wq = &qp->sq;
232 int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
235 struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
242 return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
245 return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
248 static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
251 struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
253 struct mlx5_ib_wq *wq = &qp->rq;
268 int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
271 struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
273 struct mlx5_ib_wq *wq = &qp->rq;
282 return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
321 struct mlx5_ib_qp *qp = to_mqp(ibqp);
334 err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
359 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
363 event.element.qp = ibqp;
391 qpe_work->type, qpe_work->qp->qpn);
402 mlx5_core_res_put(&qpe_work->qp->common);
406 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
408 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
413 to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
423 qpe_work->qp = qp;
430 mlx5_core_res_put(&qp->common);
434 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
444 qp->rq.max_gs = 0;
445 qp->rq.wqe_cnt = 0;
446 qp->rq.wqe_shift = 0;
450 int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
453 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
456 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
457 if ((1 << qp->rq.wqe_shift) /
461 qp->rq.max_gs =
462 (1 << qp->rq.wqe_shift) /
465 qp->rq.max_post = qp->rq.wqe_cnt;
474 qp->rq.wqe_cnt = wq_size / wqe_size;
482 qp->rq.wqe_shift = ilog2(wqe_size);
483 qp->rq.max_gs =
484 (1 << qp->rq.wqe_shift) /
487 qp->rq.max_post = qp->rq.wqe_cnt;
592 struct mlx5_ib_qp *qp)
611 qp->max_inline_data = wqe_size - sq_overhead(attr) -
613 attr->cap.max_inline_data = qp->max_inline_data;
616 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
617 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
620 qp->sq.wqe_cnt,
624 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
625 qp->sq.max_gs = get_send_sge(attr, wqe_size);
626 if (qp->sq.max_gs < attr->cap.max_send_sge)
629 attr->cap.max_send_sge = qp->sq.max_gs;
630 qp->sq.max_post = wq_size / wqe_size;
631 attr->cap.max_send_wr = qp->sq.max_post;
637 struct mlx5_ib_qp *qp,
642 int desc_sz = 1 << qp->sq.wqe_shift;
656 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
658 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
660 qp->sq.wqe_cnt,
666 qp->flags & IB_QP_CREATE_SOURCE_QPN) {
667 base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
668 qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
670 base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
671 (qp->sq.wqe_cnt << 6);
942 struct mlx5_ib_qp *qp, struct ib_udata *udata,
963 uar_flags = qp->flags_en &
978 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
993 qp->rq.offset = 0;
994 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
995 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
997 err = set_user_buf_size(dev, qp, ucmd, base, attr);
1045 qp->bfregn = bfregn;
1047 err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
1067 static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1075 mlx5_ib_db_unmap_user(context, &qp->db);
1082 if (qp->bfregn != MLX5_IB_INVALID_BFREG)
1083 mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
1088 kvfree(qp->sq.wqe_head);
1089 kvfree(qp->sq.w_list);
1090 kvfree(qp->sq.wrid);
1091 kvfree(qp->sq.wr_data);
1092 kvfree(qp->rq.wrid);
1093 if (qp->db.db)
1094 mlx5_db_free(dev->mdev, &qp->db);
1095 if (qp->buf.frags)
1096 mlx5_frag_buf_free(dev->mdev, &qp->buf);
1101 struct mlx5_ib_qp *qp, u32 **in, int *inlen,
1109 qp->bf.bfreg = &dev->fp_bfreg;
1110 else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
1111 qp->bf.bfreg = &dev->wc_bfreg;
1113 qp->bf.bfreg = &dev->bfreg;
1118 qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
1119 uar_index = qp->bf.bfreg->index;
1121 err = calc_sq_size(dev, init_attr, qp);
1127 qp->rq.offset = 0;
1128 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
1129 base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
1132 &qp->buf, dev->mdev->priv.numa_node);
1138 if (qp->rq.wqe_cnt)
1139 mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
1140 ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
1142 if (qp->sq.wqe_cnt) {
1143 int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
1145 mlx5_init_fbc_offset(qp->buf.frags +
1146 (qp->sq.offset / PAGE_SIZE),
1148 ilog2(qp->sq.wqe_cnt),
1149 sq_strides_offset, &qp->sq.fbc);
1151 qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
1155 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
1165 MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1171 if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
1174 mlx5_fill_page_frag_array(&qp->buf,
1178 err = mlx5_db_alloc(dev->mdev, &qp->db);
1184 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
1185 sizeof(*qp->sq.wrid), GFP_KERNEL);
1186 qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
1187 sizeof(*qp->sq.wr_data), GFP_KERNEL);
1188 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
1189 sizeof(*qp->rq.wrid), GFP_KERNEL);
1190 qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
1191 sizeof(*qp->sq.w_list), GFP_KERNEL);
1192 qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
1193 sizeof(*qp->sq.wqe_head), GFP_KERNEL);
1195 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
1196 !qp->sq.w_list || !qp->sq.wqe_head) {
1204 kvfree(qp->sq.wqe_head);
1205 kvfree(qp->sq.w_list);
1206 kvfree(qp->sq.wrid);
1207 kvfree(qp->sq.wr_data);
1208 kvfree(qp->rq.wrid);
1209 mlx5_db_free(dev->mdev, &qp->db);
1215 mlx5_frag_buf_free(dev->mdev, &qp->buf);
1219 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
1221 if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
1222 (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
1224 else if (!qp->has_rq)
1231 struct mlx5_ib_qp *qp,
1243 if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
1244 MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
1569 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1575 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1585 if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
1587 if (qp->sq.wqe_cnt) {
1588 err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
1604 sq->base.container_mibqp = qp;
1608 if (qp->rq.wqe_cnt) {
1609 rq->base.container_mibqp = qp;
1611 if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
1613 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
1620 err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
1648 qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
1655 if (!qp->sq.wqe_cnt)
1665 struct mlx5_ib_qp *qp)
1667 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1671 if (qp->rq.wqe_cnt) {
1672 destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
1676 if (qp->sq.wqe_cnt) {
1678 destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
1682 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
1688 sq->sq = &qp->sq;
1689 rq->rq = &qp->rq;
1690 sq->doorbell = &qp->db;
1691 rq->doorbell = &qp->db;
1694 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1696 if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1699 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1700 to_mpd(qp->ibqp.pd)->uid);
1716 struct mlx5_ib_qp *qp,
1748 qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1750 if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
1753 if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
1878 qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
1883 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1892 params->resp.tirn = qp->rss_qp.tirn;
1912 qp->trans_qp.base.mqp.qpn = 0;
1913 qp->is_rss = true;
1922 struct mlx5_ib_qp *qp,
1929 allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
1992 static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2008 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2020 if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
2022 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
2024 if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
2026 if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
2036 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2042 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
2046 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
2049 base = &qp->trans_qp.base;
2055 base->container_mibqp = qp;
2061 list_add_tail(&qp->qps_list, &dev->qp_list);
2064 qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
2069 struct mlx5_ib_qp *qp,
2090 spin_lock_init(&qp->sq.lock);
2091 spin_lock_init(&qp->rq.lock);
2093 mlx5_st = to_mlx5_st(qp->type);
2098 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2100 base = &qp->trans_qp.base;
2102 qp->has_rq = qp_has_rq(init_attr);
2103 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
2109 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
2110 ucmd->rq_wqe_count != qp->rq.wqe_cnt)
2122 err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
2135 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
2138 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
2140 if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
2142 if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE)
2143 configure_requester_scat_cqe(dev, qp, init_attr, qpc);
2145 if (qp->rq.wqe_cnt) {
2146 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2147 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2150 if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) {
2158 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
2160 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2181 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2187 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
2191 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
2200 base->container_mibqp = qp;
2205 get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
2212 list_add_tail(&qp->qps_list, &dev->qp_list);
2216 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2218 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2225 destroy_qp(dev, qp, base, udata);
2230 struct mlx5_ib_qp *qp,
2251 spin_lock_init(&qp->sq.lock);
2252 spin_lock_init(&qp->rq.lock);
2254 mlx5_st = to_mlx5_st(qp->type);
2259 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2261 if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
2262 qp->underlay_qpn = init_attr->source_qpn;
2265 qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
2266 &qp->raw_packet_qp.rq.base :
2267 &qp->trans_qp.base;
2269 qp->has_rq = qp_has_rq(init_attr);
2270 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
2276 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
2277 ucmd->rq_wqe_count != qp->rq.wqe_cnt)
2290 err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
2296 qp->port = init_attr->port_num;
2306 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
2309 if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
2312 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
2314 if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
2316 if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
2318 if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
2320 if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
2329 if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
2330 (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
2331 configure_requester_scat_cqe(dev, qp, init_attr, qpc);
2333 if (qp->rq.wqe_cnt) {
2334 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2335 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2341 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
2343 if (qp->sq.wqe_cnt) {
2344 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2376 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2382 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
2387 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
2391 qp->flags & IB_QP_CREATE_SOURCE_QPN) {
2392 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
2393 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
2394 err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
2403 base->container_mibqp = qp;
2408 get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
2415 list_add_tail(&qp->qps_list, &dev->qp_list);
2419 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2421 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2428 destroy_qp(dev, qp, base, udata);
2433 struct mlx5_ib_qp *qp,
2451 spin_lock_init(&qp->sq.lock);
2452 spin_lock_init(&qp->rq.lock);
2454 mlx5_st = to_mlx5_st(qp->type);
2459 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2461 base = &qp->trans_qp.base;
2463 qp->has_rq = qp_has_rq(attr);
2464 err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
2470 err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
2475 qp->port = attr->port_num;
2488 if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
2491 if (qp->rq.wqe_cnt) {
2492 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2493 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2496 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
2498 if (qp->sq.wqe_cnt)
2499 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2519 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2525 /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
2526 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
2529 if (qp->flags & IB_QP_CREATE_INTEGRITY_EN &&
2538 base->container_mibqp = qp;
2541 get_cqs(qp->type, attr->send_cq, attr->recv_cq,
2548 list_add_tail(&qp->qps_list, &dev->qp_list);
2552 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2554 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2561 destroy_qp(dev, qp, base, NULL);
2654 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2658 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2666 if (qp->is_rss) {
2667 destroy_rss_raw_qp_tir(dev, qp);
2671 base = (qp->type == IB_QPT_RAW_PACKET ||
2672 qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
2673 &qp->raw_packet_qp.rq.base :
2674 &qp->trans_qp.base;
2676 if (qp->state != IB_QPS_RESET) {
2677 if (qp->type != IB_QPT_RAW_PACKET &&
2678 !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
2686 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
2693 get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
2699 list_del(&qp->qps_list);
2701 list_del(&qp->cq_send_list);
2704 list_del(&qp->cq_recv_list);
2708 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
2716 if (qp->type == IB_QPT_RAW_PACKET ||
2717 qp->flags & IB_QP_CREATE_SOURCE_QPN) {
2718 destroy_raw_packet_qp(dev, qp);
2726 destroy_qp(dev, qp, base, udata);
2730 struct mlx5_ib_qp *qp,
2741 qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
2742 if (!qp->dct.in)
2745 MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
2746 dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
2755 if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
2762 qp->state = IB_QPS_RESET;
2845 bool cond, struct mlx5_ib_qp *qp)
2851 qp->flags_en |= flag;
2872 static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2886 qp->type = MLX5_IB_QPT_DCI;
2889 qp->type = MLX5_IB_QPT_DCT;
2892 if (qp->type != IB_QPT_DRIVER)
2901 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
2902 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
2905 qp);
2907 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
2909 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
2911 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
2913 if (qp->type == IB_QPT_RAW_PACKET) {
2918 cond, qp);
2921 qp);
2924 qp);
2927 if (qp->type == IB_QPT_RC)
2930 MLX5_CAP_GEN(mdev, qp_packet_based), qp);
2932 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
2933 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
2935 cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
2951 bool cond, struct mlx5_ib_qp *qp)
2957 qp->flags |= flag;
2973 static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2976 enum ib_qp_type qp_type = qp->type;
2990 qp);
2993 MLX5_CAP_GEN(mdev, sho), qp);
2996 MLX5_CAP_GEN(mdev, block_lb_mc), qp);
2998 MLX5_CAP_GEN(mdev, cd), qp);
3000 MLX5_CAP_GEN(mdev, cd), qp);
3002 MLX5_CAP_GEN(mdev, cd), qp);
3008 qp);
3011 cond, qp);
3018 IB_QP_CREATE_SCATTER_FCS, cond, qp);
3023 IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
3028 MLX5_CAP_GEN(mdev, end_pad), qp);
3031 qp_type != MLX5_IB_QPT_REG_UMR, qp);
3033 true, qp);
3087 struct mlx5_ib_qp *qp,
3093 err = create_rss_raw_qp_tir(dev, pd, qp, params);
3097 switch (qp->type) {
3099 err = create_dct(dev, pd, qp, params);
3102 err = create_dci(dev, pd, qp, params);
3105 err = create_xrc_tgt_qp(dev, qp, params);
3108 err = mlx5_ib_create_gsi(pd, qp, params->attr);
3111 rdma_restrack_no_track(&qp->ibqp.res);
3116 err = create_user_qp(dev, pd, qp, params);
3118 err = create_kernel_qp(dev, pd, qp, params);
3123 mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
3127 if (is_qp0(qp->type))
3128 qp->ibqp.qp_num = 0;
3129 else if (is_qp1(qp->type))
3130 qp->ibqp.qp_num = 1;
3132 qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
3136 qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
3146 static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3151 switch (qp->type) {
3168 mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
3173 static int get_qp_uidx(struct mlx5_ib_qp *qp,
3244 struct mlx5_ib_qp *qp = to_mqp(ibqp);
3280 mutex_init(&qp->mutex);
3281 qp->type = type;
3283 err = process_vendor_flags(dev, qp, params.ucmd, attr);
3287 err = get_qp_uidx(qp, &params);
3291 err = process_create_flags(dev, qp, attr);
3295 err = check_qp_attr(dev, qp, attr);
3299 err = create_qp(dev, pd, qp, &params);
3319 switch (qp->type) {
3321 mlx5_ib_destroy_dct(qp);
3324 mlx5_ib_destroy_gsi(qp);
3327 destroy_qp_common(dev, qp, udata);
3335 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
3337 struct mlx5_ib_dev *dev = to_mdev(qp->device);
3338 struct mlx5_ib_qp *mqp = to_mqp(qp);
3350 static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
3354 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
3361 dest_rd_atomic = qp->trans_qp.resp_depth;
3366 access_flags = qp->trans_qp.atomic_rd_en;
3376 atomic_mode = get_atomic_mode(dev, qp->type);
3515 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3548 if ((qp->type == IB_QPT_RC ||
3549 qp->type == IB_QPT_UC ||
3550 qp->type == IB_QPT_XRC_INI ||
3551 qp->type == IB_QPT_XRC_TGT) &&
3555 qp->ibqp.qp_num,
3590 if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
3592 &qp->raw_packet_qp.sq,
3593 sl & 0xf, qp->ibqp.pd);
3897 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3901 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
3904 int modify_rq = !!qp->rq.wqe_cnt;
3905 int modify_sq = !!qp->sq.wqe_cnt;
3944 qp->ibqp.pd);
3955 qp->ibqp.pd);
3966 raw_qp_param, qp->ibqp.pd);
4001 static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
4003 if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) ||
4004 (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) ||
4005 (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) ||
4006 (qp->type == MLX5_IB_QPT_DCI))
4011 static unsigned int get_tx_affinity(struct ib_qp *qp,
4018 struct mlx5_ib_dev *dev = to_mdev(qp->device);
4019 struct mlx5_ib_qp *mqp = to_mqp(qp);
4047 static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
4050 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
4055 if (!qp->rq.wqe_cnt)
4059 MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
4071 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
4074 struct mlx5_ib_dev *dev = to_mdev(qp->device);
4076 struct mlx5_ib_qp *mqp = to_mqp(qp);
4148 struct mlx5_ib_qp *qp = to_mqp(ibqp);
4149 struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
4161 mlx5_st = to_mlx5_st(qp->type);
4169 pd = to_mpd(qp->ibqp.pd);
4197 if (is_sqp(qp->type)) {
4200 } else if ((qp->type == IB_QPT_UD &&
4201 !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
4202 qp->type == MLX5_IB_QPT_REG_UMR) {
4228 if (is_sqp(qp->type))
4229 MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
4235 err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
4237 qp->port,
4247 err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
4256 get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
4284 err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
4298 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
4299 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
4303 qp->port) - 1;
4306 if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
4319 if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
4335 if (qp->type == IB_QPT_RAW_PACKET ||
4336 qp->flags & IB_QP_CREATE_SOURCE_QPN) {
4376 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
4392 qp->state = new_state;
4395 qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
4397 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
4399 qp->port = attr->port_num;
4401 qp->trans_qp.alt_port = attr->alt_port_num;
4408 !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) {
4414 qp->rq.head = 0;
4415 qp->rq.tail = 0;
4416 qp->sq.head = 0;
4417 qp->sq.tail = 0;
4418 qp->sq.cur_post = 0;
4419 if (qp->sq.wqe_cnt)
4420 qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
4421 qp->sq.last_poll = 0;
4422 qp->db.db[MLX5_RCV_DBR] = 0;
4423 qp->db.db[MLX5_SND_DBR] = 0;
4426 if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
4429 qp->counter_pending = 0;
4494 struct mlx5_ib_qp *qp = to_mqp(ibqp);
4504 cur_state = qp->state;
4507 dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
4580 err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
4583 err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
4586 resp.dctn = qp->dct.mdct.mqp.qpn;
4591 mlx5_core_destroy_dct(dev, &qp->dct.mdct);
4599 qp->state = new_state;
4604 struct mlx5_ib_qp *qp)
4609 if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
4613 if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
4658 struct mlx5_ib_qp *qp = to_mqp(ibqp);
4664 if (!mlx5_ib_modify_qp_allowed(dev, qp))
4693 if (qp->type == IB_QPT_GSI)
4696 qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type;
4701 mutex_lock(&qp->mutex);
4703 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
4706 if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
4717 cur_state, new_state, qp->type, attr_mask);
4758 mutex_unlock(&qp->mutex);
4861 struct mlx5_ib_qp *qp, u8 *qp_state)
4894 qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
4895 qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
4900 *qp_state = qp->state;
4906 struct mlx5_ib_qp *qp,
4909 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
4916 if (qp->sq.wqe_cnt) {
4922 if (qp->rq.wqe_cnt) {
4928 return sqrq_state_to_qp_state(sq_state, rq_state, qp,
4932 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
4944 err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
4951 qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
4978 if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC ||
4979 qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) {
5065 struct mlx5_ib_qp *qp = to_mqp(ibqp);
5072 if (qp->type == IB_QPT_GSI)
5080 if (unlikely(qp->type == MLX5_IB_QPT_DCT))
5081 return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
5084 mutex_lock(&qp->mutex);
5086 if (qp->type == IB_QPT_RAW_PACKET ||
5087 qp->flags & IB_QP_CREATE_SOURCE_QPN) {
5088 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
5091 qp->state = raw_packet_qp_state;
5094 err = query_qp_attr(dev, qp, qp_attr);
5099 qp_attr->qp_state = qp->state;
5101 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
5102 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
5105 qp_attr->cap.max_send_wr = qp->sq.max_post;
5106 qp_attr->cap.max_send_sge = qp->sq.max_gs;
5113 qp_init_attr->qp_type = qp->type;
5117 qp_attr->cap.max_inline_data = qp->max_inline_data;
5121 qp_init_attr->create_flags = qp->flags;
5123 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
5127 mutex_unlock(&qp->mutex);
5736 void mlx5_ib_drain_sq(struct ib_qp *qp)
5738 struct ib_cq *cq = qp->send_cq;
5750 struct mlx5_ib_dev *dev = to_mdev(qp->device);
5753 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
5762 ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
5771 void mlx5_ib_drain_rq(struct ib_qp *qp)
5773 struct ib_cq *cq = qp->recv_cq;
5779 struct mlx5_ib_dev *dev = to_mdev(qp->device);
5782 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
5792 ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
5802 * Bind a qp to a counter. If @counter is NULL then bind the qp to
5805 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
5807 struct mlx5_ib_dev *dev = to_mdev(qp->device);
5808 struct mlx5_ib_qp *mqp = to_mqp(qp);
5813 qp->counter = counter;
5823 err = __mlx5_ib_qp_set_counter(qp, counter);
5825 qp->counter = counter;
5831 qp->counter = counter;
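
The matches above keep returning to the same work-queue geometry fields (qp->sq.wqe_shift, wqe_cnt, fbc.sz_m1, max_post) and to index wrap-around such as "wqe_index & qp->sq.fbc.sz_m1". The short user-space sketch below is illustrative only, not driver code: it models that arithmetic under the assumption of a 64-byte send-WQE basic block (what MLX5_SEND_WQE_BB denotes in the driver) to show why a power-of-two wqe_cnt lets a plain AND with sz_m1 replace a modulo when mapping a monotonically growing index back into the ring.

/*
 * Illustrative model of mlx5-style work-queue geometry; an assumption-
 * laden sketch, not the driver's implementation.
 */
#include <stdio.h>
#include <stdint.h>

struct wq_geom {
	uint32_t wqe_cnt;   /* number of WQE slots, always a power of two */
	uint32_t wqe_shift; /* log2 of the WQE stride in bytes            */
	uint32_t sz_m1;     /* wqe_cnt - 1, used to wrap indices          */
};

/* Round up to the next power of two so "& sz_m1" can wrap indices. */
static uint32_t roundup_pow_of_two(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static void init_geom(struct wq_geom *wq, uint32_t wanted_wqes,
		      uint32_t wqe_size)
{
	wq->wqe_cnt = roundup_pow_of_two(wanted_wqes);
	wq->wqe_shift = 0;
	while ((1u << wq->wqe_shift) < wqe_size)
		wq->wqe_shift++;
	wq->sz_m1 = wq->wqe_cnt - 1;
}

int main(void)
{
	struct wq_geom sq;

	/* 100 requested send WQEs, 64-byte basic blocks (assumed size). */
	init_geom(&sq, 100, 64);

	/*
	 * A producer index only ever grows; masking with sz_m1 maps it
	 * back into the ring, mirroring the "wqe_index & sz_m1" wrap
	 * seen in the SQ/RQ read helpers above.  The byte offset of a
	 * slot is then simply (slot << wqe_shift).
	 */
	for (uint32_t pi = 126; pi < 131; pi++)
		printf("index %u -> slot %u, byte offset %u\n",
		       pi, pi & sq.sz_m1, (pi & sq.sz_m1) << sq.wqe_shift);

	return 0;
}

With wanted_wqes = 100 the ring is rounded up to 128 slots, so indices 126..130 map to slots 126, 127, 0, 1, 2; the same rounding is why the driver stores wqe_cnt as a power of two and keeps only its log2 (log_sq_size / log_rq_size) in the QP context.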