Lines matching refs: qp (bnxt_re qplib fast-path QP/CQ code)

58 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
60 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
62 qp->sq.condition = false;
63 qp->sq.send_phantom = false;
64 qp->sq.single = false;
68 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
72 scq = qp->scq;
73 rcq = qp->rcq;
75 if (!qp->sq.flushed) {
77 "FP: Adding to SQ Flush list = %p\n", qp);
78 bnxt_qplib_cancel_phantom_processing(qp);
79 list_add_tail(&qp->sq_flush, &scq->sqf_head);
80 qp->sq.flushed = true;
82 if (!qp->srq) {
83 if (!qp->rq.flushed) {
85 "FP: Adding to RQ Flush list = %p\n", qp);
86 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
87 qp->rq.flushed = true;
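
The block above is an idempotent flush-list insert: a per-queue flushed flag prevents double-queuing, and the RQ is skipped entirely when the QP is attached to an SRQ. A minimal userspace sketch of the same guard-flag pattern (names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct fake_q { bool flushed; };

/* Idempotent insert: the flag makes repeat calls harmless, mirroring
 * the !qp->sq.flushed / !qp->rq.flushed checks above. */
static void add_to_flush_list(struct fake_q *q, const char *name)
{
	if (q->flushed)
		return;
	q->flushed = true;
	printf("queued %s for flushing\n", name);
}

int main(void)
{
	struct fake_q sq = { .flushed = false };

	add_to_flush_list(&sq, "sq");
	add_to_flush_list(&sq, "sq");	/* no-op: already queued */
	return 0;
}
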
92 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
94 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
96 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
97 if (qp->scq == qp->rcq)
98 __acquire(&qp->rcq->flush_lock);
100 spin_lock(&qp->rcq->flush_lock);
103 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
105 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
107 if (qp->scq == qp->rcq)
108 __release(&qp->rcq->flush_lock);
110 spin_unlock(&qp->rcq->flush_lock);
111 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
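
The lock helpers use a fixed order: the send CQ's flush_lock is taken first (with IRQs saved), the receive CQ's second, and release unwinds in reverse. When both work queues share one CQ, only one real lock is taken; the bare __acquire()/__release() calls exist purely to keep sparse's lock-context counting balanced. A pthreads sketch of the same ordering discipline (illustrative analogue, not the kernel API):

#include <pthread.h>

struct fake_cq { pthread_mutex_t flush_lock; };

/* Lock scq first, rcq second, skipping rcq when it is the same object;
 * unlock in the opposite order, mirroring the kernel helpers above. */
static void acquire_both(struct fake_cq *scq, struct fake_cq *rcq)
{
	pthread_mutex_lock(&scq->flush_lock);
	if (scq != rcq)
		pthread_mutex_lock(&rcq->flush_lock);
}

static void release_both(struct fake_cq *scq, struct fake_cq *rcq)
{
	if (scq != rcq)
		pthread_mutex_unlock(&rcq->flush_lock);
	pthread_mutex_unlock(&scq->flush_lock);
}
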
114 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
118 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
119 __bnxt_qplib_add_flush_qp(qp);
120 bnxt_qplib_release_cq_flush_locks(qp, &flags);
123 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
125 if (qp->sq.flushed) {
126 qp->sq.flushed = false;
127 list_del(&qp->sq_flush);
129 if (!qp->srq) {
130 if (qp->rq.flushed) {
131 qp->rq.flushed = false;
132 list_del(&qp->rq_flush);
137 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
141 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
142 __clean_cq(qp->scq, (u64)(unsigned long)qp);
143 qp->sq.hwq.prod = 0;
144 qp->sq.hwq.cons = 0;
145 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
146 qp->rq.hwq.prod = 0;
147 qp->rq.hwq.cons = 0;
149 __bnxt_qplib_del_flush_qp(qp);
150 bnxt_qplib_release_cq_flush_locks(qp, &flags);
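
bnxt_qplib_clean_qp ties these pieces together: under both flush locks it scrubs each CQ of completions whose qp_handle matches this QP (__clean_cq, defined near line 1463 below), resets the SQ and RQ hardware-queue producer/consumer indices to zero, and unlinks the QP from any flush list it is on.
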
175 struct bnxt_qplib_qp *qp)
177 struct bnxt_qplib_q *rq = &qp->rq;
178 struct bnxt_qplib_q *sq = &qp->sq;
180 if (qp->rq_hdr_buf)
182 rq->max_wqe * qp->rq_hdr_buf_size,
183 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
184 if (qp->sq_hdr_buf)
186 sq->max_wqe * qp->sq_hdr_buf_size,
187 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
188 qp->rq_hdr_buf = NULL;
189 qp->sq_hdr_buf = NULL;
190 qp->rq_hdr_buf_map = 0;
191 qp->sq_hdr_buf_map = 0;
192 qp->sq_hdr_buf_size = 0;
193 qp->rq_hdr_buf_size = 0;
197 struct bnxt_qplib_qp *qp)
199 struct bnxt_qplib_q *rq = &qp->rq;
200 struct bnxt_qplib_q *sq = &qp->sq;
203 if (qp->sq_hdr_buf_size && sq->max_wqe) {
204 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
205 sq->max_wqe * qp->sq_hdr_buf_size,
206 &qp->sq_hdr_buf_map, GFP_KERNEL);
207 if (!qp->sq_hdr_buf) {
215 if (qp->rq_hdr_buf_size && rq->max_wqe) {
216 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
218 qp->rq_hdr_buf_size,
219 &qp->rq_hdr_buf_map,
221 if (!qp->rq_hdr_buf) {
231 bnxt_qplib_free_qp_hdr_buf(res, qp);
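
The allocation path mirrors the free path above it: each header buffer is a single coherent DMA area of max_wqe fixed-size slots, and a failure on the second allocation unwinds the first through bnxt_qplib_free_qp_hdr_buf. A plain-C sketch of that allocate-both-or-free-both shape (calloc standing in for dma_alloc_coherent):

#include <stdlib.h>

struct hdr_bufs { void *sq_hdr, *rq_hdr; };

/* Allocate both buffers or neither: the error path for the second
 * allocation releases whatever the first obtained, like the kernel's
 * goto-based unwind in bnxt_qplib_alloc_qp_hdr_buf. */
static int alloc_hdr_bufs(struct hdr_bufs *b, size_t sq_sz, size_t rq_sz)
{
	b->sq_hdr = sq_sz ? calloc(1, sq_sz) : NULL;
	if (sq_sz && !b->sq_hdr)
		return -1;
	b->rq_hdr = rq_sz ? calloc(1, rq_sz) : NULL;
	if (rq_sz && !b->rq_hdr) {
		free(b->sq_hdr);	/* unwind the earlier allocation */
		b->sq_hdr = NULL;
		return -1;
	}
	return 0;
}
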
826 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
832 struct bnxt_qplib_q *sq = &qp->sq;
833 struct bnxt_qplib_q *rq = &qp->rq;
846 req.type = qp->type;
847 req.dpi = cpu_to_le32(qp->dpi->dpi);
848 req.qp_handle = cpu_to_le64(qp->qp_handle);
864 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
874 req.scq_cid = cpu_to_le32(qp->scq->id);
902 req.rcq_cid = cpu_to_le32(qp->rcq->id);
904 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
911 req.pd_id = cpu_to_le32(qp->pd->id);
918 qp->id = le32_to_cpu(resp.xid);
919 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
920 qp->cctx = res->cctx;
922 sq->dbinfo.xid = qp->id;
923 sq->dbinfo.db = qp->dpi->dbr;
924 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
927 rq->dbinfo.xid = qp->id;
928 rq->dbinfo.db = qp->dpi->dbr;
931 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
932 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
933 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
938 bnxt_qplib_free_qp_hdr_buf(res, qp);
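
bnxt_qplib_create_qp1 follows the driver's usual firmware-command shape: fields are byte-swapped into the little-endian request (cpu_to_le32/cpu_to_le64), the command is posted to the RCFW channel, and on success the firmware-assigned XID becomes qp->id, the doorbell records are seeded from the DPI, and the QP is registered in rcfw->qp_tbl so later completion handlers can map an ID back to the software QP. The error path frees the header buffers that were just allocated.
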
950 static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
957 sq = &qp->sq;
968 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
975 struct bnxt_qplib_q *sq = &qp->sq;
976 struct bnxt_qplib_q *rq = &qp->rq;
987 qp->dev_cap_flags = res->dattr->dev_cap_flags;
995 req.type = qp->type;
996 req.dpi = cpu_to_le32(qp->dpi->dpi);
997 req.qp_handle = cpu_to_le64(qp->qp_handle);
1000 if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1005 if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
1007 qp->msn = 0;
1016 hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
1019 if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
1020 hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1021 qp->msn_tbl_sz = hwq_attr.aux_depth;
1022 qp->msn = 0;
1035 bnxt_qplib_init_psn_ptr(qp, psn_sz);
1037 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1047 req.scq_cid = cpu_to_le32(qp->scq->id);
1050 if (!qp->srq) {
1073 nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1082 req.srq_cid = cpu_to_le32(qp->srq->id);
1084 req.rcq_cid = cpu_to_le32(qp->rcq->id);
1088 if (qp->sig_type)
1090 if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1099 xrrq = &qp->orrq;
1101 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1121 xrrq = &qp->irrq;
1123 qp->max_dest_rd_atomic);
1137 req.pd_id = cpu_to_le32(qp->pd->id);
1145 qp->id = le32_to_cpu(resp.xid);
1146 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1147 INIT_LIST_HEAD(&qp->sq_flush);
1148 INIT_LIST_HEAD(&qp->rq_flush);
1149 qp->cctx = res->cctx;
1151 sq->dbinfo.xid = qp->id;
1152 sq->dbinfo.db = qp->dpi->dbr;
1153 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1156 rq->dbinfo.xid = qp->id;
1157 rq->dbinfo.db = qp->dpi->dbr;
1160 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1161 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1162 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1166 bnxt_qplib_free_hwq(res, &qp->irrq);
1168 bnxt_qplib_free_hwq(res, &qp->orrq);
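
The full create path adds what QP1 does not need: for RC QPs a per-WQE PSN/MSN search area rides alongside the SQ as an aux queue, ORRQ/IRRQ rings are sized from the rd_atomic limits (and freed again on the error path), and with hardware retransmission (BNXT_RE_HW_RETX) the MSN table depth is rounded up to a power of two. A userspace stand-in for that rounding, assuming a nonzero input (the kernel uses roundup_pow_of_two):

#include <stdint.h>

/* Round up to the next power of two, valid for 1 <= x <= 2^31;
 * e.g. a 1000-slot SQ gets a 1024-entry MSN table. */
static uint32_t rup_pow2(uint32_t x)
{
	x--;
	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
	x |= x >> 8;  x |= x >> 16;
	return x + 1;
}
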
1180 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1182 switch (qp->state) {
1187 if (!(qp->modify_flags &
1189 qp->modify_flags |=
1191 qp->path_mtu =
1194 qp->modify_flags &=
1197 if (qp->max_dest_rd_atomic < 1)
1198 qp->max_dest_rd_atomic = 1;
1199 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1201 if (!(qp->modify_flags &
1203 qp->modify_flags |=
1205 qp->ah.sgid_index = 0;
1213 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1215 switch (qp->state) {
1218 if (qp->max_rd_atomic < 1)
1219 qp->max_rd_atomic = 1;
1226 qp->modify_flags &=
1245 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1247 switch (qp->cur_qp_state) {
1251 __modify_flags_from_init_state(qp);
1254 __modify_flags_from_rtr_state(qp);
1269 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1284 __filter_modify_flags(qp);
1285 bmask = qp->modify_flags;
1286 req.modify_mask = cpu_to_le32(qp->modify_flags);
1287 req.qp_cid = cpu_to_le32(qp->id);
1290 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1291 (qp->en_sqd_async_notify ?
1294 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1297 req.access = qp->access;
1303 req.qkey = cpu_to_le32(qp->qkey);
1306 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1313 req.flow_label = cpu_to_le32(qp->ah.flow_label);
1317 [qp->ah.sgid_index]);
1320 req.hop_limit = qp->ah.hop_limit;
1323 req.traffic_class = qp->ah.traffic_class;
1326 memcpy(req.dest_mac, qp->ah.dmac, 6);
1329 req.path_mtu_pingpong_push_enable |= qp->path_mtu;
1332 req.timeout = qp->timeout;
1335 req.retry_cnt = qp->retry_cnt;
1338 req.rnr_retry = qp->rnr_retry;
1341 req.min_rnr_timer = qp->min_rnr_timer;
1344 req.rq_psn = cpu_to_le32(qp->rq.psn);
1347 req.sq_psn = cpu_to_le32(qp->sq.psn);
1351 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1355 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1357 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1358 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1359 req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1360 req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1361 req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1363 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1365 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1371 qp->cur_qp_state = qp->state;
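
bnxt_qplib_modify_qp is mask-driven end to end: __filter_modify_flags first adjusts modify_flags for the current state transition (e.g. defaulting path MTU and SGID index out of INIT, clamping rd_atomic limits out of RTR), then each attribute is serialized into the request only when its bit is set, with cpu_to_le conversion at the wire boundary; cur_qp_state advances only after the firmware accepts the command. A reduced sketch of the pattern, with mask bits invented for illustration:

#include <stdint.h>
#include <string.h>

#define MOD_QKEY	(1u << 0)	/* illustrative mask bits, not */
#define MOD_TIMEOUT	(1u << 1)	/* the CMDQ_MODIFY_QP_* values */

struct wire_req { uint32_t qkey; uint8_t timeout; };
struct sw_qp { uint32_t modify_flags; uint32_t qkey; uint8_t timeout; };

/* Copy an attribute into the wire request only when its modify bit is
 * set; unset fields stay zero and are ignored by the consumer. */
static void fill_req(struct wire_req *req, const struct sw_qp *qp)
{
	memset(req, 0, sizeof(*req));
	if (qp->modify_flags & MOD_QKEY)
		req->qkey = qp->qkey;	/* kernel also does cpu_to_le32() */
	if (qp->modify_flags & MOD_TIMEOUT)
		req->timeout = qp->timeout;
}
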
1375 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1397 req.qp_cid = cpu_to_le32(qp->id);
1405 qp->state = sb->en_sqd_async_notify_state &
1407 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1409 qp->access = sb->access;
1410 qp->pkey_index = le16_to_cpu(sb->pkey);
1411 qp->qkey = le32_to_cpu(sb->qkey);
1417 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1419 qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1421 qp->ah.sgid_index = 0;
1424 qp->ah.sgid_index = i;
1431 qp->ah.hop_limit = sb->hop_limit;
1432 qp->ah.traffic_class = sb->traffic_class;
1433 memcpy(qp->ah.dmac, sb->dest_mac, 6);
1434 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1437 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1440 qp->timeout = sb->timeout;
1441 qp->retry_cnt = sb->retry_cnt;
1442 qp->rnr_retry = sb->rnr_retry;
1443 qp->min_rnr_timer = sb->min_rnr_timer;
1444 qp->rq.psn = le32_to_cpu(sb->rq_psn);
1445 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1446 qp->sq.psn = le32_to_cpu(sb->sq_psn);
1447 qp->max_dest_rd_atomic =
1449 qp->sq.max_wqe = qp->sq.hwq.max_elements;
1450 qp->rq.max_wqe = qp->rq.hwq.max_elements;
1451 qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1452 qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1453 qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1454 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1455 memcpy(qp->smac, sb->src_mac, 6);
1456 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
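
Query is the mirror image: the response block is decoded with le*_to_cpu, and several attributes share a word, e.g. path_mtu and the destination VLAN ID are both packed into sb->path_mtu_dest_vlan_id and separated by mask and shift. A sketch of that unpacking, with invented mask values rather than the CMDQ definitions:

#include <stdint.h>

/* Two fields packed in one 16-bit word; the masks and shift here are
 * illustrative only. */
static void unpack_mtu_vlan(uint16_t word, uint8_t *mtu, uint16_t *vlan)
{
	*vlan = word & 0x0fff;		/* low bits: VLAN ID */
	*mtu = (word >> 12) & 0xf;	/* high bits: path MTU */
}
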
1463 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1487 if (qp == le64_to_cpu(cqe->qp_handle))
1497 if (qp == le64_to_cpu(cqe->qp_handle))
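
__clean_cq identifies a QP's completions by value comparison: the QP pointer was stored into each CQE's 64-bit qp_handle field at completion time, so cleanup just compares handles. The round-trip is a plain cast both ways:

#include <stdint.h>

/* Pointer <-> 64-bit handle round-trip, as used for cqe->qp_handle.
 * uintptr_t keeps the cast well-defined on 32- and 64-bit builds
 * (the kernel spells it (u64)(unsigned long)qp). */
static uint64_t handle_from_qp(void *qp)
{
	return (uint64_t)(uintptr_t)qp;
}

static void *qp_from_handle(uint64_t handle)
{
	return (void *)(uintptr_t)handle;
}
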
1510 struct bnxt_qplib_qp *qp)
1519 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1527 req.qp_cid = cpu_to_le32(qp->id);
1532 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1533 rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1541 struct bnxt_qplib_qp *qp)
1543 bnxt_qplib_free_qp_hdr_buf(res, qp);
1544 bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1545 kfree(qp->sq.swq);
1547 bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1548 kfree(qp->rq.swq);
1550 if (qp->irrq.max_elements)
1551 bnxt_qplib_free_hwq(res, &qp->irrq);
1552 if (qp->orrq.max_elements)
1553 bnxt_qplib_free_hwq(res, &qp->orrq);
1557 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1560 struct bnxt_qplib_q *sq = &qp->sq;
1565 if (qp->sq_hdr_buf) {
1567 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1568 sw_prod * qp->sq_hdr_buf_size);
1570 sge->size = qp->sq_hdr_buf_size;
1571 return qp->sq_hdr_buf + sw_prod * sge->size;
1576 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1578 struct bnxt_qplib_q *rq = &qp->rq;
1583 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1585 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1588 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1591 struct bnxt_qplib_q *rq = &qp->rq;
1596 if (qp->rq_hdr_buf) {
1598 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1599 sw_prod * qp->rq_hdr_buf_size);
1601 sge->size = qp->rq_hdr_buf_size;
1602 return qp->rq_hdr_buf + sw_prod * sge->size;
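
The QP1 buffer helpers above all reduce to the same arithmetic: the header area is an array of fixed-size slots, and a WQE's slot lives at base + index * slot_size, in both the DMA and CPU address spaces:

#include <stdint.h>

/* Slot addressing for the QP1 header buffers: one slot per WQE,
 * indexed by the software producer index. */
static uint64_t slot_dma_addr(uint64_t base_map, uint32_t sw_prod,
			      uint32_t slot_size)
{
	return base_map + (uint64_t)sw_prod * slot_size;
}
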
1608 static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
1624 qp->msn++;
1625 qp->msn %= qp->msn_tbl_sz;
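
The MSN table is a ring: after each fill the index advances and wraps at msn_tbl_sz. Since the table was sized to a power of two at create time, the modulo could equally be a mask:

#include <stdint.h>

/* Equivalent to the pair: qp->msn++; qp->msn %= qp->msn_tbl_sz;
 * with tbl_sz a power of two this is also (msn + 1) & (tbl_sz - 1). */
static uint32_t next_msn(uint32_t msn, uint32_t tbl_sz)
{
	return (msn + 1) % tbl_sz;
}
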
1628 static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1640 if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
1641 bnxt_qplib_fill_msn_search(qp, wqe, swq);
1655 if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
1665 static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1677 hwq = &qp->sq.hwq;
1683 if (t_len > qp->max_inline_data)
1728 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1740 ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1745 *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1753 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1766 /* For HW retx use qp msn index */
1767 tail = qp->msn;
1768 tail %= qp->msn_tbl_sz;
1777 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1779 struct bnxt_qplib_q *sq = &qp->sq;
1784 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1789 struct bnxt_qplib_q *sq = &qp->sq;
1803 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1804 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1807 qp->id, qp->state);
1812 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1822 bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
1831 if (qp->sig_type)
1834 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1848 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1859 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1887 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1888 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1899 if (qp->mtu)
1900 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1921 if (qp->mtu)
1922 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1940 if (qp->mtu)
1941 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2012 if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
2014 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2019 qp->wqe_cnt++;
2024 nq_work->cq = qp->scq;
2025 nq_work->nq = qp->scq->nq;
2027 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
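
Two details of the send path above are worth calling out. First, posting is refused unless the QP is in RTS; ERR is let through so the WQE can still complete via the flush machinery. Second, the recurring expression (data_len + qp->mtu - 1) / qp->mtu is a ceiling division, the open-coded form of the kernel's DIV_ROUND_UP, used when advancing the send PSN by the number of packets a WQE generates:

#include <stdint.h>

/* Ceiling division: packets needed to carry data_len bytes at the
 * given MTU. Same as DIV_ROUND_UP(data_len, mtu). */
static uint32_t pkt_count(uint32_t data_len, uint32_t mtu)
{
	return (data_len + mtu - 1) / mtu;  /* pkt_count(5000, 4096) == 2 */
}
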
2037 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2039 struct bnxt_qplib_q *rq = &qp->rq;
2044 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2048 struct bnxt_qplib_q *rq = &qp->rq;
2059 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2062 qp->id, qp->state);
2069 "FP: QP (0x%x) RQ is full!\n", qp->id);
2078 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2111 nq_work->cq = qp->rcq;
2112 nq_work->nq = qp->rcq->nq;
2114 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
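
The receive path is symmetric: posting is refused while the QP is still in RESET, a full ring is reported per QP ID, and WQEs posted while the QP is in ERR are steered toward the flush path. Both post paths hand CQ-notification follow-up to the CQ's NQ workqueue through an nq_work item rather than doing it inline.
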
2273 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2289 bnxt_qplib_cancel_phantom_processing(qp);
2295 cqe->qp_handle = (u64)(unsigned long)qp;
2297 cqe->src_qp = qp->id;
2314 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2322 switch (qp->type) {
2346 cqe->qp_handle = (unsigned long)qp;
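
__flush_sq and __flush_rq synthesize completions in software: for every WQE still outstanding they emit a CQE whose qp_handle identifies the QP (and, on the SQ side, whose src_qp carries the QP ID) with an error status chosen by queue and QP type, so consumers see a normal-looking, if failed, completion stream.
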
2364 struct bnxt_qplib_qp *qp = qp_handle;
2366 if (!qp)
2370 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2371 bnxt_qplib_cancel_phantom_processing(qp);
2377 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2381 struct bnxt_qplib_q *sq = &qp->sq;
2399 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2400 cq_cons, qp->id, swq_last, cqe_sq_cons);
2466 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2467 cq_cons, qp->id, swq_last, cqe_sq_cons);
2481 struct bnxt_qplib_qp *qp;
2486 qp = (struct bnxt_qplib_qp *)((unsigned long)
2488 if (!qp) {
2490 "FP: Process Req qp is NULL\n");
2493 sq = &qp->sq;
2496 if (qp->sq.flushed) {
2498 "%s: QP in Flush QP = %p\n", __func__, qp);
2514 cqe->qp_handle = (u64)(unsigned long)qp;
2515 cqe->src_qp = qp->id;
2533 bnxt_qplib_mark_qp_error(qp);
2534 /* Add qp to flush list of the CQ */
2535 bnxt_qplib_add_flush_qp(qp);
2538 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2540 *lib_qp = qp;
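
bnxt_qplib_cq_process_req shows the common completion-side pattern: recover the QP from the CQE handle, bail out with a warning if the handle is NULL, skip work if the QP is already being flushed, and on an error status mark the QP broken and queue it for flushing. do_wa9060 (lines 2377 onward) implements the driver's "phantom WQE" hardware workaround; when it applies, the QP is handed back to the caller through lib_qp so polling can resume correctly.
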
2590 struct bnxt_qplib_qp *qp;
2594 qp = (struct bnxt_qplib_qp *)((unsigned long)
2596 if (!qp) {
2597 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2600 if (qp->rq.flushed) {
2602 "%s: QP in Flush QP = %p\n", __func__, qp);
2613 cqe->qp_handle = (u64)(unsigned long)qp;
2618 srq = qp->srq;
2635 rq = &qp->rq;
2654 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2655 /* Add qp to flush list of the CQ */
2656 bnxt_qplib_add_flush_qp(qp);
2670 struct bnxt_qplib_qp *qp;
2674 qp = (struct bnxt_qplib_qp *)((unsigned long)
2676 if (!qp) {
2677 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2680 if (qp->rq.flushed) {
2682 "%s: QP in Flush QP = %p\n", __func__, qp);
2692 cqe->qp_handle = (u64)(unsigned long)qp;
2703 srq = qp->srq;
2721 rq = &qp->rq;
2741 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2742 /* Add qp to flush list of the CQ */
2743 bnxt_qplib_add_flush_qp(qp);
2766 struct bnxt_qplib_qp *qp;
2772 qp = (struct bnxt_qplib_qp *)((unsigned long)
2774 if (!qp) {
2775 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2778 if (qp->rq.flushed) {
2780 "%s: QP in Flush QP = %p\n", __func__, qp);
2786 cqe->qp_handle = (u64)(unsigned long)qp;
2791 cqe->src_qp = qp->id;
2792 if (qp->id == 1 && !cqe->length) {
2798 cqe->pkey_index = qp->pkey_index;
2799 memcpy(cqe->smac, qp->smac, 6);
2806 srq = qp->srq;
2826 rq = &qp->rq;
2845 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2846 /* Add qp to flush list of the CQ */
2847 bnxt_qplib_add_flush_qp(qp);
2859 struct bnxt_qplib_qp *qp;
2871 qp = (struct bnxt_qplib_qp *)((unsigned long)
2873 if (!qp)
2877 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2879 sq = &qp->sq;
2880 rq = &qp->rq;
2887 if (qp->sq.flushed) {
2889 "%s: QP in Flush QP = %p\n", __func__, qp);
2906 cqe->qp_handle = (u64)(unsigned long)qp;
2907 cqe->src_qp = qp->id;
2938 if (qp->rq.flushed) {
2940 "%s: QP in Flush QP = %p\n", __func__, qp);
2950 /* Add qp to flush list of the CQ */
2951 bnxt_qplib_add_flush_qp(qp);
2976 struct bnxt_qplib_qp *qp = NULL;
2981 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2982 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2983 __flush_sq(&qp->sq, qp, &cqe, &budget);
2986 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2987 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2988 __flush_rq(&qp->rq, qp, &cqe, &budget);
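
The flush lists queued earlier are drained here: the CQ walks its sqf_head and rqf_head lists and lets each queued QP emit synthesized CQEs until the polling budget runs out. A userspace analogue of that budgeted iteration (illustrative only):

struct flushed_q { int pending; };

/* Budgeted drain in the shape of bnxt_qplib_process_flush_list:
 * each flushed queue emits CQEs until the caller's budget is spent. */
static int drain_flush_list(struct flushed_q *qs, int nq, int budget)
{
	int emitted = 0;

	for (int i = 0; i < nq && budget > 0; i++) {
		while (qs[i].pending > 0 && budget > 0) {
			qs[i].pending--;	/* one synthesized CQE */
			budget--;
			emitted++;
		}
	}
	return emitted;
}
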
3090 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3092 flush_workqueue(qp->scq->nq->cqn_wq);
3093 if (qp->scq != qp->rcq)
3094 flush_workqueue(qp->rcq->nq->cqn_wq);
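
bnxt_qplib_flush_cqn_wq closes the loop with the same shared-CQ awareness as the flush-lock helpers: the send CQ's notification workqueue is always flushed, the receive CQ's only when it is a distinct CQ.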