Searched refs:cqe (Results 1 - 25 of 87) sorted by relevance

/freebsd-11-stable/sys/dev/mlx5/mlx5_en/
mlx5_en_txrx.c:33 struct mlx5_cqe64 *cqe; local
35 cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq));
37 if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)
40 /* ensure cqe content is read after cqe ownership bit */
43 return (cqe);
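
The mlx5_en_txrx.c hit above is the canonical mlx5 poll step: compare the CQE owner bit against the parity of the consumer-index wrap count, and only read the rest of the entry after a barrier. A minimal self-contained sketch of that protocol, with invented struct and field names; only the owner-bit/wrap-parity test and the acquire fence are taken from the snippet:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define CQE_OWNER_MASK 0x1u            /* low bit of op_own, as in MLX5_CQE_OWNER_MASK */

struct cqe64 {
    uint8_t payload[63];
    uint8_t op_own;                    /* opcode in the high nibble, owner bit in bit 0 */
};

struct cq {
    struct cqe64 *ring;
    uint32_t ci;                       /* consumer index */
    uint32_t size;                     /* entries; power of two */
};

static struct cqe64 *cq_poll_one(struct cq *cq)
{
    struct cqe64 *cqe = &cq->ring[cq->ci & (cq->size - 1)];
    uint32_t wrap_parity = (cq->ci / cq->size) & 1;

    /* Hardware still owns the entry if the owner bit differs from our wrap parity. */
    if (((uint32_t)cqe->op_own ^ wrap_parity) & CQE_OWNER_MASK)
        return NULL;

    /* Ensure CQE contents are read only after the ownership bit (the rmb in the snippet). */
    atomic_thread_fence(memory_order_acquire);
    return cqe;
}
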
mlx5_en_rx.c:127 mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe) argument
143 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
148 tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;
165 if (get_cqe_lro_tcppsh(cqe))
170 th->th_ack = cqe->lro_ack_seq_num;
171 th->th_win = cqe->lro_tcp_win;
184 if (get_cqe_lro_timestamp_valid(cqe) &&
189 * cqe->timestamp is 64bit long.
193 ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
194 ts_ptr[2] = *((uint32_t *)&cqe
211 mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, struct mbuf *mb, u32 cqe_bcnt) argument
380 struct mlx5_cqe64 *cqe; local
[all...]
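
The mlx5_en_rx.c hits rewrite the TCP header of an LRO aggregate from fields carried in the final CQE; the comment near line 189 notes that cqe->timestamp is 64 bits wide, which is why it is stored into the option area as two 32-bit words (ts_ptr[1] and ts_ptr[2]). A small sketch of just that copy, with assumed names, using memcpy instead of the snippet's pointer cast to stay clear of strict-aliasing trouble:

#include <stdint.h>
#include <string.h>

/* ts_opt points at the 3-word TCP timestamp option: kind/length word, TSval, TSecr. */
static void copy_cqe_timestamp(uint32_t ts_opt[3], const uint64_t *cqe_timestamp)
{
    uint32_t half[2];

    memcpy(half, cqe_timestamp, sizeof(half));
    ts_opt[1] = half[0];   /* first 32 bits of the 64-bit hardware timestamp */
    ts_opt[2] = half[1];   /* remaining 32 bits */
}
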
/freebsd-11-stable/contrib/ofed/libmlx4/
cq.c:104 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); local
105 struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe;
108 !!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;
116 static enum ibv_wc_status mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe) argument
118 if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR)
122 htobe32(cqe->vlan_my_qpn), htobe32(cqe
158 handle_good_req(struct ibv_wc *wc, struct mlx4_cqe *cqe) argument
206 struct mlx4_cqe *cqe; local
235 mlx4_parse_cqe(struct mlx4_cq *cq, struct mlx4_cqe *cqe, struct mlx4_qp **cur_qp, struct ibv_wc *wc, int lazy) argument
373 mlx4_parse_lazy_cqe(struct mlx4_cq *cq, struct mlx4_cqe *cqe) argument
387 struct mlx4_cqe *cqe; local
441 struct mlx4_cqe *cqe; local
469 struct mlx4_cqe *cqe; local
716 struct mlx4_cqe *cqe, *dest; local
789 struct mlx4_cqe *cqe; local
[all...]
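
mlx4_handle_error_cqe (line 116 above) turns the syndrome byte of an error CQE into an ibv_wc_status code. A sketch of that mapping against the real ibv_wc_status enum from <infiniband/verbs.h>; the syndrome constants and the struct layout are stand-ins modeled on the MLX4_CQE_SYNDROME_* names, not copied from any header:

#include <infiniband/verbs.h>
#include <stdint.h>

enum {                                   /* hypothetical syndrome values */
    SYNDROME_LOCAL_LENGTH_ERR = 0x01,
    SYNDROME_LOCAL_QP_OP_ERR  = 0x02,
};

struct err_cqe {
    uint32_t vlan_my_qpn;                /* printed by the snippet on unknown syndromes */
    uint8_t  reserved[27];
    uint8_t  syndrome;
};

static enum ibv_wc_status map_error_syndrome(const struct err_cqe *cqe)
{
    switch (cqe->syndrome) {
    case SYNDROME_LOCAL_QP_OP_ERR:  return IBV_WC_LOC_QP_OP_ERR;
    case SYNDROME_LOCAL_LENGTH_ERR: return IBV_WC_LOC_LEN_ERR;
    default:                        return IBV_WC_GENERAL_ERR;
    }
}
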
verbs.c:415 ret = ibv_cmd_create_cq(context, cq_attr->cqe, cq_attr->channel,
460 if (cq_attr->cqe > 0x3fffff) {
479 /* mlx4 devices don't support slid and sl in cqe when completion
497 cq_attr->cqe = align_queue_size(cq_attr->cqe + 1);
499 if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cq_attr->cqe, mctx->cqe_size))
517 --cq_attr->cqe;
544 struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe, argument
549 struct ibv_cq_init_attr_ex cq_attr = {.cqe = cqe,
574 mlx4_resize_cq(struct ibv_cq *ibcq, int cqe) argument
[all...]
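
After rejecting requests above the 0x3fffff cap (line 460), the create path above sizes the ring as align_queue_size(cqe + 1) and then decrements cq_attr->cqe before returning: it allocates one spare entry, rounds up to a power of two, and reports back the usable depth. A sketch of that arithmetic, with align_queue_size rewritten here as a plain next-power-of-two helper:

#include <stdint.h>

static uint32_t align_queue_size(uint32_t req)
{
    uint32_t n = 1;

    while (n < req)
        n <<= 1;
    return n;
}

/* Returns the number of entries to allocate; *usable is what the caller is told. */
static uint32_t size_cq_ring(uint32_t requested_cqe, uint32_t *usable)
{
    uint32_t ring = align_queue_size(requested_cqe + 1);   /* +1: keep one slot spare */

    *usable = ring - 1;                                    /* mirrors --cq_attr->cqe */
    return ring;
}
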
/freebsd-11-stable/sys/dev/cxgb/ulp/iw_cxgb/
iw_cxgb_ev.c:92 __func__, CQE_STATUS(rsp_msg->cqe),
93 CQE_QPID(rsp_msg->cqe));
102 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
109 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
110 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
111 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
146 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
[all...]
iw_cxgb_cq.c:83 * 0 cqe returned
92 struct t3_cqe cqe, *rd_cqe; local
111 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
127 wc->vendor_err = CQE_STATUS(cqe);
130 CQE_QPID(cqe), CQE_TYPE(cqe),
131 CQE_OPCODE(cqe), CQE_STATUS(cqe));
133 CQE_WRID_HI(cqe), CQE_WRID_LOW(cqe), (unsigne
[all...]
iw_cxgb_hal.c:113 struct t3_cqe *cqe; local
145 * Now rptr is the index for the (last) cqe that was
149 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
150 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
385 struct t3_cqe cqe; local
389 memset(&cqe, 0, sizeof(cqe));
390 cqe.header = htobe32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
397 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
424 struct t3_cqe cqe; local
466 struct t3_cqe *cqe, *swcqe; local
482 cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) argument
503 struct t3_cqe *cqe; local
521 struct t3_cqe *cqe; local
1154 cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
[all...]
iw_cxgb_wr.h:619 struct t3_cqe cqe; member in struct:t3_swsq
670 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
671 CQE_GENBIT(*cqe))
696 struct t3_cqe *cqe; local
698 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
699 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
700 return cqe;
706 struct t3_cqe *cqe; local
709 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
710 return cqe;
717 struct t3_cqe *cqe; local
[all...]
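
The CQ_VLD_ENTRY macro above (lines 670-671) implements a generation-bit scheme: an entry is valid only if the generation bit stored in the CQE matches the generation implied by the read pointer, which flips on every wrap, so stale entries from the previous pass compare unequal. A stand-alone sketch with an assumed layout:

#include <stdint.h>

#define Q_PTR2IDX(ptr, size_log2) ((ptr) & ((1u << (size_log2)) - 1))
#define Q_GENBIT(ptr, size_log2)  (((ptr) >> (size_log2)) & 1u)

struct cqe {
    uint32_t header;
    uint32_t genbit;          /* real hardware packs this into a flags word */
};

static const struct cqe *cq_next_valid(const struct cqe *queue,
                                       uint32_t rptr, uint32_t size_log2)
{
    const struct cqe *e = &queue[Q_PTR2IDX(rptr, size_log2)];

    /* Stale entries left over from the previous pass carry the opposite genbit. */
    return (e->genbit == Q_GENBIT(rptr, size_log2)) ? e : NULL;
}
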
/freebsd-11-stable/sys/dev/mthca/
mthca_cq.c:174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
191 __be32 *cqe = cqe_ptr; local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[
269 is_recv_cqe(struct mthca_cqe *cqe) argument
281 struct mthca_cqe *cqe; local
316 cqe, MTHCA_CQ_ENTRY_SIZE); local
371 mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) argument
377 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) argument
490 struct mthca_cqe *cqe; local
[all...]
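
mthca uses a plain ownership byte rather than wrap parity: cqe_sw (line 174) yields the entry only while MTHCA_CQ_ENTRY_OWNER_HW is clear, and set_cqe_hw (line 184) hands the slot back to hardware after processing. A compact sketch of that handoff, layout invented:

#include <stddef.h>
#include <stdint.h>

#define CQ_ENTRY_OWNER_HW 0x80u          /* as in MTHCA_CQ_ENTRY_OWNER_HW */

struct cqe { uint8_t data[31]; uint8_t owner; };
struct cq  { struct cqe *ring; uint32_t cons_index; uint32_t mask; };

static struct cqe *next_sw_cqe(struct cq *cq)          /* cqe_sw() + next_cqe_sw() */
{
    struct cqe *e = &cq->ring[cq->cons_index & cq->mask];

    return (e->owner & CQ_ENTRY_OWNER_HW) ? NULL : e;
}

static void release_cqe(struct cq *cq, struct cqe *e)  /* set_cqe_hw() + advance */
{
    e->owner = CQ_ENTRY_OWNER_HW;
    ++cq->cons_index;
}
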
/freebsd-11-stable/sys/dev/mlx4/mlx4_ib/
mlx4_ib_cq.c:81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); local
82 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
133 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) argument
135 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
140 u64 buf_addr, int cqe)
145 *umem = ib_umem_get(context, buf_addr, cqe * cqe_siz
138 mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) argument
350 struct mlx4_cqe *cqe, *new_cqe; local
495 dump_cqe(void *cqe) argument
505 mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ib_wc *wc) argument
580 use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, unsigned tail, struct mlx4_cqe *cqe, int is_eth) argument
659 struct mlx4_cqe *cqe; local
922 struct mlx4_cqe *cqe, *dest; local
[all...]
/freebsd-11-stable/contrib/ofed/libcxgb4/
cq.c:44 struct t4_cqe cqe; local
48 memset(&cqe, 0, sizeof(cqe));
49 cqe.header = htobe32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
54 cqe.bits_type_ts = htobe64(V_CQE_GENBIT((u64)cq->gen));
55 cq->sw_queue[cq->sw_pidx] = cqe;
77 struct t4_cqe cqe; local
81 memset(&cqe, 0, sizeof(cqe));
82 cqe
284 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
302 struct t4_cqe *cqe; local
345 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
563 struct t4_cqe cqe, *rd_cqe; local
[all...]
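
Both libcxgb4 here and the kernel iw_cxgbe driver below synthesize "flush" completions: when a QP is torn down, a CQE carrying the T4_ERR_SWFLUSH status and the current generation bit is written into a software-side queue so every outstanding work request still completes. A schematic version; the field packing is invented, and the real code builds the header with the V_CQE_* macros and byte-swaps with htobe32/htobe64:

#include <stdint.h>
#include <string.h>

#define SW_CQ_DEPTH 256u
#define ERR_SWFLUSH 5u                   /* hypothetical status value */

struct t_cqe { uint32_t header; uint64_t bits_type_ts; };

struct sw_cq {
    struct t_cqe queue[SW_CQ_DEPTH];
    uint32_t     pidx;                   /* software producer index */
    uint32_t     gen;                    /* current generation bit */
};

static void insert_flush_cqe(struct sw_cq *cq, uint32_t qpid)
{
    struct t_cqe cqe;

    memset(&cqe, 0, sizeof(cqe));
    cqe.header       = (ERR_SWFLUSH << 24) | (qpid & 0xffffffu);
    cqe.bits_type_ts = (uint64_t)cq->gen << 63;   /* marks the entry valid */

    cq->queue[cq->pidx] = cqe;
    if (++cq->pidx == SW_CQ_DEPTH) {              /* wrap and flip generation */
        cq->pidx = 0;
        cq->gen ^= 1;
    }
}
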
libcxgb4.h:186 struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
189 int c4iw_resize_cq(struct ibv_cq *cq, int cqe);
252 unsigned long cqe; member in struct:c4iw_stats
/freebsd-11-stable/sys/dev/cxgbe/iw_cxgbe/
cq.c:204 struct t4_cqe cqe; local
208 memset(&cqe, 0, sizeof(cqe));
209 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
214 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
215 cq->sw_queue[cq->sw_pidx] = cqe;
237 struct t4_cqe cqe; local
241 memset(&cqe, 0, sizeof(cqe));
242 cqe
438 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
456 struct t4_cqe *cqe; local
489 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
716 struct t4_cqe cqe = {0, 0}, *rd_cqe; local
1036 c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) argument
[all...]
/freebsd-11-stable/sys/dev/qlnx/qlnxe/
ecore_sp_api.h:57 * ramrod on the cqe ring
60 * @param cqe
65 struct eth_slow_path_rx_cqe *cqe);
qlnx_os.c:4060 struct eth_fast_path_rx_tpa_start_cqe *cqe)
4077 agg_index = cqe->tpa_agg_index;
4095 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
4096 cqe->pars_flags.flags, cqe->vlan_tag,
4097 cqe->rss_hash, cqe->len_on_first_bd, cqe
4057 qlnx_tpa_start(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_rx_queue *rxq, struct eth_fast_path_rx_tpa_start_cqe *cqe) argument
4388 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_rx_queue *rxq, struct eth_fast_path_rx_tpa_cont_cqe *cqe) argument
4512 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_rx_queue *rxq, struct eth_fast_path_rx_tpa_end_cqe *cqe) argument
4723 union eth_rx_cqe *cqe; local
[all...]
/freebsd-11-stable/contrib/ofed/libmlx5/
cq.c:66 static inline uint8_t get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) argument
68 return (cqe->l4_hdr_type_etc >> 2) & 0x3;
83 void *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); local
86 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64;
89 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibv_cq.cqe + 1)))) {
90 return cqe;
106 static inline void handle_good_req(struct ibv_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_wq *wq, int idx) argument
108 switch (be32toh(cqe
143 handle_responder_lazy(struct mlx5_cq *cq, struct mlx5_cqe64 *cqe, struct mlx5_resource *cur_rsc, struct mlx5_srq *srq) argument
184 handle_responder(struct ibv_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_resource *cur_rsc, struct mlx5_srq *srq) argument
271 mlx5_handle_error_cqe(struct mlx5_err_cqe *cqe) argument
471 void *cqe; local
516 mlx5_parse_cqe(struct mlx5_cq *cq, struct mlx5_cqe64 *cqe64, void *cqe, struct mlx5_resource **cur_rsc, struct mlx5_srq **cur_srq, struct ibv_wc *wc, int cqe_ver, int lazy) argument
701 mlx5_parse_lazy_cqe(struct mlx5_cq *cq, struct mlx5_cqe64 *cqe64, void *cqe, int cqe_ver) argument
719 void *cqe; local
836 void *cqe; local
905 void *cqe; local
1368 void *cqe, *dest; local
1484 struct mlx5_cqe64 *cqe; local
[all...]
mlx5dv.h:312 uint8_t mlx5dv_get_cqe_owner(struct mlx5_cqe64 *cqe) argument
314 return cqe->op_own & 0x1;
318 void mlx5dv_set_cqe_owner(struct mlx5_cqe64 *cqe, uint8_t val) argument
320 cqe->op_own = (val & 0x1) | (cqe->op_own & ~0x1);
325 uint8_t mlx5dv_get_cqe_se(struct mlx5_cqe64 *cqe) argument
327 return (cqe->op_own >> 1) & 0x1;
331 uint8_t mlx5dv_get_cqe_format(struct mlx5_cqe64 *cqe) argument
333 return (cqe->op_own >> 2) & 0x3;
337 uint8_t mlx5dv_get_cqe_opcode(struct mlx5_cqe64 *cqe) argument
[all...]
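
The mlx5dv accessors above all decode one op_own byte: owner in bit 0, solicited-event in bit 1, CQE format in bits 2-3, opcode in bits 4-7. A throwaway check of that layout:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t op_own = 0xa6;                  /* 1010 0110 */

    assert((op_own & 0x1) == 0);            /* owner  (mlx5dv_get_cqe_owner)  */
    assert(((op_own >> 1) & 0x1) == 1);     /* se     (mlx5dv_get_cqe_se)     */
    assert(((op_own >> 2) & 0x3) == 1);     /* format (mlx5dv_get_cqe_format) */
    assert(((op_own >> 4) & 0xf) == 0xa);   /* opcode (mlx5dv_get_cqe_opcode) */
    return 0;
}
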
/freebsd-11-stable/sys/dev/mlx5/mlx5_ib/
mlx5_ib_cq.c:79 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); local
82 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
85 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
86 return cqe;
115 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, argument
119 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
133 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
162 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, argument
265 dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) argument
277 mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, struct ib_wc *wc) argument
402 get_sig_err_item(struct mlx5_sig_err_cqe *cqe, struct ib_sig_err *item) argument
527 void *cqe; local
827 void *cqe; local
1039 void *cqe, *dest; local
[all...]
/freebsd-11-stable/sys/dev/oce/
oce_queue.c:1091 struct oce_nic_tx_cqe *cqe; local
1098 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1099 if (cqe->u0.dw[3] == 0)
1101 cqe->u0.dw[3] = 0;
1137 struct oce_nic_rx_cqe *cqe; local
1144 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1145 /* dequeue till you reach an invalid cqe */
1146 while (RQ_CQE_VALID(cqe)) {
1147 RQ_CQE_INVALIDATE(cqe);
1149 cqe
1185 struct nic_hwlro_singleton_cqe *cqe; local
1261 struct oce_nic_rx_cqe *cqe; local
[all...]
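
oce uses neither an owner bit nor a generation bit: a CQE is live while its last dword is nonzero (the `cqe->u0.dw[3] == 0` test above), and the driver consumes it by zeroing that dword back out. A sketch of the scheme:

#include <stdint.h>

struct oce_cqe { uint32_t dw[4]; };

/* Returns 1 if a completion was consumed, 0 if the ring is empty. */
static int oce_drain_one(struct oce_cqe *cqe)
{
    if (cqe->dw[3] == 0)
        return 0;              /* nothing new from hardware */

    /* ... decode and handle the completion fields here ... */

    cqe->dw[3] = 0;            /* invalidate: the slot can be reused */
    return 1;
}
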
oce_if.c:163 static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
164 static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
165 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
169 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
1441 struct oce_nic_tx_cqe *cqe; local
1447 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1448 while (cqe->u0.dw[3]) {
1449 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1451 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1457 cqe
1632 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2) argument
1714 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) argument
1842 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) argument
1859 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) argument
2032 struct nic_hwlro_singleton_cqe *cqe; local
2105 struct oce_nic_rx_cqe *cqe; local
2702 oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe) argument
2733 struct oce_mq_cqe *cqe; local
[all...]
/freebsd-11-stable/sys/dev/mlx5/
device.h:672 static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe) argument
674 return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
677 static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) argument
679 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
682 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) argument
684 return (cqe->l4_hdr_type_etc >> 4) & 0x7;
687 static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe) argument
689 return be16_to_cpu(cqe->vlan_info) & 0xfff;
692 static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac) argument
694 memcpy(smac, &cqe
698 cqe_has_vlan(struct mlx5_cqe64 *cqe) argument
703 cqe_is_tunneled(struct mlx5_cqe64 *cqe) argument
1222 mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe) argument
[all...]
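
device.h packs several RX flags the same way: LRO timestamp-valid and TCP-PSH in bits 7 and 6 of lro_tcppsh_abort_dupack, the L4 header type in bits 4-6 of l4_hdr_type_etc, and the VLAN ID in the low 12 bits of the big-endian vlan_info. A minimal check of the VLAN case; htobe16/be16toh come from endian.h on glibc (FreeBSD spells it sys/endian.h), standing in for the kernel's be16_to_cpu:

#include <assert.h>
#include <endian.h>
#include <stdint.h>

int main(void)
{
    /* PCP 3, VID 0x123, stored big-endian as the hardware would write it. */
    uint16_t vlan_info = htobe16((3u << 13) | 0x123);

    assert((be16toh(vlan_info) & 0xfff) == 0x123);   /* get_cqe_vlan() */
    return 0;
}
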
/freebsd-11-stable/sys/dev/mlx5/mlx5_fpga/
mlx5fpga_conn.c:250 struct mlx5_cqe64 *cqe, u8 status)
255 ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
259 buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
291 struct mlx5_cqe64 *cqe, u8 status)
299 ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
333 struct mlx5_cqe64 *cqe)
337 opcode = cqe->op_own >> 4;
341 status = ((struct mlx5_err_cqe *)cqe)->syndrome;
344 mlx5_fpga_conn_sq_cqe(conn, cqe, status);
348 status = ((struct mlx5_err_cqe *)cqe)
249 mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) argument
290 mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) argument
332 mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe) argument
385 struct mlx5_cqe64 *cqe; local
433 struct mlx5_cqe64 *cqe; local
[all...]
/freebsd-11-stable/sys/dev/mlx4/mlx4_en/
mlx4_en_rx.c:604 struct mlx4_cqe *cqe)
607 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
610 ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
611 ((struct mlx4_err_cqe *)cqe)->syndrome);
614 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
732 /* For cpu arch with cache line of 64B the performance is better when cqe size==64B
733 * To enlarge cqe size from 32B to 64B --> 32B of garbage (i.e. 0xccccccc)
734 * was added in the beginning of each cqe (the real data is in the corresponding 32B).
736 * and we get the real cqe data*/
741 struct mlx4_cqe *cqe; local
603 invalid_cqe(struct mlx4_en_priv *priv, struct mlx4_cqe *cqe) argument
[all...]
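
The comment above (lines 732-736) describes the mlx4_en "CQE factor": with a 64-byte CQE stride, the first 32 bytes are padding and the real completion data sits in the second half, so indexing must scale by the stride and then skip the padding. The libmlx5 and mlx5_ib hits earlier do the same for 128-byte strides (cqe + 64). A sketch of the address arithmetic with invented names:

#include <stdint.h>

#define CQE_DATA_SIZE 32u

static const uint8_t *get_cqe_data(const uint8_t *ring, uint32_t index,
                                   uint32_t stride /* 32 or 64 bytes */)
{
    uint32_t factor = (stride == 64u) ? 1u : 0u;   /* 1 => skip 32B of padding */

    return ring + (uint64_t)index * stride + factor * CQE_DATA_SIZE;
}
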
/freebsd-11-stable/contrib/ofed/libibverbs/
compat-1_0.c:140 int cqe; member in struct:ibv_cq_1_0
175 struct ibv_cq * (*create_cq)(struct ibv_context *context, int cqe,
183 int (*resize_cq)(struct ibv_cq *cq, int cqe);
256 struct ibv_cq_1_0 *__ibv_create_cq_1_0(struct ibv_context_1_0 *context, int cqe,
260 int __ibv_resize_cq_1_0(struct ibv_cq_1_0 *cq, int cqe);
739 struct ibv_cq_1_0 *__ibv_create_cq_1_0(struct ibv_context_1_0 *context, int cqe, argument
751 real_cq = ibv_create_cq(context->real_context, cqe, cq_context,
760 cq->cqe = cqe;
769 int __ibv_resize_cq_1_0(struct ibv_cq_1_0 *cq, int cqe) argument
[all...]
/freebsd-11-stable/contrib/ofed/librdmacm/examples/
cmatose.c:112 int cqe, ret; local
121 cqe = message_count ? message_count : 1;
122 node->cq[SEND_CQ_INDEX] = ibv_create_cq(node->cma_id->verbs, cqe, node, NULL, 0);
123 node->cq[RECV_CQ_INDEX] = ibv_create_cq(node->cma_id->verbs, cqe, node, NULL, 0);
131 init_qp_attr.cap.max_send_wr = cqe;
132 init_qp_attr.cap.max_recv_wr = cqe;
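
cmatose sizes both CQs and the QP's send/receive work-request caps from one message count, clamped to at least 1. The same pattern against the real libibverbs API (link with -libverbs), with error handling reduced to the minimum:

#include <infiniband/verbs.h>
#include <stdio.h>

static int create_cqs(struct ibv_context *verbs, int message_count,
                      struct ibv_cq **send_cq, struct ibv_cq **recv_cq)
{
    int cqe = message_count ? message_count : 1;   /* never ask for a 0-entry CQ */

    *send_cq = ibv_create_cq(verbs, cqe, NULL, NULL, 0);
    *recv_cq = ibv_create_cq(verbs, cqe, NULL, NULL, 0);
    if (!*send_cq || !*recv_cq) {
        perror("ibv_create_cq");
        return -1;
    }
    return 0;
}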
