Searched refs:cqe (Results 1 - 25 of 233) sorted by relevance


/linux-master/drivers/infiniband/sw/siw/
siw_cq.c
50 struct siw_cqe *cqe; local
55 cqe = &cq->queue[cq->cq_get % cq->num_cqe];
56 if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
58 wc->wr_id = cqe->id;
59 wc->byte_len = cqe->bytes;
67 if (cqe->flags & SIW_WQE_REM_INVAL) {
68 wc->ex.invalidate_rkey = cqe->inval_stag;
71 wc->qp = cqe->base_qp;
72 wc->opcode = map_wc_opcode[cqe->opcode];
73 wc->status = map_cqe_status[cqe
[all...]
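
The siw snippet above shows the usual consumer-side pattern: index the ring with a free-running get counter and only trust the slot once its valid flag is observed. A minimal userspace model of that pattern, with hypothetical structure and flag names (not the siw driver's own types), acquire/release atomics standing in for READ_ONCE plus the kernel's barriers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define CQE_VALID 0x1u

struct cqe {
	atomic_uint flags;   /* CQE_VALID is set last by the producer */
	uint64_t id;
	uint32_t bytes;
};

struct cq {
	struct cqe *queue;
	uint32_t num_cqe;
	uint32_t cq_get;     /* free-running consumer index */
};

/* Reap one completion if available; returns false when the ring is empty. */
static bool poll_one(struct cq *cq, uint64_t *id, uint32_t *bytes)
{
	struct cqe *cqe = &cq->queue[cq->cq_get % cq->num_cqe];

	/* acquire load pairs with the producer's release store of the flag */
	if (!(atomic_load_explicit(&cqe->flags, memory_order_acquire) & CQE_VALID))
		return false;

	*id = cqe->id;
	*bytes = cqe->bytes;

	/* clear the flag to hand the slot back, then advance */
	atomic_store_explicit(&cqe->flags, 0, memory_order_release);
	cq->cq_get++;
	return true;
}
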
/linux-master/drivers/infiniband/sw/rxe/
rxe_cq.c
12 int cqe, int comp_vector)
16 if (cqe <= 0) {
17 rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
21 if (cqe > rxe->attr.max_cqe) {
22 rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
23 cqe, rxe->attr.max_cqe);
29 if (cqe < count) {
30 rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
31 cqe, coun
11 rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector) argument
42 rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int comp_vector, struct ib_udata *udata, struct rxe_create_cq_resp __user *uresp) argument
72 rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct rxe_resize_cq_resp __user *uresp, struct ib_udata *udata) argument
88 rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) argument
[all...]
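
rxe_cq_chk_attr() above is essentially three range checks on the requested CQ depth. A sketch of the same checks, with hypothetical limit fields and plain errno-style returns:

#include <errno.h>

struct cq_limits {
	int max_cqe;    /* device-reported maximum depth */
	int queued;     /* completions currently sitting in the queue */
};

static int cq_check_depth(const struct cq_limits *cq, int cqe)
{
	if (cqe <= 0)
		return -EINVAL;   /* must hold at least one entry */
	if (cqe > cq->max_cqe)
		return -EINVAL;   /* beyond what the device supports */
	if (cqe < cq->queued)
		return -EINVAL;   /* a resize may not drop queued completions */
	return 0;
}
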
/linux-master/drivers/infiniband/hw/mlx4/
cq.c
81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); local
82 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
133 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) argument
135 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
140 struct ib_umem **umem, u64 buf_addr, int cqe)
147 *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_siz
138 mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) argument
356 struct mlx4_cqe *cqe, *new_cqe; local
500 dump_cqe(void *cqe) argument
510 mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ib_wc *wc) argument
580 use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, unsigned tail, struct mlx4_cqe *cqe, int is_eth) argument
660 struct mlx4_cqe *cqe; local
919 struct mlx4_cqe *cqe, *dest; local
[all...]
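
The get_sw_cqe() fragment above encodes ownership in a single hardware-written bit plus the wrap parity of the consumer index: with a power-of-two ring, the bit at log2(size) of the index flips on every lap, and the entry belongs to software only when the owner bit matches that parity. A model of the test with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

#define CQE_OWNER_BIT 0x80u

/* ring_size must be a power of two; cons_index is free-running */
static bool cqe_is_sw_owned(uint8_t owner_byte, uint32_t cons_index,
			    uint32_t ring_size)
{
	bool hw_bit  = owner_byte & CQE_OWNER_BIT;   /* written by the device */
	bool lap_odd = cons_index & ring_size;       /* toggles on every wrap */

	/* a mismatch means the device has not written this slot this lap */
	return hw_bit == lap_odd;
}
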
/linux-master/drivers/infiniband/hw/mthca/
mthca_cq.c
174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
191 __be32 *cqe = cqe_ptr; local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[
264 is_recv_cqe(struct mthca_cqe *cqe) argument
276 struct mthca_cqe *cqe; local
311 cqe, MTHCA_CQ_ENTRY_SIZE); local
366 mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) argument
372 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) argument
485 struct mthca_cqe *cqe; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h
111 static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
116 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
118 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
120 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
124 char *cqe, u32 val) \
129 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
132 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
135 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
153 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
164 MLXSW_ITEM32(pci, cqe, wqe_counte
281 mlxsw_pci_cqe2_mirror_cong_get(const char *cqe) argument
338 mlxsw_pci_cqe2_time_stamp_get(const char *cqe) argument
346 mlxsw_pci_cqe2_time_stamp_sec_get(const char *cqe) argument
353 mlxsw_pci_cqe2_time_stamp_nsec_get(const char *cqe) argument
[all...]
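
pci_hw.h above generates one get/set wrapper per CQE field that dispatches on the CQE format version. Setting the macro machinery aside, the generated shape is roughly the following (the field name, enum, and getters here are made up for illustration):

#include <stdint.h>

enum cqe_ver { CQE_V0, CQE_V1, CQE_V2 };

/* per-version getters; the real ones are generated from field layouts */
static inline uint32_t cqe_v0_port_get(const char *cqe) { (void)cqe; return 0; }
static inline uint32_t cqe_v1_port_get(const char *cqe) { (void)cqe; return 0; }
static inline uint32_t cqe_v2_port_get(const char *cqe) { (void)cqe; return 0; }

/* version-dispatching wrapper: the shape the macro expands to */
static inline uint32_t cqe_port_get(enum cqe_ver v, const char *cqe)
{
	switch (v) {
	case CQE_V0:
		return cqe_v0_port_get(cqe);
	case CQE_V1:
		return cqe_v1_port_get(cqe);
	case CQE_V2:
	default:
		return cqe_v2_port_get(cqe);
	}
}
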
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
macsec.h
32 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) argument
34 return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
38 struct mlx5_cqe64 *cqe);
46 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } argument
49 struct mlx5_cqe64 *cqe)
47 mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, struct mlx5_cqe64 *cqe) argument
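
The two mlx5e_macsec_is_rx_flow() definitions above illustrate the usual compile-out idiom: when the feature is configured out, a static inline stub keeps callers compiling and simply reports "not my flow". A sketch with a hypothetical config symbol and type:

#include <stdbool.h>

struct example_cqe;   /* opaque here; only the real implementation looks inside */

#ifdef CONFIG_EXAMPLE_MACSEC_OFFLOAD
bool example_is_macsec_rx_flow(struct example_cqe *cqe);
#else
static inline bool example_is_macsec_rx_flow(struct example_cqe *cqe)
{
	return false;   /* feature compiled out: never treat a flow as MACsec */
}
#endif
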
ktls_txrx.h
23 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
87 struct mlx5_cqe64 *cqe,
85 mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) argument
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
83 cq->ibcq.cqe, &head);
105 int entries = attr->cqe;
132 cq->ibcq.cqe = entries;
186 cmd->cqe = entries;
195 cq->ibcq.cqe = resp->cqe;
288 cq->ibcq.cqe, &head);
293 cq->ibcq.cqe);
294 struct pvrdma_cqe *cqe; local
298 (cq->ibcq.cqe
329 struct pvrdma_cqe *cqe; local
[all...]
/linux-master/drivers/net/ethernet/qlogic/qede/
qede_ptp.h
23 union eth_rx_cqe *cqe,
27 if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) &
29 if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags)
22 qede_ptp_record_rx_ts(struct qede_dev *edev, union eth_rx_cqe *cqe, struct sk_buff *skb) argument
qede_fp.c
650 struct eth_fast_path_rx_tpa_start_cqe *cqe)
652 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
660 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
661 cqe->header_len;
833 struct eth_fast_path_rx_tpa_start_cqe *cqe)
835 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
840 pad = cqe->placement_offset + rxq->rx_headroom;
843 le16_to_cpu(cqe->len_on_first_bd),
864 if ((le16_to_cpu(cqe->pars_flags.flags) >>
867 tpa_info->vlan_tag = le16_to_cpu(cqe
648 qede_set_gro_params(struct qede_dev *edev, struct sk_buff *skb, struct eth_fast_path_rx_tpa_start_cqe *cqe) argument
831 qede_tpa_start(struct qede_dev *edev, struct qede_rx_queue *rxq, struct eth_fast_path_rx_tpa_start_cqe *cqe) argument
957 qede_tpa_cont(struct qede_dev *edev, struct qede_rx_queue *rxq, struct eth_fast_path_rx_tpa_cont_cqe *cqe) argument
972 qede_tpa_end(struct qede_dev *edev, struct qede_fastpath *fp, struct eth_fast_path_rx_tpa_end_cqe *cqe) argument
1064 qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, u16 flag) argument
1079 qede_rx_xdp(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_rx_queue *rxq, struct bpf_prog *prog, struct sw_rx_data *bd, struct eth_fast_path_rx_reg_cqe *cqe, u16 *data_offset, u16 *len) argument
1169 qede_rx_build_jumbo(struct qede_dev *edev, struct qede_rx_queue *rxq, struct sk_buff *skb, struct eth_fast_path_rx_reg_cqe *cqe, u16 first_bd_len) argument
1223 qede_rx_process_tpa_cqe(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_rx_queue *rxq, union eth_rx_cqe *cqe, enum eth_rx_cqe_type type) argument
1251 union eth_rx_cqe *cqe; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.h
16 struct mlx5_cqe64 *cqe,
22 struct mlx5_cqe64 *cqe,
/linux-master/drivers/infiniband/hw/cxgb4/
cq.c
186 struct t4_cqe cqe; local
190 memset(&cqe, 0, sizeof(cqe));
191 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
196 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
198 cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
199 cq->sw_queue[cq->sw_pidx] = cqe;
220 struct t4_cqe cqe; local
224 memset(&cqe, 0, sizeof(cqe));
422 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
445 struct t4_cqe *cqe; local
544 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit, struct t4_srq *srq) argument
757 struct t4_cqe cqe; local
[all...]
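
The cxgb4 snippet above fabricates a completion in software: a zeroed t4_cqe is stamped with the SWFLUSH status and the current generation bit, then appended to the CQ's software queue so that flushed work requests still complete back to the user. The idea, with hypothetical status codes and fields:

#include <stdint.h>
#include <string.h>

#define SW_CQ_DEPTH    256u
#define STATUS_FLUSHED 0x5u   /* made-up flush status */

struct sw_cqe {
	uint32_t status;
	uint64_t wr_id;
};

struct sw_cq {
	struct sw_cqe queue[SW_CQ_DEPTH];
	uint32_t pidx;            /* software producer index */
};

/* Queue a synthetic "flushed" completion for an aborted work request. */
static void insert_flush_cqe(struct sw_cq *cq, uint64_t wr_id)
{
	struct sw_cqe cqe;

	memset(&cqe, 0, sizeof(cqe));
	cqe.status = STATUS_FLUSHED;
	cqe.wr_id = wr_id;

	cq->queue[cq->pidx % SW_CQ_DEPTH] = cqe;
	cq->pidx++;
}
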
/linux-master/drivers/infiniband/sw/rdmavt/
cq.h
16 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
cq.c
54 if (head >= (unsigned)cq->ibcq.cqe) {
55 head = cq->ibcq.cqe;
167 unsigned int entries = attr->cqe;
237 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
249 cq->ibcq.cqe = entries;
338 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) argument
350 if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
357 sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
363 sz = sizeof(struct ib_wc) * (cqe
[all...]
/linux-master/tools/testing/selftests/net/
io_uring_zerocopy_tx.c
97 struct io_uring_cqe *cqe; local
157 ret = io_uring_wait_cqe(&ring, &cqe);
159 error(1, ret, "wait cqe");
161 if (cqe->user_data != NONZC_TAG &&
162 cqe->user_data != ZC_TAG)
163 error(1, -EINVAL, "invalid cqe->user_data");
165 if (cqe->flags & IORING_CQE_F_NOTIF) {
166 if (cqe->flags & IORING_CQE_F_MORE)
175 if (cqe->flags & IORING_CQE_F_MORE) {
176 if (cqe
[all...]
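
The selftest above reaps completions with liburing. A minimal, self-contained example of the same wait/inspect/mark-seen sequence, submitting a no-op so there is something to reap (build with -luring):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->user_data = 0x1234;           /* echoed back in the CQE */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) < 0)
		return 1;

	printf("user_data=%llu res=%d flags=0x%x\n",
	       (unsigned long long)cqe->user_data, cqe->res, cqe->flags);

	io_uring_cqe_seen(&ring, cqe);     /* advance the CQ head */
	io_uring_queue_exit(&ring);
	return 0;
}
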
/linux-master/drivers/infiniband/hw/erdma/
erdma_cq.c
11 __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci, local
14 be32_to_cpu(READ_ONCE(*cqe)));
16 return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
115 struct erdma_cqe *cqe; local
122 cqe = get_next_valid_cqe(cq);
123 if (!cqe)
131 qpn = be32_to_cpu(cqe->qpn);
132 wqe_idx = be32_to_cpu(cqe->qe_idx);
133 cqe_hdr = be32_to_cpu(cqe->hdr);
158 wc->byte_len = be32_to_cpu(cqe
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
65 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
69 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
90 struct mlx5_cqe64 *cqe)
95 memcpy(title, cqe, sizeof(struct mlx5_cqe64));
138 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); local
140 cqe->op_own = op_own;
146 struct mlx5_cqe64 *cqe local
89 mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
194 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq, struct mlx5_cqwq *wq, struct mlx5_cqe64 *cqe, int budget_rem) argument
999 struct mlx5_cqe64 *cqe; local
1146 mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) argument
1162 mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 cqe_bcnt) argument
1262 mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct tcphdr *skb_tcp_hd) argument
1274 mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4, struct mlx5_cqe64 *cqe, bool match) argument
1298 mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6, struct mlx5_cqe64 *cqe, bool match) argument
1319 mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) argument
1352 mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, struct sk_buff *skb) argument
1477 mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, struct sk_buff *skb, bool lro) argument
1548 mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct mlx5e_rq *rq, struct sk_buff *skb) argument
1607 mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct sk_buff *skb) argument
1628 mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct sk_buff *skb) argument
1661 mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, void *va, u16 headroom, u32 frame_sz, u32 len, struct mlx5e_xdp_buff *mxbuf) argument
1672 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, struct mlx5_cqe64 *cqe, u32 cqe_bcnt) argument
1720 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, struct mlx5_cqe64 *cqe, u32 cqe_bcnt) argument
1804 trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
1816 mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
1822 mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
1866 mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
1909 mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
1994 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset, u32 page_idx) argument
2139 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset, u32 page_idx) argument
2197 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index) argument
2260 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) argument
2297 mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
2382 mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
2445 struct mlx5_cqe64 *cqe, *title_cqe = NULL; local
2494 struct mlx5_cqe64 *cqe; local
2557 mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct sk_buff *skb) argument
2642 mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
2732 mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) argument
[all...]
wq.h
202 struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix); local
205 cqe += wq->fbc.log_stride == 7;
207 return cqe;
233 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); local
234 u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
240 /* ensure cqe content is read after cqe ownership bit */
243 return cqe;
251 struct mlx5_cqe64 *cqe; local
253 cqe
[all...]
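
The wq.h helper above checks the ownership bit against the queue's wrap parity and only then issues dma_rmb() before the caller touches the rest of the entry, because the device writes the CQE body first and flips ownership last. A userspace model of that ordering, with a hypothetical layout and an acquire fence standing in for dma_rmb():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct hw_cqe {
	uint32_t byte_cnt;        /* device-written payload fields ... */
	uint8_t  op_own;          /* low bit: ownership, written last */
};

/* cq_size is a power of two; ci is the free-running consumer index */
static bool cqe_ready(volatile struct hw_cqe *cqe, uint32_t ci, uint32_t cq_size)
{
	int hw_bit = cqe->op_own & 1;
	int sw_bit = !!(ci & cq_size);   /* expected parity for this lap */

	if (hw_bit != sw_bit)
		return false;            /* still owned by the device */

	/* ensure CQE contents are read only after the ownership check */
	atomic_thread_fence(memory_order_acquire);
	return true;
}
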
/linux-master/drivers/infiniband/hw/mlx5/
cq.c
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); local
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
88 return cqe;
117 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, argument
121 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
137 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
166 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, argument
270 dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, struct ib_wc *wc, const char *level) argument
279 mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, struct ib_wc *wc) argument
362 get_sig_err_item(struct mlx5_sig_err_cqe *cqe, struct ib_sig_err *item) argument
459 void *cqe; local
869 void *cqe; local
1074 void *cqe, *dest; local
[all...]
/linux-master/drivers/infiniband/ulp/iser/
iscsi_iser.h
231 * @cqe: completion handler
244 struct ib_cqe cqe; member in struct:iser_tx_desc
262 * @cqe: completion handler
271 struct ib_cqe cqe; member in struct:iser_rx_desc
283 * @cqe: completion handler
291 struct ib_cqe cqe; member in struct:iser_login_desc
556 iser_rx(struct ib_cqe *cqe) argument
558 return container_of(cqe, struct iser_rx_desc, cqe);
562 iser_tx(struct ib_cqe *cqe) argument
568 iser_login(struct ib_cqe *cqe) argument
[all...]
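
iser_rx(), iser_tx(), and iser_login() above all recover the enclosing descriptor from the embedded ib_cqe with container_of(), so the completion callback needs no extra context pointer. A standalone sketch of the pattern, using local stand-in types and a local container_of definition just for the example:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe_handle {
	void (*done)(struct cqe_handle *cqe);   /* completion callback */
};

struct rx_desc {
	char payload[64];
	struct cqe_handle cqe;                  /* embedded handle */
};

static void rx_done(struct cqe_handle *cqe)
{
	/* recover the descriptor that embeds this handle */
	struct rx_desc *desc = container_of(cqe, struct rx_desc, cqe);

	(void)desc;   /* process desc->payload here */
}
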
/linux-master/io_uring/
fdinfo.c
78 * we may get imprecise sqe and cqe info if uring is actively running
128 struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift]; local
131 entry & cq_mask, cqe->user_data, cqe->res,
132 cqe->flags);
135 cqe->big_cqe[0], cqe->big_cqe[1]);
218 struct io_uring_cqe *cqe = &ocqe->cqe; local
221 cqe
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/rep/
tc.h
39 void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
68 mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, argument
/linux-master/drivers/infiniband/hw/mana/
cq.c
36 if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
37 ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
41 cq->cqe = attr->cqe;
42 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
/linux-master/drivers/nvme/target/
fabrics-cmd.c
81 req->cqe->result.u64 = cpu_to_le64(val);
149 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
165 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
182 req->cqe->sq_head = cpu_to_le16(0xffff);
230 req->cqe->result.u32 = 0;
244 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
279 req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
308 req->cqe->result.u32 = 0;
329 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
338 req->cqe
[all...]
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
1484 struct cq_req *cqe = (struct cq_req *)hw_cqe; local
1486 if (qp == le64_to_cpu(cqe->qp_handle))
1487 cqe->qp_handle = 0;
1494 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe; local
1496 if (qp == le64_to_cpu(cqe->qp_handle))
1497 cqe->qp_handle = 0;
2275 struct bnxt_qplib_cqe *cqe; local
2281 cqe = *pcqe;
2291 memset(cqe, 0, sizeof(*cqe));
2316 struct bnxt_qplib_cqe *cqe; local
2479 struct bnxt_qplib_cqe *cqe; local
2588 struct bnxt_qplib_cqe *cqe; local
2668 struct bnxt_qplib_cqe *cqe; local
2768 struct bnxt_qplib_cqe *cqe; local
2860 struct bnxt_qplib_cqe *cqe; local
2971 bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num_cqes) argument
2994 bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num_cqes, struct bnxt_qplib_qp **lib_qp) argument
[all...]

Completed in 223 milliseconds
