Searched refs:cqe (Results 176 - 200 of 233) sorted by relevance


/linux-master/drivers/infiniband/core/
uverbs_cmd.c
1035 attr.cqe = cmd->cqe;
1066 resp.base.cqe = cq->cqe;
1093 cmd_ex.cqe = cmd.cqe;
1133 ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
1137 resp.cqe = cq->cqe;
/linux-master/drivers/net/ethernet/fungible/funeth/
funeth_rx.c
206 static const void *cqe_to_info(const void *cqe) argument
208 return cqe + FUNETH_CQE_INFO_OFFSET;
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_rx.c
563 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
567 switch (get_cqe_tls_offload(cqe)) {
562 mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) argument
/linux-master/net/sunrpc/xprtrdma/
svc_rdma_sendto.c
298 struct ib_cqe *cqe = wc->wr_cqe; local
300 container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
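Note: this hit shows the standard in-kernel RDMA completion pattern: a struct ib_cqe is embedded in a per-request context, its done callback is set before posting, and the handler recovers the context with container_of(). A minimal sketch of the pattern follows; the my_* names are hypothetical, only the <rdma/ib_verbs.h> API is real.

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical send context embedding an ib_cqe, mirroring svc_rdma_send_ctxt. */
struct my_send_ctxt {
	struct ib_cqe cqe;	/* embedded completion entry */
	void *payload;
};

/* Completion handler: recover the enclosing context from wc->wr_cqe. */
static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_send_ctxt *ctxt =
		container_of(wc->wr_cqe, struct my_send_ctxt, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("send failed: %d\n", wc->status);
	kfree(ctxt);
}

/* Posting side: point the work request at the embedded cqe. */
static int my_post_send(struct ib_qp *qp, struct my_send_ctxt *ctxt)
{
	struct ib_send_wr wr = {
		.wr_cqe     = &ctxt->cqe,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};

	ctxt->cqe.done = my_send_done;
	return ib_post_send(qp, &wr, NULL);
}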
/linux-master/net/smc/
smc_ib.c
842 .cqe = SMC_MAX_CQE, .comp_vector = 0 };
854 cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
/linux-master/drivers/nvme/host/
pci.c
1014 struct nvme_completion *cqe = &nvmeq->cqes[idx]; local
1015 __u16 command_id = READ_ONCE(cqe->command_id);
1026 cqe->status, &cqe->result);
1034 command_id, le16_to_cpu(cqe->sq_id));
1038 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1039 if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
1066 * the cqe requires a full read memory barrier
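Note: the pci.c hits above are from the driver's CQ consumption path, where each valid entry is matched to a request by command_id and completed. Entry validity is signalled by a phase bit that flips on every wraparound of the ring. A simplified sketch of that polling scheme, with a reduced queue structure (the real struct nvme_queue carries more state):

#include <linux/nvme.h>

struct my_nvme_queue {
	struct nvme_completion *cqes;	/* completion queue ring */
	u16 cq_head;			/* next entry to consume */
	u16 q_depth;
	u8 cq_phase;			/* expected phase bit, flips each wrap */
};

/* An entry is valid when its phase bit matches the expected phase. */
static bool my_cqe_pending(struct my_nvme_queue *q)
{
	struct nvme_completion *cqe = &q->cqes[q->cq_head];

	return (le16_to_cpu(READ_ONCE(cqe->status)) & 1) == q->cq_phase;
}

static void my_process_cq(struct my_nvme_queue *q)
{
	while (my_cqe_pending(q)) {
		struct nvme_completion *cqe = &q->cqes[q->cq_head];
		u16 command_id = READ_ONCE(cqe->command_id);

		/* ... look up the request by command_id and complete it ... */
		(void)command_id;

		if (++q->cq_head == q->q_depth) {
			q->cq_head = 0;
			q->cq_phase ^= 1;	/* phase flips on wraparound */
		}
	}
	/* ... then ring the CQ head doorbell ... */
}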
fc.c
1934 struct nvme_completion *cqe = &op->rsp_iu.cqe; local
1964 * every field in the cqe - in cases where the FC transport must
1967 * cqe.sqid, cqe.sqhd, cqe.command_id
1995 * non-zero status and the content of the cqe isn't important.
2003 * extract the status and result from the cqe (create it
2032 * Validate ERSP IU and look at cqe.
2039 sqe->common.command_id != cqe
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en.h
357 /* data path - accessed per cqe */
374 /* cqe decompression */
604 struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
608 struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
en_stats.h
464 u64 cqe; member in struct:mlx5e_ptp_cq_stats
/linux-master/drivers/infiniband/ulp/isert/
ib_isert.c
2040 struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
2088 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
2100 struct ib_cqe *cqe = NULL; local
2109 cqe = &isert_cmd->tx_desc.tx_cqe;
2129 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2039 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) argument
/linux-master/drivers/scsi/bnx2fc/
bnx2fc_hwi.c
1068 struct fcoe_cqe *cqe; local
1087 cqe = &cq[cq_cons];
1089 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
1101 cqe++;
1107 cqe = cq;
/linux-master/drivers/nvme/target/
tcp.c
240 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
247 !cmd->req.cqe->status;
471 pdu->command_id = cmd->req.cqe->command_id;
1476 c->req.cqe = &c->rsp_pdu->cqe;
zns.c
525 req->cqe->result.u64 =
/linux-master/drivers/scsi/lpfc/
lpfc_sli.c
80 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
92 struct lpfc_cqe *cqe);
592 struct lpfc_cqe *cqe = NULL; local
611 cqe = lpfc_sli4_cq_get(cq);
612 while (cqe) {
613 __lpfc_sli4_consume_cqe(phba, cq, cqe);
615 cqe = lpfc_sli4_cq_get(cq);
685 struct lpfc_cqe *cqe; local
690 cqe = lpfc_sli4_qe(q, q->host_index);
693 if (bf_get_le32(lpfc_cqe_valid, cqe) !
709 __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
14425 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
14731 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
14854 struct lpfc_cqe *cqe; local
15278 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
[all...]
/linux-master/drivers/vdpa/mlx5/net/
mlx5_vnet.c
62 int cqe; member in struct:mlx5_vdpa_cq
333 void *cqe; local
337 cqe = get_cqe(vcq, i);
338 cqe64 = cqe;
345 struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));
348 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
603 vcq->cqe = num_ent;
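Note: the ownership test visible above is the mlx5 CQE convention. The CQE count is a power of two, so indexing with n & (cqe - 1) wraps, and the owner bit of a valid entry must equal the wrap parity !!(n & cqe) of the consumer counter, flipping automatically on every pass over the ring. A hedged sketch of that check (my_get_sw_cqe is a made-up name; struct mlx5_cqe64 and the MLX5_CQE_* constants are the real ones from <linux/mlx5/device.h>):

#include <linux/mlx5/device.h>

/* n is the consumer counter; num_cqes must be a power of two. */
static struct mlx5_cqe64 *my_get_sw_cqe(struct mlx5_cqe64 *ring, int num_cqes, int n)
{
	struct mlx5_cqe64 *cqe = &ring[n & (num_cqes - 1)];
	u8 op_own = cqe->op_own;

	/* Software owns the entry when the opcode is not INVALID and the
	 * owner bit matches the wrap parity of the consumer counter. */
	if (((op_own >> 4) != MLX5_CQE_INVALID) &&
	    !((op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & num_cqes)))
		return cqe;
	return NULL;
}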
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c
1065 struct mlx5_cqe64 *cqe; local
1089 cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1090 cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
/linux-master/drivers/mmc/core/
core.c
265 bool cqe)
275 mmc_hostname(host), cqe ? "CQE direct " : "",
277 } else if (cqe) {
264 mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq, bool cqe) argument
/linux-master/drivers/infiniband/hw/mthca/
mthca_dev.h
499 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
/linux-master/drivers/net/ethernet/mellanox/mlx4/
mlx4_en.h
809 u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
/linux-master/include/linux/
hisi_acc_qm.h
422 struct qm_cqe *cqe; member in struct:hisi_qp
/linux-master/net/rds/
ib_cm.c
543 cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
557 cq_attr.cqe = ic->i_recv_ring.w_nr;
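Note: hits like the smc_ib.c and rds ones set struct ib_cq_init_attr::cqe, the requested minimum number of CQ entries, before creating the completion queue; the usual sizing is one entry per outstanding work request plus slack. A minimal sketch using the in-kernel verbs API (the my_* handlers and the nr_wrs parameter are illustrative):

#include <rdma/ib_verbs.h>

static void my_cq_comp_handler(struct ib_cq *cq, void *ctx)
{
	/* kick the poll path, e.g. schedule a tasklet */
}

static void my_cq_event_handler(struct ib_event *event, void *ctx)
{
	pr_warn("CQ event %d\n", event->event);
}

static struct ib_cq *my_create_cq(struct ib_device *dev, void *ctx, u32 nr_wrs)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe	     = nr_wrs + 1,	/* requested CQ depth */
		.comp_vector = 0,
	};

	return ib_create_cq(dev, my_cq_comp_handler, my_cq_event_handler,
			    ctx, &cq_attr);
}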
/linux-master/drivers/infiniband/hw/mlx4/
qp.c
4383 struct ib_cqe cqe; member in struct:mlx4_ib_drain_cqe
4389 struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe, local
4391 cqe);
4393 complete(&cqe->done);
4455 { .wr_cqe = &sdrain.cqe, },
4469 sdrain.cqe.done = mlx4_ib_drain_qp_done;
4498 rwr.wr_cqe = &rdrain.cqe;
4499 rdrain.cqe.done = mlx4_ib_drain_qp_done;
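Note: the mlx4 drain hits combine the two patterns above: a marker work request carries a wr_cqe whose done() callback fires a struct completion, so the caller can sleep until everything posted before the marker has completed. A condensed sketch with hypothetical my_* names (the real drain helpers also move the QP to the error state first, which is omitted here):

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

struct my_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void my_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_drain_cqe *drain =
		container_of(wc->wr_cqe, struct my_drain_cqe, cqe);

	complete(&drain->done);
}

/* Post a signaled marker send, then block until its CQE is reaped;
 * all earlier sends on the QP must have completed by then. */
static int my_drain_sq(struct ib_qp *qp)
{
	struct my_drain_cqe sdrain;
	struct ib_send_wr swr = {
		.wr_cqe	    = &sdrain.cqe,
		.send_flags = IB_SEND_SIGNALED,
	};
	int ret;

	sdrain.cqe.done = my_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_post_send(qp, &swr, NULL);
	if (ret)
		return ret;

	wait_for_completion(&sdrain.done);
	return 0;
}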
/linux-master/drivers/infiniband/ulp/srp/
ib_srp.c
1870 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1901 iu->cqe.done = srp_send_done;
1904 wr.wr_cqe = &iu->cqe;
1923 iu->cqe.done = srp_recv_done;
1926 wr.wr_cqe = &iu->cqe;
2061 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
/linux-master/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_ethtool.c
2506 union eth_rx_cqe *cqe; local
2658 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
2659 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2664 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
2672 data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
/linux-master/drivers/infiniband/hw/irdma/
utils.c
2379 __le64 *cqe; local
2383 cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
2384 get_64bit_val(cqe, 24, &qword3);
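Note: the irdma hit reads the CQE as raw little-endian 64-bit words; qword 3 sits at byte offset 24 of the entry. A one-line sketch of that accessor style (my_get_64bit_val is a stand-in for the driver's helper, which returns the value through a pointer instead):

#include <linux/types.h>

static inline u64 my_get_64bit_val(__le64 *cqe, u32 byte_off)
{
	/* byte_off must be 8-byte aligned; 24 selects qword 3. */
	return le64_to_cpu(cqe[byte_off / sizeof(u64)]);
}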

