Searched refs:cqe (Results 101 - 125 of 233) sorted by relevance

/linux-master/drivers/infiniband/hw/mthca/
mthca_provider.h:175 int cqe; member in struct:mthca_cq_resize
/linux-master/drivers/infiniband/core/
mad_priv.h:67 struct ib_cqe cqe; member in struct:ib_mad_list_head
/linux-master/drivers/infiniband/ulp/srpt/
ib_srpt.h:163 * @cqe: Completion queue element.
170 struct ib_cqe cqe; member in struct:srpt_ioctx
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.h:139 struct qmem *cqe; member in struct:otx2_cq_queue
/linux-master/drivers/infiniband/ulp/srp/
ib_srp.h:279 struct ib_cqe cqe; member in struct:srp_iu
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/rep/
tc.c:700 void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, argument
712 reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
724 reg_c1 = be32_to_cpu(cqe->ft_metadata);
744 if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv,
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.h:50 struct mlx5_cqe64 *cqe; member in struct:mlx5e_xdp_buff
/linux-master/drivers/vfio/pci/mlx5/
cmd.c:1014 void *cqe; local
1018 cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
1019 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
1644 mlx5vf_rq_cqe(struct mlx5_vhca_qp *qp, struct mlx5_cqe64 *cqe, argument
1651 *tracker_status = be32_to_cpu(cqe->immediate) >> 28;
1652 size = be32_to_cpu(cqe->byte_cnt);
1653 ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1);
1671 void *cqe = get_cqe(cq, n & (cq->ncqe - 1)); local
1674 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe
1688 struct mlx5_cqe64 *cqe; local
[all...]
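
Note: the `cqe_size == 64 ? cqe : cqe + 64` idiom in the vfio/mlx5 hits above handles hardware that writes either 64- or 128-byte CQE strides; with the larger stride, the 64-byte CQE payload sits in the second half of the slot. A minimal sketch of the same lookup, with invented names (my_cq, my_get_cqe64) standing in for the driver's helpers:

struct my_cq {
	void *buf;      /* CQ ring buffer */
	int cqe_size;   /* 64 or 128 bytes per slot */
	int ncqe;       /* power-of-two entry count */
};

static struct mlx5_cqe64 *my_get_cqe64(struct my_cq *cq, int n)
{
	/* wrap the consumer index with the power-of-two mask */
	void *slot = cq->buf + (size_t)(n & (cq->ncqe - 1)) * cq->cqe_size;

	/* a 128-byte stride keeps the 64-byte CQE in its second half */
	return cq->cqe_size == 64 ? slot : slot + 64;
}
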
/linux-master/drivers/nvme/target/
rdma.c:46 struct ib_cqe cqe; member in struct:nvmet_rdma_cmd
206 !rsp->req.cqe->status &&
341 c->cqe.done = nvmet_rdma_recv_done;
343 c->wr.wr_cqe = &c->cqe;
410 r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
411 if (!r->req.cqe)
414 r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
415 sizeof(*r->req.cqe), DMA_TO_DEVICE);
421 r->send_sge.length = sizeof(*r->req.cqe);
[all...]
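
Note: the rdma.c hits show the standard kernel `struct ib_cqe` pattern that recurs throughout these results (mad_priv.h, ib_srpt.h, ib_srp.h, transport_rdma.c, iser_verbs.c, umr.c): embed an ib_cqe in the per-request structure, set its .done callback, point the work request's wr_cqe at it, and recover the container with container_of() when the completion fires. A hedged sketch, with my_request, my_recv_done, and my_prepare as illustrative names:

#include <rdma/ib_verbs.h>

struct my_request {
	struct ib_cqe cqe;        /* embedded completion cookie */
	struct ib_recv_wr wr;
	/* ... buffers and SGEs ... */
};

static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* the work completion carries back the wr_cqe pointer we posted */
	struct my_request *req =
		container_of(wc->wr_cqe, struct my_request, cqe);
	/* ... check wc->status, consume wc->byte_len bytes of req ... */
}

static void my_prepare(struct my_request *req)
{
	req->cqe.done = my_recv_done;  /* as in nvmet_rdma_recv_done above */
	req->wr.wr_cqe = &req->cqe;
}
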
fc.c:2155 struct nvme_completion *cqe = &ersp->cqe; local
2156 u32 *cqewd = (u32 *)cqe;
2171 * must send an ersp for any condition where the cqe won't match
2188 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
2190 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2461 struct nvme_completion *cqe = &fod->rspiubuf.cqe; local
2471 fod->queue->sqhd = cqe->sq_head;
2481 memset(cqe,
[all...]
/linux-master/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c:2591 cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
3444 "failed to poll cqe for free mr, remain %d cqe.\n",
3451 "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
3475 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe); local
3477 /* A CQE is valid when its owner bit is the inverse of the MSB of cons_idx */
3478 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe
3503 struct hns_roce_v2_cqe *cqe, *dest; local
3674 get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe, struct ib_wc *wc) argument
3730 get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe, struct hns_roce_qp **cur_qp) argument
3803 fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) argument
3836 fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) argument
3889 struct hns_roce_v2_cqe *cqe; local
[all...]
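
Note: get_cqe_v2() above is the classic owner-bit handshake: hardware inverts the CQE owner bit on every pass through the ring, so software validates a CQE by comparing that bit against the parity of its consumer index (the mlx4 en_tx.c hit below does the same with an XNOR). A generic sketch, where owner_bit stands in for a hardware accessor such as hr_reg_read(cqe, CQE_OWNER):

static bool my_cqe_is_valid(u32 owner_bit, u32 cons_idx, u32 cq_depth)
{
	/*
	 * cq_depth is the power-of-two ring size; the bit of cons_idx at
	 * that position flips once per wrap. Per the hns comment above, a
	 * CQE is valid when the owner bit is the inverse of that parity.
	 */
	return owner_bit ^ !!(cons_idx & cq_depth);
}
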
/linux-master/drivers/infiniband/hw/efa/
efa_com.c:410 struct efa_admin_acq_entry *cqe)
415 cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
427 memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
435 struct efa_admin_acq_entry *cqe; local
446 cqe = &aq->cq.entries[ci];
449 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
456 efa_com_handle_single_admin_completion(aq, cqe);
465 cqe = &aq->cq.entries[ci];
409 efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq, struct efa_admin_acq_entry *cqe) argument
/linux-master/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c:176 cq_attr.cqe = size;
185 cq_attr.cqe = ipoib_sendq_size;
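
Note: in both ipoib hits, `cq_attr.cqe` is the requested number of CQ entries, not a CQE itself. A minimal sketch of kernel CQ creation under that convention (my_comp_handler and my_create_cq are illustrative placeholders):

#include <rdma/ib_verbs.h>

static void my_comp_handler(struct ib_cq *cq, void *cq_context)
{
	/* ... schedule NAPI / deferred polling ... */
}

static struct ib_cq *my_create_cq(struct ib_device *dev, void *priv, int size)
{
	struct ib_cq_init_attr cq_attr = {};

	cq_attr.cqe = size;   /* capacity in entries */
	return ib_create_cq(dev, my_comp_handler, NULL, priv, &cq_attr);
}
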
/linux-master/drivers/net/ethernet/mellanox/mlx4/
en_tx.c:435 struct mlx4_cqe *cqe; local
457 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
464 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
474 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
477 mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
481 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
490 timestamp = mlx4_en_get_cqe_ts(cqe);
510 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
/linux-master/include/linux/
io_uring_types.h:627 struct io_cqe cqe; member in struct:io_kiocb
678 struct io_uring_cqe cqe; member in struct:io_overflow_cqe
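
Note: struct io_kiocb embeds the io_cqe that eventually surfaces to userspace as a struct io_uring_cqe (overflowed completions are parked in io_overflow_cqe). A minimal userspace consumer of those CQEs, assuming a reasonably recent liburing is installed:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);             /* no-op just to get a CQE back */
	io_uring_sqe_set_data64(sqe, 42);   /* echoed back as cqe->user_data */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);  /* advance the CQ ring */
	}

	io_uring_queue_exit(&ring);
	return 0;
}
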
/linux-master/drivers/infiniband/sw/rxe/
rxe_resp.c:1054 struct rxe_cqe cqe; local
1055 struct ib_wc *wc = &cqe.ibwc;
1056 struct ib_uverbs_wc *uwc = &cqe.uibwc;
1064 memset(&cqe, 0, sizeof(cqe));
1146 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
1428 struct rxe_cqe cqe = {}; local
1429 struct ib_wc *wc = &cqe.ibwc;
1430 struct ib_uverbs_wc *uwc = &cqe.uibwc;
1443 err = rxe_cq_post(qp->rcq, &cqe,
[all...]
/linux-master/drivers/scsi/elx/efct/
efct_hw.c:88 efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
361 efct_hw_wq_process_io(void *arg, u8 *cqe, int status) argument
365 struct sli4_fc_wcqe *wcqe = (void *)cqe;
379 sli_fc_els_did(&hw->sli, cqe, &ext);
380 len = sli_fc_response_length(&hw->sli, cqe);
386 len = sli_fc_response_length(&hw->sli, cqe);
389 len = sli_fc_io_length(&hw->sli, cqe);
392 len = sli_fc_io_length(&hw->sli, cqe);
406 ext = sli_fc_ext_status(&hw->sli, cqe);
1847 efct_hw_wq_process_abort(void *arg, u8 *cqe, int status) argument
2090 efct_hw_reqtag_alloc(struct efct_hw *hw, void (*callback)(void *arg, u8 *cqe, int status), void *arg) argument
2315 u8 cqe[sizeof(struct sli4_mcqe)]; local
2412 efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq, u8 *cqe, int status, u16 rid) argument
2439 efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq, u8 *cqe, u16 rid) argument
2969 efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr, u8 sof, u8 eof, struct efc_dma *payload, struct efct_hw_send_frame_context *ctx, void (*callback)(void *arg, u8 *cqe, int status), void *arg) argument
[all...]
/linux-master/drivers/scsi/qedi/
qedi_main.c:1205 static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe, argument
1214 iscsi_cid = cqe->cqe_common.conn_id;
1223 switch (cqe->cqe_common.cqe_type) {
1226 qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
1233 memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
1248 memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
1255 QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
1269 union iscsi_cqe *cqe; local
[all...]
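
Note: qedi_queue_cqe() above dispatches on cqe->cqe_common.cqe_type, the usual shape for hardware completion unions that overlay several formats behind a shared header. A generic sketch with invented types mirroring that pattern:

enum my_cqe_type { MY_CQE_SOLICITED, MY_CQE_UNSOLICITED, MY_CQE_ERROR };

struct my_cqe_common {
	u16 conn_id;
	u8 cqe_type;     /* discriminator shared by every overlay */
};

union my_cqe {
	struct my_cqe_common cqe_common;
	/* format-specific overlays share the header layout */
};

static void my_queue_cqe(union my_cqe *cqe)
{
	switch (cqe->cqe_common.cqe_type) {
	case MY_CQE_SOLICITED:
		/* look up the owning command (cf. qedi_get_cmd_from_tid) */
		break;
	case MY_CQE_UNSOLICITED:
		/* copy the CQE aside for deferred work, as the memcpy above */
		break;
	case MY_CQE_ERROR:
	default:
		/* log and recover */
		break;
	}
}
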
/linux-master/fs/smb/server/
transport_rdma.c:182 struct ib_cqe cqe; member in struct:smb_direct_sendmsg
191 struct ib_cqe cqe; member in struct:smb_direct_recvmsg
198 struct ib_cqe cqe; member in struct:smb_direct_rdma_rw_msg
540 recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
653 recvmsg->cqe.done = recv_done;
655 wr.wr_cqe = &recvmsg->cqe;
860 sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
947 last->wr.wr_cqe = &last->cqe;
1134 msg->cqe.done = send_done;
1156 msg->wr.wr_cqe = &msg->cqe;
[all...]
/linux-master/drivers/net/ethernet/qlogic/qed/
qed_spq.c:453 struct eth_slow_path_rx_cqe *cqe,
461 * used to complete the ramrod using the echo value on the cqe
463 return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
467 struct eth_slow_path_rx_cqe *cqe)
471 rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
475 cqe->ramrod_cmd_id);
452 qed_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) argument
466 qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe) argument
/linux-master/include/uapi/rdma/
ib_user_verbs.h:419 __u32 cqe; member in struct:ib_uverbs_create_cq
433 __u32 cqe; member in struct:ib_uverbs_ex_create_cq
443 __u32 cqe; member in struct:ib_uverbs_create_cq_resp
456 __u32 cqe; member in struct:ib_uverbs_resize_cq
461 __u32 cqe; member in struct:ib_uverbs_resize_cq_resp
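
Note: in this uapi header, every `cqe` field is a count: the capacity requested in the create/resize commands and the (possibly rounded-up) capacity reported back in the responses. The userspace mirror of the same convention, assuming libibverbs and an already-opened device context:

#include <infiniband/verbs.h>

static struct ibv_cq *create_cq_of(struct ibv_context *ctx, int entries)
{
	/* "entries" travels to the kernel as ib_uverbs_create_cq.cqe */
	struct ibv_cq *cq = ibv_create_cq(ctx, entries, NULL, NULL, 0);

	/* on return, cq->cqe carries the possibly rounded-up capacity */
	return cq;
}
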
/linux-master/drivers/infiniband/hw/mlx5/
umr.c:227 static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, argument
261 id.ib_cqe = cqe;
276 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
284 context->cqe.done = mlx5r_umr_done;
316 err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
/linux-master/drivers/scsi/qedf/
qedf_main.c:2227 struct fcoe_cqe *cqe; local
2251 cqe = &que->cq[que->cq_cons_idx];
2253 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2263 qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
2271 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2299 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2698 comp_type = (io_work->cqe.cqe_data >>
2705 qedf_process_cqe(io_work->qedf, &io_work->cqe);
2803 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) argument
[all...]
/linux-master/drivers/infiniband/ulp/iser/
iser_verbs.c:817 desc->cqe.done = iser_login_rsp;
818 wr.wr_cqe = &desc->cqe;
836 rx_desc->cqe.done = iser_task_rsp;
837 wr.wr_cqe = &rx_desc->cqe;
868 wr->wr_cqe = &tx_desc->cqe;
/linux-master/io_uring/
timeout.c:152 res = link->cqe.res;
313 .data = prev->cqe.user_data,
385 if (user_data == tmp->cqe.user_data) {
