Cross-reference hits for "cqe" in the Linux source tree (linux-master), grouped by directory; each hit is shown as "line number: matched text".

/linux-master/drivers/infiniband/core/
  cq.c
    217: .cqe = nr_cqe,
    417: * @nr_cqe: number of needed cqe entries
    466: if (cq->cqe_used + nr_cqe > cq->cqe)
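The cq.c hits are from the core CQ allocation path: the caller's nr_cqe request becomes the CQ size (".cqe = nr_cqe"), and the check "cq->cqe_used + nr_cqe > cq->cqe" appears to be the shared-CQ pool refusing a CQ without enough spare entries. A minimal sketch of sizing a CQ with ib_alloc_cq(); the device pointer, the NR_MY_CQES depth and the polling context are assumptions for illustration, not taken from cq.c:

#include <rdma/ib_verbs.h>

#define NR_MY_CQES      128     /* assumed queue depth */

/* Allocate a CQ able to hold NR_MY_CQES completions.  The nr_cqe
 * argument is what the core stores as cq->cqe (".cqe = nr_cqe" above);
 * teardown pairs this with ib_free_cq().
 */
static struct ib_cq *my_alloc_cq(struct ib_device *dev)
{
        return ib_alloc_cq(dev, NULL, NR_MY_CQES, 0, IB_POLL_SOFTIRQ);
}

Consumers that share CQs would instead go through the CQ pool (ib_cq_pool_get()/ib_cq_pool_put()), which is where the capacity check at line 466 seems to come from.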
  rw.c
    488: * @cqe: completion queue entry for the last WR
    494: * If @chain_wr is not set @cqe must be set so that the caller gets a
    498:   u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
    533: last_wr->wr_cqe = cqe;
    546: * @cqe: completion queue entry for the last WR
    552: * is not set @cqe must be set so that the caller gets a completion
    556:   struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
    560: first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
    497: rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)  (argument)
    555: rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)  (argument)
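The rw.c hits are the kernel-doc and signatures of rdma_rw_ctx_wrs() and rdma_rw_ctx_post(): when no chain_wr is supplied, the caller must pass an ib_cqe so it gets a completion for the last WR. A sketch of how a caller might post an already-initialized rdma_rw_ctx with such a completion cookie; struct my_rdma_io and my_rdma_done() are hypothetical names, and the ctx is assumed to have been set up with rdma_rw_ctx_init() elsewhere:

#include <rdma/rw.h>

/* Hypothetical per-I/O state: the rdma_rw_ctx next to its ib_cqe. */
struct my_rdma_io {
        struct rdma_rw_ctx      ctx;
        struct ib_cqe           cqe;
};

static void my_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct my_rdma_io *io =
                container_of(wc->wr_cqe, struct my_rdma_io, cqe);

        /* The last WR of io->ctx has completed; finish the request
         * and eventually call rdma_rw_ctx_destroy().
         */
}

/* Post the WRs for an already-initialized ctx.  With chain_wr == NULL,
 * @cqe must be set so the last WR generates a completion for us.
 */
static int my_rdma_post(struct my_rdma_io *io, struct ib_qp *qp, u32 port_num)
{
        io->cqe.done = my_rdma_done;
        return rdma_rw_ctx_post(&io->ctx, qp, port_num, &io->cqe, NULL);
}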
  verbs.c
    2188: int ib_resize_cq(struct ib_cq *cq, int cqe)  (argument)
    2194: cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
    2802: struct ib_cqe cqe;  (member of struct ib_drain_cqe)
    2808: struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,  (local)
    2809:   cqe);
    2811: complete(&cqe->done);
    2825: { .wr_cqe = &sdrain.cqe, },
    2837: sdrain.cqe.done = ib_drain_qp_done;
    2870: rwr.wr_cqe = &rdrain.cqe;
    2871: rdrain.cqe [all...]
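The core verbs.c hits show ib_resize_cq() falling back to -EOPNOTSUPP when a driver has no resize_cq op, plus the QP drain machinery: struct ib_drain_cqe pairs an ib_cqe with a completion, ib_drain_qp_done() recovers it via container_of() and calls complete(), and the drain code posts send/receive WRs whose wr_cqe point at it. A sketch of the same post-and-wait idiom with made-up names (my_flush_*); the in-tree drain code actually uses an RDMA WRITE inside an ib_rdma_wr and a matching receive WR, so treat this only as an illustration of the wr_cqe/completion pairing:

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

/* Illustrative names; same shape as struct ib_drain_cqe above. */
struct my_flush_cqe {
        struct ib_cqe           cqe;
        struct completion       done;
};

static void my_flush_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct my_flush_cqe *f =
                container_of(wc->wr_cqe, struct my_flush_cqe, cqe);

        complete(&f->done);
}

/* Post an empty signaled SEND and wait for its completion.  Assumes
 * the CQ is driven by IB_POLL_SOFTIRQ/IB_POLL_WORKQUEUE or an
 * external poll loop, otherwise nothing will ever invoke .done.
 */
static int my_flush_qp(struct ib_qp *qp)
{
        struct my_flush_cqe flush = {};
        struct ib_send_wr wr = {
                .wr_cqe         = &flush.cqe,
                .opcode         = IB_WR_SEND,
                .send_flags     = IB_SEND_SIGNALED,
        };
        int ret;

        flush.cqe.done = my_flush_done;
        init_completion(&flush.done);

        ret = ib_post_send(qp, &wr, NULL);
        if (ret)
                return ret;

        wait_for_completion(&flush.done);
        return 0;
}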
/linux-master/drivers/net/ethernet/qlogic/qed/
  qed_ll2.c
    530: union core_rx_cqe_union *cqe = NULL;  (local)
    551: cqe =
    559: cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
    561: switch (cqe->rx_cqe_sp.type) {
    564: cqe, &flags);
    569: cqe, &flags,
    652: union core_rx_cqe_union *cqe = NULL;  (local)
    668: cqe = qed_chain_consume(&p_rx->rcq_chain);
    670: cqe_type = cqe->rx_cqe_sp.type;
    674: &cqe [all...]
  qed_sp.h
    35: * ramrod on the cqe ring.
    38: * @cqe: CQE.
    43: struct eth_slow_path_rx_cqe *cqe);
/linux-master/drivers/nvme/host/
  tcp.c
    584: struct nvme_completion *cqe)
    589: rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
    592: "got bad cqe.command_id %#x on queue %d\n",
    593: cqe->command_id, nvme_tcp_queue_id(queue));
    600: req->status = cqe->status;
    602: if (!nvme_try_complete_req(rq, req->status, cqe->result))
    646: struct nvme_completion *cqe = &pdu->cqe;  (local)
    656: cqe->command_id)))
    657: nvme_complete_async_event(&queue->ctrl->ctrl, cqe
    583: nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, struct nvme_completion *cqe)  (argument) [all...]
/linux-master/drivers/infiniband/hw/qedr/
  verbs.c
    920: int entries = attr->cqe;
    975: cq->ibcq.cqe = chain_entries;
    987: cq->ibcq.cqe = cq->pbl.capacity;
    3808: * cqe. This covers for the smp_rmb as well.
    3987: * cqe. This covers for the smp_rmb as well.
    4007: static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)  (argument)
    4009: struct rdma_cqe_requester *resp_cqe = &cqe->req;
    4015: static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)  (argument)
    4017: struct rdma_cqe_requester *resp_cqe = &cqe->req;
    4026: static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)  (argument)
    4413: union rdma_cqe *cqe;  (local) [all...]
/linux-master/include/linux/
  nvme-fc.h
    79: struct nvme_completion cqe;  (member of struct nvme_fc_ersp_iu)
/linux-master/drivers/scsi/elx/efct/
  efct_hw_queues.c
    522: u8 *cqe)
    533: rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
    590: sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
    593: seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
    521: efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq, u8 *cqe)  (argument)
/linux-master/drivers/scsi/qedf/
  qedf_els.c
    142: void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,  (argument)
    179: mp_info = &cqe->cqe_info.midpath_info;
    744: struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
    752: if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
    754: "cqe is NULL or timeout event (0x%x)", io_req->event);
    743: qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)  (argument)
/linux-master/drivers/net/ethernet/huawei/hinic/
  hinic_hw_qp.h
    113: struct hinic_rq_cqe **cqe;  (member of struct hinic_rq)
/linux-master/net/sunrpc/xprtrdma/
  verbs.c
    142: struct ib_cqe *cqe = wc->wr_cqe;  (local)
    144: container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
    161: struct ib_cqe *cqe = wc->wr_cqe;  (local)
    162: struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
  svc_rdma_recvfrom.c
    331: struct ib_cqe *cqe = wc->wr_cqe;  (local)
    337: ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
/linux-master/drivers/infiniband/hw/bnxt_re/
  ib_verbs.h
    225: int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
  qplib_fp.h
    557: int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
    569: struct bnxt_qplib_cqe *cqe,
/linux-master/tools/testing/selftests/x86/
  lam.c
    471: struct io_uring_cqe *cqe;  (local)
    482: cqe = &cring->queue.cqes[head & *s->cq_ring.ring_mask];
    483: fi = (struct file_io *)cqe->user_data;
    484: if (cqe->res < 0)
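The lam.c selftest reaps completions straight off the raw CQ ring: index by head & ring_mask, recover the request from cqe->user_data, and treat a negative cqe->res as an error. A userspace sketch of the same reaping logic through liburing, which hides the ring arithmetic; the no-op request and the 0x1234 cookie are placeholders:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* Queue a no-op tagged with a cookie so we can recognize it. */
        sqe = io_uring_get_sqe(&ring);
        if (!sqe)
                return 1;
        io_uring_prep_nop(sqe);
        io_uring_sqe_set_data64(sqe, 0x1234);
        io_uring_submit(&ring);

        /* The same fields the selftest digs out of the raw ring:
         * user_data identifies the request, res is its result.
         */
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                if (cqe->res < 0)
                        fprintf(stderr, "request failed: %d\n", cqe->res);
                printf("user_data=%llu res=%d\n",
                       (unsigned long long)io_uring_cqe_get_data64(cqe),
                       cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
}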
/linux-master/drivers/infiniband/hw/mlx5/
  mem.c
    199: struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
/linux-master/drivers/infiniband/hw/irdma/
  puda.c
    216: __le64 *cqe;  (local)
    227: cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
    228: get_64bit_val(cqe, 24, &qword3);
    259: print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
    274: get_64bit_val(cqe, 0, &qword0);
    275: get_64bit_val(cqe, 16, &qword2);
    282: get_64bit_val(cqe, 8, &comp_ctx);
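The puda.c hits read the raw CQE as little-endian quadwords (get_64bit_val() at byte offsets 0, 8, 16 and 24) before decoding individual fields. A generic sketch of that decode step using the stock kernel helpers le64_to_cpu() and FIELD_GET(); the MY_CQE_* masks are invented for illustration and are not irdma's real layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Invented layout: in this sketch the fourth quadword carries a
 * "valid" bit and a length field.  The real masks live in the irdma
 * headers; only the decode style is the point here.
 */
#define MY_CQE_VALID    BIT_ULL(63)
#define MY_CQE_LEN      GENMASK_ULL(31, 0)

static bool my_parse_cqe(const __le64 *cqe, u32 *len)
{
        u64 qword3 = le64_to_cpu(cqe[3]);       /* bytes 24..31 */

        if (!(qword3 & MY_CQE_VALID))
                return false;

        *len = FIELD_GET(MY_CQE_LEN, qword3);
        return true;
}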
/linux-master/drivers/net/ethernet/amazon/ena/
  ena_com.c
    435: struct ena_admin_acq_entry *cqe)
    440: cmd_id = cqe->acq_common_descriptor.command &
    452: comp_ctx->comp_status = cqe->acq_common_descriptor.status;
    455: memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
    463: struct ena_admin_acq_entry *cqe = NULL;  (local)
    471: cqe = &admin_queue->cq.entries[head_masked];
    474: while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
    480: ena_com_handle_single_admin_completion(admin_queue, cqe);
    489: cqe = &admin_queue->cq.entries[head_masked];
    434: ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, struct ena_admin_acq_entry *cqe)  (argument)
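The ena_com.c hits show the usual admin-completion flow: read the CQE flags with READ_ONCE(), compare the phase bit with the queue's expected phase, match the command id back to its waiting context, record the status, and copy the raw CQE out to the submitter. A generic, hypothetical sketch of that phase-bit polling loop; the structures, the phase-bit position and the my_* names are made up, only the control flow mirrors the driver:

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical CQE/ring layout: bit 0 of flags is the phase bit the
 * device flips on every lap of the ring; cq->phase starts at 1 after
 * queue init and is flipped by the consumer on wrap-around.
 */
#define MY_CQE_PHASE    BIT(0)

struct my_cqe {
        u8      flags;
        u8      status;
        u16     command_id;
        u32     data;
};

struct my_cq {
        struct my_cqe   *entries;
        u16             head;
        u16             depth;
        u8              phase;
};

/* Consume every CQE the device has published since the last call. */
static void my_cq_poll(struct my_cq *cq)
{
        struct my_cqe *cqe = &cq->entries[cq->head];

        while ((READ_ONCE(cqe->flags) & MY_CQE_PHASE) == cq->phase) {
                /* Order the phase check before reading the payload. */
                dma_rmb();

                /* ...match cqe->command_id to its waiting context and
                 * record cqe->status, as the ena code does...
                 */

                if (++cq->head == cq->depth) {
                        cq->head = 0;
                        cq->phase ^= MY_CQE_PHASE;
                }
                cqe = &cq->entries[cq->head];
        }
}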
/linux-master/drivers/scsi/elx/libefc_sli/
  sli4.h
    3535: * a queue entry is valid when a cqe
    3830: sli_fc_rqe_length(struct sli4 *sli4, void *cqe, u32 *len_hdr,  (argument)
    3833: struct sli4_fc_async_rcqe *rcqe = cqe;
    3847: sli_fc_rqe_fcfi(struct sli4 *sli4, void *cqe)  (argument)
    3849: u8 code = ((u8 *)cqe)[SLI4_CQE_CODE_OFFSET];
    3854: struct sli4_fc_async_rcqe *rcqe = cqe;
    3860: struct sli4_fc_async_rcqe_v1 *rcqev1 = cqe;
    3866: struct sli4_fc_optimized_write_cmd_cqe *opt_wr = cqe;
    4026: sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
    4038: u8 *cqe, enu [all...]
/linux-master/drivers/infiniband/hw/cxgb4/
  device.c
    92: void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)  (argument)
    105: le.cqe_sge_ts = CQE_TS(cqe);
    106: if (SQ_TYPE(cqe)) {
    108: le.opcode = CQE_OPCODE(cqe);
    111: le.wr_id = CQE_WRID_SQ_IDX(cqe);
    117: le.wr_id = CQE_WRID_MSN(cqe);
/linux-master/drivers/net/ethernet/microsoft/mana/
  gdma_main.c
    1169: struct gdma_cqe *cqe;  (local)
    1171: cqe = &cq_cqe[cq->head % num_cqe];
    1172: owner_bits = cqe->cqe_info.owner_bits;
    1189: comp->wq_num = cqe->cqe_info.wq_num;
    1190: comp->is_sq = cqe->cqe_info.is_sq;
    1191: memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
/linux-master/io_uring/
  net.c
    1215: notif->cqe.user_data = req->cqe.user_data;
    1216: notif->cqe.res = 0;
    1217: notif->cqe.flags = IORING_CQE_F_NOTIF;
    1483: req->cqe.res = sr->done_io;
    1487: req->cqe.flags |= IORING_CQE_F_MORE;
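The io_uring/net.c hits are from the zero-copy send path: the result CQE gets IORING_CQE_F_MORE to announce that a second CQE with the same user_data will follow, and that later notification CQE carries res = 0 and IORING_CQE_F_NOTIF once the kernel is done with the buffer. A userspace sketch of classifying those CQEs with liburing; it assumes a send_zc request was already submitted and only shows the reaping side:

#include <liburing.h>

/* Classify one reaped CQE from a ring that has zero-copy sends in
 * flight, then mark it consumed.
 */
static void handle_cqe(struct io_uring *ring, struct io_uring_cqe *cqe)
{
        if (cqe->flags & IORING_CQE_F_NOTIF) {
                /* Notification CQE: res is 0 and the send buffer
                 * identified by user_data may now be reused or freed.
                 */
        } else if (cqe->flags & IORING_CQE_F_MORE) {
                /* Result CQE of a send_zc: res is the byte count (or
                 * -errno); a NOTIF CQE with the same user_data follows.
                 */
        } else {
                /* Ordinary completion: the only CQE for this request. */
        }
        io_uring_cqe_seen(ring, cqe);
}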
/linux-master/drivers/nvme/target/
  fabrics-cmd-auth.c
    335: req->cqe->result.u64 = 0;
    518: req->cqe->result.u64 = 0;
  passthru.c
    241: req->cqe->result = nvme_req(rq)->result;
    254: req->cqe->result = nvme_req(rq)->result;