Searched refs:cqe (Results 51 - 75 of 233) sorted by relevance


/linux-master/io_uring/
rw.c 469 if (unlikely(res != req->cqe.res)) {
481 req->cqe.res = res;
514 req->cqe.flags |= io_put_kbuf(req, 0);
541 if (unlikely(res != req->cqe.res)) {
546 req->cqe.res = res;
839 req->cqe.res = iov_iter_count(&io->iter);
853 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
873 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
904 req->cqe.res = iov_iter_count(&io->iter);
1019 req->cqe
[all...]
/linux-master/drivers/infiniband/hw/cxgb4/
restrack.c 105 rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
307 static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx, argument
313 be32_to_cpu(cqe->header)))
315 if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
318 be32_to_cpu(cqe->u.gen.wrid_hi)))
321 be32_to_cpu(cqe->u.gen.wrid_low)))
324 be64_to_cpu(cqe->bits_type_ts)))
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_rx.c 45 #define LRO_PKT_HDR_LEN(cqe) \
46 (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
363 struct hinic_rq_cqe *cqe; local
387 cqe = rq->cqe[ci];
388 status = be32_to_cpu(cqe->status);
410 offload_type = be32_to_cpu(cqe->offload_type);
411 vlan_len = be32_to_cpu(cqe->len);
432 LRO_PKT_HDR_LEN(cqe));
439 cqe
[all...]
/linux-master/drivers/infiniband/ulp/rtrs/
rtrs.c 45 iu->cqe.done = done;
88 .wr_cqe = &iu->cqe,
97 int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe) argument
102 .wr_cqe = cqe,
143 .wr_cqe = &iu->cqe,
165 .wr.wr_cqe = &iu->cqe,
188 struct ib_cqe *cqe,
201 .wr.wr_cqe = cqe,
386 void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe, argument
391 path->hb_cqe = cqe;
187 rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe, u32 imm_data, struct ib_send_wr *head) argument
[all...]
rtrs-pri.h 126 struct ib_cqe cqe; member in struct:rtrs_iu
311 int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
319 void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
/linux-master/drivers/scsi/bnx2i/
bnx2i_hwi.c 1332 * @cqe: pointer to newly DMA'ed CQE entry for processing
1338 struct cqe *cqe)
1348 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1383 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1429 * @cqe: pointer to newly DMA'ed CQE entry for processing
1435 struct cqe *cqe)
1444 login = (struct bnx2i_login_response *) cqe;
1497 * @cqe
1336 bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1433 bnx2i_process_login_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1501 bnx2i_process_text_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1562 bnx2i_process_tmf_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1601 bnx2i_process_logout_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1647 bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1688 bnx2i_process_nopin_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1740 bnx2i_process_async_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1790 bnx2i_process_reject_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1827 bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) argument
1900 bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct bnx2i_nop_in_msg *cqe) argument
[all...]
/linux-master/drivers/infiniband/hw/erdma/
erdma_cmdq.c 268 __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci, local
271 be32_to_cpu(READ_ONCE(*cqe)));
273 return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
304 __be32 *cqe; local
308 cqe = get_next_valid_cmdq_cqe(cmdq);
309 if (!cqe)
315 hdr0 = be32_to_cpu(*cqe);
316 sqe_idx = be32_to_cpu(*(cqe + 1));
328 /* Copy 16B comp data after cqe hdr to outer */
329 be32_to_cpu_array(comp_wait->comp_data, cqe
[all...]
/linux-master/drivers/net/ethernet/ibm/ehea/
ehea_main.c 532 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) argument
534 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
535 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
537 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
538 (cqe->header_length == 0))
544 struct sk_buff *skb, struct ehea_cqe *cqe,
547 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
554 if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
556 skb->csum = csum_unfold(~cqe->inet_checksum_value);
565 struct ehea_cqe *cqe)
543 ehea_fill_skb(struct net_device *dev, struct sk_buff *skb, struct ehea_cqe *cqe, struct ehea_port_res *pr) argument
563 get_skb_by_index(struct sk_buff **skb_array, int arr_len, struct ehea_cqe *cqe) argument
617 ehea_treat_poll_error(struct ehea_port_res *pr, int rq, struct ehea_cqe *cqe, int *processed_rq2, int *processed_rq3) argument
659 struct ehea_cqe *cqe; local
805 struct ehea_cqe *cqe; local
882 struct ehea_cqe *cqe; local
[all...]
/linux-master/net/sunrpc/xprtrdma/
svc_rdma_rw.c 257 struct ib_cqe *cqe = wc->wr_cqe; local
259 container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
286 struct ib_cqe *cqe = wc->wr_cqe; local
288 container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
320 struct ib_cqe *cqe = wc->wr_cqe; local
322 container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
368 struct ib_cqe *cqe; local
377 cqe = &cc->cc_cqe;
383 rdma->sc_port_num, cqe, first_wr);
384 cqe
688 struct ib_cqe *cqe; local
[all...]
frwr_ops.c 360 struct ib_cqe *cqe = wc->wr_cqe; local
361 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
455 struct ib_cqe *cqe = wc->wr_cqe; local
456 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
474 struct ib_cqe *cqe = wc->wr_cqe; local
475 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
571 struct ib_cqe *cqe = wc->wr_cqe; local
572 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c 251 struct mlx5_cqe64 *cqe, u8 status)
256 ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
276 buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
291 struct mlx5_cqe64 *cqe, u8 status)
299 ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
333 struct mlx5_cqe64 *cqe)
337 opcode = get_cqe_opcode(cqe);
341 status = ((struct mlx5_err_cqe *)cqe)->syndrome;
344 mlx5_fpga_conn_sq_cqe(conn, cqe, status);
348 status = ((struct mlx5_err_cqe *)cqe)
250 mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) argument
290 mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe, u8 status) argument
332 mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn, struct mlx5_cqe64 *cqe) argument
368 struct mlx5_cqe64 *cqe; local
418 struct mlx5_cqe64 *cqe; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c 249 struct mlx5_cqe64 *cqe,
270 /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
271 mxbuf->cqe = cqe;
306 struct mlx5_cqe64 *cqe,
319 /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
320 mxbuf->cqe = cqe;
247 mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset, u32 page_idx) argument
304 mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, struct mlx5_cqe64 *cqe, u32 cqe_bcnt) argument
/linux-master/drivers/scsi/qedf/
qedf_io.c 1121 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, argument
1135 if (!cqe)
1148 fcp_rsp = &cqe->cqe_info.rsp_info;
1202 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1209 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1432 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, argument
1440 if (!cqe) {
1442 "cqe is NULL for io_req %p xid=0x%x\n",
1451 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1452 le32_to_cpu(cqe
1502 qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
1957 qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
2278 qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
2476 qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
2489 qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, struct fcoe_cqe *cqe) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c 745 struct mlx5_cqe64 *cqe, int napi_budget)
749 u64 ts = get_cqe_ts(cqe);
765 struct mlx5_cqe64 *cqe, int napi_budget)
772 mlx5e_consume_skb(sq, skb, cqe, napi_budget);
791 struct mlx5_cqe64 *cqe; local
803 cqe = mlx5_cqwq_get_cqe(&cq->wq);
804 if (!cqe)
817 /* avoid dirtying sq cache line every cqe */
829 wqe_counter = be16_to_cpu(cqe->wqe_counter);
841 mlx5e_consume_skb(sq, wi->skb, cqe, napi_budge
744 mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, int napi_budget) argument
764 mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, struct mlx5_cqe64 *cqe, int napi_budget) argument
[all...]
/linux-master/drivers/scsi/qedi/
qedi_iscsi.h 234 #define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
/linux-master/include/linux/
nvme-tcp.h 145 * @cqe: nvme completion queue entry
149 struct nvme_completion cqe; member in struct:nvme_tcp_rsp_pdu
/linux-master/drivers/infiniband/core/
uverbs_std_types_cq.c 81 ret = uverbs_copy_from(&attr.cqe, attrs,
140 ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
141 sizeof(cq->cqe));
/linux-master/fs/smb/client/
smbdirect.h 234 struct ib_cqe cqe; member in struct:smbd_request
250 struct ib_cqe cqe; member in struct:smbd_response
297 struct ib_cqe cqe; member in struct:smbd_mr
/linux-master/drivers/nvme/target/
trace.h 122 __entry->cid = req->cqe->command_id;
123 __entry->result = le64_to_cpu(req->cqe->result.u64);
124 __entry->status = le16_to_cpu(req->cqe->status) >> 1;
/linux-master/drivers/infiniband/sw/rxe/
rxe_loc.h 22 int cqe, int comp_vector);
24 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
32 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
/linux-master/drivers/infiniband/hw/bnxt_re/
ib_verbs.c 2961 int cqe = attr->cqe; local
2969 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2978 entries = bnxt_re_init_depth(cqe + 1, uctx);
3027 cq->ib_cq.cqe = entries;
3087 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) argument
3113 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3114 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe
3255 bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) argument
3388 bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, struct bnxt_qplib_cqe *cqe) argument
3499 bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) argument
3546 bnxt_re_process_res_rc_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) argument
3561 bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) argument
3609 bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) argument
3670 struct bnxt_qplib_cqe *cqe; local
[all...]
/linux-master/net/9p/
trans_rdma.c 99 * @cqe: completion queue entry
105 struct ib_cqe cqe; member in struct:p9_rdma_context
295 container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
346 container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
396 c->cqe.done = recv_done;
403 wr.wr_cqe = &c->cqe;
493 c->cqe.done = send_done;
500 wr.wr_cqe = &c->cqe;
/linux-master/include/trace/events/
io_uring.h 124 __entry->user_data = req->cqe.user_data;
161 __entry->user_data = req->cqe.user_data;
202 __entry->data = req->cqe.user_data;
302 __entry->user_data = req->cqe.user_data;
390 __entry->user_data = req->cqe.user_data;
434 __entry->user_data = req->cqe.user_data;
474 __entry->user_data = req->cqe.user_data;
568 * @ocqe: pointer to the overflow cqe (if available)
/linux-master/drivers/ufs/core/
ufs-mcq.c 261 static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe) argument
269 addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
278 struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq); local
279 int tag = ufshcd_mcq_get_tag(hba, cqe);
281 if (cqe->command_desc_base_addr) {
282 ufshcd_compl_one_cqe(hba, tag, cqe);
283 /* After processed the cqe, mark it empty (invalid) entry */
284 cqe->command_desc_base_addr = 0;
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
ptp.c 180 struct mlx5_cqe64 *cqe,
186 u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
187 bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
209 hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
212 ptpsq->cq_stats->cqe++;
230 struct mlx5_cqe64 *cqe; local
238 cqe = mlx5_cqwq_get_cqe(cqwq);
239 if (!cqe)
245 mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
247 } while ((++work_done < budget) && (cqe
179 mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, struct mlx5_cqe64 *cqe, u8 *md_buff, u8 *md_buff_sz, int budget) argument
[all...]

