Searched refs:cqes (Results 1 - 23 of 23) sorted by relevance

/linux-master/tools/include/io_uring/
mini_liburing.h
   24  struct io_uring_cqe *cqes;  member in struct:io_cq_ring
   49  struct io_uring_cqe *cqes;  member in struct:io_uring_cq
   98  cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
  111  cq->cqes = ptr + p->cq_off.cqes;
  173  *cqe_ptr = &cq->cqes[head & mask];
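
The mini_liburing.h hits above trace the classic userspace CQ pattern: size the ring from cq_off.cqes plus cq_entries CQEs (line 98), locate the CQE array inside the mmap'd region (line 111), and index it with head & mask (line 173). A minimal sketch of the matching read side, assuming the fields were wired up as in those lines; the cq_view type and helper names are illustrative, not the real liburing API:

#include <linux/io_uring.h>

struct cq_view {                        /* hypothetical mirror of io_uring_cq */
        unsigned *khead;                /* ptr + cq_off.head */
        unsigned *ktail;                /* ptr + cq_off.tail */
        unsigned ring_mask;             /* *(ptr + cq_off.ring_mask) */
        struct io_uring_cqe *cqes;      /* ptr + cq_off.cqes, as at line 111 */
};

static struct io_uring_cqe *cq_peek(struct cq_view *cq)
{
        unsigned head = *cq->khead;

        /* acquire pairs with the kernel's store to the CQ tail */
        if (head == __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE))
                return NULL;                        /* ring is empty */
        return &cq->cqes[head & cq->ring_mask];     /* indexing as at line 173 */
}

static void cq_advance(struct cq_view *cq)
{
        /* release hands the consumed slot back to the kernel */
        __atomic_store_n(cq->khead, *cq->khead + 1, __ATOMIC_RELEASE);
}
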
/linux-master/drivers/infiniband/hw/cxgb4/
restrack.c
  333  fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq, struct t4_cqe *cqes)  argument
  334      struct t4_cqe *cqes)
  339  if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
  342  if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
  350  fill_swcqes(struct sk_buff *msg, struct t4_cq *cq, struct t4_cqe *cqes)  argument
  351      struct t4_cqe *cqes)
  359  if (fill_cqe(msg, cqes, idx, "swcq_idx"))
  364  if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
  395  /* get 2 hw cqes: cidx-1, and cidx */
  402  /* get first and last sw cqes */
/linux-master/drivers/net/ethernet/fungible/funcore/
fun_queue.c
  295  cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
  366  funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
  370  return funq->cqes ? 0 : -ENOMEM;
  389  funq->cqes, funq->cq_dma_addr, NULL);
fun_queue.h
   36  void *cqes;  member in struct:fun_queue
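
Because fun_queue keeps cqes as a raw void * (fun_queue.h line 36), line 295 computes entry addresses with a shift rather than a multiply: the entry size is stored as its base-2 logarithm. A small illustration of that idiom, with hypothetical names:

#include <stddef.h>

/* With a power-of-two entry size, head << log2(size) is the byte
 * offset of entry 'head' -- equivalent to head * size, but a shift. */
static void *cq_entry(void *cqes, unsigned int head,
                      unsigned int cqe_size_log2)
{
        return (char *)cqes + ((size_t)head << cqe_size_log2);
}
/* e.g. 64-byte CQEs: cqe_size_log2 == 6, so entry 3 is at byte 192 */
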
/linux-master/drivers/net/ethernet/fungible/funeth/
funeth_rx.c
  353  q->next_cqe_info = cqe_to_info(q->cqes);
  644  q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
  647  if (!q->cqes)
  665  dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
  686  q->cqes, q->cq_dma_addr);
  722  q->next_cqe_info = cqe_to_info(q->cqes);
funeth_txrx.h
  167  void *cqes;  /* base of CQ descriptor ring */  member in struct:funeth_rxq
/linux-master/tools/testing/selftests/x86/
lam.c
   96  struct io_uring_cqe *cqes;  member in union:io_uring_queue::__anon763
  390  cring->ring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
  445  cring->queue.cqes = cq_ptr + p.cq_off.cqes;
  482  cqe = &cring->queue.cqes[head & *s->cq_ring.ring_mask];
/linux-master/drivers/net/ethernet/broadcom/
cnic.c
 1430  cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, struct kcqe *cqes[], u32 num_cqes)  argument
 1431      struct kcqe *cqes[], u32 num_cqes)
 1440      cqes, num_cqes);
 1558  struct kcqe *cqes[1];  local
 1588  cqes[0] = (struct kcqe *) &kcqe;
 1589  cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
 1887  struct kcqe *cqes[1];  local
 1940  cqes[0] = (struct kcqe *) &kcqe;
 1941  cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
 2005  struct kcqe *cqes[1];  local
 2038  cqes[
 2234  struct kcqe *cqes[1];  local
 2249  struct kcqe *cqes[1];  local
 2359  struct kcqe *cqes[1];  local
 2506  struct kcqe *cqes[1];  local
 2590  struct kcqe *cqes[1];  local
[all...]
cnic_if.h
  370  void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
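
The cnic.c matches repeat one idiom: a completion is staged on the stack, a one-element struct kcqe *cqes[1] array points at it, and cnic_reply_bnx2x_kcqes() fans the batch out through the ULP's indicate_kcqes() callback declared in cnic_if.h. A sketch of that shape, modeled on lines 1558/1588/1589 above; the kcqe type and field names here are assumptions for illustration:

static void reply_one_kcqe(struct cnic_dev *dev, u32 op_code, u32 conn_id)
{
        struct iscsi_kcqe kcqe = {};   /* completion staged on the stack */
        struct kcqe *cqes[1];          /* one-element batch */

        kcqe.op_code = op_code;        /* field names are assumptions */
        kcqe.iscsi_conn_id = conn_id;

        cqes[0] = (struct kcqe *)&kcqe;
        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
}
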
/linux-master/drivers/nvme/host/
apple.c
  135  struct nvme_completion *cqes;  member in struct:apple_nvme_queue
  571  struct nvme_completion *hcqe = &q->cqes[q->cq_head];
  589  struct nvme_completion *cqe = &q->cqes[idx];
  977  memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
 1300  q->cqes = dmam_alloc_coherent(anv->dev,
 1303  if (!q->cqes)
pci.c
  195  struct nvme_completion *cqes;  member in struct:nvme_queue
  990  struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
 1014  struct nvme_completion *cqe = &nvmeq->cqes[idx];
 1405  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 1530  nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
 1532  if (!nvmeq->cqes)
 1550  dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
 1579  memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
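
Both NVMe host drivers treat the CQ the same way: one DMA-coherent array of struct nvme_completion, indexed by cq_head, and zeroed on queue reset so every entry's phase bit starts out clear. A compressed sketch of that lifecycle against a simplified queue struct; the toy_* names are inventions for illustration:

#include <linux/dma-mapping.h>
#include <linux/nvme.h>
#include <linux/string.h>

struct toy_nvme_queue {                /* simplified stand-in */
        struct nvme_completion *cqes;
        dma_addr_t cq_dma_addr;
        u32 depth;
        u16 cq_head;
};

static int toy_alloc_cq(struct device *dev, struct toy_nvme_queue *q)
{
        /* coherent memory: the controller's writes become visible
         * without explicit sync calls, as in pci.c line 1530 */
        q->cqes = dma_alloc_coherent(dev, q->depth * sizeof(*q->cqes),
                                     &q->cq_dma_addr, GFP_KERNEL);
        return q->cqes ? 0 : -ENOMEM;
}

static void toy_reset_cq(struct toy_nvme_queue *q)
{
        /* zeroing clears every phase bit (apple.c:977, pci.c:1579) */
        memset(q->cqes, 0, q->depth * sizeof(*q->cqes));
        q->cq_head = 0;
}
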
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_stats.h
  433  u64 cqes ____cacheline_aligned_in_smp;
  446  u64 cqes ____cacheline_aligned_in_smp;
en_stats.c
  282  s->tx_xdp_cqes += xdpsq_red_stats->cqes;
  294  s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
  305  s->tx_xsk_cqes += xsksq_stats->cqes;
  440  s->tx_cqes += sq_stats->cqes;
 2084  { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
 2096  { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
 2106  { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
 2136  { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
 2160  { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
 2235  { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
[all...]
en_tx.c
  872  stats->cqes += i;
  876  /* ensure cq space is freed before enabling more cqes */
/linux-master/drivers/nvme/target/
passthru.c
  134  id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
admin-cmd.c
  428  id->cqes = (0x4 << 4) | 0x4;
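
Here cqes is not a count: in the NVMe Identify Controller data, the CQES byte encodes the maximum (upper nibble) and required (lower nibble) CQ entry sizes as powers of two. (0x4 << 4) | 0x4 therefore advertises exactly 1 << 4 = 16 bytes, i.e. sizeof(struct nvme_completion), and passthru.c clamps whatever the backing controller reports to that same bound. Decoding helpers, as a sketch:

/* Decode an NVMe Identify CQES byte (encoding per the NVMe spec). */
static inline unsigned int cqes_min_bytes(__u8 cqes)
{
        return 1u << (cqes & 0xf);     /* required entry size */
}

static inline unsigned int cqes_max_bytes(__u8 cqes)
{
        return 1u << (cqes >> 4);      /* maximum entry size */
}
/* For (0x4 << 4) | 0x4 both nibbles decode to 16 bytes, which is
 * sizeof(struct nvme_completion). */
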
/linux-master/tools/include/uapi/linux/
io_uring.h
  460  __u32 cqes;  member in struct:io_cqring_offsets
/linux-master/include/uapi/linux/
io_uring.h
  470  __u32 cqes;  member in struct:io_cqring_offsets
/linux-master/io_uring/
fdinfo.c
  128  struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
io_uring.c
  860  ctx->cqe_cached = &rings->cqes[off];
 2831  off = struct_size(rings, cqes, cq_entries);
 3985  p->cq_off.cqes = offsetof(struct io_rings, cqes);
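
Kernel side, the CQE array is the flexible-array tail of struct io_rings (see io_uring_types.h line 183 below), so io_uring.c can size the whole shared region with struct_size(rings, cqes, cq_entries) and publish the userspace offset as offsetof(struct io_rings, cqes). A reduced model of that layout; the toy_* types are stand-ins:

#include <linux/overflow.h>    /* struct_size() */
#include <linux/stddef.h>      /* offsetof() */
#include <linux/types.h>

struct toy_cqe { __u64 user_data; __s32 res; __u32 flags; };

struct toy_rings {             /* reduced io_rings stand-in */
        __u32 head, tail, ring_mask, ring_entries;
        struct toy_cqe cqes[]; /* CQEs live inline after the header */
};

/* struct_size() computes sizeof(header) + n * sizeof(entry) with
 * overflow checking -- the same call that sizes the real ring at 2831 */
static size_t toy_ring_bytes(struct toy_rings *rings, unsigned int n)
{
        return struct_size(rings, cqes, n);
}
/* userspace then finds the array at offsetof(struct toy_rings, cqes),
 * which is what cq_off.cqes carries (line 3985) */
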
/linux-master/include/linux/
io_uring_types.h
  183  struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
nvme.h
  332  __u8 cqes;  member in struct:nvme_id_ctrl
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
  808  sq->stats->cqes += i;
  812  /* ensure cq space is freed before enabling more cqes */

Completed in 291 milliseconds