/linux-master/tools/include/io_uring/
  mini_liburing.h
    24    struct io_uring_cqe *cqes;    (member of struct io_cq_ring)
    49    struct io_uring_cqe *cqes;    (member of struct io_uring_cq)
    98    cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
    111   cq->cqes = ptr + p->cq_off.cqes;
    173   *cqe_ptr = &cq->cqes[head & mask];
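These three live lines are the whole userspace CQ contract: size the mapping from cq_off.cqes plus cq_entries descriptors (line 98), resolve the cqes base inside that mapping (line 111), and index it with head & mask (line 173). A minimal consumer sketch, assuming liburing-style khead/ktail/kring_mask fields; mini_liburing.h's actual field names may differ:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <linux/io_uring.h>

    struct io_uring_cq {                    /* stand-in for the struct at line 49 */
            unsigned *khead, *ktail, *kring_mask;
            struct io_uring_cqe *cqes;
            size_t ring_sz;
    };

    static struct io_uring_cqe *cq_peek(struct io_uring_cq *cq)
    {
            /* acquire-load the tail so CQE contents written before the
             * kernel's tail update are visible to this CPU */
            unsigned tail = atomic_load_explicit((_Atomic unsigned *)cq->ktail,
                                                 memory_order_acquire);
            unsigned head = *cq->khead;

            if (head == tail)
                    return NULL;                          /* ring is empty */
            return &cq->cqes[head & *cq->kring_mask];     /* as at line 173 */
    }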
/linux-master/drivers/infiniband/hw/cxgb4/
  restrack.c
    333   fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq, struct t4_cqe *cqes)    (cqes: argument)
    339   if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
    342   if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
    350   fill_swcqes(struct sk_buff *msg, struct t4_cq *cq, struct t4_cqe *cqes)    (cqes: argument)
    359   if (fill_cqe(msg, cqes, idx, "swcq_idx"))
    364   if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
    395   /* get 2 hw cqes: cidx-1, and cidx */
    402   /* get first and last sw cqes */
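fill_hwcqes snapshots the adjacent pair named in the comment at line 395 (cidx-1 and cidx) and emits both entries through fill_cqe under the same "hwcq_idx" label. A hedged sketch of just the index arithmetic, written generically because struct t4_cqe's layout isn't shown here:

    #include <string.h>

    static void snapshot_pair(const void *ring, size_t entry_sz,
                              unsigned int cidx, unsigned int size, void *out)
    {
            /* previous entry, wrapping from slot 0 back to the last slot */
            unsigned int prev = cidx ? cidx - 1 : size - 1;

            memcpy(out, (const char *)ring + prev * entry_sz, entry_sz);
            memcpy((char *)out + entry_sz,
                   (const char *)ring + cidx * entry_sz, entry_sz);
    }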
/linux-master/drivers/net/ethernet/fungible/funcore/
  fun_queue.c
    295   cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
    366   funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
    370   return funq->cqes ? 0 : -ENOMEM;
    389   funq->cqes, funq->cq_dma_addr, NULL);
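Because this ring keeps cqes as a bare void * (fun_queue.h below), line 295 indexes it in bytes, shifting the head by cqe_size_log2 rather than scaling a typed array index. The same arithmetic isolated, with a helper name of my own:

    #include <stddef.h>
    #include <stdint.h>

    static inline void *cqe_at(void *cqes, uint32_t idx, uint8_t cqe_size_log2)
    {
            /* idx << log2(entry size) == idx * entry_size, for
             * power-of-two entry sizes */
            return (char *)cqes + ((size_t)idx << cqe_size_log2);
    }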
  fun_queue.h
    36    void *cqes;    (member of struct fun_queue)
/linux-master/drivers/net/ethernet/fungible/funeth/
  funeth_rx.c
    353   q->next_cqe_info = cqe_to_info(q->cqes);
    644   q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
    647   if (!q->cqes)
    665   dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
    686   q->cqes, q->cq_dma_addr);
    722   q->next_cqe_info = cqe_to_info(q->cqes);
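Lines 644-686 show the ring's whole lifecycle: allocate ncqe * FUNETH_CQE_SIZE bytes of DMA-coherent memory, point next_cqe_info at the base, and free the same length on teardown. A condensed sketch against the generic DMA API (the wrapper names are illustrative, not funeth's):

    #include <linux/dma-mapping.h>

    static void *rxq_alloc_cqes(struct device *dev, unsigned int ncqe,
                                size_t cqe_size, dma_addr_t *dma)
    {
            /* one contiguous, zeroed block of ncqe completion entries */
            return dma_alloc_coherent(dev, ncqe * cqe_size, dma, GFP_KERNEL);
    }

    static void rxq_free_cqes(struct device *dev, unsigned int ncqe,
                              size_t cqe_size, void *cqes, dma_addr_t dma)
    {
            /* length must match the allocation, as at line 665 */
            dma_free_coherent(dev, ncqe * cqe_size, cqes, dma);
    }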
  funeth_txrx.h
    167   void *cqes;    /* base of CQ descriptor ring */    (member of struct funeth_rxq)
/linux-master/tools/testing/selftests/x86/
  lam.c
    96    struct io_uring_cqe *cqes;    (member of an anonymous union in struct io_uring_queue)
    390   cring->ring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    445   cring->queue.cqes = cq_ptr + p.cq_off.cqes;
    482   cqe = &cring->queue.cqes[head & *s->cq_ring.ring_mask];
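The selftest maps the CQ ring by hand instead of using liburing: line 390 sizes the mapping so it ends exactly after the cq_entries descriptors that begin at cq_off.cqes, and line 445 resolves the array base inside it. A sketch of that setup, assuming a fd freshly returned by io_uring_setup(2):

    #include <sys/mman.h>
    #include <linux/io_uring.h>

    static struct io_uring_cqe *map_cq(int ring_fd, struct io_uring_params *p,
                                       void **base, size_t *sz)
    {
            /* offsets in p were filled in by io_uring_setup(2) */
            *sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
            *base = mmap(NULL, *sz, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
            if (*base == MAP_FAILED)
                    return NULL;
            return (struct io_uring_cqe *)((char *)*base + p->cq_off.cqes);
    }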
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
  en_stats.c
    287   s->tx_xdp_cqes += xdpsq_red_stats->cqes;
    299   s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
    310   s->tx_xsk_cqes += xsksq_stats->cqes;
    445   s->tx_cqes += sq_stats->cqes;
    2137  { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
    2149  { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
    2159  { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
    2189  { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
    2213  { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
    2290  { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
    [more matches not shown]
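Each queue keeps its own cqes counter on a dedicated cache line (en_stats.h below) so the hot completion path never bounces a line between cores; the readout at lines 287-445 then folds the per-queue values into device-wide software totals. The shape of that fold, with illustrative struct names:

    struct sq_stats  { unsigned long long cqes; };
    struct dev_stats { unsigned long long tx_cqes; };

    static void fold_sq_stats(struct dev_stats *s, const struct sq_stats *sq,
                              int nqueues)
    {
            for (int i = 0; i < nqueues; i++)
                    s->tx_cqes += sq[i].cqes;   /* per-queue -> device-wide */
    }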
  en_stats.h
    438   u64 cqes ____cacheline_aligned_in_smp;
    451   u64 cqes ____cacheline_aligned_in_smp;
  en_tx.c
    878   stats->cqes += i;
    882   /* ensure cq space is freed before enabling more cqes */
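The comment at line 882 marks a write barrier in the TX completion path: the entries just polled must be visibly released before the queue can accept or generate more completions. A generic, kernel-style sketch of that ordering (the struct and the re-arm hook are mine, not mlx5's):

    struct cq_state {
            unsigned long long stats_cqes;
            unsigned int cons_index;
    };

    static void arm_cq(struct cq_state *cq);    /* hypothetical re-arm hook */

    static void finish_cq_poll(struct cq_state *cq, unsigned int polled)
    {
            cq->stats_cqes += polled;           /* as at line 878 */
            cq->cons_index += polled;           /* release the CQ entries */
            wmb();   /* cq space must be freed before enabling more cqes */
            arm_cq(cq);
    }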
/linux-master/drivers/net/ethernet/broadcom/
  cnic.c
    1430  cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, struct kcqe *cqes[], u32 num_cqes)    (cqes: argument)
    1440  cqes, num_cqes);
    1558  struct kcqe *cqes[1];    (local)
    1588  cqes[0] = (struct kcqe *) &kcqe;
    1589  cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
    1887  struct kcqe *cqes[1];    (local)
    1940  cqes[0] = (struct kcqe *) &kcqe;
    1941  cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
    2005  struct kcqe *cqes[1];    (local)
    2038  cqes[    (match truncated)
    2234  struct kcqe *cqes[1];    (local)
    2249  struct kcqe *cqes[1];    (local)
    2359  struct kcqe *cqes[1];    (local)
    2506  struct kcqe *cqes[1];    (local)
    2590  struct kcqe *cqes[1];    (local)
    [more matches not shown]
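Nearly every local above is the same idiom: one kcqe is built on the stack, wrapped in a single-slot array, and handed to the bulk array interface that cnic_if.h declares below. The idiom reduced to its core, with the types stubbed for illustration:

    struct kcqe { unsigned int kcqe_op_flag; };   /* stand-in layout */

    static void deliver_one_kcqe(void (*indicate)(void *ulp_ctx,
                                                  struct kcqe *cqes[],
                                                  unsigned int num_cqes),
                                 void *ulp_ctx, struct kcqe *kcqe)
    {
            struct kcqe *cqes[1] = { kcqe };   /* one completion, array API */

            indicate(ulp_ctx, cqes, 1);
    }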
  cnic_if.h
    370   void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
/linux-master/drivers/nvme/host/
  apple.c
    135   struct nvme_completion *cqes;    (member of struct apple_nvme_queue)
    571   struct nvme_completion *hcqe = &q->cqes[q->cq_head];
    589   struct nvme_completion *cqe = &q->cqes[idx];
    977   memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
    1300  q->cqes = dmam_alloc_coherent(anv->dev,
    1303  if (!q->cqes)
  pci.c
    195   struct nvme_completion *cqes;    (member of struct nvme_queue)
    991   struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
    1015  struct nvme_completion *cqe = &nvmeq->cqes[idx];
    1412  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
    1537  nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
    1539  if (!nvmeq->cqes)
    1557  dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
    1586  memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
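Both drivers read &cqes[head] and must then decide whether that slot holds a fresh completion. NVMe encodes this in the phase bit: bit 0 of the CQE status word, which the controller toggles on every pass around the ring. A sketch of the check, equivalent in spirit to what both completion paths do:

    #include <linux/nvme.h>

    static bool cqe_is_new(struct nvme_completion *cqes, u16 head, u8 phase)
    {
            /* an entry is valid when its phase bit matches the queue's
             * current phase; the queue flips its phase on each wrap */
            return (le16_to_cpu(READ_ONCE(cqes[head].status)) & 1) == phase;
    }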
/linux-master/drivers/nvme/target/
  passthru.c
    134   id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
  admin-cmd.c
    428   id->cqes = (0x4 << 4) | 0x4;
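The magic value decodes via the Identify Controller CQES field layout: bits 3:0 carry the required CQ entry size and bits 7:4 the maximum, each as a log2 of bytes. (0x4 << 4) | 0x4 therefore advertises exactly 16-byte completion entries, and the passthru clamp above caps whatever the underlying controller reports to that same ceiling. Decoding helpers:

    #include <linux/types.h>

    static inline unsigned int cqes_required_bytes(__u8 cqes)
    {
            return 1u << (cqes & 0xf);   /* bits 3:0, log2 of minimum */
    }

    static inline unsigned int cqes_max_bytes(__u8 cqes)
    {
            return 1u << (cqes >> 4);    /* bits 7:4, log2 of maximum */
    }

    /* cqes_required_bytes((0x4 << 4) | 0x4) == 16 == cqes_max_bytes(...) */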
/linux-master/tools/include/uapi/linux/
  io_uring.h
    460   __u32 cqes;    (member of struct io_cqring_offsets)
/linux-master/io_uring/
  fdinfo.c
    128   struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
  io_uring.c
    848   ctx->cqe_cached = &rings->cqes[off];
    2585  off = struct_size(rings, cqes, cq_entries);
    3559  p->cq_off.cqes = offsetof(struct io_rings, cqes);
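Lines 2585 and 3559 are two halves of one contract: the kernel sizes the ring allocation with struct_size() over the flexible cqes[] array (io_uring_types.h below) and publishes the array's offset to userspace as cq_off.cqes, which is exactly what the tools header and the lam.c selftest above consume. The arithmetic struct_size() performs, minus its overflow checking:

    #include <stddef.h>
    #include <linux/io_uring.h>

    struct io_rings_like {                /* stand-in with the same tail layout */
            unsigned int head, tail;
            struct io_uring_cqe cqes[];   /* flexible array, as at line 183 below */
    };

    static size_t cq_ring_bytes(unsigned int cq_entries)
    {
            return offsetof(struct io_rings_like, cqes)
                   + (size_t)cq_entries * sizeof(struct io_uring_cqe);
    }

The (entry & cq_mask) << cq_shift indexing in fdinfo.c above is the read-side counterpart: the shift doubles the stride when the ring was created with 32-byte CQEs (IORING_SETUP_CQE32).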
/linux-master/include/linux/
  io_uring_types.h
    183   struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
  nvme.h
    332   __u8 cqes;    (member of struct nvme_id_ctrl)
/linux-master/include/uapi/linux/
  io_uring.h
    487   __u32 cqes;    (member of struct io_cqring_offsets)
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
  xdp.c
    808   sq->stats->cqes += i;
    812   /* ensure cq space is freed before enabling more cqes */