Searched refs:cqe_size (Results 1 - 25 of 39) sorted by relevance


/linux-master/include/uapi/rdma/
hns-abi.h:42    __u32 cqe_size;   member in struct:hns_roce_ib_create_cq
hns-abi.h:126   __u32 cqe_size;   member in struct:hns_roce_ib_alloc_ucontext_resp
mlx4-abi.h:71   __u32 cqe_size;   member in struct:mlx4_ib_alloc_ucontext_resp
mlx5-abi.h:291  __u32 cqe_size;   member in struct:mlx5_ib_create_cq
mlx5-abi.h:307  __u16 cqe_size;   member in struct:mlx5_ib_resize_cq
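
These uapi structs are where the CQE size crosses the user/kernel boundary: the provider library requests a size in the create/resize commands, and the kernel reports the device's size back through the ucontext response. A minimal sketch of the userspace side, assuming the installed uapi header and the buf_addr/db_addr fields that sit next to cqe_size in mlx5-abi.h (a real verbs provider does much more):

    /* Hedged sketch: filling the mlx5 create-CQ ABI struct from userspace.
     * Only the cqe_size plumbing is shown; buffer setup is elided. */
    #include <stdint.h>
    #include <string.h>
    #include <rdma/mlx5-abi.h>   /* installed kernel uapi header */

    static void fill_create_cq_cmd(struct mlx5_ib_create_cq *ucmd,
                                   void *cq_buf, void *db_rec)
    {
        memset(ucmd, 0, sizeof(*ucmd));
        ucmd->buf_addr = (uintptr_t)cq_buf;  /* user memory holding the CQEs */
        ucmd->db_addr  = (uintptr_t)db_rec;  /* doorbell record */
        ucmd->cqe_size = 128;                /* kernel accepts only 64 or 128 */
    }
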
/linux-master/drivers/infiniband/hw/mlx5/
cq.c:671   int cqe_size)
cq.c:674   u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
cq.c:675   u8 log_wq_sz = ilog2(cqe_size);
cq.c:679   nent * cqe_size,
cq.c:687   buf->cqe_size = cqe_size;
cq.c:717   int *cqe_size, int *index, int *inlen)
cq.c:742   if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
cq.c:746   *cqe_size = ucmd.cqe_size;
cq.c:668   alloc_cq_frag_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, int nent, int cqe_size)   argument
cq.c:715   create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, struct mlx5_ib_cq *cq, int entries, u32 **cqb, int *cqe_size, int *index, int *inlen)   argument
cq.c:879   create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size, u32 **cqb, int *index, int *inlen)   argument
cq.c:957   int cqe_size;   local
cq.c:1152  resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, struct ib_udata *udata, int *cqe_size)   argument
cq.c:1185  resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size)   argument
cq.c:1274  int cqe_size;   local
[all...]
mlx5_ib.h:551  int cqe_size;   member in struct:mlx5_ib_cq_buf
mlx5_ib.h:575  int cqe_size;   member in struct:mlx5_ib_cq
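
The fragment-buffer setup above turns the byte size into log2 form for the hardware context: a 64-byte CQE gives a log stride of 6, a 128-byte one gives 7. A standalone sketch of that arithmetic (plain userspace C mirroring cq.c:674, not reusing kernel helpers):

    #include <stdio.h>

    /* log2 of the CQE stride, as computed at cq.c:674: 2^6 = 64, 2^7 = 128 */
    static int cqe_log_stride(int cqe_size)
    {
        return 6 + (cqe_size == 128 ? 1 : 0);
    }

    int main(void)
    {
        for (int sz = 64; sz <= 128; sz *= 2)
            printf("cqe_size=%3d -> log_wq_stride=%d\n", sz, cqe_log_stride(sz));
        return 0;
    }
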
/linux-master/drivers/net/ethernet/mellanox/mlx4/
cq.c:290   static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)   argument
cq.c:292   int entries_per_copy = PAGE_SIZE / cqe_size;
cq.c:318   array_size(entries, cqe_size)) ?
cq.c:330   int cqe_size)
cq.c:335   memset(buf->direct.buf, 0xcc, entries * cqe_size);
cq.c:395   dev->caps.cqe_size);
cq.c:400   dev->caps.cqe_size);
cq.c:328   mlx4_init_kernel_cqes(struct mlx4_buf *buf, int entries, int cqe_size)   argument
fw.h:203   u16 cqe_size; /* For use only when CQE stride feature enabled */   member in struct:mlx4_init_hca_param
en_cq.c:62   cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
mlx4_en.h:578  int cqe_size;   member in struct:mlx4_en_priv
en_rx.c:741  cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
en_rx.c:960  cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
en_tx.c:457  cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
en_tx.c:510  cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
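
All four mlx4_en hits index the completion ring the same way: the buffer is a flat array of cqe_size-byte slots, and "+ factor" then bumps a struct mlx4_cqe pointer past the unused first half of a slot when the hardware payload sits in its upper 32 bytes. A sketch of that pointer math, with a placeholder struct standing in for the real mlx4_cqe layout:

    #include <stddef.h>

    struct mlx4_cqe { unsigned char raw[32]; };  /* placeholder: real layout elided */

    /* stride-based slot lookup, shaped like mlx4_en_get_cqe() */
    static inline struct mlx4_cqe *get_cqe(void *buf, int idx, int cqe_size)
    {
        return (struct mlx4_cqe *)((char *)buf + (size_t)idx * cqe_size);
    }

    /* usage, as in en_rx.c:741: cqe = get_cqe(ring, index, cqe_size) + factor;
     * factor is 1 when the payload lives in the second 32-byte half of the slot */
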
/linux-master/drivers/infiniband/hw/hns/
hns_roce_cq.c:206  buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
hns_roce_cq.c:335  hr_cq->cqe_size = hr_dev->caps.cqe_sz;
hns_roce_cq.c:339  if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
hns_roce_cq.c:340  if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
hns_roce_cq.c:341  ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
hns_roce_cq.c:343  "invalid cqe size %u.\n", ucmd->cqe_size);
hns_roce_cq.c:347  hr_cq->cqe_size = ucmd->cqe_size;
hns_roce_cq.c:349  hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
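
The offsetofend() test at line 339 is the standard extensible-ABI pattern: the user value is honored only when the command blob is long enough to contain the field, older userspace falls back to the V2 default, and out-of-range values are rejected. A compact sketch of that decision, with an illustrative ucmd struct; the V2/V3 constants are assumed to be the 32- and 64-byte values from hns_roce_device.h:

    #include <stddef.h>
    #include <stdint.h>

    #define offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

    /* assumed values, per hns_roce_device.h */
    #define HNS_ROCE_V2_CQE_SIZE 32
    #define HNS_ROCE_V3_CQE_SIZE 64

    /* illustrative stand-in for struct hns_roce_ib_create_cq */
    struct create_cq_ucmd { uint64_t buf_addr; uint32_t cqe_size; };

    static int pick_cqe_size(const struct create_cq_ucmd *ucmd,
                             size_t inlen, uint32_t *cqe_size)
    {
        *cqe_size = HNS_ROCE_V2_CQE_SIZE;      /* default for old userspace */
        if (inlen >= offsetofend(struct create_cq_ucmd, cqe_size)) {
            if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
                ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE)
                return -1;                     /* driver returns -EINVAL here */
            *cqe_size = ucmd->cqe_size;
        }
        return 0;
    }
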
hns_roce_restrack.c:27  if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
/linux-master/net/ethtool/
rings.c:103  (kr->cqe_size &&
rings.c:104  (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
rings.c:217  ethnl_update_u32(&kernel_ringparam.cqe_size,
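
rings.c only emits ETHTOOL_A_RINGS_CQE_SIZE when a driver has filled kernel_ethtool_ringparam.cqe_size (zero suppresses the attribute, per the short-circuit at line 103). A kernel-context sketch of the driver side; "struct my_priv" is purely illustrative, and the otx2 hit at otx2_ethtool.c:389 does exactly this with its xqe_size:

    /* kernel-context sketch, not standalone: report the CQE size through
     * .get_ringparam so the ethtool netlink code above can expose it */
    struct my_priv { u32 cqe_size; };   /* illustrative private state */

    static void my_get_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *ring,
                                 struct kernel_ethtool_ringparam *kernel_ring,
                                 struct netlink_ext_ack *extack)
    {
        struct my_priv *priv = netdev_priv(dev);

        kernel_ring->cqe_size = priv->cqe_size;  /* 0 would mean "not reported" */
    }
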
/linux-master/drivers/infiniband/hw/mlx4/
cq.c:105   err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
cq.c:111   buf->entry_size = dev->dev->caps.cqe_size;
cq.c:143   int cqe_size = dev->dev->caps.cqe_size;   local
cq.c:147   *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
cq.c:358   int cqe_size = cq->buf.entry_size;   local
cq.c:359   int cqe_inc = cqe_size == 64 ? 1 : 0;
cq.c:368   memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
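
During resize, the driver walks the old ring and copies each entry into the new buffer, masking the index with ibcq.cqe (the ring size minus one in mlx4, so "&" performs a power-of-two wrap). A userspace toy with the same copy-loop shape, all sizes illustrative:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        enum { OLD_ENTRIES = 4, NEW_ENTRIES = 8, CQE_SIZE = 64 };
        unsigned char old_buf[OLD_ENTRIES * CQE_SIZE];
        unsigned char new_buf[NEW_ENTRIES * CQE_SIZE];

        memset(old_buf, 0xcc, sizeof(old_buf));
        /* masked index wraps within the old ring, as at cq.c:368 */
        for (unsigned int i = 0; i < OLD_ENTRIES; i++)
            memcpy(new_buf + i * CQE_SIZE,
                   old_buf + (i & (OLD_ENTRIES - 1)) * CQE_SIZE,
                   CQE_SIZE);
        printf("copied %d CQEs of %d bytes\n", OLD_ENTRIES, CQE_SIZE);
        return 0;
    }
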
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.h:132  u16 cqe_size;   member in struct:otx2_cq_queue
otx2_ethtool.c:389  kernel_ring->cqe_size = pfvf->hw.xqe_size;
otx2_ethtool.c:400  u32 xqe_size = kernel_ring->cqe_size;
/linux-master/drivers/ufs/core/
ufs-mcq.c:202  size_t utrdl_size, cqe_size;   local
ufs-mcq.c:218  cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
ufs-mcq.c:219  hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
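
Note the naming quirk in this hit: unlike most files above, ufs-mcq's local cqe_size holds the whole ring's byte count (entries times sizeof(struct cq_entry)), which then feeds dmam_alloc_coherent. The same sizing arithmetic, with calloc standing in for the managed DMA allocation and an illustrative entry layout:

    #include <stdio.h>
    #include <stdlib.h>

    struct cq_entry { unsigned char raw[32]; };  /* illustrative slot layout */

    int main(void)
    {
        size_t max_entries = 64;
        size_t ring_bytes = sizeof(struct cq_entry) * max_entries;
        struct cq_entry *ring = calloc(max_entries, sizeof(*ring));

        if (!ring)
            return 1;
        printf("CQ ring: %zu entries, %zu bytes\n", max_entries, ring_bytes);
        free(ring);
        return 0;
    }
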
/linux-master/drivers/vfio/pci/mlx5/
cmd.h:122  int cqe_size;   member in struct:mlx5_vhca_cq_buf
cmd.c:993   int cqe_size)
cmd.c:996   u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
cmd.c:997   u8 log_wq_sz = ilog2(cqe_size);
cmd.c:1000  err = mlx5_frag_buf_alloc_node(mdev, nent * cqe_size, frag_buf,
cmd.c:1006  buf->cqe_size = cqe_size;
cmd.c:1019  cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cmd.c:1095  int cqe_size = cache_line_size() == 128 ? 128 : 64;   local
cmd.c:1112  cq->mcq.cqe_sz = cqe_size;
cmd.c:1113  err = alloc_cq_frag_buf(mdev, &cq->buf, ncqe, cqe_size);
cmd.c:991   alloc_cq_frag_buf(struct mlx5_core_dev *mdev, struct mlx5_vhca_cq_buf *buf, int nent, int cqe_size)   argument
[all...]
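
Two details recur in this file: the CQE size is picked to match the cache line (cmd.c:1095), and with 128-byte slots the 64-byte hardware CQE occupies the upper half, so cmd.c:1019 skips the first 64 bytes. A standalone sketch of that offset rule:

    #include <stdio.h>

    /* locate the 64-byte CQE payload inside a slot, as at cmd.c:1019 */
    static void *cqe64_of(void *slot, int cqe_size)
    {
        return cqe_size == 64 ? slot : (char *)slot + 64;
    }

    int main(void)
    {
        char slot[128] = { 0 };

        printf("64B slot:  payload at +%td\n", (char *)cqe64_of(slot,  64) - slot);
        printf("128B slot: payload at +%td\n", (char *)cqe64_of(slot, 128) - slot);
        return 0;
    }
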
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c:320  size_t cqe_dma_size, cqe_size;   local
hinic_hw_qp.c:324  cqe_size = wq->q_depth * sizeof(*rq->cqe);
hinic_hw_qp.c:325  rq->cqe = vzalloc(cqe_size);
/linux-master/drivers/scsi/bnx2i/
bnx2i.h:603  * @cqe_size: size of each CQ entry
bnx2i.h:661  u32 cqe_size;   member in struct:qp_info
bnx2i_hwi.c:171   if (cq_index > ep->qp.cqe_size * 2)
bnx2i_hwi.c:172   cq_index -= ep->qp.cqe_size * 2;
bnx2i_hwi.c:1123  ep->qp.cqe_size = hba->max_cqes;
bnx2i_hwi.c:2063  if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
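
Another naming quirk: despite the kernel-doc at bnx2i.h:603, qp.cqe_size here is a depth in entries (it is assigned hba->max_cqes), not a byte size, and the sequence-number checks fold indices over a window of twice that depth so a full ring can be told apart from an empty one. A sketch of the fold:

    #include <stdio.h>

    /* fold an expected-CQE sequence number into the 2 * depth window,
     * shaped like the check at bnx2i_hwi.c:171 */
    static unsigned int fold_cq_index(unsigned int cq_index, unsigned int depth)
    {
        if (cq_index > depth * 2)
            cq_index -= depth * 2;
        return cq_index;
    }

    int main(void)
    {
        printf("%u\n", fold_cq_index(260, 128));  /* 260 > 256, folds to 4 */
        return 0;
    }
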
/linux-master/drivers/infiniband/hw/ocrdma/
ocrdma_hw.c:1786  u32 hw_pages, cqe_size, page_size, cqe_count;   local
ocrdma_hw.c:1799  cqe_size = OCRDMA_DPP_CQE_SIZE;
ocrdma_hw.c:1804  cqe_size = sizeof(struct ocrdma_cqe);
ocrdma_hw.c:1808  cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
ocrdma_hw.c:1828  cqe_count = cq->len / cqe_size;
ocrdma_hw.c:1857  cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
ocrdma_hw.c:1859  cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
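
The ocrdma path rounds the queue length up to the controller's minimum page granularity and then derives the usable entry count back from the rounded length, so cqe_count can exceed the requested max_hw_cqe. Worked numbers under assumed sizes (both constants are illustrative, not the driver's definitions):

    #include <stdio.h>

    #define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        /* illustrative values; OCRDMA_MIN_Q_PAGE_SIZE is assumed 4096 here */
        unsigned int max_hw_cqe = 1000, cqe_size = 32, min_page = 4096;
        unsigned int len = ROUND_UP(max_hw_cqe * cqe_size, min_page);

        printf("len=%u bytes, cqe_count=%u\n", len, len / cqe_size);
        return 0;
    }
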
/linux-master/include/linux/
ethtool.h:77   * @cqe_size: Size of TX/RX completion queue event
ethtool.h:86   u32 cqe_size;   member in struct:kernel_ethtool_ringparam
ethtool.h:94   * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size

