Searched refs:cqc (Results 1 - 20 of 20) sorted by relevance

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
aso.c
80 void *in, *cqc; local
94 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
96 memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
101 MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
102 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
103 MLX5_SET(cqc, cqc, uar_pag
[all...]
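
The aso.c hit above is the common mlx5 create-CQ pattern: take the cq_context field of the create_cq_in mailbox, seed it from a prebuilt cqc blob, then patch the per-CQ fields. A minimal sketch, assuming the kernel-internal mlx5_ifc macros; build_cq_context() and its arguments are illustrative names, not driver API:

#include <linux/string.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/mlx5_ifc.h>

static void build_cq_context(void *in, const void *cqc_template,
                             int eqn, u32 uar_index)
{
        void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        /* start from the caller's cqc template, then patch per-CQ fields */
        memcpy(cqc, cqc_template, MLX5_ST_SZ_BYTES(cqc));
        MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc, cqc, uar_page, uar_index);
}
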
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
cq.c
92 int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
216 void *cqc; local
219 cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
220 MLX5_SET(cqc, cqc, cq_period, cq_period);
221 MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
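
The cq.c hits at 216-221 are the body of the exported moderation helper, so callers adjust interrupt coalescing on a live CQ without rebuilding the cqc. A hedged usage sketch, assuming an initialized mdev and struct mlx5_core_cq; the wrapper name is illustrative:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>

/* Apply coalescing values to a live CQ via MODIFY_CQ. */
static int apply_cq_moderation(struct mlx5_core_dev *mdev,
                               struct mlx5_core_cq *mcq,
                               u16 usecs, u16 frames)
{
        /* wraps the cq_period/cq_max_count MLX5_SET calls shown above */
        return mlx5_core_modify_cq_moderation(mdev, mcq, usecs, frames);
}
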
wq.c
160 void *cqc, struct mlx5_cqwq *wq,
164 u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
165 u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
159 mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
wq.h
90 void *cqc, struct mlx5_cqwq *wq,
debugfs.c
436 param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
439 param = MLX5_GET(cqc, ctx, log_page_size);
en_main.c
2011 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
2070 void *cqc; local
2085 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
2087 memcpy(cqc, param->cqc, sizeof(param->cqc));
2092 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
2093 MLX5_SET(cqc, cqc, c_eqn_or_apu_elemen
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
cq.c
725 void *cqc; local
757 cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
786 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
787 MLX5_SET(cqc, cqc, log_page_size,
789 MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
823 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
824 MLX5_SET(cqc, cq
884 void *cqc; local
956 void *cqc; local
1266 void *cqc; local
[all...]
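
The mlx5_ib hits cover the user-CQ path: the cqc describes the umem-backed buffer layout (log_page_size, page_offset) and can enable CQE compression. A sketch under the assumption that page_shift, page_offset_quantized and mini_cqe_format were derived from the user's umem beforehand; the helper name and the mini_cqe_res_format usage are illustrative:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static void set_user_cq_layout(void *cqb, unsigned int page_shift,
                               u32 page_offset_quantized,
                               bool cqe_comp, u8 mini_cqe_format)
{
        void *cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);

        /* buffer layout of the user-provided CQ memory */
        MLX5_SET(cqc, cqc, log_page_size,
                 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);

        /* optional CQE compression, as in cq.c:823 above */
        if (cqe_comp) {
                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
                MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
        }
}
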
devx.c
700 void *cqc; local
703 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
704 MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
1455 !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
params.c
889 void *cqc = param->cqc; local
891 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
893 MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
919 void *cqc = param->cqc; local
935 MLX5_SET(cqc, cqc, log_cq_siz
1049 void *cqc = param->cqc; local
1091 void *cqc = param->cqc; local
[all...]
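
The en/params.c hits fill the cqc template that mlx5e later memcpy()s into create_cq_in (the param->cqc member declared in params.h below). A sketch of that template-building step, assuming caller-chosen log size and padding settings; the function name is illustrative:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Fill a reusable cqc template held in an mlx5e_cq_param-style struct. */
static void build_common_cq_param(struct mlx5_core_dev *mdev, void *cqc,
                                  u8 log_cq_size, bool padded_128b_cqe)
{
        MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
        if (padded_128b_cqe)
                MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
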
health.c
24 void *cqc; local
27 cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
28 hw_status = MLX5_GET(cqc, cqc, status);
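
The health.c hit decodes the hardware's view of a CQ: QUERY_CQ returns a cqc in cq_context, and its status field indicates whether the CQ is in an error state. A hedged sketch of that query-and-decode step, assuming an existing mdev/mcq pair and the exported mlx5_core_query_cq() helper:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Read the hardware status field of a CQ via QUERY_CQ. */
static int read_cq_hw_status(struct mlx5_core_dev *mdev,
                             struct mlx5_core_cq *mcq, u8 *hw_status)
{
        u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
        void *cqc;
        int err;

        err = mlx5_core_query_cq(mdev, mcq, out);
        if (err)
                return err;

        cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
        *hw_status = MLX5_GET(cqc, cqc, status);
        return 0;
}
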
params.h
16 u32 cqc[MLX5_ST_SZ_DW(cqc)]; member in struct:mlx5e_cq_param
/linux-master/include/linux/mlx5/
cq.h
132 #define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
133 #define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
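
These two macros bound what the cqc can express for moderation. A short illustrative check, assuming ethtool-style input; with the current mlx5_ifc field widths the limits work out to 4095 usec and 65535 CQEs, but treat those exact numbers as an assumption:

#include <linux/errno.h>
#include <linux/mlx5/cq.h>

/* Reject coalescing requests the cqc fields cannot hold. */
static int check_cq_moderation(u32 coal_usecs, u32 coal_frames)
{
        if (coal_usecs > MLX5_MAX_CQ_PERIOD || coal_frames > MLX5_MAX_CQ_COUNT)
                return -ERANGE;
        return 0;
}
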
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c
415 u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
420 void *cqc, *in; local
425 MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
454 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
455 MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
456 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
457 MLX5_SET(cqc, cqc, uar_pag
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c
1062 u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
1068 void *cqc, *in; local
1078 MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
1106 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1107 MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
1108 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
1109 MLX5_SET(cqc, cqc, uar_pag
[all...]
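
fpga/conn.c and steering/dr_send.c share a two-step pattern: a throwaway cqc carrying only log_cq_size lets mlx5_cqwq_create() size the CQE ring, and the real cqc inside create_cq_in is filled once the buffer exists. A sketch using the driver-internal wq.h helpers shown above; create_cq_ring() is an illustrative name and the mailbox/EQN plumbing is assumed done by the caller:

#include <linux/log2.h>
#include <linux/mlx5/driver.h>
/* also requires the driver-internal "wq.h" for mlx5_cqwq_create() */

static int create_cq_ring(struct mlx5_core_dev *mdev, struct mlx5_wq_param *wqp,
                          struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl,
                          void *create_in, int ncqe, int eqn)
{
        u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
        void *cqc;
        int err;

        /* step 1: size the CQE ring; only log_cq_size matters here */
        MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
        err = mlx5_cqwq_create(mdev, wqp, temp_cqc, wq, wq_ctrl);
        if (err)
                return err;

        /* step 2: fill the real cqc inside the create_cq_in mailbox */
        cqc = MLX5_ADDR_OF(create_cq_in, create_in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
        MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
        return 0;
}
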
/linux-master/drivers/crypto/hisilicon/
debugfs.c
206 struct qm_cqc cqc; local
219 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
221 dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
227 if (qm->cqc) {
228 cqc_curr = qm->cqc + qp_id;
416 dev_info(dev, "cqc <num>\n");
433 .cmd = "cqc",
qm.c
51 /* cqc shift */
61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1)
635 tmp_xqc = qm->xqc_buf.cqc;
1941 struct qm_cqc cqc = {0}; local
1944 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
1945 cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
1947 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
1948 cqc.w8 = 0; /* rand_qc */
1955 cqc
2050 struct qm_cqc cqc; local
[all...]
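
The HiSilicon QM hits use a different cqc entirely: struct qm_cqc, a per-queue-pair completion-queue context written through the QM mailbox. The qm.c lines above show the version split in its initialization; a hedged reconstruction of that step, with qp, ver and the QM_MK_CQC_DW3_* / QM_QC_CQE_SIZE macros as in the driver (internal to qm.c) and the function name illustrative:

#include <linux/string.h>
#include <linux/hisi_acc_qm.h>

/* Reconstruction of the CQC init around qm.c:1941-1948 (sketch). */
static void fill_qm_cqc(struct qm_cqc *cqc, struct hisi_qp *qp, u32 ver)
{
        memset(cqc, 0, sizeof(*cqc));

        if (ver == QM_HW_V1) {
                /* v1: CQE size encoded in dw3, queue depth in w8 */
                cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
                cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
        } else {
                /* v2+: size and depth both encoded in dw3, w8 left zero (rand_qc) */
                cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
                cqc->w8 = 0;
        }
}
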
/linux-master/drivers/vfio/pci/mlx5/
cmd.c
1099 void *cqc, *in; local
1132 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1133 MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
1134 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
1135 MLX5_SET(cqc, cqc, uar_page, tracker->uar->index);
1136 MLX5_SET(cqc, cqc, log_page_siz
[all...]
/linux-master/drivers/vdpa/mlx5/net/
mlx5_vnet.c
551 void *cqc; local
582 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
583 MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
592 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
593 MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
594 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
595 MLX5_SET(cqc, cq
[all...]
/linux-master/include/linux/
hisi_acc_qm.h
317 struct qm_cqc *cqc; member in struct:qm_rsv_buf
352 struct qm_cqc *cqc; member in struct:hisi_qm
/linux-master/drivers/net/ethernet/mellanox/mlx4/
resource_tracker.c
3077 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) argument
3079 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3082 static int cq_get_mtt_size(struct mlx4_cq_context *cqc) argument
3084 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3085 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3442 struct mlx4_cq_context *cqc = inbox->buf; local
3443 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3453 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3531 struct mlx4_cq_context *cqc = inbox->buf; local
3532 int mtt_base = cq_get_mtt_addr(cqc) / de
[all...]
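
For mlx4 the cqc is a firmware mailbox struct rather than an ifc-generated layout, and the resource tracker uses it to check that a guest's CQ fits inside the MTT range it claims. The two helpers above extract the MTT base address and the entry count; a sketch of the size calculation, where the 32-byte-CQE shift ("+ 5") and the minimum of one entry are assumptions about the unshown remainder of the helper, while the field decodes match the snippet:

#include <linux/mlx4/cq.h>      /* struct mlx4_cq_context */

/* Number of MTT entries a CQ needs: CQE count times 32 bytes per CQE,
 * divided by the page size encoded in log_page_size. */
static int cq_mtt_entries(struct mlx4_cq_context *cqc)
{
        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
        int page_shift = (cqc->log_page_size & 0x3f) + 12;

        if (log_cq_size + 5 < page_shift)
                return 1;
        return 1 << (log_cq_size + 5 - page_shift);
}
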

Completed in 305 milliseconds