Searched refs:cq (Results 226 - 250 of 351) sorted by path


/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
281 struct mlx5e_cq *cq; member in struct:mlx5e_xsk_tx_complete
291 if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
292 return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
294 return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
663 struct mlx5e_cq *cq,
729 .cq = cq,
748 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) argument
759 sq = container_of(cq, struc
659 mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_wqe_info *wi, u32 *xsk_frames, struct xdp_frame_bulk *bq, struct mlx5e_cq *cq, struct mlx5_cqe64 *cqe) argument
[all...]
xdp.h
109 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
231 skb = napi_alloc_skb(rq->cq.napi, totallen);
setup.c
133 &c->xskrq.cq);
142 &c->xsksq.cq);
163 mlx5e_close_cq(&c->xsksq.cq);
169 mlx5e_close_cq(&c->xskrq.cq);
183 mlx5e_close_cq(&c->xskrq.cq);
185 mlx5e_close_cq(&c->xsksq.cq);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_dim.c
52 mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
62 mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
107 c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq);
134 c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq);
en_ethtool.c
672 mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
692 mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts,
866 mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
876 mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
en_main.c
907 pp_params.napi = rq->cq.napi;
1044 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
1099 struct mlx5_cqwq *cqwq = &rq->cq.wq;
1787 csp.cqn = sq->cq.mcq.cqn;
1831 netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
1944 csp.cqn = sq->cq.mcq.cqn;
2004 csp.cqn = sq->cq.mcq.cqn;
2069 struct mlx5e_cq *cq)
2071 struct mlx5_core_cq *mcq = &cq->mcq;
2075 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq
2065 mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, struct net_device *netdev, struct workqueue_struct *workqueue, struct mlx5e_cq_param *param, struct mlx5e_cq *cq) argument
2103 mlx5e_alloc_cq(struct mlx5_core_dev *mdev, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) argument
2122 mlx5e_free_cq(struct mlx5e_cq *cq) argument
2127 mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) argument
2176 mlx5e_destroy_cq(struct mlx5e_cq *cq) argument
2181 mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) argument
2207 mlx5e_close_cq(struct mlx5e_cq *cq) argument
2213 mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u8 cq_period_mode) argument
2229 mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u16 cq_period, u16 cq_max_count, u8 cq_period_mode) argument
3438 mlx5e_alloc_drop_cq(struct mlx5e_priv *priv, struct mlx5e_cq *cq, struct mlx5e_cq_param *param) argument
3456 struct mlx5e_cq *cq = &drop_rq->cq; local
[all...]
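
The en_main.c hits above outline a paired CQ lifecycle: mlx5e_alloc_cq()/mlx5e_free_cq() handle the software state, mlx5e_create_cq()/mlx5e_destroy_cq() issue the firmware commands, and mlx5e_open_cq()/mlx5e_close_cq() compose the two. A minimal sketch of that shape, with hypothetical names standing in for the real signatures:

    /* Two-phase bring-up: software resources first, then the firmware
     * object. Teardown runs in reverse.
     */
    int open_cq(struct cq *cq)
    {
            int err;

            err = alloc_cq(cq);     /* sw: CQE ring, doorbell record */
            if (err)
                    return err;

            err = create_cq(cq);    /* fw: CREATE_CQ command, assigns a cqn */
            if (err)
                    free_cq(cq);
            return err;
    }

    void close_cq(struct cq *cq)
    {
            destroy_cq(cq);         /* fw: DESTROY_CQ */
            free_cq(cq);            /* sw: release the ring and doorbell */
    }
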
en_rx.c
996 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) argument
998 struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
1006 cqe = mlx5_cqwq_get_cqe(&cq->wq);
1011 * otherwise a cq overrun may occur
1020 mlx5_cqwq_pop(&cq->wq);
1035 netdev_WARN_ONCE(cq->netdev,
1038 mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
1042 queue_work(cq->workqueue, &sq->recover_work);
1066 netdev_WARN_ONCE(cq
2518 mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) argument
[all...]
en_stats.c
2412 &priv->ptp_stats.cq[tc],
en_tx.c
787 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) argument
798 sq = container_of(cq, struct mlx5e_txqsq, cq);
803 cqe = mlx5_cqwq_get_cqe(&cq->wq);
813 * otherwise a cq overrun may occur
827 mlx5_cqwq_pop(&cq->wq);
864 mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
867 queue_work(cq->workqueue, &sq->recover_work);
872 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
876 mlx5_cqwq_update_db_record(&cq
[all...]
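
mlx5e_poll_ico_cq() and mlx5e_poll_tx_cq() above share one shape: peek a CQE, pop it, process, repeat up to a budget, then publish the consumer index once. The "otherwise a cq overrun may occur" comment visible in both refers to the ordering at the tail: the SQ consumer counter may only be stored back after the CQ doorbell record is updated and a write barrier has executed, or the device could overrun the CQ. A simplified sketch with stand-in helpers (not the real mlx5_cqwq_* signatures):

    #define CQ_POLL_BUDGET 128              /* plays the MLX5E_TX_CQ_POLL_BUDGET role */

    struct cqe;
    struct cq;
    struct sq { u16 cc; };                  /* consumer counter */

    struct cqe *cq_get_cqe(struct cq *cq);  /* NULL when no new completion */
    void cq_pop(struct cq *cq);
    void cq_update_db_record(struct cq *cq);
    u16 process_completion(struct sq *sq, struct cqe *cqe);

    bool poll_cq(struct cq *cq, struct sq *sq)
    {
            u16 sqcc = sq->cc;              /* advance a local copy only */
            struct cqe *cqe;
            int i = 0;

            cqe = cq_get_cqe(cq);
            if (!cqe)
                    return false;

            do {
                    cq_pop(cq);
                    sqcc += process_completion(sq, cqe);
            } while ((++i < CQ_POLL_BUDGET) && (cqe = cq_get_cqe(cq)));

            cq_update_db_record(cq);        /* publish consumer index to hw */

            /* ensure cq space is freed before enabling more cqes */
            wmb();
            sq->cc = sqcc;

            return i == CQ_POLL_BUDGET;     /* "busy": poll again next round */
    }
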
en_txrx.c
57 dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
69 dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
150 busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
160 busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
168 busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
171 busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
174 work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
177 work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
181 mlx5e_poll_ico_cq(&c->icosq.cq);
182 if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
260 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); local
269 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); local
[all...]
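
The napi poll in en_txrx.c shows how one NAPI budget is shared across a channel's queues: TX and XDP-SQ completion queues are drained without consuming the packet budget, while the XSK RQ and the regular RQ split it, the latter receiving whatever the former left over. Schematically (channel layout and helper names simplified from the listing):

    static int napi_poll(struct channel *c, int budget)
    {
            bool busy = false;
            int work_done, i;

            for (i = 0; i < c->num_tc; i++)
                    busy |= poll_tx_cq(&c->sq[i].cq, budget);
            busy |= poll_xdpsq_cq(&c->xdpsq.cq);

            work_done  = poll_rx_cq(&c->xskrq.cq, budget);
            work_done += poll_rx_cq(&c->rq.cq, budget - work_done);
            busy |= work_done == budget;

            return busy ? budget : work_done;   /* stay scheduled while busy */
    }
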
eq.c
93 /* caller must eventually call mlx5_cq_put on the returned cq */
97 struct mlx5_core_cq *cq = NULL; local
100 cq = radix_tree_lookup(&table->tree, cqn);
101 if (likely(cq))
102 mlx5_cq_hold(cq);
105 return cq;
124 struct mlx5_core_cq *cq; local
133 cq = mlx5_eq_cq_get(eq, cqn);
134 if (likely(cq)) {
135 ++cq
407 mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) argument
419 mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) argument
505 struct mlx5_core_cq *cq; local
[all...]
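
eq.c shows the EQ-to-CQ dispatch path: a completion event carries a CQ number, and mlx5_eq_cq_get() resolves it through a per-EQ radix tree while taking a reference, which is why the comment insists the caller eventually call mlx5_cq_put(). The idiom, with simplified types (the exact mlx5 table layout differs):

    struct core_cq {
            refcount_t refcount;
    };

    struct cq_table {
            spinlock_t lock;
            struct radix_tree_root tree;      /* cqn -> struct core_cq * */
    };

    static struct core_cq *eq_cq_get(struct cq_table *table, u32 cqn)
    {
            struct core_cq *cq;

            spin_lock(&table->lock);
            cq = radix_tree_lookup(&table->tree, cqn);
            if (cq)
                    refcount_inc(&cq->refcount);  /* held across the handler */
            spin_unlock(&table->lock);

            return cq;   /* caller drops the reference when done */
    }
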
main.c
43 #include <linux/mlx5/cq.h>
mlx5_core.h
41 #include <linux/mlx5/cq.h>
wq.h
37 #include <linux/mlx5/cq.h>
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c
361 mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
362 conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
371 cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
376 mlx5_cqwq_pop(&conn->cq.wq);
378 mlx5_cqwq_update_db_record(&conn->cq.wq);
381 tasklet_schedule(&conn->cq.tasklet);
385 mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
386 /* ensure cq space is freed before enabling more cqes */
393 struct mlx5_fpga_conn *conn = from_tasklet(conn, t, cq.tasklet);
405 conn = container_of(mcq, struct mlx5_fpga_conn, cq
[all...]
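
conn.c is the event-driven variant: the completion handler schedules a tasklet, the tasklet drains the CQ under a budget (rescheduling itself when CQEs remain), and only a fully drained CQ is re-armed with mlx5_cq_arm() so the next completion raises a fresh event. The comment at line 386 marks the barrier this requires. In outline (helper names illustrative):

    static void drain_cq(struct conn *conn, unsigned int budget)
    {
            struct cqe *cqe;

            while (budget) {
                    cqe = cq_get_cqe(&conn->cq);
                    if (!cqe)
                            break;
                    budget--;
                    cq_pop(&conn->cq);
                    handle_cqe(conn, cqe);
                    cq_update_db_record(&conn->cq);
            }

            if (!budget) {                  /* ran out: more work may remain */
                    tasklet_schedule(&conn->cq.tasklet);
                    return;
            }

            /* ensure cq space is freed before enabling more cqes */
            wmb();
            cq_arm(&conn->cq);              /* request the next event */
    }
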
conn.h
37 #include <linux/mlx5/cq.h>
60 } cq; member in struct:mlx5_fpga_conn
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
aso.c
28 struct mlx5_aso_cq cq; member in struct:mlx5_aso
40 static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq) argument
42 mlx5_wq_destroy(&cq->wq_ctrl);
46 void *cqc_data, struct mlx5_aso_cq *cq)
48 struct mlx5_core_cq *mcq = &cq->mcq;
56 err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
61 mcq->set_ci_db = cq->wq_ctrl.db.db;
62 mcq->arm_db = cq->wq_ctrl.db.db + 1;
64 for (i = 0; i < mlx5_cqwq_get_size(&cq
45 mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node, void *cqc_data, struct mlx5_aso_cq *cq) argument
75 create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data) argument
115 mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq) argument
121 mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node, struct mlx5_aso_cq *cq) argument
393 struct mlx5_aso_cq *cq = &aso->cq; local
[all...]
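
The aso.c allocation path (lines 61-62 above) shows two details common to mlx5 CQs: the doorbell record is a pair of adjacent dwords, consumer index at db.db and arm sequence at db.db + 1, and each CQE in a fresh ring is stamped so its op_own byte reads as invalid/hardware-owned until the device writes it. The stamping loop, in outline (0xf1 is the value the mlx5 drivers use for mlx5_cqe64; the helpers here are stand-ins):

    mcq->set_ci_db = cq->wq_ctrl.db.db;       /* dword 0: consumer index */
    mcq->arm_db    = cq->wq_ctrl.db.db + 1;   /* dword 1: arm sequence   */

    /* An untouched slot must never look like a completion. */
    for (i = 0; i < cq_size; i++) {
            struct mlx5_cqe64 *cqe = cq_get_wqe(cq, i);

            cqe->op_own = 0xf1;               /* opcode invalid + owner bit */
    }
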
eq.h
8 #include <linux/mlx5/cq.h>
83 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
84 void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_dbg.c
893 ring->cq->mcq.cqn, ring->qp->qpn);
dr_send.c
535 * the function will drain the cq till it empty.
552 ne = dr_poll_cq(send_ring->cq, 1);
1066 struct mlx5dr_cq *cq; local
1073 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1074 if (!cq)
1083 err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
1084 &cq->wq_ctrl);
1088 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1089 cqe = mlx5_cqwq_get_wqe(&cq
1148 dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq) argument
[all...]
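
dr_send.c polls synchronously rather than from NAPI; the comment at line 535 describes draining the CQ until it is empty, one CQE per call. In outline (the return convention, negative on error, 0 when empty, 1 per completion, is inferred from the call site at line 552):

    /* Drain the send ring's CQ before reusing its slots. */
    static int drain_send_cq(struct mlx5dr_send_ring *send_ring)
    {
            for (;;) {
                    int ne = dr_poll_cq(send_ring->cq, 1);

                    if (ne < 0)
                            return ne;   /* completion with error */
                    if (ne == 0)
                            return 0;    /* CQ is empty: done     */
                    /* ne == 1: one send completion consumed */
            }
    }
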
dr_types.h
1449 struct mlx5dr_cq *cq; member in struct:mlx5dr_send_ring
/linux-master/drivers/net/ethernet/mellanox/mlxsw/
cmd.h
1021 MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
pci.c
91 } cq; member in union:mlxsw_pci_queue::__anon829
296 struct mlxsw_pci_queue *cq; local
326 cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
327 cq->u.cq.dq = q;
411 struct mlxsw_pci_queue *cq; local
435 cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
436 cq->u.cq.dq = q;
458 cq
[all...]
/linux-master/drivers/net/ethernet/microsoft/mana/
gdma_main.c
290 e.cq.id = qid;
291 e.cq.tail_ptr = tail_ptr;
292 e.cq.arm = num_req;
332 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit) argument
334 struct gdma_context *gc = cq->gdma_dev->gdma_context;
336 u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
338 u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
340 mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq
352 struct gdma_queue *cq; local
1164 mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) argument
1196 mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe) argument
[all...]
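
The mana entries track the CQ with an owner-bit scheme: the head counter runs modulo num_cqe << GDMA_CQE_OWNER_BITS, so its low part selects a slot and the upper part is a lap count the hardware echoes into every CQE it writes, letting mana_gd_read_cqe() distinguish a fresh CQE from a stale one left from an earlier lap. A self-contained illustration (field names and the OWNER_BITS value are made up; the real layout lives in the GDMA headers):

    #include <stdbool.h>
    #include <stdint.h>

    #define OWNER_BITS 3
    #define OWNER_MASK ((1u << OWNER_BITS) - 1)

    struct cqe {
            uint32_t owner;                     /* lap count written by hw */
            uint32_t data;
    };

    /* Return true and advance *head if the slot holds a fresh CQE. */
    static bool read_cqe(const struct cqe *ring, uint32_t num_cqe,
                         uint32_t *head, struct cqe *out)
    {
            uint32_t idx = *head % num_cqe;
            uint32_t lap = (*head / num_cqe) & OWNER_MASK;

            if ((ring[idx].owner & OWNER_MASK) != lap)
                    return false;               /* hw hasn't written it yet */

            *out = ring[idx];
            *head = (*head + 1) % (num_cqe << OWNER_BITS);
            return true;
    }
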

