Lines matching refs:sq (references to the send-queue pointer sq in the mlx5e SQ setup and teardown paths)

289 				       struct mlx5e_icosq *sq,
301 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
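
Lines 289-301 above show an ICOSQ WQE control segment being built; qpn_ds packs the SQ number and the data-segment count into one big-endian 32-bit word. A minimal userspace sketch of that packing, where htonl() stands in for cpu_to_be32() and the program itself is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htonl(): stand-in for cpu_to_be32() */

#define QPN_SHIFT 8             /* mirrors MLX5_WQE_CTRL_QPN_SHIFT */

/* SQ number in the high 24 bits, data-segment count in the low 8. */
static uint32_t pack_qpn_ds(uint32_t sqn, uint8_t ds_cnt)
{
    return htonl((sqn << QPN_SHIFT) | ds_cnt);
}

int main(void)
{
    printf("qpn_ds = 0x%08x\n", pack_qpn_ds(0x12345, 4));
    return 0;
}
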
1374 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
1376 kvfree(sq->db.xdpi_fifo.xi);
1377 kvfree(sq->db.wqe_info);
1380 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
1382 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
1383 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1396 xdpi_fifo->pc = &sq->xdpi_fifo_pc;
1397 xdpi_fifo->cc = &sq->xdpi_fifo_cc;
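
Lines 1380-1397 attach the XDP-info FIFO to free-running producer/consumer counters (xdpi_fifo_pc / xdpi_fifo_cc) that live on the SQ itself. A sketch of that counter-plus-mask ring, assuming a power-of-two depth; every name below is illustrative, not the driver's:

#include <stdint.h>
#include <stdlib.h>

struct info_fifo {
    void    **ring;
    uint16_t *pc, *cc;          /* counters owned by the SQ */
    uint16_t  mask;             /* depth - 1; depth is a power of two */
};

static int fifo_init(struct info_fifo *f, uint16_t *pc, uint16_t *cc,
                     size_t depth)
{
    f->ring = calloc(depth, sizeof(*f->ring));
    if (!f->ring)
        return -1;
    f->pc = pc;
    f->cc = cc;
    f->mask = (uint16_t)(depth - 1);
    return 0;
}

/* The counters run free and never wrap explicitly; masking maps them
 * onto ring slots, and pc - cc is the current fill level. */
static void fifo_push(struct info_fifo *f, void *item)
{
    f->ring[(*f->pc)++ & f->mask] = item;
}

static void *fifo_pop(struct info_fifo *f)
{
    return f->ring[(*f->cc)++ & f->mask];
}
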
1403 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
1405 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1409 size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
1410 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1411 if (!sq->db.wqe_info)
1414 err = mlx5e_alloc_xdpsq_fifo(sq, numa);
1416 mlx5e_free_xdpsq_db(sq);
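
mlx5e_alloc_xdpsq_db() sizes the wqe_info array with array_size(), which saturates on multiplication overflow so the allocator fails cleanly instead of handing back a short buffer; it then frees the array again if the FIFO allocation at 1414 fails. A userspace approximation of the overflow-checked sizing (the helper name is hypothetical):

#include <stddef.h>
#include <stdint.h>

/* Saturating n * size, in the spirit of the kernel's array_size():
 * on overflow it returns SIZE_MAX, which the allocator will reject. */
static size_t array_size_checked(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return SIZE_MAX;
    return n * size;
}
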
1427 struct mlx5e_xdpsq *sq,
1432 struct mlx5_wq_cyc *wq = &sq->wq;
1435 sq->pdev = c->pdev;
1436 sq->mkey_be = c->mkey_be;
1437 sq->channel = c;
1438 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1439 sq->min_inline_mode = params->tx_min_inline_mode;
1440 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
1441 sq->xsk_pool = xsk_pool;
1443 sq->stats = sq->xsk_pool ?
1448 sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
1450 sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
1453 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1458 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1465 mlx5_wq_destroy(&sq->wq_ctrl);
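
The tail of mlx5e_alloc_xdpsq() (line 1465) shows the usual kernel unwind shape: resources are released in reverse order of acquisition, so a failure in the second stage tears down only the first. A self-contained sketch of that shape with stubbed stages; every name here is hypothetical:

struct sq_model { int wq_created, db_allocated; };

static int  wq_create(struct sq_model *sq)  { sq->wq_created = 1; return 0; }
static void wq_destroy(struct sq_model *sq) { sq->wq_created = 0; }
static int  db_alloc(struct sq_model *sq)   { sq->db_allocated = 1; return 0; }

static int sq_alloc(struct sq_model *sq)
{
    int err;

    err = wq_create(sq);            /* stage 1 */
    if (err)
        return err;

    err = db_alloc(sq);             /* stage 2 */
    if (err)
        goto err_wq_destroy;        /* unwind stage 1 only */

    return 0;

err_wq_destroy:
    wq_destroy(sq);
    return err;
}
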
1470 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1472 mlx5e_free_xdpsq_db(sq);
1473 mlx5_wq_destroy(&sq->wq_ctrl);
1476 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
1478 kvfree(sq->db.wqe_info);
1481 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
1483 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1486 size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
1487 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1488 if (!sq->db.wqe_info)
1496 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1499 mlx5e_reporter_icosq_cqe_err(sq);
1504 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1509 netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
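
Both recovery handlers (lines 1496 and 1504) recover the owning SQ from the embedded work_struct via container_of(), which subtracts the member's offset from the member pointer. A userspace model of the pattern; struct and function names are illustrative:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*fn)(struct work *); };

struct icosq_model {
    int sqn;
    struct work recover_work;       /* embedded, like sq->recover_work */
};

static void recover(struct work *w)
{
    /* Walk back from the embedded member to the enclosing struct. */
    struct icosq_model *sq = container_of(w, struct icosq_model, recover_work);
    printf("recovering icosq %d\n", sq->sqn);
}

int main(void)
{
    struct icosq_model sq = { .sqn = 7, .recover_work = { .fn = recover } };
    sq.recover_work.fn(&sq.recover_work);
    return 0;
}
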
1514 struct mlx5e_icosq *sq,
1519 struct mlx5_wq_cyc *wq = &sq->wq;
1522 sq->channel = c;
1523 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1524 sq->reserved_room = param->stop_room;
1527 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1532 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1536 INIT_WORK(&sq->recover_work, recover_work_func);
1541 mlx5_wq_destroy(&sq->wq_ctrl);
1546 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
1548 mlx5e_free_icosq_db(sq);
1549 mlx5_wq_destroy(&sq->wq_ctrl);
1552 void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
1554 kvfree(sq->db.wqe_info);
1555 kvfree(sq->db.skb_fifo.fifo);
1556 kvfree(sq->db.dma_fifo);
1559 int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
1561 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1564 sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
1565 sizeof(*sq->db.dma_fifo)),
1567 sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
1568 sizeof(*sq->db.skb_fifo.fifo)),
1570 sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
1571 sizeof(*sq->db.wqe_info)),
1573 if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
1574 mlx5e_free_txqsq_db(sq);
1578 sq->dma_fifo_mask = df_sz - 1;
1580 sq->db.skb_fifo.pc = &sq->skb_fifo_pc;
1581 sq->db.skb_fifo.cc = &sq->skb_fifo_cc;
1582 sq->db.skb_fifo.mask = df_sz - 1;
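
mlx5e_alloc_txqsq_db() makes three allocations, checks them with one combined test (line 1573), and routes any failure through a single shared cleanup; the FIFO masks are then derived from a power-of-two depth. A userspace model of that shape, with placeholder element sizes and names:

#include <errno.h>
#include <stdlib.h>

struct txq_db_model {
    void *dma_fifo, *skb_fifo, *wqe_info;
    unsigned int dma_fifo_mask, skb_fifo_mask;
};

static void db_free(struct txq_db_model *db)
{
    /* free(NULL) is a no-op, so one path covers any subset of
     * successful allocations. */
    free(db->wqe_info);
    free(db->skb_fifo);
    free(db->dma_fifo);
}

static int db_alloc(struct txq_db_model *db, size_t wq_sz, size_t df_sz)
{
    db->dma_fifo = calloc(df_sz, 16);               /* placeholder sizes */
    db->skb_fifo = calloc(df_sz, sizeof(void *));
    db->wqe_info = calloc(wq_sz, 32);

    if (!db->dma_fifo || !db->skb_fifo || !db->wqe_info) {
        db_free(db);
        return -ENOMEM;
    }

    db->dma_fifo_mask = df_sz - 1;  /* df_sz must be a power of two */
    db->skb_fifo_mask = df_sz - 1;
    return 0;
}
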
1591 struct mlx5e_txqsq *sq,
1596 struct mlx5_wq_cyc *wq = &sq->wq;
1599 sq->pdev = c->pdev;
1600 sq->clock = &mdev->clock;
1601 sq->mkey_be = c->mkey_be;
1602 sq->netdev = c->netdev;
1603 sq->mdev = c->mdev;
1604 sq->channel = c;
1605 sq->priv = c->priv;
1606 sq->ch_ix = c->ix;
1607 sq->txq_ix = txq_ix;
1608 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1609 sq->min_inline_mode = params->tx_min_inline_mode;
1610 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1611 sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
1612 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
1614 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
1616 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1618 set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
1619 sq->stop_room = param->stop_room;
1620 sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
1623 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1628 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1635 mlx5_wq_destroy(&sq->wq_ctrl);
1640 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1642 kvfree(sq->dim);
1643 mlx5e_free_txqsq_db(sq);
1644 mlx5_wq_destroy(&sq->wq_ctrl);
1768 struct mlx5e_txqsq *sq, u32 rate);
1772 struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
1779 err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
1783 sq->stats = sq_stats;
1787 csp.cqn = sq->cq.mcq.cqn;
1788 csp.wq_ctrl = &sq->wq_ctrl;
1789 csp.min_inline_mode = sq->min_inline_mode;
1790 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
1794 tx_rate = c->priv->tx_rates[sq->txq_ix];
1796 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1798 if (sq->channel && !params->tx_dim_enabled) {
1799 sq->channel->tx_cq_moder = params->tx_cq_moderation;
1800 } else if (sq->channel) {
1806 mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder,
1810 err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled);
1818 mlx5e_destroy_sq(c->mdev, sq->sqn);
1820 mlx5e_free_txqsq(sq);
1825 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1827 sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
1828 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1829 netdev_tx_reset_queue(sq->txq);
1830 netif_tx_start_queue(sq->txq);
1831 netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
1841 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1843 struct mlx5_wq_cyc *wq = &sq->wq;
1845 netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL);
1846 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1849 mlx5e_tx_disable_queue(sq->txq);
1852 if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1853 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1856 sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
1860 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1861 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
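
On deactivation (lines 1852-1861) the driver posts a final NOP WQE only if the ring has room, then rings the doorbell so hardware state is flushed. The room test works on free-running 16-bit counters; one wraparound-safe formulation, not necessarily the driver's exact expression:

#include <stdbool.h>
#include <stdint.h>

/* Room test on free-running u16 producer/consumer counters for a
 * power-of-two ring of sz slots: the unsigned difference pc - cc is
 * the fill level and stays correct across u16 wraparound. */
static bool wq_has_room_for(uint16_t pc, uint16_t cc, uint16_t sz, uint16_t n)
{
    return (uint16_t)(sz - (uint16_t)(pc - cc)) >= n;
}
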
1865 void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1867 struct mlx5_core_dev *mdev = sq->mdev;
1870 if (sq->dim)
1871 cancel_work_sync(&sq->dim->work);
1872 cancel_work_sync(&sq->recover_work);
1873 mlx5e_destroy_sq(mdev, sq->sqn);
1874 if (sq->rate_limit) {
1875 rl.rate = sq->rate_limit;
1878 mlx5e_free_txqsq_descs(sq);
1879 mlx5e_free_txqsq(sq);
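
mlx5e_close_txqsq() (lines 1865-1879) tears down in a strict order: quiesce deferred work first, then destroy the hardware object, then reclaim in-flight descriptors, and free software state last. A stubbed sketch of that ordering; all names below are hypothetical stand-ins:

struct txqsq_model { int sqn; };

/* Stubs standing in for the real cancel/destroy/free primitives. */
static void cancel_work(void *w)                 { (void)w; }
static void hw_sq_destroy(int sqn)               { (void)sqn; }
static void sq_free_descs(struct txqsq_model *s) { (void)s; }
static void sq_free(struct txqsq_model *s)       { (void)s; }

static void sq_close(struct txqsq_model *sq, void *dim_work, void *recover_work)
{
    cancel_work(dim_work);      /* no moderation work can run past here */
    cancel_work(recover_work);  /* nor recovery work */
    hw_sq_destroy(sq->sqn);     /* HW stops touching the ring */
    sq_free_descs(sq);          /* reclaim whatever was still in flight */
    sq_free(sq);                /* software state goes last */
}
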
1884 struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
1887 mlx5e_reporter_tx_err_cqe(sq);
1923 dim_enabled = !!chs->c[i]->sq[tc].dim;
1934 struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
1940 err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
1944 csp.cqn = sq->cq.mcq.cqn;
1945 csp.wq_ctrl = &sq->wq_ctrl;
1947 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
1952 sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
1953 if (IS_ERR(sq->ktls_resync)) {
1954 err = PTR_ERR(sq->ktls_resync);
1961 mlx5e_destroy_sq(c->mdev, sq->sqn);
1963 mlx5e_free_icosq(sq);
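
The kTLS resync list (lines 1952-1954) follows the kernel's IS_ERR()/PTR_ERR() convention: a small negative errno is folded into the returned pointer, so one value carries either a valid pointer or an error code. A userspace model:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(): values in the
 * top 4095 bytes of the address space encode -errno. */
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    void *list = ERR_PTR(-12);      /* -ENOMEM */
    if (IS_ERR(list))
        printf("err = %ld\n", PTR_ERR(list));
    return 0;
}
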
1979 static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1981 struct mlx5e_channel *c = sq->channel;
1983 if (sq->ktls_resync)
1984 mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync);
1985 mlx5e_destroy_sq(c->mdev, sq->sqn);
1986 mlx5e_free_icosq_descs(sq);
1987 mlx5e_free_icosq(sq);
1992 struct mlx5e_xdpsq *sq, bool is_redirect)
1997 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
2004 csp.cqn = sq->cq.mcq.cqn;
2005 csp.wq_ctrl = &sq->wq_ctrl;
2006 csp.min_inline_mode = sq->min_inline_mode;
2007 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
2010 set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
2012 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
2016 mlx5e_set_xmit_fp(sq, param->is_mpw);
2018 if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
2023 if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
2029 for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
2030 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
2034 sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
2039 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
2047 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
2048 mlx5e_free_xdpsq(sq);
2053 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
2055 struct mlx5e_channel *c = sq->channel;
2057 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
2060 mlx5e_destroy_sq(c->mdev, sq->sqn);
2061 mlx5e_free_xdpsq_descs(sq);
2062 mlx5e_free_xdpsq(sq);
2257 ccp, &c->sq[tc].cq);
2266 mlx5e_close_cq(&c->sq[tc].cq);
2276 mlx5e_close_cq(&c->sq[tc].cq);
2333 params, &cparam->txq_sq, &c->sq[tc], tc,
2335 &c->priv->channel_stats[c->ix]->sq[tc]);
2344 mlx5e_close_txqsq(&c->sq[tc]);
2354 mlx5e_close_txqsq(&c->sq[tc]);
2358 struct mlx5e_txqsq *sq, u32 rate)
2367 if (rate == sq->rate_limit)
2371 if (sq->rate_limit) {
2372 rl.rate = sq->rate_limit;
2377 sq->rate_limit = 0;
2393 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
2403 sq->rate_limit = rate;
2411 struct mlx5e_txqsq *sq = priv->txq2sq[index];
2430 err = mlx5e_set_sq_maxrate(dev, sq, rate);
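
mlx5e_set_sq_maxrate() (lines 2358-2403) is written to be idempotent: it returns early when the requested rate already matches, releases the old rate-limit entry before installing a new one, and records the new rate only after the hardware modify succeeds. A small sketch of that update discipline with hypothetical helpers:

#include <errno.h>

struct sq_rate_model { unsigned int rate_limit; };

static void rl_remove(unsigned int rate)      { (void)rate; }
static int  hw_modify_rate(unsigned int rate) { (void)rate; return 0; }

static int sq_set_maxrate(struct sq_rate_model *sq, unsigned int rate)
{
    int err;

    if (rate == sq->rate_limit)     /* nothing to do */
        return 0;

    if (sq->rate_limit) {           /* drop the old entry first */
        rl_remove(sq->rate_limit);
        sq->rate_limit = 0;
    }

    err = hw_modify_rate(rate);     /* program HW before recording */
    if (err)
        return err;

    sq->rate_limit = rate;
    return 0;
}
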
2712 mlx5e_activate_txqsq(&c->sq[tc]);
2738 mlx5e_deactivate_txqsq(&c->sq[tc]);
3111 struct mlx5e_txqsq *sq = &c->sq[tc];
3113 priv->txq2sq[sq->txq_ix] = sq;
3125 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
3127 priv->txq2sq[sq->txq_ix] = sq;
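
Lines 3111-3127 build the reverse map from netdev TX queue index to SQ (priv->txq2sq[sq->txq_ix] = sq) for both the per-TC SQs and the PTP SQs, so later lookups by queue index (the timeout path at 4939, rate setting at 2411) are O(1). A tiny model of that map-building step; names are illustrative:

#include <stddef.h>

struct sq_ref { int txq_ix; };

/* Reverse map: netdev TX queue index -> owning SQ.  Regular and PTP
 * queues land in the same table, keyed by txq_ix. */
static void build_txq_map(struct sq_ref **txq2sq, struct sq_ref *sqs, size_t n)
{
    for (size_t i = 0; i < n; i++)
        txq2sq[sqs[i].txq_ix] = &sqs[i];
}
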
3829 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3838 struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i];
4939 struct mlx5e_txqsq *sq = priv->txq2sq[i];
4944 if (mlx5e_reporter_tx_timeout(sq))