// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
	return &rx_ring->xdp_buf[idx];
}

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf;

	pf = vsi->back;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
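		/* Give any in-flight users of the XDP Tx ring (e.g. the
		 * redirect path running in NAPI/softirq context) a chance to
		 * finish before its buffers are freed.
		 */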
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring, so only
	 * QINT_RQCTL needs to be handled here
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(q_vector->irq.virq);
	}
}

/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
	ice_qvec_toggle_napi(vsi, q_vector, false);

	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_q_vector *q_vector;
	int err;

	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (err)
		return err;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
		if (err)
			return err;
		ice_set_ring_xdp(xdp_ring);
		ice_tx_xsk_pool(vsi, q_idx);
	}

	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
	if (err)
		return err;

	q_vector = vsi->rx_rings[q_idx]->q_vector;
	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return 0;
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	return 0;
}

/**
 * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate a new software ring and return -ENOMEM if the allocation
 * fails. On success, substitute the buffer array with the allocated one.
 * Returns 0 on success, negative on failure
 */
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}

/**
 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffers for the rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
	struct ice_rx_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps,
			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
		rx_ring = vsi->rx_rings[q];
		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

/**
 * ice_fill_rx_descs - pick buffers from the XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by caller of this function.
 *
 * Returns the amount of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		/* Put private info that changes on a per-packet basis
		 * into xdp_buff_xsk->cb.
		 */
		ice_xdp_meta_set_desc(*xdp, rx_desc);

		rx_desc++;
		xdp++;
	}

	return buffs;
}

/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Place the @count of descriptors onto Rx ring. Handle the ring wrap
 * for case where space from next_to_use up to the end of ring is less
 * than @count. Finally do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

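	/* If the request crosses the end of the ring, fill descriptors up to
	 * the ring boundary first, bump the tail at index 0 and then continue
	 * from the beginning of the ring.
	 */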
	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
						   rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		addr = page_to_virt(page);

		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   addr, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 * @xdp_ring: XDP Tx ring
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u16 cnt = xdp_ring->count;
	struct ice_tx_buf *tx_buf;
	u16 completed_frames = 0;
	u16 xsk_frames = 0;
	u16 last_rs;
	int i;

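	/* Check for a completion on the descriptor just before next_to_use;
	 * if its DD bit is set, every descriptor from next_to_clean up to it
	 * has been transmitted by HW.
	 */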
	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (last_rs >= ntc)
			completed_frames = last_rs - ntc + 1;
		else
			completed_frames = last_rs + cnt - ntc + 1;
	}

	if (!completed_frames)
		return 0;

	if (likely(!xdp_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = xdp_ring->next_to_clean;
	for (i = 0; i < completed_frames; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
			xdp_ring->xdp_tx_active--;
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}
skip:
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean += completed_frames;
	if (xdp_ring->next_to_clean >= cnt)
		xdp_ring->next_to_clean -= cnt;
	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	return completed_frames;
}

/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 *
 * Note that this function works directly on the xdp_buff; there is no need to
 * convert it to an xdp_frame. The xdp_buff pointer is stored in ice_tx_buf so
 * that the cleaning side will be able to xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there
 * was not enough space on XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
			      struct ice_tx_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct xdp_buff *head;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq_zc(xdp_ring);

	if (unlikely(!free_space))
		goto busy;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1)
			goto busy;
	}

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_buf = &xdp_ring->tx_buf[ntu];
	head = xdp;

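	/* Produce one Tx descriptor per buffer: the head first, then each
	 * fragment. The EOP bit is set on the last descriptor after the loop.
	 */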
	for (;;) {
		dma_addr_t dma;

		dma = xsk_buff_xdp_get_dma(xdp);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);

		tx_buf->xdp = xdp;
		tx_buf->type = ICE_TX_BUF_XSK_TX;
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
		/* account for each xdp_buff from xsk_buff_pool */
		xdp_ring->xdp_tx_active++;

		if (++ntu == xdp_ring->count)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		xdp = xsk_buff_get_frag(head);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	xdp_ring->next_to_use = ntu;
	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	return ICE_XDP_TX;

busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;

	return ICE_XDP_CONSUMED;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = ICE_XDP_EXIT;
		else
			result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}

	return result;
}

static int
ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
		 struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!size)
		return 0;

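	/* On the first fragment of a frame, initialize the shared info that
	 * lives in the head buffer and mark the head as fragmented.
	 */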
	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
	u32 ntc = rx_ring->next_to_clean;
	u32 ntu = rx_ring->next_to_use;
	struct xdp_buff *first = NULL;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	u32 cnt = rx_ring->count;
	bool failure = false;
	int entries_to_alloc;

	/* ZC path is enabled only when an XDP program is set,
	 * so here it cannot be NULL
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

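	/* If the previous poll ended in the middle of a multi-buffer frame,
	 * resume building it from the buffer stored at first_desc.
	 */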
	if (ntc != rx_ring->first_desc)
		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tci;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (unlikely(ntc == ntu))
			break;

		xdp = *ice_xdp_buf(rx_ring, ntc);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
				   ICE_RX_FLX_DESC_PKT_LEN_M;

		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);

		if (!first) {
			first = xdp;
		} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
			break;
		}

		if (++ntc == cnt)
			ntc = 0;

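		/* Keep gathering buffers until the descriptor that closes the
		 * frame (EOP) is seen, then run XDP on the whole frame.
		 */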
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == ICE_XDP_EXIT) {
			failure = true;
			first = NULL;
			rx_ring->first_desc = ntc;
			break;
		} else if (xdp_res == ICE_XDP_CONSUMED) {
			xsk_buff_free(first);
		} else if (xdp_res == ICE_XDP_PASS) {
			goto construct_skb;
		}

		total_rx_bytes += xdp_get_buff_len(first);
		total_rx_packets++;

		first = NULL;
		rx_ring->first_desc = ntc;
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, first);
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
			break;
		}

		first = NULL;
		rx_ring->first_desc = ntc;

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		vlan_tci = ice_get_vlan_tci(rx_desc);

		ice_process_skb_fields(rx_ring, rx_desc, skb);
		ice_receive_skb(rx_ring, skb, vlan_tci);
	}

	rx_ring->next_to_clean = ntc;
	entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(xsk_pool)) {
		/* ntu could have changed when allocating entries above, so
		 * use rx_ring value instead of stack based one
		 */
		if (failure || ntc == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
						      0, desc->len, 0);

	*total_bytes += desc->len;
}

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
				u32 nb_pkts, unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	int budget;

	ice_clean_xdp_irq_zc(xdp_ring);

	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

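	/* If the batch would cross the end of the ring, fill descriptors up
	 * to the ring boundary first and continue from index 0 for the rest.
	 */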
	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			    &total_bytes);

	ice_set_rs_bit(xdp_ring);
	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	return nb_pkts < budget;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!ring->xsk_pool)
		return -EINVAL;

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}