Lines Matching refs:queue

134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
208 u32 queue;
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
214 test_bit(queue, priv->af_xdp_zc_qps)) {
219 if (queue < rx_queues_cnt)
221 if (queue < tx_queues_cnt)
234 u32 queue;
237 for (queue = 0; queue < rx_queues_cnt; queue++) {
238 rx_q = &priv->dma_conf.rx_queue[queue];
257 u32 queue;
259 for (queue = 0; queue < maxq; queue++) {
260 struct stmmac_channel *ch = &priv->channel[queue];
263 test_bit(queue, priv->af_xdp_zc_qps)) {
268 if (queue < rx_queues_cnt)
270 if (queue < tx_queues_cnt)
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
376 * stmmac_rx_dirty - Get RX queue dirty
378 * @queue: RX queue index
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
412 u32 queue;
415 for (queue = 0; queue < tx_cnt; queue++) {
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
1256 u32 queue;
1259 for (queue = 0; queue < rx_cnt; queue++) {
1260 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1262 pr_info("\tRX Queue %u rings\n", queue);
1284 u32 queue;
1287 for (queue = 0; queue < tx_cnt; queue++) {
1288 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1290 pr_info("\tTX Queue %d rings\n", queue);
1340 * @queue: RX queue index
1346 u32 queue)
1348 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1369 * @queue: TX queue index.
1375 u32 queue)
1377 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1408 u32 queue;
1411 for (queue = 0; queue < rx_queue_cnt; queue++)
1412 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1415 for (queue = 0; queue < tx_queue_cnt; queue++)
1416 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1426 * @queue: RX queue index
1433 int i, gfp_t flags, u32 queue)
1435 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1473 * @rx_q: RX queue
1495 * @queue: RX queue index
1500 u32 queue, int i)
1502 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1542 * @queue: RX queue index
1546 u32 queue)
1548 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1557 u32 queue, gfp_t flags)
1559 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1572 queue);
1586 * @queue: RX queue index
1590 u32 queue)
1592 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 u32 queue)
1610 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1643 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1645 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1648 return xsk_get_pool_from_qid(priv->dev, queue);
1652 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1655 * @queue: RX queue index
1663 u32 queue, gfp_t flags)
1665 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1672 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1676 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1699 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1701 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1727 int queue;
1734 for (queue = 0; queue < rx_count; queue++) {
1735 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1743 while (queue >= 0) {
1744 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1747 dma_free_rx_xskbufs(priv, dma_conf, queue);
1749 dma_free_rx_skbufs(priv, dma_conf, queue);
1754 queue--;
1761 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1764 * @queue: TX queue index
1771 u32 queue)
1773 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1792 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1821 u32 queue;
1825 for (queue = 0; queue < tx_queue_cnt; queue++)
1826 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1865 * @queue: TX queue index
1869 u32 queue)
1871 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1877 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 u32 queue;
1895 for (queue = 0; queue < tx_queue_cnt; queue++)
1896 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1900 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1903 * @queue: RX queue index
1907 u32 queue)
1909 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1913 dma_free_rx_xskbufs(priv, dma_conf, queue);
1915 dma_free_rx_skbufs(priv, dma_conf, queue);
1942 u32 queue;
1944 /* Free RX queue resources */
1945 for (queue = 0; queue < rx_count; queue++)
1946 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1950 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1953 * @queue: TX queue index
1957 u32 queue)
1959 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1964 dma_free_tx_skbufs(priv, dma_conf, queue);
1989 u32 queue;
1991 /* Free TX queue resources */
1992 for (queue = 0; queue < tx_count; queue++)
1993 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1997 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2000 * @queue: RX queue index
2008 u32 queue)
2010 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011 struct stmmac_channel *ch = &priv->channel[queue];
2018 rx_q->queue_index = queue;
2064 test_bit(queue, priv->af_xdp_zc_qps))
2084 u32 queue;
2088 for (queue = 0; queue < rx_count; queue++) {
2089 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2103 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2106 * @queue: TX queue index
2114 u32 queue)
2116 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2120 tx_q->queue_index = queue;
2163 u32 queue;
2167 for (queue = 0; queue < tx_count; queue++) {
2168 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2228 int queue;
2231 for (queue = 0; queue < rx_queues_count; queue++) {
2232 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2233 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2366 /* Adjust for real per queue fifo size */
2459 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2461 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2462 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2463 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2474 budget = min(budget, stmmac_tx_avail(priv, queue));
2485 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2495 priv->plat->est->max_sdu[queue] &&
2496 xdp_desc.len > priv->plat->est->max_sdu[queue]) {
2497 priv->xstats.max_sdu_txq_drop[queue]++;
2530 if (!priv->tx_coal_frames[queue])
2532 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2565 stmmac_flush_tx_descriptors(priv, queue);
2596 * @queue: TX queue index
2602 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2605 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2606 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2611 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2661 stmmac_bump_dma_threshold(priv, queue);
2732 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2736 queue))) &&
2737 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2741 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2758 work_done = stmmac_xdp_xmit_zc(priv, queue,
2784 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2840 /* Adjust for real per queue fifo size */
3060 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3062 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3063 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3159 * stmmac_set_tx_queue_weight - Set TX queue weight
3167 u32 queue;
3169 for (queue = 0; queue < tx_queues_count; queue++) {
3170 weight = priv->plat->tx_queues_cfg[queue].weight;
3171 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3176 * stmmac_configure_cbs - Configure CBS in TX queue
3184 u32 queue;
3186 /* queue 0 is reserved for legacy traffic */
3187 for (queue = 1; queue < tx_queues_count; queue++) {
3188 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3193 priv->plat->tx_queues_cfg[queue].send_slope,
3194 priv->plat->tx_queues_cfg[queue].idle_slope,
3195 priv->plat->tx_queues_cfg[queue].high_credit,
3196 priv->plat->tx_queues_cfg[queue].low_credit,
3197 queue);
3202 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3209 u32 queue;
3212 for (queue = 0; queue < rx_queues_count; queue++) {
3213 chan = priv->plat->rx_queues_cfg[queue].chan;
3214 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3226 u32 queue;
3229 for (queue = 0; queue < rx_queues_count; queue++) {
3230 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3233 prio = priv->plat->rx_queues_cfg[queue].prio;
3234 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3246 u32 queue;
3249 for (queue = 0; queue < tx_queues_count; queue++) {
3250 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3253 prio = priv->plat->tx_queues_cfg[queue].prio;
3254 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3261 * Description: It is used for configuring the RX queue routing
3266 u32 queue;
3269 for (queue = 0; queue < rx_queues_count; queue++) {
3270 /* no specific packet type routing specified for the queue */
3271 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3274 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3275 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3470 u32 queue;
3472 for (queue = 0; queue < rx_cnt; queue++) {
3473 if (!priv->rx_riwt[queue])
3474 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3477 priv->rx_riwt[queue], queue);
3851 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3853 * @mtu: MTU to setup the dma queue and buf with
3855 * Allocate the Tx/Rx DMA queue and init them.
4139 * @queue: TX queue index
4145 int total_len, bool last_segment, u32 queue)
4147 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4184 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4186 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4203 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4238 u32 queue = skb_get_queue_mapping(skb);
4249 tx_q = &priv->dma_conf.tx_queue[queue];
4250 txq_stats = &priv->xstats.txq_stats[queue];
4263 if (unlikely(stmmac_tx_avail(priv, queue) <
4265 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4267 queue));
4270 "%s: Tx Ring full when queue awake\n",
4343 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4356 (i == nfrags - 1), queue);
4376 else if (!priv->tx_coal_frames[queue])
4378 else if (tx_packets > priv->tx_coal_frames[queue])
4381 priv->tx_coal_frames[queue]) < tx_packets)
4403 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4406 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4455 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4457 stmmac_flush_tx_descriptors(priv, queue);
4458 stmmac_tx_timer_arm(priv, queue);
4505 u32 queue = skb_get_queue_mapping(skb);
4516 tx_q = &priv->dma_conf.tx_queue[queue];
4517 txq_stats = &priv->xstats.txq_stats[queue];
4532 priv->plat->est->max_sdu[queue] &&
4533 skb->len > priv->plat->est->max_sdu[queue]) {
4534 priv->xstats.max_sdu_txq_drop[queue]++;
4538 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4539 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4541 queue));
4544 "%s: Tx Ring full when queue awake\n",
4566 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4644 else if (!priv->tx_coal_frames[queue])
4646 else if (tx_packets > priv->tx_coal_frames[queue])
4649 priv->tx_coal_frames[queue]) < tx_packets)
4684 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4687 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4744 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4748 stmmac_flush_tx_descriptors(priv, queue);
4749 stmmac_tx_timer_arm(priv, queue);
4782 * @queue: RX queue index
4786 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4788 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4789 int dirty = stmmac_rx_dirty(priv, queue);
4830 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4831 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4834 use_rx_wd = !priv->rx_coal_frames[queue];
4847 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4899 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4902 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4903 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4909 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4913 priv->plat->est->max_sdu[queue] &&
4914 xdpf->len > priv->plat->est->max_sdu[queue]) {
4915 priv->xstats.max_sdu_txq_drop[queue]++;
4960 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5001 int queue;
5007 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5008 nq = netdev_get_tx_queue(priv->dev, queue);
5014 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5016 stmmac_flush_tx_descriptors(priv, queue);
5079 int queue;
5081 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5084 stmmac_tx_timer_arm(priv, queue);
5111 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5115 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5116 struct stmmac_channel *ch = &priv->channel[queue];
5146 skb_record_rx_queue(skb, queue);
5155 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5157 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5162 budget = min(budget, stmmac_rx_dirty(priv, queue));
5188 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5189 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5192 use_rx_wd = !priv->rx_coal_frames[queue];
5207 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5223 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5225 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5226 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5228 int dirty = stmmac_rx_dirty(priv, queue);
5279 !stmmac_rx_refill_zc(priv, queue, dirty);
5362 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5396 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5411 * @queue: RX queue index.
5415 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5418 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5419 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5420 struct stmmac_channel *ch = &priv->channel[queue];
5667 skb_record_rx_queue(skb, queue);
5685 stmmac_rx_refill(priv, queue);
5814 * @txqueue: the index of the hanging transmit queue
6020 u32 queue;
6052 for (queue = 0; queue < queues_count; queue++)
6053 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6336 u32 queue;
6341 for (queue = 0; queue < rx_count; queue++) {
6342 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6344 seq_printf(seq, "RX Queue %d:\n", queue);
6357 for (queue = 0; queue < tx_count; queue++) {
6358 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6360 seq_printf(seq, "TX Queue %d:\n", queue);
6738 int queue;
6746 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6747 nq = netdev_get_tx_queue(priv->dev, queue);
6756 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6764 stmmac_flush_tx_descriptors(priv, queue);
6765 stmmac_tx_timer_arm(priv, queue);
6773 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6775 struct stmmac_channel *ch = &priv->channel[queue];
6779 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6782 stmmac_stop_rx_dma(priv, queue);
6783 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6786 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6788 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6789 struct stmmac_channel *ch = &priv->channel[queue];
6794 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6800 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6802 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6807 stmmac_reset_rx_queue(priv, queue);
6808 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6829 stmmac_start_rx_dma(priv, queue);
6832 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6836 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6838 struct stmmac_channel *ch = &priv->channel[queue];
6842 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6845 stmmac_stop_tx_dma(priv, queue);
6846 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6849 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6851 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6852 struct stmmac_channel *ch = &priv->channel[queue];
6856 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6862 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6864 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6869 stmmac_reset_tx_queue(priv, queue);
6870 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6882 stmmac_start_tx_dma(priv, queue);
6885 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
7031 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7045 if (queue >= priv->plat->rx_queues_to_use ||
7046 queue >= priv->plat->tx_queues_to_use)
7049 rx_q = &priv->dma_conf.rx_queue[queue];
7050 tx_q = &priv->dma_conf.tx_queue[queue];
7051 ch = &priv->channel[queue];
7279 u32 queue, maxq;
7283 for (queue = 0; queue < maxq; queue++) {
7284 struct stmmac_channel *ch = &priv->channel[queue];
7287 ch->index = queue;
7290 if (queue < priv->plat->rx_queues_to_use) {
7293 if (queue < priv->plat->tx_queues_to_use) {
7297 if (queue < priv->plat->rx_queues_to_use &&
7298 queue < priv->plat->tx_queues_to_use) {
7308 u32 queue, maxq;
7312 for (queue = 0; queue < maxq; queue++) {
7313 struct stmmac_channel *ch = &priv->channel[queue];
7315 if (queue < priv->plat->rx_queues_to_use)
7317 if (queue < priv->plat->tx_queues_to_use)
7319 if (queue < priv->plat->rx_queues_to_use &&
7320 queue < priv->plat->tx_queues_to_use) {
7851 * by the platform driver to stop the network queue, release the resources,
7920 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7922 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7928 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7930 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7936 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7940 * stmmac_reset_queues_param - reset queue parameters
7947 u32 queue;
7949 for (queue = 0; queue < rx_cnt; queue++)
7950 stmmac_reset_rx_queue(priv, queue);
7952 for (queue = 0; queue < tx_cnt; queue++)
7953 stmmac_reset_tx_queue(priv, queue);
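
Nearly every match above follows the same per-queue pattern: iterate queue from 0 up to priv->plat->rx_queues_to_use / tx_queues_to_use, index into priv->dma_conf.rx_queue[] / tx_queue[] (or priv->channel[]), and hand the index to a per-queue helper. Below is a minimal sketch of that pattern, reconstructed from the stmmac_reset_queues_param matches at the end of the listing. Only the lines containing "queue" appear above, so the local declarations and the zeroing of the ring indices are inferred from context and marked as such; treat this as an illustrative sketch, not the exact upstream bodies.

        /* Sketch of the recurring per-queue pattern, using the
         * stmmac_reset_* matches above as the example. Lines marked
         * "inferred" do not appear in the listing.
         */
        static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
        {
                struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

                /* inferred: rewind the software ring indices for this queue */
                rx_q->cur_rx = 0;
                rx_q->dirty_rx = 0;
        }

        static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
        {
                struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

                /* inferred: rewind the software ring indices for this queue */
                tx_q->cur_tx = 0;
                tx_q->dirty_tx = 0;

                /* matched line: also reset the BQL state of the netdev queue */
                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
        }

        static void stmmac_reset_queues_param(struct stmmac_priv *priv)
        {
                /* inferred locals; the field names appear in other matches */
                u32 rx_cnt = priv->plat->rx_queues_to_use;
                u32 tx_cnt = priv->plat->tx_queues_to_use;
                u32 queue;

                for (queue = 0; queue < rx_cnt; queue++)
                        stmmac_reset_rx_queue(priv, queue);

                for (queue = 0; queue < tx_cnt; queue++)
                        stmmac_reset_tx_queue(priv, queue);
        }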