Lines Matching defs:tx_ring

614 ring = adapter->tx_ring[n];
665 ring = adapter->tx_ring[n];
954 &adapter->tx_ring[i]->state);
997 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
999 tc = tx_ring->dcb_tc;
1001 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1028 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1030 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1031 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1032 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1034 clear_check_for_tx_hang(tx_ring);
1051 &tx_ring->state);
1053 tx_ring->tx_stats.tx_done_old = tx_done;
1055 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
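The matches at 1028-1055 are from ixgbe_check_tx_hang(). Read together, the fragments suggest the heuristic: if no new completions have been counted since the previous poll while descriptors are still pending, arm a one-shot flag and report a hang only if it was already armed; any progress re-snapshots tx_done_old and disarms the check. A minimal user-space sketch of that idea (struct layout and function name are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdint.h>

	struct tx_hang_state {
		uint32_t tx_done_old;	/* completions counted on the previous check */
		bool     armed;		/* set once a no-progress interval is seen   */
	};

	/* Report a hang only on the second consecutive check that finds
	 * pending work but no new completions; progress disarms the check. */
	static bool check_tx_hang(struct tx_hang_state *s,
				  uint32_t tx_done, uint32_t tx_pending)
	{
		if (s->tx_done_old == tx_done && tx_pending) {
			if (s->armed)
				return true;
			s->armed = true;	/* arm, re-evaluate next interval */
			return false;
		}
		s->tx_done_old = tx_done;	/* progress: take a new snapshot */
		s->armed = false;
		return false;
	}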
1110 * @tx_ring: ring to update
1115 void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
1119 u64_stats_update_begin(&tx_ring->syncp);
1120 tx_ring->stats.bytes += bytes;
1121 tx_ring->stats.packets += pkts;
1122 u64_stats_update_end(&tx_ring->syncp);
1149 * @tx_ring: tx ring to clean
1153 struct ixgbe_ring *tx_ring, int napi_budget)
1160 unsigned int i = tx_ring->next_to_clean;
1166 tx_buffer = &tx_ring->tx_buffer_info[i];
1167 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1168 i -= tx_ring->count;
1194 if (ring_is_xdp(tx_ring))
1200 dma_unmap_single(tx_ring->dev,
1214 i -= tx_ring->count;
1215 tx_buffer = tx_ring->tx_buffer_info;
1216 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1221 dma_unmap_page(tx_ring->dev,
1234 i -= tx_ring->count;
1235 tx_buffer = tx_ring->tx_buffer_info;
1236 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1246 i += tx_ring->count;
1247 tx_ring->next_to_clean = i;
1248 ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
1252 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1263 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1264 tx_ring->queue_index,
1265 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1266 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1267 tx_ring->next_to_use, i,
1268 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1270 if (!ring_is_xdp(tx_ring))
1271 netif_stop_subqueue(tx_ring->netdev,
1272 tx_ring->queue_index);
1276 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1285 if (ring_is_xdp(tx_ring))
1289 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
1291 ixgbe_desc_unused(tx_ring),
1293 !netif_carrier_ok(tx_ring->netdev) ||
1295 ++tx_ring->tx_stats.restart_queue;
1302 struct ixgbe_ring *tx_ring,
1310 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1314 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1318 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
3118 struct ixgbe_ring *ring = adapter->tx_ring[i];
3657 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
5909 struct ixgbe_ring *ring = adapter->tx_ring[i];
5958 struct ixgbe_ring *ring = adapter->tx_ring[i];
6059 * @tx_ring: ring to be cleaned
6061 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
6063 u16 i = tx_ring->next_to_clean;
6064 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
6066 if (tx_ring->xsk_pool) {
6067 ixgbe_xsk_clean_tx_ring(tx_ring);
6071 while (i != tx_ring->next_to_use) {
6075 if (ring_is_xdp(tx_ring))
6081 dma_unmap_single(tx_ring->dev,
6088 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6095 if (unlikely(i == tx_ring->count)) {
6097 tx_buffer = tx_ring->tx_buffer_info;
6098 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6103 dma_unmap_page(tx_ring->dev,
6112 if (unlikely(i == tx_ring->count)) {
6114 tx_buffer = tx_ring->tx_buffer_info;
6119 if (!ring_is_xdp(tx_ring))
6120 netdev_tx_reset_queue(txring_txq(tx_ring));
6124 tx_ring->next_to_use = 0;
6125 tx_ring->next_to_clean = 0;
6149 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6527 * @tx_ring: tx descriptor ring (for a specific queue) to setup
6531 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6533 struct device *dev = tx_ring->dev;
6538 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6540 if (tx_ring->q_vector)
6541 ring_node = tx_ring->q_vector->numa_node;
6543 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6544 if (!tx_ring->tx_buffer_info)
6545 tx_ring->tx_buffer_info = vmalloc(size);
6546 if (!tx_ring->tx_buffer_info)
6550 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6551 tx_ring->size = ALIGN(tx_ring->size, 4096);
6554 tx_ring->desc = dma_alloc_coherent(dev,
6555 tx_ring->size,
6556 &tx_ring->dma,
6559 if (!tx_ring->desc)
6560 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6561 &tx_ring->dma, GFP_KERNEL);
6562 if (!tx_ring->desc)
6565 tx_ring->next_to_use = 0;
6566 tx_ring->next_to_clean = 0;
6570 vfree(tx_ring->tx_buffer_info);
6571 tx_ring->tx_buffer_info = NULL;
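The ixgbe_setup_tx_resources() matches at 6531-6571 follow a NUMA-aware allocate-with-fallback pattern: try the queue vector's home node first, fall back to an unconstrained allocation if that fails, and unwind on the error path. A condensed kernel-context sketch of that pattern for the software buffer array alone (helper name is hypothetical; the descriptor allocation in the fragments repeats the same try-preferred-node-then-fallback shape with dma_alloc_coherent()):

	static int ixgbe_alloc_tx_buffer_info(struct ixgbe_ring *tx_ring)
	{
		int node = NUMA_NO_NODE;
		size_t size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;

		if (tx_ring->q_vector)
			node = tx_ring->q_vector->numa_node;

		/* prefer the queue's home node, accept any node on failure */
		tx_ring->tx_buffer_info = vmalloc_node(size, node);
		if (!tx_ring->tx_buffer_info)
			tx_ring->tx_buffer_info = vmalloc(size);

		return tx_ring->tx_buffer_info ? 0 : -ENOMEM;
	}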
6591 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6613 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6721 * @tx_ring: Tx descriptor ring for a specific queue
6725 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6727 ixgbe_clean_tx_ring(tx_ring);
6729 vfree(tx_ring->tx_buffer_info);
6730 tx_ring->tx_buffer_info = NULL;
6733 if (!tx_ring->desc)
6736 dma_free_coherent(tx_ring->dev, tx_ring->size,
6737 tx_ring->desc, tx_ring->dma);
6739 tx_ring->desc = NULL;
6753 if (adapter->tx_ring[i]->desc)
6754 ixgbe_free_tx_resources(adapter->tx_ring[i]);
7166 struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
7168 if (!tx_ring)
7170 restart_queue += tx_ring->tx_stats.restart_queue;
7171 tx_busy += tx_ring->tx_stats.tx_busy;
7172 bytes += tx_ring->stats.bytes;
7173 packets += tx_ring->stats.packets;
7404 &(adapter->tx_ring[i]->state));
7440 set_check_for_tx_hang(adapter->tx_ring[i]);
7651 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7653 if (tx_ring->next_to_use != tx_ring->next_to_clean)
8076 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
8176 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8182 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8229 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8293 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8295 if (!netif_subqueue_try_stop(tx_ring->netdev, tx_ring->queue_index,
8296 ixgbe_desc_unused(tx_ring), size))
8299 ++tx_ring->tx_stats.restart_queue;
8303 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8305 if (likely(ixgbe_desc_unused(tx_ring) >= size))
8308 return __ixgbe_maybe_stop_tx(tx_ring, size);
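The matches at 8293-8308 are the Tx flow-control pair: the inline fast path only drops into __ixgbe_maybe_stop_tx() when fewer free descriptors remain than the frame needs, and the slow path stops the subqueue and bumps restart_queue. The free-descriptor count itself must handle ring wrap-around; a small sketch of that computation under the usual convention of keeping one slot empty (an assumption here, not quoted from the driver):

	#include <stdint.h>

	/* Free slots between the producer index (next_to_use) and the
	 * consumer index (next_to_clean) on a ring of 'count' entries,
	 * reserving one slot so a full ring is distinguishable from an
	 * empty one. */
	static uint16_t desc_unused(uint16_t next_to_clean, uint16_t next_to_use,
				    uint16_t count)
	{
		if (next_to_clean > next_to_use)
			return next_to_clean - next_to_use - 1;
		return count + next_to_clean - next_to_use - 1;
	}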
8311 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8323 u16 i = tx_ring->next_to_use;
8325 tx_desc = IXGBE_TX_DESC(tx_ring, i);
8343 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8348 if (dma_mapping_error(tx_ring->dev, dma))
8363 if (i == tx_ring->count) {
8364 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8382 if (i == tx_ring->count) {
8383 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8395 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8398 tx_buffer = &tx_ring->tx_buffer_info[i];
8405 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8426 if (i == tx_ring->count)
8429 tx_ring->next_to_use = i;
8431 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8433 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8434 writel(i, tx_ring->tail);
8439 dev_err(tx_ring->dev, "TX DMA map failed\n");
8443 tx_buffer = &tx_ring->tx_buffer_info[i];
8445 dma_unmap_page(tx_ring->dev,
8453 i += tx_ring->count;
8460 tx_ring->next_to_use = i;
8743 struct ixgbe_ring *tx_ring)
8765 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8766 tx_ring->tx_stats.tx_busy++;
8771 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8846 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8847 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8858 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8861 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8865 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8868 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8869 ixgbe_atr(tx_ring, first);
8874 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8898 struct ixgbe_ring *tx_ring;
8907 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8908 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8911 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
9095 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
10466 struct ixgbe_ring *tx_ring)
10470 u8 reg_idx = tx_ring->reg_idx;
10495 struct ixgbe_ring *tx_ring)
10497 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10498 ixgbe_disable_txr_hw(adapter, tx_ring);
10540 static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10542 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10543 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10600 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10603 tx_ring = adapter->tx_ring[ring];
10611 ixgbe_disable_txr(adapter, tx_ring);
10619 ixgbe_clean_tx_ring(tx_ring);
10624 ixgbe_reset_txr_stats(tx_ring);
10640 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10643 tx_ring = adapter->tx_ring[ring];
10646 ixgbe_configure_tx_ring(adapter, tx_ring);
10651 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
11153 u64_stats_init(&adapter->tx_ring[i]->syncp);