Lines Matching defs:tx_ring

743 static int fm10k_tso(struct fm10k_ring *tx_ring,
776 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
783 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
785 netdev_err(tx_ring->netdev,
790 static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
812 dev_warn(tx_ring->dev,
814 tx_ring->tx_stats.csum_err++;
855 dev_warn(tx_ring->dev,
860 tx_ring->tx_stats.csum_err++;
866 tx_ring->tx_stats.csum_good++;
870 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
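
The fm10k_tso and fm10k_tx_csum hits above (lines 743-870) are mostly offload bookkeeping around the Tx context descriptor: a malformed or unsupported header makes the driver warn and bump the ring's csum_err counter (or, for an unsupported tunnel, strip NETIF_F_GSO_UDP_TUNNEL from the netdev features), while the success path counts csum_good before writing the descriptor at next_to_use. A minimal userspace model of that accounting, using hypothetical types and a made-up FEATURE_GSO_UDP_TUNNEL bit rather than the driver's structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_GSO_UDP_TUNNEL (1u << 0)   /* hypothetical feature bit */

struct tx_offload_model {
	uint32_t netdev_features;
	uint64_t csum_err;      /* models tx_stats.csum_err */
	uint64_t csum_good;     /* models tx_stats.csum_good */
};

/* Account for one checksum-offload attempt: an unparsable header falls back
 * to no offload and counts an error, everything else counts as good before
 * the context descriptor would be written. */
static void tx_csum(struct tx_offload_model *m, bool header_ok)
{
	if (!header_ok) {
		fprintf(stderr, "partial checksum on unsupported header\n");
		m->csum_err++;
		return;
	}
	m->csum_good++;
}

/* Unsupported tunnel segmentation: disable the offload for future packets. */
static void tso_tunnel_fallback(struct tx_offload_model *m)
{
	m->netdev_features &= ~FEATURE_GSO_UDP_TUNNEL;
	fprintf(stderr, "TSO requested for unsupported tunnel, disabling offload\n");
}

int main(void)
{
	struct tx_offload_model m = { .netdev_features = FEATURE_GSO_UDP_TUNNEL };

	tx_csum(&m, true);
	tx_csum(&m, false);
	tso_tunnel_fallback(&m);
	printf("good=%llu err=%llu features=%#x\n",
	       (unsigned long long)m.csum_good,
	       (unsigned long long)m.csum_err,
	       (unsigned)m.netdev_features);
	return 0;
}
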
892 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
906 return i == tx_ring->count;
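
The fm10k_tx_desc_push hits (lines 892-906) show the helper that fills one descriptor and uses its return value to report that the write just consumed the last slot, so the caller can wrap its cursor and descriptor pointer back to slot 0. A standalone sketch of that contract, with a hypothetical tx_ring_model in place of the driver's types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tx_desc_model {            /* stand-in for struct fm10k_tx_desc */
	uint64_t buffer_addr;
	uint16_t buflen;
	uint8_t  flags;
};

struct tx_ring_model {            /* stand-in for struct fm10k_ring */
	struct tx_desc_model *desc;
	uint16_t count;           /* number of descriptors in the ring */
};

/* Fill one descriptor; return true if slot i was the last one, telling the
 * caller to reset its cursor and descriptor pointer back to slot 0. */
static bool tx_desc_push(struct tx_ring_model *ring,
			 struct tx_desc_model *desc, uint16_t i,
			 uint64_t dma, uint16_t size, uint8_t flags)
{
	desc->buffer_addr = dma;
	desc->buflen = size;
	desc->flags = flags;

	return ++i == ring->count;
}

int main(void)
{
	struct tx_desc_model descs[4] = { { 0 } };
	struct tx_ring_model ring = { .desc = descs, .count = 4 };
	uint16_t i = 0;

	for (int n = 0; n < 6; n++) {
		if (tx_desc_push(&ring, &ring.desc[i], i,
				 0x1000u + (unsigned)n, 64, 0))
			i = 0;            /* wrapped: restart at slot 0 */
		else
			i++;
	}
	printf("cursor after 6 pushes on a 4-slot ring: %u\n", (unsigned)i);
	return 0;
}
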
909 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
911 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
917 if (likely(fm10k_desc_unused(tx_ring) < size))
921 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
922 ++tx_ring->tx_stats.restart_queue;
926 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
928 if (likely(fm10k_desc_unused(tx_ring) >= size))
930 return __fm10k_maybe_stop_tx(tx_ring, size);
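
The __fm10k_maybe_stop_tx / fm10k_maybe_stop_tx hits (lines 909-930) are the usual two-stage Tx flow control: the inline fast path only compares free descriptors against the requested size, and the slow path stops the subqueue, re-checks (the real code places an smp_mb() here so it observes a concurrent cleanup), and restarts the queue if room appeared after all. A single-threaded model, with a hypothetical tx_queue_model standing in for the ring and the netif_* subqueue helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tx_queue_model {
	uint16_t count;            /* ring size */
	uint16_t next_to_use;      /* producer index */
	uint16_t next_to_clean;    /* consumer index */
	bool     stopped;          /* models netif_stop/start_subqueue() */
	unsigned restart_queue;    /* models tx_stats.restart_queue */
};

/* Free slots between consumer and producer, with one guard slot, as in
 * fm10k_desc_unused(). */
static uint16_t desc_unused(const struct tx_queue_model *q)
{
	int unused = q->next_to_clean - q->next_to_use - 1;

	return (uint16_t)(unused < 0 ? unused + q->count : unused);
}

/* Slow path: stop the queue, then re-check in case the cleanup side freed
 * descriptors between the first check and the stop (the driver puts an
 * smp_mb() here); restart the queue if room appeared after all. */
static int maybe_stop_tx_slow(struct tx_queue_model *q, uint16_t size)
{
	q->stopped = true;

	if (desc_unused(q) < size)
		return -1;              /* genuinely out of descriptors */

	q->stopped = false;             /* a reprieve: room appeared */
	q->restart_queue++;
	return 0;
}

/* Fast path: the common case has plenty of room, so only take the slow path
 * when the free count drops below the requested size. */
static int maybe_stop_tx(struct tx_queue_model *q, uint16_t size)
{
	if (desc_unused(q) >= size)
		return 0;
	return maybe_stop_tx_slow(q, size);
}

int main(void)
{
	struct tx_queue_model q = { .count = 256, .next_to_use = 200,
				    .next_to_clean = 210 };
	int ret = maybe_stop_tx(&q, 16);

	printf("unused=%u ret=%d stopped=%d\n",
	       (unsigned)desc_unused(&q), ret, q.stopped);
	return 0;
}
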
933 static void fm10k_tx_map(struct fm10k_ring *tx_ring,
944 u16 i = tx_ring->next_to_use;
947 tx_desc = FM10K_TX_DESC(tx_ring, i);
958 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
964 if (dma_mapping_error(tx_ring->dev, dma))
972 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
974 tx_desc = FM10K_TX_DESC(tx_ring, 0);
985 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
987 tx_desc = FM10K_TX_DESC(tx_ring, 0);
994 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
997 tx_buffer = &tx_ring->tx_buffer[i];
1003 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
1007 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1024 tx_ring->next_to_use = i;
1027 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
1030 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1031 writel(i, tx_ring->tail);
1036 dev_err(tx_ring->dev, "TX DMA map failed\n");
1040 tx_buffer = &tx_ring->tx_buffer[i];
1041 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1045 i = tx_ring->count;
1049 tx_ring->next_to_use = i;
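
The fm10k_tx_map hits (lines 933-1049) walk the skb head and each fragment, DMA-map every piece, push one descriptor per piece (resetting the index and descriptor pointer to slot 0 whenever a push reports a wrap), record the bytes with netdev_tx_sent_queue(), advance next_to_use, and only write the tail doorbell when the queue is stopped or no more packets are pending; a mapping failure takes the dma_error path, which unwinds what was already mapped and restores next_to_use. A sketch of the wrap-and-unwind bookkeeping, with a fake mapping function (fake_dma_map) injected so the error path runs; this is not the driver's buffer handling:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u

struct tx_map_model {
	bool     mapped[RING_SIZE];   /* stands in for per-buffer DMA state */
	uint16_t next_to_use;
};

/* Pretend DMA mapping that fails for one chosen piece so the unwind path
 * below has something to do. */
static bool fake_dma_map(unsigned piece, unsigned fail_at)
{
	return piece != fail_at;
}

/* Map the head plus fragments starting at next_to_use.  On a mapping
 * failure, release everything mapped so far and leave next_to_use where it
 * started, mirroring the shape of the driver's dma_error unwind. */
static int tx_map(struct tx_map_model *ring, unsigned nr_pieces,
		  unsigned fail_at)
{
	uint16_t first = ring->next_to_use;
	uint16_t i = first;

	for (unsigned p = 0; p < nr_pieces; p++) {
		if (!fake_dma_map(p, fail_at))
			goto dma_error;
		ring->mapped[i] = true;
		if (++i == RING_SIZE)
			i = 0;                /* wrap the producer index */
	}

	ring->next_to_use = i;                /* publish only on success */
	return 0;

dma_error:
	/* walk backwards from the failing slot, unmapping what we mapped */
	while (i != first) {
		if (i == 0)
			i = RING_SIZE;
		i--;
		ring->mapped[i] = false;
	}
	return -1;
}

int main(void)
{
	struct tx_map_model ring = { .next_to_use = 6 };
	unsigned still_mapped = 0;

	if (tx_map(&ring, 4, 3))              /* fourth mapping fails */
		printf("TX DMA map failed, next_to_use still %u\n",
		       (unsigned)ring.next_to_use);

	for (unsigned s = 0; s < RING_SIZE; s++)
		still_mapped += ring.mapped[s];
	printf("buffers left mapped after unwind: %u\n", still_mapped);
	return 0;
}
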
1053 struct fm10k_ring *tx_ring)
1072 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
1073 tx_ring->tx_stats.tx_busy++;
1078 first = &tx_ring->tx_buffer[tx_ring->next_to_use];
1086 tso = fm10k_tso(tx_ring, first);
1090 fm10k_tx_csum(tx_ring, first);
1092 fm10k_tx_map(tx_ring, first);
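
The fm10k_xmit_frame_ring hits (lines 1053-1092) fix the ordering of the transmit entry point: reserve ring space first with fm10k_maybe_stop_tx(tx_ring, count + 3), counting tx_busy and returning busy when that fails, then build the offload context with fm10k_tso() (a negative return drops the frame) or fm10k_tx_csum(), and only then call fm10k_tx_map(). A toy model of that ordering with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

enum tx_result { TX_OK, TX_BUSY, TX_DROPPED };

struct xmit_model {
	unsigned free_descs;
	unsigned tx_busy;          /* models tx_stats.tx_busy */
	bool     tso_error;        /* inject a header-parse failure */
};

/* Ordering modeled on the hits above: reserve ring space first, then build
 * the offload context (TSO or checksum), then map and send. */
static enum tx_result xmit_frame(struct xmit_model *m, unsigned needed)
{
	if (m->free_descs < needed) {     /* fm10k_maybe_stop_tx() failing */
		m->tx_busy++;
		return TX_BUSY;           /* ask the stack to retry later */
	}

	if (m->tso_error)                 /* fm10k_tso() < 0: drop the frame */
		return TX_DROPPED;

	/* fm10k_tx_csum() and fm10k_tx_map() would run here */
	m->free_descs -= needed;
	return TX_OK;
}

int main(void)
{
	struct xmit_model m = { .free_descs = 4 };
	enum tx_result r = xmit_frame(&m, 8);

	printf("result=%d tx_busy=%u\n", (int)r, m.tx_busy);
	return 0;
}
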
1130 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
1132 u32 tx_done = fm10k_get_tx_completed(tx_ring);
1133 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1134 u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
1136 clear_check_for_tx_hang(tx_ring);
1148 tx_ring->tx_stats.tx_done_old = tx_done;
1150 clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
1156 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
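
The fm10k_check_tx_hang hits (lines 1130-1156) are a two-strike stall detector: if the completed count has not moved since the previous check while descriptors are still pending, the __FM10K_HANG_CHECK_ARMED bit is set, and test_and_set_bit() reports a hang only when the bit was already armed; any progress records the new count and clears the bit. A standalone model of the same heuristic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hang_model {
	uint32_t tx_done_old;      /* completed count at the previous check */
	bool     armed;            /* models the __FM10K_HANG_CHECK_ARMED bit */
};

/* Declare a hang only when two consecutive checks see zero progress while
 * work is still pending: the first stalled check arms the flag, the second
 * one reports the hang.  Any progress disarms it. */
static bool check_tx_hang(struct hang_model *m, uint32_t tx_done,
			  uint32_t tx_pending)
{
	if (m->tx_done_old == tx_done && tx_pending) {
		bool was_armed = m->armed;

		m->armed = true;           /* test_and_set_bit() */
		return was_armed;
	}

	m->tx_done_old = tx_done;          /* record the progress */
	m->armed = false;                  /* reset the countdown */
	return false;
}

int main(void)
{
	struct hang_model m = { 0 };

	printf("%d ", check_tx_hang(&m, 10, 5));    /* progress seen: 0 */
	printf("%d ", check_tx_hang(&m, 10, 5));    /* stalled, arms: 0 */
	printf("%d\n", check_tx_hang(&m, 10, 5));   /* still stalled: 1 */
	return 0;
}
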
1176 * @tx_ring: tx ring to clean
1180 struct fm10k_ring *tx_ring, int napi_budget)
1187 unsigned int i = tx_ring->next_to_clean;
1192 tx_buffer = &tx_ring->tx_buffer[i];
1193 tx_desc = FM10K_TX_DESC(tx_ring, i);
1194 i -= tx_ring->count;
1221 dma_unmap_single(tx_ring->dev,
1236 i -= tx_ring->count;
1237 tx_buffer = tx_ring->tx_buffer;
1238 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1243 dma_unmap_page(tx_ring->dev,
1256 i -= tx_ring->count;
1257 tx_buffer = tx_ring->tx_buffer;
1258 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1268 i += tx_ring->count;
1269 tx_ring->next_to_clean = i;
1270 u64_stats_update_begin(&tx_ring->syncp);
1271 tx_ring->stats.bytes += total_bytes;
1272 tx_ring->stats.packets += total_packets;
1273 u64_stats_update_end(&tx_ring->syncp);
1277 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
1281 netif_err(interface, drv, tx_ring->netdev,
1287 tx_ring->queue_index,
1288 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
1289 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
1290 tx_ring->next_to_use, i);
1292 netif_stop_subqueue(tx_ring->netdev,
1293 tx_ring->queue_index);
1295 netif_info(interface, probe, tx_ring->netdev,
1298 tx_ring->queue_index);
1307 netdev_tx_completed_queue(txring_txq(tx_ring),
1311 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1312 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1317 if (__netif_subqueue_stopped(tx_ring->netdev,
1318 tx_ring->queue_index) &&
1320 netif_wake_subqueue(tx_ring->netdev,
1321 tx_ring->queue_index);
1322 ++tx_ring->tx_stats.restart_queue;
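
The fm10k_clean_tx_irq hits (lines 1176-1322) retire completed work starting at next_to_clean: unmap each buffer, accumulate byte and packet totals and publish them under the u64_stats_update_begin/end pair, run the hang check (stopping the subqueue and logging TDH/TDT if it trips), report the completed bytes to the stack via netdev_tx_completed_queue(), and wake the subqueue once enough descriptors are free again. A sketch of the wake-side counterpart of the flow control above, with a hypothetical TX_WAKE_THRESHOLD value and a single stopped flag in place of the netif_* subqueue state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TX_WAKE_THRESHOLD 16u      /* hypothetical value for this sketch */

struct clean_model {
	uint16_t count;
	uint16_t next_to_use;
	uint16_t next_to_clean;
	bool     stopped;          /* set by the stop path on the xmit side */
	bool     carrier_ok;
	unsigned restart_queue;    /* models tx_stats.restart_queue */
};

static uint16_t desc_unused(const struct clean_model *q)
{
	int unused = q->next_to_clean - q->next_to_use - 1;

	return (uint16_t)(unused < 0 ? unused + q->count : unused);
}

/* After retiring 'completed' descriptors, advance the consumer index and
 * wake the queue if the xmit path had stopped it and enough room is back.
 * The driver rechecks under __netif_subqueue_stopped() to avoid racing a
 * concurrent stop. */
static void clean_tx(struct clean_model *q, uint16_t completed)
{
	q->next_to_clean = (uint16_t)((q->next_to_clean + completed) % q->count);

	if (completed && q->carrier_ok && q->stopped &&
	    desc_unused(q) >= TX_WAKE_THRESHOLD) {
		q->stopped = false;        /* netif_wake_subqueue() */
		q->restart_queue++;
	}
}

int main(void)
{
	struct clean_model q = { .count = 64, .next_to_use = 29,
				 .next_to_clean = 30, .stopped = true,
				 .carrier_ok = true };

	clean_tx(&q, 20);                  /* retire 20 descriptors */
	printf("unused=%u stopped=%d restarts=%u\n",
	       (unsigned)desc_unused(&q), q.stopped, q.restart_queue);
	return 0;
}
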
1636 interface->tx_ring[txr_idx] = ring;
1698 interface->tx_ring[ring->queue_index] = NULL;
1877 interface->tx_ring[offset + i]->reg_idx = q_idx;
1878 interface->tx_ring[offset + i]->qos_pc = pc;
1902 interface->tx_ring[i]->reg_idx = i;
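
The remaining hits (lines 1636-1902) are ring bookkeeping outside the hot path: queue-vector setup stores each new Tx ring in interface->tx_ring[] and teardown clears the slot, while the ring-cache helpers assign each ring's hardware register index (reg_idx) and, in the QoS case, its traffic class (qos_pc). A sketch of the idea with an illustrative interleaved QoS layout, not the driver's exact formula:

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 8u

struct ring_model {
	uint16_t queue_index;      /* software queue number */
	uint16_t reg_idx;          /* hardware descriptor queue backing it */
	uint8_t  qos_pc;           /* traffic class (QoS mode only) */
};

/* RSS mode: software queue i drives hardware queue i. */
static void cache_ring_rss(struct ring_model *rings, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		rings[i].reg_idx = rings[i].queue_index;
}

/* QoS mode (illustrative interleaved layout, not the driver's exact
 * formula): traffic class pc owns hardware queues pc, pc + num_pcs, ...,
 * and each ring remembers which class it belongs to. */
static void cache_ring_qos(struct ring_model *rings, unsigned n,
			   unsigned num_pcs)
{
	unsigned per_pc = n / num_pcs;

	for (unsigned pc = 0; pc < num_pcs; pc++)
		for (unsigned i = 0; i < per_pc; i++) {
			struct ring_model *r = &rings[pc * per_pc + i];

			r->reg_idx = (uint16_t)(pc + i * num_pcs);
			r->qos_pc = (uint8_t)pc;
		}
}

int main(void)
{
	struct ring_model rings[NUM_QUEUES] = { { 0 } };

	for (unsigned i = 0; i < NUM_QUEUES; i++)
		rings[i].queue_index = (uint16_t)i;

	cache_ring_qos(rings, NUM_QUEUES, 4);
	printf("QoS: queue 5 -> reg_idx %u (pc %u)\n",
	       (unsigned)rings[5].reg_idx, (unsigned)rings[5].qos_pc);

	cache_ring_rss(rings, NUM_QUEUES);
	printf("RSS: queue 5 -> reg_idx %u\n", (unsigned)rings[5].reg_idx);
	return 0;
}
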