Lines Matching defs:tx_ring

702  * @tx_ring: tx ring to clean
706 struct wx_ring *tx_ring, int napi_budget)
710 unsigned int i = tx_ring->next_to_clean;
714 if (!netif_carrier_ok(tx_ring->netdev))
717 tx_buffer = &tx_ring->tx_buffer_info[i];
718 tx_desc = WX_TX_DESC(tx_ring, i);
719 i -= tx_ring->count;
746 dma_unmap_single(tx_ring->dev,
760 i -= tx_ring->count;
761 tx_buffer = tx_ring->tx_buffer_info;
762 tx_desc = WX_TX_DESC(tx_ring, 0);
767 dma_unmap_page(tx_ring->dev,
780 i -= tx_ring->count;
781 tx_buffer = tx_ring->tx_buffer_info;
782 tx_desc = WX_TX_DESC(tx_ring, 0);
792 i += tx_ring->count;
793 tx_ring->next_to_clean = i;
794 u64_stats_update_begin(&tx_ring->syncp);
795 tx_ring->stats.bytes += total_bytes;
796 tx_ring->stats.packets += total_packets;
797 u64_stats_update_end(&tx_ring->syncp);
801 netdev_tx_completed_queue(wx_txring_txq(tx_ring),
805 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
806 (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
812 if (__netif_subqueue_stopped(tx_ring->netdev,
813 tx_ring->queue_index) &&
814 netif_running(tx_ring->netdev)) {
815 netif_wake_subqueue(tx_ring->netdev,
816 tx_ring->queue_index);
817 ++tx_ring->tx_stats.restart_queue;
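
The matches above (lines 702-817) all fall inside wx_clean_tx_irq(), the Tx completion routine run from NAPI poll. The listed lines show the negative-index walk over the ring (i -= tx_ring->count, with tx_buffer/tx_desc reset to the ring base on wrap), the u64_stats-protected byte/packet counters, BQL completion accounting, and the carrier plus TX_WAKE_THRESHOLD check before waking the subqueue. Below is a hedged sketch of that completion tail, reconstructed from the listed lines only; the memory barrier and the helper name are assumptions based on the usual stop/wake pattern.

/* Sketch of the wx_clean_tx_irq() tail implied by the matches above.
 * Field and helper names (syncp, stats, tx_stats.restart_queue,
 * wx_txring_txq, wx_desc_unused, TX_WAKE_THRESHOLD) come from the
 * listed lines; the barrier is an assumption.
 */
static void wx_clean_tx_tail_sketch(struct wx_ring *tx_ring,
                                    unsigned int total_bytes,
                                    unsigned int total_packets)
{
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);

        /* BQL: report the work completed on this queue */
        netdev_tx_completed_queue(wx_txring_txq(tx_ring),
                                  total_packets, total_bytes);

        /* Wake the subqueue only once enough descriptors are free again */
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
                /* Make the new next_to_clean visible before re-checking
                 * the stopped state (assumed; pairs with the stop path).
                 */
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    netif_running(tx_ring->netdev)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }
}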
877 static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
879 if (likely(wx_desc_unused(tx_ring) >= size))
882 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
890 if (likely(wx_desc_unused(tx_ring) < size))
894 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
895 ++tx_ring->tx_stats.restart_queue;
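
Lines 877-895 belong to wx_maybe_stop_tx(), the stop-then-recheck helper used by the transmit path: stop the subqueue when descriptors run low, then re-test the free count in case the completion path freed room in the meantime. A minimal sketch under those assumptions; the memory barrier and the -EBUSY return are not in the listed lines and follow the common pattern.

static int wx_maybe_stop_tx_sketch(struct wx_ring *tx_ring, u16 size)
{
        if (likely(wx_desc_unused(tx_ring) >= size))
                return 0;

        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

        /* Order the stop against the re-read below (assumed; pairs with
         * the barrier on the completion side).
         */
        smp_mb();

        /* Another CPU may have just freed descriptors */
        if (likely(wx_desc_unused(tx_ring) < size))
                return -EBUSY;

        /* A reprieve: restart without rescheduling the queue */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;

        return 0;
}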
937 static void wx_tx_map(struct wx_ring *tx_ring,
944 u16 i = tx_ring->next_to_use;
952 tx_desc = WX_TX_DESC(tx_ring, i);
957 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
962 if (dma_mapping_error(tx_ring->dev, dma))
977 if (i == tx_ring->count) {
978 tx_desc = WX_TX_DESC(tx_ring, 0);
996 if (i == tx_ring->count) {
997 tx_desc = WX_TX_DESC(tx_ring, 0);
1006 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1009 tx_buffer = &tx_ring->tx_buffer_info[i];
1016 netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
1033 if (i == tx_ring->count)
1036 tx_ring->next_to_use = i;
1038 wx_maybe_stop_tx(tx_ring, DESC_NEEDED);
1040 if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
1041 writel(i, tx_ring->tail);
1045 dev_err(tx_ring->dev, "TX DMA map failed\n");
1049 tx_buffer = &tx_ring->tx_buffer_info[i];
1051 dma_unmap_page(tx_ring->dev,
1059 i += tx_ring->count;
1066 tx_ring->next_to_use = i;
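
Lines 937-1066 are wx_tx_map(): dma_map_single() for the skb head, skb_frag_dma_map() for each fragment, BQL accounting via netdev_tx_sent_queue(), and an error unwind that unmaps in reverse. The tail-register write at line 1041 is skipped when netdev_xmit_more() says more frames are coming for this queue. A small sketch of that doorbell decision, using only names visible above:

/* Doorbell logic implied by lines 1036-1041: publish next_to_use,
 * reserve headroom for the next frame, and only write the hardware
 * tail when the stack has nothing further queued (or the queue was
 * just stopped).
 */
static void wx_tx_doorbell_sketch(struct wx_ring *tx_ring, u16 i)
{
        tx_ring->next_to_use = i;

        /* Stop the subqueue early if fewer than DESC_NEEDED slots remain */
        wx_maybe_stop_tx(tx_ring, DESC_NEEDED);

        if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
                writel(i, tx_ring->tail);
}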
1069 static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
1073 u16 i = tx_ring->next_to_use;
1075 context_desc = WX_TX_CTXTDESC(tx_ring, i);
1077 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
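
Lines 1069-1077 are wx_tx_ctxtdesc(), which consumes one slot for an offload context descriptor. The wrap test on next_to_use implies the index is incremented between the fetch and the assignment; that increment is an assumption in the sketch below.

static void wx_tx_ctxtdesc_index_sketch(struct wx_ring *tx_ring)
{
        u16 i = tx_ring->next_to_use;

        /* WX_TX_CTXTDESC(tx_ring, i) is fetched and its fields
         * (vlan_macip_lens, tunhdr_eiplen_tunlen, ...) filled here.
         */

        i++;
        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
}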
1209 static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
1213 struct net_device *netdev = tx_ring->netdev;
1324 wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
1330 static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
1334 struct net_device *netdev = tx_ring->netdev;
1449 wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
1454 struct wx_ring *tx_ring)
1473 if (wx_maybe_stop_tx(tx_ring, count + 3)) {
1474 tx_ring->tx_stats.tx_busy++;
1479 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1496 tso = wx_tso(tx_ring, first, &hdr_len, ptype);
1500 wx_tx_csum(tx_ring, first, ptype);
1501 wx_tx_map(tx_ring, first, hdr_len);
1516 struct wx_ring *tx_ring;
1531 tx_ring = wx->tx_ring[r_idx];
1533 return wx_xmit_frame_ring(skb, tx_ring);
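
Lines 1454-1501 cover wx_xmit_frame_ring(): reserve count + 3 descriptors (bumping tx_busy on failure), set up the first wx_tx_buffer, run wx_tso()/wx_tx_csum(), then hand off to wx_tx_map(). Lines 1516-1533 are the ndo_start_xmit wrapper that picks the per-queue ring. A sketch of that wrapper; the queue-mapping lookup is an assumption, only the ring lookup and the call into wx_xmit_frame_ring() come from the listed lines.

static netdev_tx_t wx_xmit_frame_sketch(struct sk_buff *skb,
                                        struct net_device *netdev)
{
        struct wx *wx = netdev_priv(netdev);
        unsigned int r_idx = skb_get_queue_mapping(skb);  /* assumed */
        struct wx_ring *tx_ring;

        tx_ring = wx->tx_ring[r_idx];

        return wx_xmit_frame_ring(skb, tx_ring);
}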
1708 wx->tx_ring[i]->reg_idx = i;
1792 wx->tx_ring[txr_idx] = ring;
1846 wx->tx_ring[ring->queue_index] = NULL;
2278 * @tx_ring: ring to be cleaned
2280 static void wx_clean_tx_ring(struct wx_ring *tx_ring)
2283 u16 i = tx_ring->next_to_clean;
2285 tx_buffer = &tx_ring->tx_buffer_info[i];
2287 while (i != tx_ring->next_to_use) {
2294 dma_unmap_single(tx_ring->dev,
2301 tx_desc = WX_TX_DESC(tx_ring, i);
2308 if (unlikely(i == tx_ring->count)) {
2310 tx_buffer = tx_ring->tx_buffer_info;
2311 tx_desc = WX_TX_DESC(tx_ring, 0);
2316 dma_unmap_page(tx_ring->dev,
2325 if (unlikely(i == tx_ring->count)) {
2327 tx_buffer = tx_ring->tx_buffer_info;
2331 netdev_tx_reset_queue(wx_txring_txq(tx_ring));
2334 tx_ring->next_to_use = 0;
2335 tx_ring->next_to_clean = 0;
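
Lines 2278-2335 are wx_clean_tx_ring(), which walks from next_to_clean to next_to_use unmapping any buffers the hardware never completed, then clears BQL state and the ring indices. The reset tail, using only names from the listed lines:

static void wx_reset_tx_ring_state_sketch(struct wx_ring *tx_ring)
{
        /* Drop any BQL accounting for frames that were never completed */
        netdev_tx_reset_queue(wx_txring_txq(tx_ring));

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
}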
2347 wx_clean_tx_ring(wx->tx_ring[i]);
2353 * @tx_ring: Tx descriptor ring for a specific queue
2357 static void wx_free_tx_resources(struct wx_ring *tx_ring)
2359 wx_clean_tx_ring(tx_ring);
2360 kvfree(tx_ring->tx_buffer_info);
2361 tx_ring->tx_buffer_info = NULL;
2364 if (!tx_ring->desc)
2367 dma_free_coherent(tx_ring->dev, tx_ring->size,
2368 tx_ring->desc, tx_ring->dma);
2369 tx_ring->desc = NULL;
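
Lines 2353-2369 are wx_free_tx_resources(), and the ordering matters: drain the ring first, free the software buffer array, and only then release the DMA descriptor area if it was ever allocated. Reconstructed from the listed lines:

static void wx_free_tx_resources_sketch(struct wx_ring *tx_ring)
{
        wx_clean_tx_ring(tx_ring);
        kvfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;

        /* Nothing more to do if the descriptor area was never set up */
        if (!tx_ring->desc)
                return;

        dma_free_coherent(tx_ring->dev, tx_ring->size,
                          tx_ring->desc, tx_ring->dma);
        tx_ring->desc = NULL;
}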
2383 wx_free_tx_resources(wx->tx_ring[i]);
2511 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2515 static int wx_setup_tx_resources(struct wx_ring *tx_ring)
2517 struct device *dev = tx_ring->dev;
2522 size = sizeof(struct wx_tx_buffer) * tx_ring->count;
2524 if (tx_ring->q_vector)
2525 numa_node = tx_ring->q_vector->numa_node;
2527 tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
2528 if (!tx_ring->tx_buffer_info)
2529 tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
2530 if (!tx_ring->tx_buffer_info)
2534 tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
2535 tx_ring->size = ALIGN(tx_ring->size, 4096);
2538 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2539 &tx_ring->dma, GFP_KERNEL);
2540 if (!tx_ring->desc) {
2542 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2543 &tx_ring->dma, GFP_KERNEL);
2546 if (!tx_ring->desc)
2549 tx_ring->next_to_use = 0;
2550 tx_ring->next_to_clean = 0;
2555 kvfree(tx_ring->tx_buffer_info);
2556 tx_ring->tx_buffer_info = NULL;
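
Lines 2511-2556 are wx_setup_tx_resources(): the wx_tx_buffer array is allocated with kvmalloc_node() on the queue vector's NUMA node with a plain kvmalloc() fallback, and the descriptor ring is rounded up to 4 KiB before dma_alloc_coherent(). A condensed sketch; the real code retries the coherent allocation once (lines 2538-2546), so the single attempt and the NUMA_NO_NODE default here are simplifications.

static int wx_setup_tx_resources_sketch(struct wx_ring *tx_ring)
{
        struct device *dev = tx_ring->dev;
        int numa_node = NUMA_NO_NODE;
        int size;

        size = sizeof(struct wx_tx_buffer) * tx_ring->count;

        if (tx_ring->q_vector)
                numa_node = tx_ring->q_vector->numa_node;

        /* Prefer the q_vector's NUMA node, fall back to any node */
        tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
        if (!tx_ring->tx_buffer_info)
                tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
        if (!tx_ring->tx_buffer_info)
                return -ENOMEM;

        /* Descriptor ring, rounded up to a 4K boundary */
        tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc) {
                kvfree(tx_ring->tx_buffer_info);
                tx_ring->tx_buffer_info = NULL;
                return -ENOMEM;
        }

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        return 0;
}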
2576 err = wx_setup_tx_resources(wx->tx_ring[i]);
2588 wx_free_tx_resources(wx->tx_ring[i]);
2653 struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]);
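
Line 2653 reads a ring pointer with READ_ONCE() from the stats path, which pairs with the u64_stats updates at lines 794-797. A sketch of the usual lockless read loop; the function name and the fetch/retry loop are assumptions, only the READ_ONCE() lookup and the syncp/stats fields come from the listing.

static void wx_read_tx_ring_stats_sketch(struct wx *wx, int i,
                                         u64 *packets, u64 *bytes)
{
        struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]);
        unsigned int start;

        if (!ring)
                return;

        /* Retry if a writer raced with us on the stats seqcount */
        do {
                start = u64_stats_fetch_begin(&ring->syncp);
                *packets = ring->stats.packets;
                *bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry(&ring->syncp, start));
}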
2762 memcpy(&temp_ring[i], wx->tx_ring[i],
2778 wx_free_tx_resources(wx->tx_ring[i]);
2780 memcpy(wx->tx_ring[i], &temp_ring[i],
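
Lines 2762-2780 show the descriptor-count change path (an ethtool ring-resize style swap): copy the live ring into a temporary, build the new resources there, free the old ring, then copy the temporary back. The new-count assignment and error handling below are assumptions; the two memcpy() calls and the free come from the listed lines.

static int wx_swap_tx_ring_sketch(struct wx *wx, int i,
                                  struct wx_ring *temp_ring,
                                  u32 new_tx_count)
{
        int err;

        memcpy(&temp_ring[i], wx->tx_ring[i], sizeof(struct wx_ring));
        temp_ring[i].count = new_tx_count;        /* assumed */

        err = wx_setup_tx_resources(&temp_ring[i]);
        if (err)
                return err;

        wx_free_tx_resources(wx->tx_ring[i]);
        memcpy(wx->tx_ring[i], &temp_ring[i], sizeof(struct wx_ring));

        return 0;
}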