Lines matching defs:tx_ring in drivers/net/ethernet/intel/i40e/i40e_txrx.c (each entry gives the source line number, then the matching line)

17  * @tx_ring: Tx ring to send buffer on
22 static void i40e_fdir(struct i40e_ring *tx_ring,
26 struct i40e_pf *pf = tx_ring->vsi->back;
31 i = tx_ring->next_to_use;
32 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
87 struct i40e_ring *tx_ring;
99 tx_ring = vsi->tx_rings[0];
100 dev = tx_ring->dev;
103 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
115 i = tx_ring->next_to_use;
116 first = &tx_ring->tx_bi[i];
117 i40e_fdir(tx_ring, fdir_data, add);
120 i = tx_ring->next_to_use;
121 tx_desc = I40E_TX_DESC(tx_ring, i);
122 tx_buf = &tx_ring->tx_bi[i];
124 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
149 writel(tx_ring->next_to_use, tx_ring->tail);
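
The i40e_fdir()/i40e_add_del_fdir() matches above show the descriptor-reservation pattern used throughout this file: read next_to_use, fill the descriptor at that slot, advance the index with a wrap at count (the intervening i++ does not itself mention tx_ring, so the match filter omits it), and finally kick the hardware by writing next_to_use to the ring's tail register. Below is a minimal user-space sketch of that reserve-and-advance step, with invented names and no hardware access:

#include <stdio.h>

struct toy_ring {
        unsigned int count;        /* number of descriptor slots   */
        unsigned int next_to_use;  /* next slot software will fill */
};

/* Reserve one slot and advance next_to_use with a wrap at count, mirroring
 * "tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;" after the i++. */
static unsigned int toy_reserve_slot(struct toy_ring *r)
{
        unsigned int i = r->next_to_use;

        r->next_to_use = (i + 1 < r->count) ? i + 1 : 0;
        return i;   /* caller fills descriptor i, then writes the tail register */
}

int main(void)
{
        struct toy_ring r = { .count = 4, .next_to_use = 2 };

        for (int n = 0; n < 6; n++) {
                unsigned int slot = toy_reserve_slot(&r);

                printf("filled slot %u, next_to_use now %u\n", slot, r.next_to_use);
        }
        return 0;
}
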
780 * @tx_ring: ring to be cleaned
782 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
787 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
788 i40e_xsk_clean_tx_ring(tx_ring);
791 if (!tx_ring->tx_bi)
795 for (i = 0; i < tx_ring->count; i++)
796 i40e_unmap_and_free_tx_resource(tx_ring,
797 &tx_ring->tx_bi[i]);
800 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
801 memset(tx_ring->tx_bi, 0, bi_size);
804 memset(tx_ring->desc, 0, tx_ring->size);
806 tx_ring->next_to_use = 0;
807 tx_ring->next_to_clean = 0;
809 if (!tx_ring->netdev)
813 netdev_tx_reset_queue(txring_txq(tx_ring));
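
i40e_clean_tx_ring() releases every buffer still attached to the ring (XDP rings backed by an xsk_pool are handed to i40e_xsk_clean_tx_ring() instead), zeroes the tx_bi bookkeeping array and the descriptor memory, and resets both ring indices. A rough stand-alone model of that reset sequence, with hypothetical names and plain free()/memset() standing in for the kernel helpers:

#include <stdlib.h>
#include <string.h>

struct toy_buf { void *data; };

struct toy_tx_ring {
        struct toy_buf *bufs;       /* per-descriptor software state (tx_bi) */
        void *desc;                 /* descriptor memory                     */
        size_t desc_size;
        unsigned int count;
        unsigned int next_to_use, next_to_clean;
};

static void toy_clean_tx_ring(struct toy_tx_ring *r)
{
        if (!r->bufs)
                return;

        /* release anything still attached to a slot */
        for (unsigned int i = 0; i < r->count; i++)
                free(r->bufs[i].data);

        /* wipe both the software bookkeeping and the descriptor area ... */
        memset(r->bufs, 0, r->count * sizeof(*r->bufs));
        memset(r->desc, 0, r->desc_size);

        /* ... and start filling/cleaning from slot 0 again */
        r->next_to_use = 0;
        r->next_to_clean = 0;
}
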
818 * @tx_ring: Tx descriptor ring for a specific queue
822 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
824 i40e_clean_tx_ring(tx_ring);
825 kfree(tx_ring->tx_bi);
826 tx_ring->tx_bi = NULL;
828 if (tx_ring->desc) {
829 dma_free_coherent(tx_ring->dev, tx_ring->size,
830 tx_ring->desc, tx_ring->dma);
831 tx_ring->desc = NULL;
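
i40e_free_tx_resources() then tears the ring down in a fixed order: drain it, kfree() the tx_bi array, and only then return the coherent descriptor block with dma_free_coherent(), the counterpart of the allocation done in i40e_setup_tx_descriptors(). A small sketch of that ordering, with plain free() standing in for both kernel calls:

#include <stdlib.h>

struct toy_tx_ring {
        void  *buf_info;   /* per-descriptor software state (kzalloc'd tx_bi) */
        void  *desc;       /* descriptor block (dma_alloc_coherent'd)         */
        size_t size;       /* descriptor block size, needed by the free call  */
};

/* Teardown order: drop the software array first, the DMA descriptor memory last. */
static void toy_free_tx_resources(struct toy_tx_ring *r)
{
        /* the driver drains outstanding buffers first (i40e_clean_tx_ring) */
        free(r->buf_info);               /* kfree(tx_ring->tx_bi)  */
        r->buf_info = NULL;

        if (r->desc) {
                free(r->desc);           /* dma_free_coherent(...) */
                r->desc = NULL;
        }
}
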
873 struct i40e_ring *tx_ring = NULL;
892 tx_ring = vsi->tx_rings[i];
893 if (tx_ring && tx_ring->desc) {
901 packets = tx_ring->stats.packets & INT_MAX;
902 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
903 i40e_force_wb(vsi, tx_ring->q_vector);
911 tx_ring->tx_stats.prev_pkt_ctr =
912 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
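
The i40e_detect_recover_hung() matches show the stall heuristic: tx_stats.prev_pkt_ctr remembers the completion count seen on the previous pass, and if a later pass sees the same count while descriptors are still pending, the driver forces a descriptor write-back via i40e_force_wb(). A compact model of that no-progress test, with made-up names:

#include <stdbool.h>

struct toy_tx_stats {
        unsigned long packets;   /* running count of completed packets    */
        long prev_pkt_ctr;       /* snapshot from the previous check,
                                  * -1 meaning "nothing was pending then" */
};

/* Returns true when the ring made no progress since the last check and a
 * forced write-back is warranted. */
static bool toy_tx_looks_hung(struct toy_tx_stats *s, bool work_pending)
{
        long packets = (long)(s->packets & 0x7fffffffUL);   /* & INT_MAX */

        if (s->prev_pkt_ctr == packets)
                return true;             /* stuck: same count as last time */

        /* re-arm: only track the counter while work is actually queued */
        s->prev_pkt_ctr = work_pending ? packets : -1;
        return false;
}
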
920 * @tx_ring: Tx ring to clean
927 struct i40e_ring *tx_ring, int napi_budget,
930 int i = tx_ring->next_to_clean;
937 tx_buf = &tx_ring->tx_bi[i];
938 tx_desc = I40E_TX_DESC(tx_ring, i);
939 i -= tx_ring->count;
941 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
953 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
966 if (ring_is_xdp(tx_ring))
972 dma_unmap_single(tx_ring->dev,
984 tx_ring, tx_desc, tx_buf);
990 i -= tx_ring->count;
991 tx_buf = tx_ring->tx_bi;
992 tx_desc = I40E_TX_DESC(tx_ring, 0);
997 dma_unmap_page(tx_ring->dev,
1010 i -= tx_ring->count;
1011 tx_buf = tx_ring->tx_bi;
1012 tx_desc = I40E_TX_DESC(tx_ring, 0);
1021 i += tx_ring->count;
1022 tx_ring->next_to_clean = i;
1023 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
1024 i40e_arm_wb(tx_ring, vsi, budget);
1026 if (ring_is_xdp(tx_ring))
1030 netdev_tx_completed_queue(txring_txq(tx_ring),
1034 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1035 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
1040 if (__netif_subqueue_stopped(tx_ring->netdev,
1041 tx_ring->queue_index) &&
1043 netif_wake_subqueue(tx_ring->netdev,
1044 tx_ring->queue_index);
1045 ++tx_ring->tx_stats.restart_queue;
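
i40e_clean_tx_irq() walks completed descriptors with a negative-biased index: i starts at next_to_clean minus count, so the wrap test inside the hot loop is simply "did i reach zero?", and count is added back once at the end before storing next_to_clean. Along the way it unmaps the head with dma_unmap_single(), the fragments with dma_unmap_page(), reports completions to the stack, and wakes the subqueue once enough descriptors are free again. A stand-alone illustration of just the index trick (hypothetical names, no DMA):

#include <stdio.h>

/* Walk 'todo' completed slots starting at *next_to_clean, keeping i in
 * [-count, 0) so the wrap check is a simple test against zero rather than a
 * compare with count on every step; count is added back once at the end. */
static void toy_clean(unsigned int *next_to_clean, int count, int todo)
{
        int i = (int)*next_to_clean - count;    /* bias into [-count, 0) */

        while (todo--) {
                /* ... unmap and free the buffer behind slot (i + count) ... */
                printf("cleaned slot %d\n", i + count);

                i++;
                if (!i)                 /* wrapped past the last descriptor */
                        i -= count;     /* back to slot 0, still biased     */
        }

        *next_to_clean = (unsigned int)(i + count);     /* unbias */
}

int main(void)
{
        unsigned int ntc = 6;

        toy_clean(&ntc, 8, 5);                  /* cleans slots 6,7,0,1,2 */
        printf("next_to_clean = %u\n", ntc);    /* prints 3 */
        return 0;
}
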
1411 * @tx_ring: the tx ring to set up
1415 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1417 struct device *dev = tx_ring->dev;
1424 WARN_ON(tx_ring->tx_bi);
1425 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1426 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1427 if (!tx_ring->tx_bi)
1430 u64_stats_init(&tx_ring->syncp);
1433 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1437 tx_ring->size += sizeof(u32);
1438 tx_ring->size = ALIGN(tx_ring->size, 4096);
1439 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1440 &tx_ring->dma, GFP_KERNEL);
1441 if (!tx_ring->desc) {
1443 tx_ring->size);
1447 tx_ring->next_to_use = 0;
1448 tx_ring->next_to_clean = 0;
1449 tx_ring->tx_stats.prev_pkt_ctr = -1;
1453 kfree(tx_ring->tx_bi);
1454 tx_ring->tx_bi = NULL;
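
i40e_setup_tx_descriptors() is the mirror image of the teardown above: allocate the tx_bi array with kzalloc(), size the descriptor area as count descriptors plus a u32 head write-back word, round it up to 4 KiB, dma_alloc_coherent() it, and reset the indices; if the coherent allocation fails, the already-allocated tx_bi array is freed on the error path. A sketch of that allocate-with-unwind shape using plain calloc()/malloc() and invented names:

#include <stdlib.h>
#include <stdint.h>

struct toy_desc { uint64_t qw[2]; };    /* 16-byte descriptor stand-in */
struct toy_buf  { void *data; };        /* per-descriptor bookkeeping  */

struct toy_tx_ring {
        struct toy_buf  *buf_info;
        struct toy_desc *desc;
        size_t size;
        unsigned int count;
        unsigned int next_to_use, next_to_clean;
};

#define TOY_ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

static int toy_setup_tx_descriptors(struct toy_tx_ring *r)
{
        /* kzalloc() of the tx_bi array in the driver */
        r->buf_info = calloc(r->count, sizeof(*r->buf_info));
        if (!r->buf_info)
                return -1;

        /* descriptors plus one u32 head write-back word, rounded up to 4 KiB */
        r->size = r->count * sizeof(struct toy_desc) + sizeof(uint32_t);
        r->size = TOY_ALIGN(r->size, 4096);

        r->desc = malloc(r->size);      /* dma_alloc_coherent() in the driver */
        if (!r->desc)
                goto err;

        r->next_to_use = 0;
        r->next_to_clean = 0;
        return 0;

err:
        free(r->buf_info);              /* unwind the first allocation */
        r->buf_info = NULL;
        return -1;
}
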
2856 * @tx_ring: ring to add programming descriptor to
2860 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2864 struct i40e_pf *pf = tx_ring->vsi->back;
2884 if (!tx_ring->atr_sample_rate)
2930 tx_ring->atr_count++;
2936 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2939 tx_ring->atr_count = 0;
2942 i = tx_ring->next_to_use;
2943 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2946 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2949 tx_ring->queue_index);
2956 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
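
The i40e_atr() matches show the sampling gate in front of application-targeted routing: every eligible packet bumps atr_count, but a Flow Director programming descriptor is pushed onto the Tx ring only for SYN/FIN/RST packets or once atr_count reaches atr_sample_rate. A tiny model of that gate, names hypothetical:

#include <stdbool.h>

struct toy_atr {
        unsigned int count;        /* packets seen since the last sample */
        unsigned int sample_rate;  /* 0 disables sampling                */
};

/* Returns true when this packet should emit a filter-programming descriptor
 * (SYN/FIN/RST always do; data packets only once every sample_rate packets). */
static bool toy_atr_should_sample(struct toy_atr *a, bool syn_fin_rst)
{
        if (!a->sample_rate)
                return false;

        a->count++;

        if (!syn_fin_rst && a->count < a->sample_rate)
                return false;

        a->count = 0;
        return true;
}
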
2994 * @tx_ring: ring to send buffer on
3004 struct i40e_ring *tx_ring,
3011 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
3040 if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
3193 * @tx_ring: ptr to the ring to send
3200 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3215 pf = i40e_netdev_to_pf(tx_ring->netdev);
3241 * @tx_ring: Tx descriptor ring
3246 struct i40e_ring *tx_ring,
3422 * @tx_ring: ring to create the descriptor on
3427 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3432 int i = tx_ring->next_to_use;
3439 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3442 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3453 * @tx_ring: the ring to be checked
3458 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3460 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3464 ++tx_ring->tx_stats.tx_stopped;
3467 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3471 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3472 ++tx_ring->tx_stats.restart_queue;
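
__i40e_maybe_stop_tx() is the usual stop/recheck/restart dance: stop the subqueue, re-check the free-descriptor count (a concurrent cleanup may have just made room, which is why the driver separates the stop and the re-check with a memory barrier), and either restart the queue immediately or report -EBUSY. A condensed single-threaded model of that decision, with placeholder fields:

#include <stdbool.h>

struct toy_txq {
        unsigned int unused_descs;  /* what I40E_DESC_UNUSED() would report */
        bool stopped;
};

/* Returns 0 when the caller may transmit, -1 (think -EBUSY) when it must
 * back off because the ring is still full. */
static int toy_maybe_stop_tx(struct toy_txq *q, unsigned int needed)
{
        if (q->unused_descs >= needed)
                return 0;                /* fast path: enough room already  */

        q->stopped = true;               /* netif_stop_subqueue()           */

        /* the driver puts a memory barrier here and re-reads the free count;
         * in this single-threaded toy the value cannot change, so the second
         * check only illustrates the shape of the race handling */
        if (q->unused_descs < needed)
                return -1;               /* still full: queue stays stopped */

        q->stopped = false;              /* a reprieve: restart immediately */
        return 0;
}
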
3562 * @tx_ring: ring to send buffer on
3572 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3581 u16 i = tx_ring->next_to_use;
3593 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3595 tx_desc = I40E_TX_DESC(tx_ring, i);
3601 if (dma_mapping_error(tx_ring->dev, dma))
3621 if (i == tx_ring->count) {
3622 tx_desc = I40E_TX_DESC(tx_ring, 0);
3643 if (i == tx_ring->count) {
3644 tx_desc = I40E_TX_DESC(tx_ring, 0);
3651 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3654 tx_bi = &tx_ring->tx_bi[i];
3657 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3660 if (i == tx_ring->count)
3663 tx_ring->next_to_use = i;
3665 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3673 desc_count |= ++tx_ring->packet_stride;
3678 tx_ring->packet_stride = 0;
3698 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
3699 writel(i, tx_ring->tail);
3705 dev_info(tx_ring->dev, "TX DMA map failed\n");
3709 tx_bi = &tx_ring->tx_bi[i];
3710 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3714 i = tx_ring->count;
3718 tx_ring->next_to_use = i;
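
i40e_tx_map() streams the skb into descriptors: dma_map_single() for the head, skb_frag_dma_map() for each fragment, wrapping the index back to 0 at count, then publishing next_to_use and writing the tail register unless netdev_xmit_more() says more frames are coming; if any mapping fails, everything mapped so far is unwound in reverse. A reduced sketch of that map-with-unwind loop over a flat buffer list, with invented names and a fake map that can be told to fail:

#include <stdbool.h>

#define TOY_RING_SIZE 8

struct toy_slot { bool mapped; };

/* stand-ins for the DMA map/unmap calls; the map fails on request so the
 * unwind path can be exercised */
static bool toy_map(struct toy_slot *s, bool fail) { s->mapped = !fail; return !fail; }
static void toy_unmap(struct toy_slot *s)          { s->mapped = false; }

/* Map nbufs buffers starting at *next_to_use.  On success, publish the new
 * next_to_use (the driver then writes it to the tail register).  On failure,
 * walk back from the failing slot to the first slot of this packet and unmap,
 * leaving next_to_use untouched, mirroring the driver's "dma_error:" unwind. */
static int toy_tx_map(struct toy_slot ring[TOY_RING_SIZE],
                      unsigned int *next_to_use,
                      unsigned int nbufs, unsigned int fail_at)
{
        unsigned int first = *next_to_use, i = first;

        for (unsigned int n = 0; n < nbufs; n++) {
                if (!toy_map(&ring[i], n == fail_at))
                        goto unwind;
                i = (i + 1 == TOY_RING_SIZE) ? 0 : i + 1;   /* wrap at ring end */
        }

        *next_to_use = i;
        return 0;

unwind:
        while (i != first) {
                i = (i == 0) ? TOY_RING_SIZE - 1 : i - 1;
                toy_unmap(&ring[i]);
        }
        return -1;
}
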
3866 * @tx_ring: ring to send buffer on
3871 struct i40e_ring *tx_ring)
3886 i40e_trace(xmit_frame_ring, skb, tx_ring);
3895 tx_ring->tx_stats.tx_linearize++;
3904 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3905 tx_ring->tx_stats.tx_busy++;
3910 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3916 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3928 tx_ring, &cd_tunneling);
3932 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3940 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3947 i40e_atr(tx_ring, skb, tx_flags);
3949 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3956 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3961 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3982 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3990 return i40e_xmit_frame_ring(skb, tx_ring);
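
i40e_lan_xmit_frame() picks the ring from skb->queue_mapping, and i40e_xmit_frame_ring() then runs a fixed pipeline: estimate the descriptor count (linearizing overly fragmented skbs), back off with tx_busy unless count + 4 + 1 descriptors are free, prepare VLAN/TSO/checksum/timestamp state, emit a context descriptor when needed, let ATR sample the flow, and hand off to i40e_tx_map(). A schematic of that ordering with every step stubbed out and all names invented:

#include <stdbool.h>

struct toy_skb;                              /* opaque packet stand-in       */
struct toy_ring { unsigned int unused; };    /* free descriptors on the ring */

#define TOY_TX_OK    0
#define TOY_TX_BUSY  1

/* step stubs: 0 on success, negative to drop the frame */
static int  toy_count_descs(struct toy_skb *skb)  { (void)skb; return 3; }
static int  toy_prepare_vlan(struct toy_skb *skb) { (void)skb; return 0; }
static int  toy_tso(struct toy_skb *skb)          { (void)skb; return 0; }
static int  toy_csum(struct toy_skb *skb)         { (void)skb; return 0; }
static void toy_context_desc(struct toy_ring *r)  { (void)r; }
static void toy_atr_sample(struct toy_ring *r)    { (void)r; }
static void toy_map_frame(struct toy_ring *r, struct toy_skb *skb)
{ (void)r; (void)skb; }

/* Same ordering as the matches above: room check, offload preparation,
 * optional context descriptor, ATR sample, then the DMA map that actually
 * queues the frame.  Failures after the room check drop the frame (TX_OK)
 * rather than asking the stack to retry. */
static int toy_xmit_frame_ring(struct toy_skb *skb, struct toy_ring *ring)
{
        int count = toy_count_descs(skb);

        /* headroom for context/timestamp descriptors plus a small gap;
         * the driver checks count + 4 + 1 here */
        if (ring->unused < (unsigned int)count + 4 + 1)
                return TOY_TX_BUSY;              /* stack will requeue the skb */

        if (toy_prepare_vlan(skb) < 0 || toy_tso(skb) < 0 || toy_csum(skb) < 0)
                return TOY_TX_OK;                /* frame dropped and consumed */

        toy_context_desc(ring);
        toy_atr_sample(ring);

        toy_map_frame(ring, skb);   /* even a DMA-map failure ends in a drop */
        return TOY_TX_OK;
}
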