Lines matching defs:tx_ring in drivers/net/ethernet/intel/igb/igb_main.c

328 	struct igb_ring *tx_ring;
363 tx_ring = adapter->tx_ring[n];
364 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
366 n, tx_ring->next_to_use, tx_ring->next_to_clean,
391 tx_ring = adapter->tx_ring[n];
393 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
397 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
400 tx_desc = IGB_TX_DESC(tx_ring, i);
401 buffer_info = &tx_ring->tx_buffer_info[i];
403 if (i == tx_ring->next_to_use &&
404 i == tx_ring->next_to_clean)
406 else if (i == tx_ring->next_to_use)
408 else if (i == tx_ring->next_to_clean)
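The dump code above walks each Tx queue and tags the slots at next_to_use (NTU) and next_to_clean (NTC). As a quick orientation, here is a minimal user-space model of that bookkeeping (not driver code): struct ring and desc_unused() are illustrative stand-ins for igb_ring and igb_desc_unused(), which keep one slot unused so that NTU == NTC unambiguously means "ring empty".

/* Stand-alone model of the NTU/NTC convention: NTU is where software
 * writes the next descriptor, NTC is the oldest slot still owned by
 * hardware; one slot is always kept free.
 */
#include <stdio.h>
#include <stdint.h>

struct ring {
	uint16_t count;          /* number of descriptors in the ring */
	uint16_t next_to_use;    /* NTU: producer index */
	uint16_t next_to_clean;  /* NTC: consumer index */
};

static uint16_t desc_unused(const struct ring *r)
{
	int16_t unused = r->next_to_clean - r->next_to_use - 1;

	return unused < 0 ? unused + r->count : unused;
}

int main(void)
{
	struct ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 250 };

	/* slots 250..9 (wrapping) are in flight; 239 remain free */
	printf("unused descriptors: %u\n", (unsigned)desc_unused(&r));
	return 0;
}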
697 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
1001 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1247 adapter->tx_ring[txr_idx] = ring;
1631 if (adapter->tx_ring[i]->cbs_enable)
1643 if (adapter->tx_ring[i]->launchtime_enable)
1670 ring = adapter->tx_ring[queue];
1844 ring = adapter->tx_ring[queue];
1859 ring = adapter->tx_ring[queue];
2934 return adapter->tx_ring[r_idx];
2941 struct igb_ring *tx_ring;
2951 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2952 if (unlikely(!tx_ring))
2955 nq = txring_txq(tx_ring);
2959 ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2970 struct igb_ring *tx_ring;
2984 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2985 if (unlikely(!tx_ring))
2988 nq = txring_txq(tx_ring);
2998 err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
3007 igb_xdp_ring_update_tail(tx_ring);
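Both XDP transmit paths above follow the same shape: resolve a Tx ring with igb_xdp_tx_queue_mapping(), take the netdev queue lock shared with the regular xmit path, queue the frame with igb_xmit_xdp_ring(), and write the hardware tail via igb_xdp_ring_update_tail(). The sketch below folds those steps into a single hypothetical helper, igb_xdp_xmit_one(); the real driver defers the tail write until the whole batch or NAPI pass is done, and also refreshes the queue's trans timestamp.

/* Illustrative sketch only; igb_xdp_xmit_one() is not a real driver
 * function, and the tail write is shown inline for clarity.
 */
static int igb_xdp_xmit_one(struct igb_adapter *adapter, struct xdp_frame *xdpf)
{
	struct igb_ring *tx_ring;
	struct netdev_queue *nq;
	int ret;

	/* During XDP program transitions the Tx ring may not exist */
	tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
	if (unlikely(!tx_ring))
		return -ENXIO;

	nq = txring_txq(tx_ring);
	__netif_tx_lock(nq, smp_processor_id());

	ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
	if (ret == IGB_XDP_TX)
		igb_xdp_ring_update_tail(tx_ring);  /* doorbell write */

	__netif_tx_unlock(nq);
	return ret;
}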
4257 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4261 int igb_setup_tx_resources(struct igb_ring *tx_ring)
4263 struct device *dev = tx_ring->dev;
4266 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4268 tx_ring->tx_buffer_info = vmalloc(size);
4269 if (!tx_ring->tx_buffer_info)
4273 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4274 tx_ring->size = ALIGN(tx_ring->size, 4096);
4276 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4277 &tx_ring->dma, GFP_KERNEL);
4278 if (!tx_ring->desc)
4281 tx_ring->next_to_use = 0;
4282 tx_ring->next_to_clean = 0;
4287 vfree(tx_ring->tx_buffer_info);
4288 tx_ring->tx_buffer_info = NULL;
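Reassembled from the fragments above, igb_setup_tx_resources() allocates two things per queue: a vmalloc'ed shadow array of struct igb_tx_buffer for software bookkeeping, and a DMA-coherent descriptor ring rounded up to a 4 KiB boundary, after which both ring indices are reset. Roughly (a condensed sketch; the real function also logs an error on failure):

int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	/* software-side bookkeeping array, one entry per descriptor */
	size = sizeof(struct igb_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* the descriptor ring itself, rounded up to a 4 KiB multiple */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);	/* vfree(NULL) is a no-op */
	tx_ring->tx_buffer_info = NULL;
	return -ENOMEM;
}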
4306 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4311 igb_free_tx_resources(adapter->tx_ring[i]);
4395 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4401 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4843 * @tx_ring: Tx descriptor ring for a specific queue
4847 void igb_free_tx_resources(struct igb_ring *tx_ring)
4849 igb_clean_tx_ring(tx_ring);
4851 vfree(tx_ring->tx_buffer_info);
4852 tx_ring->tx_buffer_info = NULL;
4855 if (!tx_ring->desc)
4858 dma_free_coherent(tx_ring->dev, tx_ring->size,
4859 tx_ring->desc, tx_ring->dma);
4861 tx_ring->desc = NULL;
4875 if (adapter->tx_ring[i])
4876 igb_free_tx_resources(adapter->tx_ring[i]);
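Teardown mirrors the allocation: igb_free_tx_resources() first drains the ring (unmapping and freeing anything still in flight), releases the shadow array, and only then returns the coherent descriptor memory. Reassembled from the fragments above:

void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* nothing else to free if the descriptor ring was never allocated */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}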
4881 * @tx_ring: ring to be cleaned
4883 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4885 u16 i = tx_ring->next_to_clean;
4886 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4888 while (i != tx_ring->next_to_use) {
4898 dma_unmap_single(tx_ring->dev,
4905 tx_desc = IGB_TX_DESC(tx_ring, i);
4912 if (unlikely(i == tx_ring->count)) {
4914 tx_buffer = tx_ring->tx_buffer_info;
4915 tx_desc = IGB_TX_DESC(tx_ring, 0);
4920 dma_unmap_page(tx_ring->dev,
4931 if (unlikely(i == tx_ring->count)) {
4933 tx_buffer = tx_ring->tx_buffer_info;
4938 netdev_tx_reset_queue(txring_txq(tx_ring));
4941 tx_ring->next_to_use = 0;
4942 tx_ring->next_to_clean = 0;
4954 if (adapter->tx_ring[i])
4955 igb_clean_tx_ring(adapter->tx_ring[i]);
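igb_clean_tx_ring() (fragments above) discards whatever is still queued when the ring is reset or torn down: it walks from next_to_clean to next_to_use, frees each packet's skb, unmaps the header buffer, then unmaps any remaining fragment pages up to the end-of-packet descriptor recorded in next_to_watch, wrapping the index at tx_ring->count. A condensed sketch; the hypothetical "_sketch" suffix marks it as illustrative, and the XDP frame case present in newer kernels is omitted:

static void igb_clean_tx_ring_sketch(struct igb_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union e1000_adv_tx_desc *eop_desc, *tx_desc;

		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap the skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* unmap the remaining fragment buffers of this packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGB_TX_DESC(tx_ring, i);
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move past the eop descriptor to the start of the next packet */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}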
5662 struct igb_ring *tx_ring = adapter->tx_ring[i];
5669 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5678 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5920 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5926 u16 i = tx_ring->next_to_use;
5929 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5932 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5938 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5939 mss_l4len_idx |= tx_ring->reg_idx << 4;
5948 if (tx_ring->launchtime_enable) {
5957 static int igb_tso(struct igb_ring *tx_ring,
6046 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
6052 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
6061 !tx_ring->launchtime_enable)
6092 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
6125 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
6132 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6133 olinfo_status |= tx_ring->reg_idx << 4;
6148 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6150 struct net_device *netdev = tx_ring->netdev;
6152 netif_stop_subqueue(netdev, tx_ring->queue_index);
6163 if (igb_desc_unused(tx_ring) < size)
6167 netif_wake_subqueue(netdev, tx_ring->queue_index);
6169 u64_stats_update_begin(&tx_ring->tx_syncp2);
6170 tx_ring->tx_stats.restart_queue2++;
6171 u64_stats_update_end(&tx_ring->tx_syncp2);
6176 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6178 if (igb_desc_unused(tx_ring) >= size)
6180 return __igb_maybe_stop_tx(tx_ring, size);
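The stop/wake pair above implements the usual Tx flow-control dance: igb_maybe_stop_tx() is the cheap fast path, while __igb_maybe_stop_tx() stops the subqueue, issues a memory barrier, and re-checks in case the cleanup path freed descriptors concurrently, waking the queue again (and counting a restart) if it did. Reassembled from the fragments above, with paraphrased comments:

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* order the stop against the re-check of free descriptors */
	smp_mb();

	/* another CPU may have cleaned descriptors in the meantime */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* a reprieve: space appeared, so undo the stop */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	/* fast path: enough room, nothing to do */
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}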
6183 static int igb_tx_map(struct igb_ring *tx_ring,
6195 u16 i = tx_ring->next_to_use;
6197 tx_desc = IGB_TX_DESC(tx_ring, i);
6199 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
6204 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6209 if (dma_mapping_error(tx_ring->dev, dma))
6224 if (i == tx_ring->count) {
6225 tx_desc = IGB_TX_DESC(tx_ring, 0);
6243 if (i == tx_ring->count) {
6244 tx_desc = IGB_TX_DESC(tx_ring, 0);
6252 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6255 tx_buffer = &tx_ring->tx_buffer_info[i];
6262 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6282 if (i == tx_ring->count)
6285 tx_ring->next_to_use = i;
6288 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6290 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6291 writel(i, tx_ring->tail);
6296 dev_err(tx_ring->dev, "TX DMA map failed\n");
6297 tx_buffer = &tx_ring->tx_buffer_info[i];
6302 dma_unmap_page(tx_ring->dev,
6309 i += tx_ring->count;
6310 tx_buffer = &tx_ring->tx_buffer_info[i];
6314 dma_unmap_single(tx_ring->dev,
6323 tx_ring->next_to_use = i;
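The tail end of igb_tx_map() (fragments above) shows the doorbell batching: the new producer index is published in next_to_use, the queue is stopped pre-emptively if a worst-case frame no longer fits, and the tail register is written only when the stack has no more frames queued behind this one. A sketch under a hypothetical helper name, igb_tx_kick_sketch():

static void igb_tx_kick_sketch(struct igb_ring *tx_ring, u16 next_to_use)
{
	/* publish the producer index for the completion path */
	tx_ring->next_to_use = next_to_use;

	/* stop the queue early if a worst-case frame no longer fits */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Only write the tail register (the doorbell) when the stack has no
	 * further frames pending or the queue was stopped; otherwise the
	 * MMIO write is deferred and amortized over the batch.
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(next_to_use, tx_ring->tail);
}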
6329 struct igb_ring *tx_ring,
6334 u16 count, i, index = tx_ring->next_to_use;
6335 struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
6337 union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
6345 if (igb_maybe_stop_tx(tx_ring, count + 3))
6357 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6358 olinfo_status |= tx_ring->reg_idx << 4;
6364 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
6365 if (dma_mapping_error(tx_ring->dev, dma))
6381 if (++index == tx_ring->count)
6387 tx_buffer = &tx_ring->tx_buffer_info[index];
6388 tx_desc = IGB_TX_DESC(tx_ring, index);
6397 netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
6406 tx_ring->next_to_use = index;
6409 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6411 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6412 writel(index, tx_ring->tail);
6418 tx_buffer = &tx_ring->tx_buffer_info[index];
6420 dma_unmap_page(tx_ring->dev,
6429 index += tx_ring->count;
6437 struct igb_ring *tx_ring)
6457 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6463 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6470 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6496 tso = igb_tso(tx_ring, first, &hdr_len);
6500 igb_tx_csum(tx_ring, first);
6502 if (igb_tx_map(tx_ring, first, hdr_len))
6512 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6532 return adapter->tx_ring[r_idx];
6712 struct igb_ring *ring = adapter->tx_ring[i];
7064 struct igb_ring *tx_ring,
7068 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
7081 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
8221 struct igb_ring *tx_ring = q_vector->tx.ring;
8226 unsigned int i = tx_ring->next_to_clean;
8231 tx_buffer = &tx_ring->tx_buffer_info[i];
8232 tx_desc = IGB_TX_DESC(tx_ring, i);
8233 i -= tx_ring->count;
8263 dma_unmap_single(tx_ring->dev,
8277 i -= tx_ring->count;
8278 tx_buffer = tx_ring->tx_buffer_info;
8279 tx_desc = IGB_TX_DESC(tx_ring, 0);
8284 dma_unmap_page(tx_ring->dev,
8297 i -= tx_ring->count;
8298 tx_buffer = tx_ring->tx_buffer_info;
8299 tx_desc = IGB_TX_DESC(tx_ring, 0);
8309 netdev_tx_completed_queue(txring_txq(tx_ring),
8311 i += tx_ring->count;
8312 tx_ring->next_to_clean = i;
8313 u64_stats_update_begin(&tx_ring->tx_syncp);
8314 tx_ring->tx_stats.bytes += total_bytes;
8315 tx_ring->tx_stats.packets += total_packets;
8316 u64_stats_update_end(&tx_ring->tx_syncp);
8320 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
8326 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
8333 dev_err(tx_ring->dev,
8345 tx_ring->queue_index,
8346 rd32(E1000_TDH(tx_ring->reg_idx)),
8347 readl(tx_ring->tail),
8348 tx_ring->next_to_use,
8349 tx_ring->next_to_clean,
8354 netif_stop_subqueue(tx_ring->netdev,
8355 tx_ring->queue_index);
8364 netif_carrier_ok(tx_ring->netdev) &&
8365 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
8370 if (__netif_subqueue_stopped(tx_ring->netdev,
8371 tx_ring->queue_index) &&
8373 netif_wake_subqueue(tx_ring->netdev,
8374 tx_ring->queue_index);
8376 u64_stats_update_begin(&tx_ring->tx_syncp);
8377 tx_ring->tx_stats.restart_queue++;
8378 u64_stats_update_end(&tx_ring->tx_syncp);
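igb_clean_tx_irq() (fragments above) retires completed descriptors starting at next_to_clean, but biases the loop index by -count ("i -= tx_ring->count") so the wrap test becomes a cheap check for zero instead of a compare against count; the bias is removed again before the result is stored back into next_to_clean. A small stand-alone model of just that index arithmetic (nothing below is driver code):

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	unsigned int next_to_clean = 5;
	int cleaned = 6;                          /* descriptors to retire */
	int i = (int)next_to_clean - RING_COUNT;  /* biased index: -3 */

	while (cleaned--) {
		i++;
		if (!i)                 /* reached the end of the ring: wrap */
			i -= RING_COUNT;
	}

	next_to_clean = i + RING_COUNT;           /* undo the bias */
	printf("new next_to_clean = %u\n", next_to_clean); /* (5 + 6) % 8 = 3 */
	return 0;
}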
8988 struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
8990 igb_xdp_ring_update_tail(tx_ring);