Lines Matching defs:tx_ring

193  * @tx_ring: ring to be cleaned
195 static void igc_clean_tx_ring(struct igc_ring *tx_ring)
197 u16 i = tx_ring->next_to_clean;
198 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
201 while (i != tx_ring->next_to_use) {
210 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
214 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
217 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
223 tx_desc = IGC_TX_DESC(tx_ring, i);
230 if (unlikely(i == tx_ring->count)) {
232 tx_buffer = tx_ring->tx_buffer_info;
233 tx_desc = IGC_TX_DESC(tx_ring, 0);
238 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
246 if (unlikely(i == tx_ring->count)) {
248 tx_buffer = tx_ring->tx_buffer_info;
252 if (tx_ring->xsk_pool && xsk_frames)
253 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
256 netdev_tx_reset_queue(txring_txq(tx_ring));
259 memset(tx_ring->tx_buffer_info, 0,
260 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
263 memset(tx_ring->desc, 0, tx_ring->size);
266 tx_ring->next_to_use = 0;
267 tx_ring->next_to_clean = 0;
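
These matches appear to come from the Tx path of the Linux igc Ethernet driver; this first group is igc_clean_tx_ring(), which drains a ring on teardown or reset. A condensed sketch of how the matched lines fit together, assuming the usual igb/igc ring idiom for the context the listing omits (the per-buffer-type switch and the eop_desc walk); this is not the verbatim driver code:

    static void igc_clean_tx_ring_sketch(struct igc_ring *tx_ring)
    {
            u16 i = tx_ring->next_to_clean;
            struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
            u32 xsk_frames = 0;

            /* release every buffer software still owns */
            while (i != tx_ring->next_to_use) {
                    /* free the skb or XDP frame and unmap its DMA here;
                     * XSK buffers are only counted (xsk_frames++)
                     */
                    igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

                    tx_buffer++;
                    i++;
                    if (unlikely(i == tx_ring->count)) {   /* wrap around */
                            i = 0;
                            tx_buffer = tx_ring->tx_buffer_info;
                    }
            }

            /* return counted XSK frames to the AF_XDP pool */
            if (tx_ring->xsk_pool && xsk_frames)
                    xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

            /* reset byte-queue-limits accounting for this queue */
            netdev_tx_reset_queue(txring_txq(tx_ring));

            /* zero the buffer array and the descriptor ring, then
             * rewind both ring indices
             */
            memset(tx_ring->tx_buffer_info, 0,
                   sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
            memset(tx_ring->desc, 0, tx_ring->size);

            tx_ring->next_to_use = 0;
            tx_ring->next_to_clean = 0;
    }
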
272 * @tx_ring: Tx descriptor ring for a specific queue
276 void igc_free_tx_resources(struct igc_ring *tx_ring)
278 igc_disable_tx_ring(tx_ring);
280 vfree(tx_ring->tx_buffer_info);
281 tx_ring->tx_buffer_info = NULL;
284 if (!tx_ring->desc)
287 dma_free_coherent(tx_ring->dev, tx_ring->size,
288 tx_ring->desc, tx_ring->dma);
290 tx_ring->desc = NULL;
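
The teardown order these matches imply, sketched: quiesce the ring, free the vmalloc'd software buffer array, then release the coherent DMA block. dma_free_coherent() must receive the same size and DMA handle that dma_alloc_coherent() returned at setup time.

    void igc_free_tx_resources_sketch(struct igc_ring *tx_ring)
    {
            igc_disable_tx_ring(tx_ring);    /* stop hardware access first */

            vfree(tx_ring->tx_buffer_info);
            tx_ring->tx_buffer_info = NULL;

            /* nothing else to do if the descriptor ring never existed */
            if (!tx_ring->desc)
                    return;

            dma_free_coherent(tx_ring->dev, tx_ring->size,
                              tx_ring->desc, tx_ring->dma);
            tx_ring->desc = NULL;
    }
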
304 igc_free_tx_resources(adapter->tx_ring[i]);
316 if (adapter->tx_ring[i])
317 igc_clean_tx_ring(adapter->tx_ring[i]);
341 struct igc_ring *tx_ring = adapter->tx_ring[i];
343 igc_disable_tx_ring_hw(tx_ring);
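
The three call sites above free, clean, and HW-disable every ring on the adapter. A sketch of the shared iteration pattern, assuming adapter->num_tx_queues bounds the ring array (that field is not among the matches):

    for (i = 0; i < adapter->num_tx_queues; i++) {
            if (adapter->tx_ring[i])
                    igc_clean_tx_ring(adapter->tx_ring[i]);
    }

The NULL check matters because a ring's slot in the array can be cleared (as the later adapter->tx_ring[...] = NULL match shows) before the adapter-wide cleanup runs.
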
349 * @tx_ring: tx descriptor ring (for a specific queue) to setup
353 int igc_setup_tx_resources(struct igc_ring *tx_ring)
355 struct net_device *ndev = tx_ring->netdev;
356 struct device *dev = tx_ring->dev;
359 size = sizeof(struct igc_tx_buffer) * tx_ring->count;
360 tx_ring->tx_buffer_info = vzalloc(size);
361 if (!tx_ring->tx_buffer_info)
365 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
366 tx_ring->size = ALIGN(tx_ring->size, 4096);
368 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
369 &tx_ring->dma, GFP_KERNEL);
371 if (!tx_ring->desc)
374 tx_ring->next_to_use = 0;
375 tx_ring->next_to_clean = 0;
380 vfree(tx_ring->tx_buffer_info);
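
Setup as these matches outline, sketched: vzalloc() the software buffer array, size the descriptor ring as count advanced descriptors, round it up to 4 KiB so it is page aligned, and allocate it with dma_alloc_coherent() so the CPU and the NIC share one coherent view. The -ENOMEM return is an assumption; the matched vfree() is the error-path unwind.

    int igc_setup_tx_resources_sketch(struct igc_ring *tx_ring)
    {
            struct device *dev = tx_ring->dev;
            int size;

            size = sizeof(struct igc_tx_buffer) * tx_ring->count;
            tx_ring->tx_buffer_info = vzalloc(size);
            if (!tx_ring->tx_buffer_info)
                    return -ENOMEM;

            /* one advanced descriptor per entry, rounded to a page */
            tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
            tx_ring->size = ALIGN(tx_ring->size, 4096);

            tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                               &tx_ring->dma, GFP_KERNEL);
            if (!tx_ring->desc) {
                    vfree(tx_ring->tx_buffer_info);   /* matched unwind */
                    tx_ring->tx_buffer_info = NULL;
                    return -ENOMEM;
            }

            tx_ring->next_to_use = 0;
            tx_ring->next_to_clean = 0;
            return 0;
    }

The following matches (the per-queue igc_setup_tx_resources() loop with igc_free_tx_resources() on failure) suggest the all-queues caller unwinds the rings it already set up when one allocation fails.
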
397 err = igc_setup_tx_resources(adapter->tx_ring[i]);
401 igc_free_tx_resources(adapter->tx_ring[i]);
772 igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
1149 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
1155 u16 i = tx_ring->next_to_use;
1157 context_desc = IGC_TX_CTXTDESC(tx_ring, i);
1160 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1166 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
1167 mss_l4len_idx |= tx_ring->reg_idx << 4;
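
igc_tx_ctxtdesc() writes an advanced context descriptor, which carries offload metadata (checksum, TSO, launch time) rather than a packet buffer. A sketch of the flow the matches trace; the remaining descriptor field writes are omitted and assumed from the igb/igc descriptor layout:

    static void igc_tx_ctxtdesc_sketch(struct igc_ring *tx_ring,
                                       u32 mss_l4len_idx)
    {
            struct igc_adv_tx_context_desc *context_desc;
            u16 i = tx_ring->next_to_use;

            context_desc = IGC_TX_CTXTDESC(tx_ring, i);

            /* a context descriptor consumes one ring slot; wrap to 0 */
            i++;
            tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

            /* on i225 the context index must be unique per ring, so the
             * ring's register index is folded into mss_l4len_idx
             */
            if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
                    mss_l4len_idx |= tx_ring->reg_idx << 4;

            context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
            /* vlan_macip_lens, type_tucmd, launch time: not sketched */
    }
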
1178 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
1188 !tx_ring->launchtime_enable)
1219 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1223 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1225 struct net_device *netdev = tx_ring->netdev;
1227 netif_stop_subqueue(netdev, tx_ring->queue_index);
1235 if (igc_desc_unused(tx_ring) < size)
1239 netif_wake_subqueue(netdev, tx_ring->queue_index);
1241 u64_stats_update_begin(&tx_ring->tx_syncp2);
1242 tx_ring->tx_stats.restart_queue2++;
1243 u64_stats_update_end(&tx_ring->tx_syncp2);
1248 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1250 if (igc_desc_unused(tx_ring) >= size)
1252 return __igc_maybe_stop_tx(tx_ring, size);
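
Two-level Tx flow control, as the matches outline. The inline fast path checks free descriptors; the slow path stops the subqueue, re-checks under a barrier, and wakes the queue again if a racing completion freed space, so the queue can never be stranded stopped. A sketch:

    static int __igc_maybe_stop_tx_sketch(struct igc_ring *tx_ring,
                                          const u16 size)
    {
            struct net_device *netdev = tx_ring->netdev;

            netif_stop_subqueue(netdev, tx_ring->queue_index);

            /* barrier pairs with the wake side; then re-check, since
             * another CPU may have completed descriptors meanwhile
             */
            smp_mb();
            if (igc_desc_unused(tx_ring) < size)
                    return -EBUSY;

            /* a reprieve: space appeared after we stopped */
            netif_wake_subqueue(netdev, tx_ring->queue_index);

            u64_stats_update_begin(&tx_ring->tx_syncp2);
            tx_ring->tx_stats.restart_queue2++;
            u64_stats_update_end(&tx_ring->tx_syncp2);
            return 0;
    }

    static inline int igc_maybe_stop_tx_sketch(struct igc_ring *tx_ring,
                                               const u16 size)
    {
            if (igc_desc_unused(tx_ring) >= size)
                    return 0;
            return __igc_maybe_stop_tx_sketch(tx_ring, size);
    }
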
1296 static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1317 static int igc_tx_map(struct igc_ring *tx_ring,
1326 u16 i = tx_ring->next_to_use;
1332 tx_desc = IGC_TX_DESC(tx_ring, i);
1334 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1339 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1344 if (dma_mapping_error(tx_ring->dev, dma))
1359 if (i == tx_ring->count) {
1360 tx_desc = IGC_TX_DESC(tx_ring, 0);
1378 if (i == tx_ring->count) {
1379 tx_desc = IGC_TX_DESC(tx_ring, 0);
1387 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1390 tx_buffer = &tx_ring->tx_buffer_info[i];
1397 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1417 if (i == tx_ring->count)
1420 tx_ring->next_to_use = i;
1423 igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1425 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1426 writel(i, tx_ring->tail);
1431 netdev_err(tx_ring->netdev, "TX DMA map failed\n");
1432 tx_buffer = &tx_ring->tx_buffer_info[i];
1437 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1440 i += tx_ring->count;
1441 tx_buffer = &tx_ring->tx_buffer_info[i];
1445 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1450 tx_ring->next_to_use = i;
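
Sketch of the mapping loop in igc_tx_map() as the matches outline it: DMA-map the linear part of the skb, then each page fragment, advancing the descriptor index with a wrap at tx_ring->count; a mapping failure walks backwards and unmaps everything mapped so far. Descriptor field writes and the oversized-buffer split are condensed into comments.

    static int igc_tx_map_sketch(struct igc_ring *tx_ring,
                                 struct igc_tx_buffer *first)
    {
            struct sk_buff *skb = first->skb;
            struct igc_tx_buffer *tx_buffer = first;
            union igc_adv_tx_desc *tx_desc;
            unsigned int data_len = skb->data_len;
            unsigned int size = skb_headlen(skb);
            u16 i = tx_ring->next_to_use;
            skb_frag_t *frag;
            dma_addr_t dma;

            tx_desc = IGC_TX_DESC(tx_ring, i);

            dma = dma_map_single(tx_ring->dev, skb->data, size,
                                 DMA_TO_DEVICE);

            for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
                    if (dma_mapping_error(tx_ring->dev, dma))
                            goto dma_error;

                    /* record dma/len in tx_buffer, fill tx_desc here */

                    if (likely(!data_len))    /* last buffer of the skb */
                            break;

                    i++;
                    tx_desc++;
                    if (i == tx_ring->count) {  /* wrap to ring start */
                            tx_desc = IGC_TX_DESC(tx_ring, 0);
                            i = 0;
                    }

                    size = skb_frag_size(frag);
                    data_len -= size;

                    dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
                                           size, DMA_TO_DEVICE);
                    tx_buffer = &tx_ring->tx_buffer_info[i];
            }

            /* report bytes queued for byte-queue-limits */
            netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

            i++;
            if (i == tx_ring->count)
                    i = 0;
            tx_ring->next_to_use = i;

            igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

            /* ring the doorbell only when no more frames are coming */
            if (netif_xmit_stopped(txring_txq(tx_ring)) ||
                !netdev_xmit_more())
                    writel(i, tx_ring->tail);

            return 0;

    dma_error:
            netdev_err(tx_ring->netdev, "TX DMA map failed\n");

            /* unwind: unmap every buffer mapped so far, back to 'first' */
            tx_buffer = &tx_ring->tx_buffer_info[i];
            while (tx_buffer != first) {
                    if (dma_unmap_len(tx_buffer, len))
                            igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

                    if (i-- == 0)
                            i += tx_ring->count;   /* wrap backwards */
                    tx_buffer = &tx_ring->tx_buffer_info[i];
            }

            if (dma_unmap_len(tx_buffer, len))
                    igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

            tx_ring->next_to_use = i;
            return -1;
    }
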
1455 static int igc_tso(struct igc_ring *tx_ring,
1544 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1571 struct igc_ring *tx_ring)
1573 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1595 if (igc_maybe_stop_tx(tx_ring, count + 5)) {
1600 if (!tx_ring->launchtime_enable)
1605 launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
1612 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1620 igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
1622 if (igc_init_tx_empty_descriptor(tx_ring,
1630 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1636 if (adapter->qbv_transition || tx_ring->oper_gate_closed)
1639 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
1644 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
1671 tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
1675 igc_tx_csum(tx_ring, first, launch_time, first_flag);
1677 igc_tx_map(tx_ring, first, hdr_len);
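
This group traces the transmit entry point, igc_xmit_frame_ring(): reserve worst-case descriptors up front (the matched "count + 5" keeps a gap so tail never touches head), resolve the TSN launch time, emit a TSO or checksum context descriptor, then map and send. A sketch of that ordering; the launch-time math, empty-descriptor insertion, max_sdu policing, and timestamp requests are condensed into comments, and the worst-case count computation is assumed:

    static netdev_tx_t igc_xmit_frame_ring_sketch(struct sk_buff *skb,
                                                  struct igc_ring *tx_ring)
    {
            struct igc_tx_buffer *first;
            __le32 launch_time = 0;
            bool first_flag = false;
            u8 hdr_len = 0;
            int tso;
            u16 count = 4;   /* assumed; real code sizes this per skb */

            if (igc_maybe_stop_tx(tx_ring, count + 5))
                    return NETDEV_TX_BUSY;   /* back-pressure the stack */

            /* with launchtime_enable, compute launch_time and possibly
             * insert an empty descriptor ahead of the frame (TSN)
             */

            first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
            first->skb = skb;

            tso = igc_tso(tx_ring, first, launch_time, first_flag,
                          &hdr_len);
            if (tso < 0)
                    goto out_drop;
            else if (!tso)
                    igc_tx_csum(tx_ring, first, launch_time, first_flag);

            igc_tx_map(tx_ring, first, hdr_len);
            return NETDEV_TX_OK;

    out_drop:
            dev_kfree_skb_any(first->skb);
            first->skb = NULL;
            return NETDEV_TX_OK;
    }
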
1696 return adapter->tx_ring[r_idx];
2461 return adapter->tx_ring[index];
2880 struct igc_ring *tx_ring = meta_req->tx_ring;
2888 if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
2889 adapter = netdev_priv(tx_ring->netdev);
2918 tstamp->xsk_queue_index = tx_ring->queue_index;
2998 meta_req.tx_ring = ring;
3044 struct igc_ring *tx_ring = q_vector->tx.ring;
3045 unsigned int i = tx_ring->next_to_clean;
3053 tx_buffer = &tx_ring->tx_buffer_info[i];
3054 tx_desc = IGC_TX_DESC(tx_ring, i);
3055 i -= tx_ring->count;
3091 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
3095 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
3098 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
3108 i -= tx_ring->count;
3109 tx_buffer = tx_ring->tx_buffer_info;
3110 tx_desc = IGC_TX_DESC(tx_ring, 0);
3115 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
3123 i -= tx_ring->count;
3124 tx_buffer = tx_ring->tx_buffer_info;
3125 tx_desc = IGC_TX_DESC(tx_ring, 0);
3135 netdev_tx_completed_queue(txring_txq(tx_ring),
3138 i += tx_ring->count;
3139 tx_ring->next_to_clean = i;
3143 if (tx_ring->xsk_pool) {
3145 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
3146 if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
3147 xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
3148 igc_xdp_xmit_zc(tx_ring);
3151 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
3157 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3162 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) &&
3163 !tx_ring->oper_gate_closed) {
3165 netdev_err(tx_ring->netdev,
3177 tx_ring->queue_index,
3178 rd32(IGC_TDH(tx_ring->reg_idx)),
3179 readl(tx_ring->tail),
3180 tx_ring->next_to_use,
3181 tx_ring->next_to_clean,
3186 netif_stop_subqueue(tx_ring->netdev,
3187 tx_ring->queue_index);
3196 netif_carrier_ok(tx_ring->netdev) &&
3197 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
3202 if (__netif_subqueue_stopped(tx_ring->netdev,
3203 tx_ring->queue_index) &&
3205 netif_wake_subqueue(tx_ring->netdev,
3206 tx_ring->queue_index);
3208 u64_stats_update_begin(&tx_ring->tx_syncp);
3209 tx_ring->tx_stats.restart_queue++;
3210 u64_stats_update_end(&tx_ring->tx_syncp);
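
igc_clean_tx_irq() is the completion side. Two details the matches highlight: the running index is biased by -tx_ring->count so the wrap test is simply "i reached zero", and completed XSK frames are handed back to the AF_XDP pool before the zero-copy transmit path is kicked again. A condensed sketch (budget stands in for the per-poll work limit; write-back checks, per-type frees, and stats gathering are collapsed into comments):

    static void igc_clean_tx_irq_sketch(struct igc_ring *tx_ring,
                                        unsigned int budget)
    {
            unsigned int total_bytes = 0, total_packets = 0;
            unsigned int i = tx_ring->next_to_clean;
            struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
            union igc_adv_tx_desc *tx_desc = IGC_TX_DESC(tx_ring, i);
            u32 xsk_frames = 0;

            i -= tx_ring->count;              /* bias the index negative */

            do {
                    /* break once hardware has not yet written back the
                     * end-of-packet descriptor; otherwise free the skb
                     * or XDP frame, count XSK frames, and
                     * igc_unmap_tx_buffer() each mapped buffer
                     */
                    tx_buffer++;
                    tx_desc++;
                    i++;
                    if (unlikely(!i)) {       /* wrapped past the end */
                            i -= tx_ring->count;
                            tx_buffer = tx_ring->tx_buffer_info;
                            tx_desc = IGC_TX_DESC(tx_ring, 0);
                    }
            } while (likely(--budget));

            /* report completed work for byte-queue-limits */
            netdev_tx_completed_queue(txring_txq(tx_ring),
                                      total_packets, total_bytes);

            i += tx_ring->count;              /* un-bias */
            tx_ring->next_to_clean = i;

            if (tx_ring->xsk_pool) {
                    if (xsk_frames)
                            xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
                    if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
                            xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
                    igc_xdp_xmit_zc(tx_ring);
            }
    }

The remaining matches in this group are the hang check and the wake-up: if the watchdog-armed IGC_RING_FLAG_TX_DETECT_HANG is still set, work is pending, and the hardware head (IGC_TDH(reg_idx)) disagrees with the software tail while no TSN gate is closed, the queue is reported hung and its subqueue stopped; conversely, once igc_desc_unused() reaches TX_WAKE_THRESHOLD with the carrier up, a stopped subqueue is woken and restart_queue is bumped under tx_syncp.
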
4261 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
4649 adapter->tx_ring[j]->reg_idx = j;
4775 adapter->tx_ring[txr_idx] = ring;
5038 struct igc_ring *ring = adapter->tx_ring[i];
5790 struct igc_ring *tx_ring = adapter->tx_ring[i];
5798 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5807 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
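
The watchdog side arms that hang check. A sketch; the matches confirm only the pending-work test and the set_bit(), while the link-down guard and the reset scheduling around them are assumptions from the igb-family watchdog idiom:

    for (i = 0; i < adapter->num_tx_queues; i++) {
            struct igc_ring *tx_ring = adapter->tx_ring[i];

            /* "+ 1" because igc_desc_unused() always keeps one slot
             * back; this tests whether any descriptor is outstanding
             */
            if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
                    /* assumed: with link down this work can never
                     * complete, so a controller reset is scheduled
                     */
            }

            /* force a hang check on the next completion pass */
            set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
    }
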
6156 ring = adapter->tx_ring[queue];
6252 struct igc_ring *ring = adapter->tx_ring[i];
6264 struct igc_ring *ring = adapter->tx_ring[i];
6364 struct igc_ring *ring = adapter->tx_ring[i];
6392 struct igc_ring *ring = adapter->tx_ring[i];
6415 struct igc_ring *ring = adapter->tx_ring[i];
6458 ring = adapter->tx_ring[queue];
6461 if (adapter->tx_ring[i])
6462 cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
6819 struct igc_ring *tx_ring = adapter->tx_ring[i];
6821 if (tx_ring->admin_gate_closed) {
6822 tx_ring->admin_gate_closed = false;
6823 tx_ring->oper_gate_closed = true;
6825 tx_ring->oper_gate_closed = false;
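
The last group flips TSN (Qbv) gate state per ring when a new schedule takes effect: a closed administrative (pending) gate becomes the operational state, otherwise the operational gate opens. Sketched directly from the matched lines:

    for (i = 0; i < adapter->num_tx_queues; i++) {
            struct igc_ring *tx_ring = adapter->tx_ring[i];

            if (tx_ring->admin_gate_closed) {
                    /* pending close becomes the operational state */
                    tx_ring->admin_gate_closed = false;
                    tx_ring->oper_gate_closed = true;
            } else {
                    tx_ring->oper_gate_closed = false;
            }
    }
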