Lines Matching defs:tx_ring

53 struct ena_ring *tx_ring;
62 tx_ring = &adapter->tx_ring[txqueue];
64 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
65 napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
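
The fragments at source lines 53-65 come from the TX timeout handler, which reports how long it has been since the queue's NAPI last ran and whether that NAPI is currently scheduled. A minimal sketch of how those two diagnostics are derived (hypothetical helper name; kernel context and the driver's ena_netdev.h types assumed):

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include "ena_netdev.h"

/* Sketch: derive the two diagnostics reported on a TX queue timeout. */
static void report_tx_queue_state(struct ena_adapter *adapter, unsigned int txqueue)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[txqueue];
	u32 time_since_last_napi;
	bool napi_scheduled;

	/* last_napi_jiffies is refreshed at the end of every NAPI poll */
	time_since_last_napi =
		jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
	/* NAPIF_STATE_SCHED is set while the NAPI instance is queued to run */
	napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);

	netdev_err(adapter->netdev,
		   "TX q %u: %u us since last NAPI poll, napi_scheduled=%d\n",
		   txqueue, time_since_last_napi, napi_scheduled);
}
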
216 txr = &adapter->tx_ring[i];
246 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
259 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
263 if (tx_ring->tx_buffer_info) {
269 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
272 tx_ring->tx_buffer_info = vzalloc_node(size, node);
273 if (!tx_ring->tx_buffer_info) {
274 tx_ring->tx_buffer_info = vzalloc(size);
275 if (!tx_ring->tx_buffer_info)
279 size = sizeof(u16) * tx_ring->ring_size;
280 tx_ring->free_ids = vzalloc_node(size, node);
281 if (!tx_ring->free_ids) {
282 tx_ring->free_ids = vzalloc(size);
283 if (!tx_ring->free_ids)
287 size = tx_ring->tx_max_header_size;
288 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
289 if (!tx_ring->push_buf_intermediate_buf) {
290 tx_ring->push_buf_intermediate_buf = vzalloc(size);
291 if (!tx_ring->push_buf_intermediate_buf)
296 for (i = 0; i < tx_ring->ring_size; i++)
297 tx_ring->free_ids[i] = i;
300 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
302 tx_ring->next_to_use = 0;
303 tx_ring->next_to_clean = 0;
304 tx_ring->cpu = ena_irq->cpu;
305 tx_ring->numa_node = node;
309 vfree(tx_ring->free_ids);
310 tx_ring->free_ids = NULL;
312 vfree(tx_ring->tx_buffer_info);
313 tx_ring->tx_buffer_info = NULL;
326 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
328 vfree(tx_ring->tx_buffer_info);
329 tx_ring->tx_buffer_info = NULL;
331 vfree(tx_ring->free_ids);
332 tx_ring->free_ids = NULL;
334 vfree(tx_ring->push_buf_intermediate_buf);
335 tx_ring->push_buf_intermediate_buf = NULL;
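
Source lines 259-335 are the per-queue TX resource setup and teardown: NUMA-local vzalloc_node() with a plain vzalloc() fallback, a free_ids array seeded with the identity mapping, and symmetric vfree() on release. A condensed sketch of that allocation pattern (the push_buf_intermediate_buf allocation at lines 287-291 follows the same shape and is omitted; driver types taken from ena_netdev.h):

#include <linux/vmalloc.h>
#include "ena_netdev.h"

/* Sketch: allocate the per-ring bookkeeping arrays, preferring the ring's
 * NUMA node and falling back to any node.
 */
static int setup_tx_resources_sketch(struct ena_ring *tx_ring, int node)
{
	size_t size;
	int i;

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_free_buffer_info;
	}

	/* Every request id starts out free: slot i hands out id i */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err_free_buffer_info:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	return -ENOMEM;
}
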
685 void ena_unmap_tx_buff(struct ena_ring *tx_ring,
699 dma_unmap_single(tx_ring->dev,
709 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
716 * @tx_ring: TX ring for which buffers are to be freed
718 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
724 is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
726 for (i = 0; i < tx_ring->ring_size; i++) {
727 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
733 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
735 tx_ring->qid, i);
738 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
740 tx_ring->qid, i);
743 ena_unmap_tx_buff(tx_ring, tx_info);
752 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
753 tx_ring->qid));
758 struct ena_ring *tx_ring;
762 tx_ring = &adapter->tx_ring[i];
763 ena_free_tx_bufs(tx_ring);
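
Lines 685-763 handle teardown of in-flight buffers when the interface goes down: every slot still holding an skb is DMA-unmapped and freed, then the queue's byte-queue-limit accounting is reset. A simplified sketch (XDP-ring handling and the rate-limited log messages omitted; the skb field of ena_tx_buffer is assumed from the driver):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "ena_netdev.h"

/* Sketch: drop any skb still sitting in the ring. */
static void free_tx_bufs_sketch(struct ena_ring *tx_ring)
{
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		/* Undo the DMA mappings created at transmit time */
		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
		tx_info->skb = NULL;
	}

	/* Reset BQL / queue-state accounting for this TX queue */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
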
819 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
823 tx_info = &tx_ring->tx_buffer_info[req_id];
827 return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
830 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
841 next_to_clean = tx_ring->next_to_clean;
842 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
848 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
852 handle_invalid_req_id(tx_ring, req_id, NULL, false);
857 rc = validate_tx_req_id(tx_ring, req_id);
861 tx_info = &tx_ring->tx_buffer_info[req_id];
870 ena_unmap_tx_buff(tx_ring, tx_info);
872 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
873 "tx_poll: q %d skb %p completed\n", tx_ring->qid,
881 tx_ring->free_ids[next_to_clean] = req_id;
883 tx_ring->ring_size);
886 tx_ring->next_to_clean = next_to_clean;
887 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
891 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
893 tx_ring->qid, tx_pkts);
900 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
905 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
908 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
910 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
911 &tx_ring->syncp);
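
Lines 830-911 are the TX completion path: completed request ids are pulled from the IO completion queue, validated, unmapped and recycled into free_ids, the batch is acked to the submission queue, and the netdev queue is re-woken once enough descriptors are free. A condensed sketch of the inner loop (req_id validation, BQL accounting, and the queue-wakeup locking are trimmed; the skb and tx_descs fields of ena_tx_buffer are assumed from the driver):

#include "ena_netdev.h"

/* Sketch: clean up to @budget completed TX packets. */
static int clean_tx_irq_sketch(struct ena_ring *tx_ring, u32 budget)
{
	u16 next_to_clean = tx_ring->next_to_clean;
	u32 total_done = 0;
	u32 tx_pkts = 0;
	u16 req_id;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;

		/* Ask the device which request id completed next */
		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id))
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		ena_unmap_tx_buff(tx_ring, tx_info);

		total_done += tx_info->tx_descs;
		dev_kfree_skb(tx_info->skb);
		tx_info->skb = NULL;

		/* Recycle the request id and advance the clean pointer,
		 * wrapping at ring_size (the driver uses ENA_TX_RING_IDX_NEXT).
		 */
		tx_ring->free_ids[next_to_clean] = req_id;
		if (++next_to_clean == tx_ring->ring_size)
			next_to_clean = 0;
		tx_pkts++;
	}

	tx_ring->next_to_clean = next_to_clean;
	/* Tell the submission queue how many descriptors were consumed */
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);

	return tx_pkts;
}
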
1391 void ena_unmask_interrupt(struct ena_ring *tx_ring,
1394 u32 rx_interval = tx_ring->smoothed_interval;
1410 tx_ring->smoothed_interval,
1413 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
1414 &tx_ring->syncp);
1421 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
1424 void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1431 if (likely(tx_ring->cpu == cpu))
1434 tx_ring->cpu = cpu;
1440 if (likely(tx_ring->numa_node == numa_node))
1446 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1447 tx_ring->numa_node = numa_node;
1463 struct ena_ring *tx_ring, *rx_ring;
1470 tx_ring = ena_napi->tx_ring;
1473 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1475 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1476 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1481 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1491 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1492 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1512 ena_update_ring_numa_node(tx_ring, rx_ring);
1513 ena_unmask_interrupt(tx_ring, rx_ring);
1521 u64_stats_update_begin(&tx_ring->syncp);
1522 tx_ring->tx_stats.napi_comp += napi_comp_call;
1523 tx_ring->tx_stats.tx_poll++;
1524 u64_stats_update_end(&tx_ring->syncp);
1526 tx_ring->tx_stats.last_napi_jiffies = jiffies;
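
Lines 1463-1526 are the shared TX/RX NAPI handler: the TX budget is a slice of the ring (ring_size / ENA_TX_POLL_BUDGET_DIVIDER), both directions are cleaned, and only when RX work stays under the NAPI budget does the handler complete, refresh NUMA placement, and unmask the interrupt. A minimal sketch of that control flow (RX cleaning, reset checks, adaptive moderation and the per-poll stats update elided; the rx_ring member of ena_napi is assumed by symmetry with tx_ring):

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include "ena_netdev.h"

/* Sketch: shape of the combined TX/RX poll routine. */
static int io_poll_sketch(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring = ena_napi->tx_ring;
	struct ena_ring *rx_ring = ena_napi->rx_ring;
	int rx_work_done = 0;	/* the real handler gets this from RX cleaning */
	u32 tx_budget;

	/* Clean only a fraction of the TX ring per poll */
	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
	ena_clean_tx_irq(tx_ring, tx_budget);

	/* ... RX cleaning, bounded by @budget, would go here ... */

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		/* Keep CPU/NUMA bookkeeping current, then let the device
		 * interrupt this queue pair again.
		 */
		ena_update_ring_numa_node(tx_ring, rx_ring);
		ena_unmask_interrupt(tx_ring, rx_ring);
	}

	/* Remember when this queue was last polled (used by the TX watchdog) */
	tx_ring->tx_stats.last_napi_jiffies = jiffies;

	return rx_work_done;
}
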
1789 struct ena_ring *rx_ring, *tx_ring;
1794 tx_ring = &adapter->tx_ring[i];
1805 napi->tx_ring = tx_ring;
1889 struct ena_ring *tx_ring;
1896 tx_ring = &adapter->tx_ring[qid];
1906 ctx.queue_size = tx_ring->ring_size;
1907 ctx.numa_node = tx_ring->numa_node;
1918 &tx_ring->ena_com_io_sq,
1919 &tx_ring->ena_com_io_cq);
1928 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2036 adapter->tx_ring[i].ring_size = new_tx_size;
2110 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2190 ena_unmask_interrupt(&adapter->tx_ring[i],
2468 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2476 if (num_frags < tx_ring->sgl_size)
2479 if ((num_frags == tx_ring->sgl_size) &&
2480 (header_len < tx_ring->tx_max_header_size))
2483 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
2487 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
2488 &tx_ring->syncp);
2494 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2500 struct ena_adapter *adapter = tx_ring->adapter;
2512 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2523 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2525 tx_ring->push_buf_intermediate_buf);
2528 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
2529 &tx_ring->syncp);
2536 tx_ring->tx_max_header_size);
2544 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2546 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2571 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2573 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2586 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
2587 &tx_ring->syncp);
2593 ena_unmap_tx_buff(tx_ring, tx_info);
2604 struct ena_ring *tx_ring;
2613 tx_ring = &adapter->tx_ring[qid];
2616 rc = ena_check_and_linearize_skb(tx_ring, skb);
2620 next_to_use = tx_ring->next_to_use;
2621 req_id = tx_ring->free_ids[next_to_use];
2622 tx_info = &tx_ring->tx_buffer_info[req_id];
2627 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2639 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
2642 tx_ring,
2656 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2657 tx_ring->sgl_size + 2))) {
2662 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
2663 &tx_ring->syncp);
2675 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2678 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
2679 &tx_ring->syncp);
2689 ena_ring_tx_doorbell(tx_ring);
2694 ena_unmap_tx_buff(tx_ring, tx_info);
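
Lines 2468-2694 are the transmit path: the skb is linearized when it has more fragments than the device's SGL allows, DMA-mapped (with the header optionally copied into push_buf_intermediate_buf for LLQ placement), and submitted; the queue is stopped when fewer than sgl_size + 2 descriptors remain and the doorbell notifies the device. A compressed sketch of that sequence (descriptor preparation through the ena_com layer, the re-wake check, and error unwinding are elided):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "ena_netdev.h"

/* Sketch: fast path of the ndo_start_xmit handler. */
static netdev_tx_t start_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_ring *tx_ring;
	u16 next_to_use, req_id;
	void *push_hdr;
	u16 header_len;
	u16 qid;

	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];

	/* Too many fragments for the device's SGL? Linearize first. */
	if (ena_check_and_linearize_skb(tx_ring, skb))
		goto drop;

	/* Take the next free request id and its buffer-info slot */
	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];

	/* DMA-map header and fragments (LLQ push header handled inside) */
	if (ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len))
		goto drop;

	/* ... descriptor preparation via the ena_com layer elided ... */

	/* Stop the queue if the SQ cannot fit another worst-case packet */
	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						   tx_ring->sgl_size + 2)))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, qid));

	/* Ring the doorbell so the device starts fetching descriptors */
	ena_ring_tx_doorbell(tx_ring);
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
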
2818 struct ena_ring *rx_ring, *tx_ring;
2831 tx_ring = &adapter->tx_ring[i];
2834 start = u64_stats_fetch_begin(&tx_ring->syncp);
2835 packets = tx_ring->tx_stats.cnt;
2836 bytes = tx_ring->tx_stats.bytes;
2837 } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
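
Lines 2818-2837 read the per-ring counters under the ring's u64_stats seqcount so that 64-bit statistics cannot be torn on 32-bit machines; the loop re-reads if a writer raced with the reader. A minimal sketch of that reader pattern:

#include <linux/u64_stats_sync.h>
#include "ena_netdev.h"

/* Sketch: torn-read-safe fetch of one TX ring's packet/byte counters. */
static void fetch_tx_stats_sketch(struct ena_ring *tx_ring,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&tx_ring->syncp);
		*packets = tx_ring->tx_stats.cnt;
		*bytes = tx_ring->tx_stats.bytes;
	} while (u64_stats_fetch_retry(&tx_ring->syncp, start));
}
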
2908 if (adapter->tx_ring->ring_size)
2909 tx_queue_size = adapter->tx_ring->ring_size;
3317 txr = &adapter->tx_ring[i];
3416 struct ena_ring *tx_ring)
3418 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
3431 for (i = 0; i < tx_ring->ring_size; i++) {
3432 tx_buf = &tx_ring->tx_buffer_info[i];
3448 tx_ring->qid);
3458 jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
3480 tx_ring->qid, i, time_since_last_napi, napi_scheduled);
3499 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
3500 &tx_ring->syncp);
3507 struct ena_ring *tx_ring;
3533 tx_ring = &adapter->tx_ring[qid];
3536 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
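
Lines 3416-3536 are the TX watchdog: each outstanding buffer is checked for how long it has been waiting on a completion, taking the time of the last NAPI poll and the NAPI scheduled state into account, and missed_tx is bumped (eventually triggering a device reset) past a threshold. A simplified sketch of the per-queue check (the last_jiffies buffer field and the adapter's timeout/threshold field names are assumed; XDP handling and the rate-limited diagnostics are omitted):

#include <linux/jiffies.h>
#include "ena_netdev.h"

/* Sketch: count TX buffers that have waited too long for a completion. */
static int check_missing_comp_sketch(struct ena_adapter *adapter,
				     struct ena_ring *tx_ring)
{
	u32 missed_tx = 0;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_buf = &tx_ring->tx_buffer_info[i];

		/* Slot not in flight, nothing to account for */
		if (!tx_buf->skb)
			continue;

		/* Has this packet waited longer than the completion timeout? */
		if (time_is_before_jiffies(tx_buf->last_jiffies +
					   adapter->missing_tx_completion_to))
			missed_tx++;
	}

	if (missed_tx) {
		ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
				  &tx_ring->syncp);
		/* Too many misses: ask for a device reset */
		if (missed_tx > adapter->missing_tx_completion_threshold)
			return -EIO;
	}

	return 0;
}
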