Lines Matching refs:rx_ring

71 struct ena_ring *rx_ring;
83 rx_ring = que->rx_ring;
89 rx_ring->first_interrupt = true;
92 rxc = ena_rx_cleanup(rx_ring);
333 ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
336 struct ena_adapter *adapter = rx_ring->adapter;
392 mbuf->m_pkthdr.flowid = rx_ring->qid;
399 * @rx_ring: ring for which we want to clean packets
406 ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
416 adapter = rx_ring->adapter;
420 rx_info = &rx_ring->rx_buffer_info[req_id];
439 mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;
442 ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
448 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
451 rx_ring->free_rx_ids[ntc] = req_id;
452 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
462 rx_info = &rx_ring->rx_buffer_info[req_id];
483 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
492 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
496 rx_ring->free_rx_ids[ntc] = req_id;
497 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
509 ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
518 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
529 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
543 ena_rx_cleanup(struct ena_ring *rx_ring)
564 adapter = rx_ring->que->adapter;
566 qid = rx_ring->que->id;
570 next_to_clean = rx_ring->next_to_clean;
573 if (netmap_rx_irq(adapter->ifp, rx_ring->qid, &done) != NM_IRQ_PASS)
580 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
590 counter_u64_add(rx_ring->rx_stats.bad_desc_num,
594 counter_u64_add(rx_ring->rx_stats.bad_req_id,
607 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
611 mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
618 rx_ring->free_rx_ids[next_to_clean] =
619 rx_ring->ena_bufs[i].req_id;
622 rx_ring->ring_size);
630 ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
634 counter_u64_add_protected(rx_ring->rx_stats.bytes,
653 if ((rx_ring->lro.lro_cnt != 0) &&
654 (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
664 counter_u64_add_protected(rx_ring->rx_stats.cnt, 1);
669 rx_ring->next_to_clean = next_to_clean;
673 rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
677 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
678 ena_refill_rx_bufs(rx_ring, refill_required);
681 tcp_lro_flush_all(&rx_ring->lro);
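
The references above all sit on the ena(4) RX datapath: the interrupt handler marks the ring (first_interrupt, line 89) and calls ena_rx_cleanup() (line 92), which walks completed descriptors, builds mbufs with ena_rx_mbuf() (which also sets the RSS hash or falls back to the queue id via ena_rx_hash_mbuf(), lines 333-442), applies checksum results with ena_rx_checksum(), hands packets to LRO or the stack, and finally refills the ring. The sketch below is a simplified reconstruction of that loop from the quoted lines only, not the verbatim driver code: the descriptor fetch, the error paths, the per-call budget value, the refill_required query, and the exact ena_rx_mbuf() argument list (inferred from lines 406 and 611) are assumptions or elisions, and the if_input handoff follows the classic struct ifnet pattern.

/*
 * Sketch of the rx_ring cleanup loop reconstructed from the lines above;
 * descriptor fetch, error handling, and netmap/budget details are elided.
 */
static int
ena_rx_cleanup_sketch(struct ena_ring *rx_ring)
{
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ifnet *ifp = rx_ring->que->adapter->ifp;
	struct mbuf *mbuf;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	int refill_required;
	int budget = 256;	/* assumed per-call packet budget */
	int done = 0;

	ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;

	do {
		/* Fetch the next completed descriptor group into ena_rx_ctx
		 * (ena_com call elided); stop when the queue is empty. */

		/* Build an mbuf chain from the completed buffers; this also
		 * returns buffer ids to free_rx_ids, advances the index with
		 * ENA_RX_RING_IDX_NEXT(), and sets the flow id (lines 406-497). */
		mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, &ena_rx_ctx,
		    &next_to_clean);
		if (unlikely(mbuf == NULL))
			break;

		/* Record checksum offload results (lines 509-529, 630). */
		ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);

		/* Try LRO first; otherwise hand the mbuf to the stack
		 * (lines 653-654). */
		if ((rx_ring->lro.lro_cnt == 0) ||
		    (tcp_lro_rx(&rx_ring->lro, mbuf, 0) != 0))
			(*ifp->if_input)(ifp, mbuf);

		counter_u64_add(rx_ring->rx_stats.cnt, 1);
		done++;
	} while (--budget > 0);

	rx_ring->next_to_clean = next_to_clean;

	/* Refill consumed descriptors once enough have been cleaned; the
	 * free-descriptor query is elided here (lines 673-678). */
	refill_required = 0;	/* would come from the submission queue */
	if (refill_required > rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	tcp_lro_flush_all(&rx_ring->lro);

	return (done);
}

In the real function the loop additionally validates descriptor counts and request ids (the bad_desc_num and bad_req_id counters at lines 590-594) and defers to netmap_rx_irq() when the ring is in netmap mode (line 573).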