Lines Matching refs:rx_ring

415 rxr = &adapter->rx_ring[i];
429 que->rx_ring = rxr;
447 rxr = &adapter->rx_ring[i];
489 struct ena_ring *rxr = &adapter->rx_ring[qid];
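
The first hits (lines 415-489) are the queue wiring: each I/O queue structure is pointed at its slot in the adapter's rx_ring array. A minimal sketch of that pattern, with struct layouts reduced to what the fragments show rather than the driver's real definitions:

#include <stddef.h>

#define NUM_QUEUES 4                    /* assumed queue count for the sketch */

struct ena_ring { int qid; };
struct ena_que  { struct ena_ring *rx_ring; };
struct ena_adapter {
        struct ena_ring rx_ring[NUM_QUEUES];
        struct ena_que  que[NUM_QUEUES];
};

/* Mirror of "que->rx_ring = rxr": every queue points into rx_ring[]. */
static void
wire_rx_rings(struct ena_adapter *adapter)
{
        for (int i = 0; i < NUM_QUEUES; i++) {
                struct ena_ring *rxr = &adapter->rx_ring[i];

                rxr->qid = i;
                adapter->que[i].rx_ring = rxr;
        }
}
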
851 struct ena_ring *rx_ring = que->rx_ring;
854 size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
858 rx_ring->initialized = false;
867 rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
869 size = sizeof(uint16_t) * rx_ring->ring_size;
870 rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
872 for (i = 0; i < rx_ring->ring_size; i++)
873 rx_ring->free_rx_ids[i] = i;
876 ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
877 sizeof(rx_ring->rx_stats));
879 rx_ring->next_to_clean = 0;
880 rx_ring->next_to_use = 0;
883 for (i = 0; i < rx_ring->ring_size; i++) {
885 &(rx_ring->rx_buffer_info[i].map));
895 int err = tcp_lro_init(&rx_ring->lro);
902 rx_ring->lro.ifp = adapter->ifp;
911 rx_ring->rx_buffer_info[i].map);
914 free(rx_ring->free_rx_ids, M_DEVBUF);
915 rx_ring->free_rx_ids = NULL;
916 free(rx_ring->rx_buffer_info, M_DEVBUF);
917 rx_ring->rx_buffer_info = NULL;
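
Lines 851-917 sit in the per-queue RX resource setup: a rx_buffer_info array and a free_rx_ids table are allocated (the table starts as the identity mapping), the rx statistics are reset, a DMA map is created for every buffer slot, LRO is initialized, and the error path at lines 911-917 unwinds in reverse. Below is a compilable user-space model of just the array bookkeeping, with libc calloc/malloc standing in for the kernel malloc(9) calls and the DMA/LRO setup omitted; later sketches in this listing reuse the same rx_ring_model struct.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct rx_buffer_model {
        void *mbuf;                     /* stands in for struct mbuf * */
};

struct rx_ring_model {
        uint16_t ring_size;
        uint16_t next_to_use;
        uint16_t next_to_clean;
        bool     initialized;           /* set at bring-up, see the sketch near line 1344 */
        uint64_t refil_partial;         /* stands in for rx_stats.refil_partial */
        struct rx_buffer_model *rx_buffer_info;
        uint16_t *free_rx_ids;
};

static int
setup_rx_resources(struct rx_ring_model *rx_ring)
{
        /* One buffer slot per descriptor, zeroed (the driver uses
         * malloc(..., M_DEVBUF, M_WAITOK | M_ZERO)). */
        rx_ring->rx_buffer_info = calloc(rx_ring->ring_size,
            sizeof(*rx_ring->rx_buffer_info));
        if (rx_ring->rx_buffer_info == NULL)
                return (-1);

        /* free_rx_ids starts as the identity mapping 0 .. ring_size - 1. */
        rx_ring->free_rx_ids = malloc(rx_ring->ring_size *
            sizeof(*rx_ring->free_rx_ids));
        if (rx_ring->free_rx_ids == NULL) {
                free(rx_ring->rx_buffer_info);
                rx_ring->rx_buffer_info = NULL;
                return (-1);
        }
        for (uint16_t i = 0; i < rx_ring->ring_size; i++)
                rx_ring->free_rx_ids[i] = i;

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        return (0);
}
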
931 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
934 for (int i = 0; i < rx_ring->ring_size; i++) {
936 rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
937 m_freem(rx_ring->rx_buffer_info[i].mbuf);
938 rx_ring->rx_buffer_info[i].mbuf = NULL;
940 rx_ring->rx_buffer_info[i].map);
942 rx_ring->rx_buffer_info[i].map);
946 tcp_lro_free(&rx_ring->lro);
949 free(rx_ring->rx_buffer_info, M_DEVBUF);
950 rx_ring->rx_buffer_info = NULL;
952 free(rx_ring->free_rx_ids, M_DEVBUF);
953 rx_ring->free_rx_ids = NULL;
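
Lines 931-953 are the matching release path: each buffer's DMA map is synced and unloaded, any attached mbuf freed, the map destroyed, the LRO state released, and finally both arrays are freed with their pointers cleared so a repeated call is harmless. Continuing the model above, reduced to the array part (the mbuf and DMA map cleanup shown in the hits is omitted):

static void
free_rx_resources(struct rx_ring_model *rx_ring)
{
        free(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        free(rx_ring->free_rx_ids);
        rx_ring->free_rx_ids = NULL;
}
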
1001 struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
1014 rx_ring->rx_mbuf_sz);
1017 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1020 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1025 mlen = rx_ring->rx_mbuf_sz;
1039 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
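
Lines 1001-1039 fall inside the single-buffer allocator: an mbuf of rx_mbuf_sz bytes is obtained, counting mjum_alloc_fail or mbuf_alloc_fail on failure depending on the mbuf type, and the buffer is then DMA-mapped, counting dma_mapping_err if the mapping fails. A standalone user-space sketch of that allocate-then-map flow and its failure accounting; map_buffer() is a hypothetical stand-in for the bus_dma mapping step, and the jumbo/regular mbuf distinction is collapsed into one counter:

#include <stdint.h>
#include <stdlib.h>

struct rx_alloc_stats {
        uint64_t alloc_fail;            /* stands in for mjum/mbuf_alloc_fail */
        uint64_t dma_mapping_err;
};

struct rx_buf {
        void  *data;
        size_t len;
};

/* Hypothetical stand-in for the DMA mapping step; always succeeds here. */
static int
map_buffer(struct rx_buf *buf)
{
        (void)buf;
        return (0);
}

static int
alloc_and_map_rx_buf(struct rx_alloc_stats *stats, struct rx_buf *buf,
    size_t mbuf_sz)
{
        buf->data = malloc(mbuf_sz);
        if (buf->data == NULL) {
                stats->alloc_fail++;
                return (-1);
        }
        buf->len = mbuf_sz;

        if (map_buffer(buf) != 0) {
                stats->dma_mapping_err++;
                free(buf->data);
                buf->data = NULL;
                return (-1);
        }
        return (0);
}
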
1063 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1081 * @rx_ring: the ring which we want to feed with free descriptors
1086 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1088 struct ena_adapter *adapter = rx_ring->adapter;
1094 rx_ring->qid);
1096 next_to_use = rx_ring->next_to_use;
1104 req_id = rx_ring->free_rx_ids[next_to_use];
1105 rx_info = &rx_ring->rx_buffer_info[req_id];
1107 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1108 rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info);
1111 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1115 rx_ring->qid);
1118 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1123 rx_ring->qid);
1127 rx_ring->ring_size);
1131 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1134 rx_ring->qid, i, num);
1138 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1140 rx_ring->next_to_use = next_to_use;
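
The largest cluster (lines 1081-1140) is the refill loop itself: next_to_use indexes free_rx_ids to pick a request id, a buffer is allocated for that slot (through the netmap path or a regular mbuf), the descriptor is handed to the submission queue with ena_com_add_single_rx_desc(), and the index advances modulo ring_size; if allocation or posting fails part-way, rx_stats.refil_partial is counted and the loop stops, and whatever did get queued is published with a doorbell write before next_to_use is stored back. Continuing the model, with alloc_rx_buf() and post_rx_desc() as hypothetical stand-ins for those calls:

/* Trivial stand-ins so the sketch links; the real calls allocate an mbuf
 * cluster and post a descriptor to the ena_com submission queue. */
static void *alloc_rx_buf(void) { return (malloc(2048)); }
static int   post_rx_desc(uint16_t req_id) { (void)req_id; return (0); }

static uint32_t
refill_rx_bufs(struct rx_ring_model *rx_ring, uint32_t num)
{
        uint16_t next_to_use = rx_ring->next_to_use;
        uint32_t i;

        for (i = 0; i < num; i++) {
                /* free_rx_ids maps the ring position to a free request id. */
                uint16_t req_id = rx_ring->free_rx_ids[next_to_use];
                struct rx_buffer_model *rx_info =
                    &rx_ring->rx_buffer_info[req_id];

                if (rx_info->mbuf == NULL)
                        rx_info->mbuf = alloc_rx_buf();
                if (rx_info->mbuf == NULL)
                        break;          /* allocation failed, stop early */
                if (post_rx_desc(req_id) != 0)
                        break;          /* submission queue refused the descriptor */

                /* Advance with wrap-around to the next ring slot. */
                next_to_use = (uint16_t)((next_to_use + 1) % rx_ring->ring_size);
        }

        if (i < num)
                rx_ring->refil_partial++;       /* partial refill, retried later */

        /* The driver writes the SQ doorbell here to publish the new
         * descriptors before saving the updated index. */
        rx_ring->next_to_use = next_to_use;
        return (i);
}
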
1306 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1309 for (i = 0; i < rx_ring->ring_size; i++) {
1310 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1313 ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1318 ena_netmap_free_rx_slot(adapter, rx_ring,
1333 struct ena_ring *rx_ring;
1337 rx_ring = &adapter->rx_ring[i];
1338 bufs_num = rx_ring->ring_size - 1;
1339 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1344 rx_ring->initialized = true;
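
Lines 1333-1344 are the bring-up loop: every ring is refilled with ring_size - 1 buffers, the usual ring-buffer convention that leaves one slot unused so a completely full ring is never indistinguishable from an empty one, and the ring is then marked initialized. Continuing the model:

static void
refill_all_rx_bufs(struct rx_ring_model *rings, int num_rings)
{
        for (int i = 0; i < num_rings; i++) {
                struct rx_ring_model *rx_ring = &rings[i];

                /* A partial refill is tolerated in this sketch; the watchdog
                 * checks near line 3159 can kick another refill later. */
                (void)refill_rx_bufs(rx_ring, (uint32_t)(rx_ring->ring_size - 1));
                rx_ring->initialized = true;
        }
}
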
1503 ring = &adapter->rx_ring[i];
1993 adapter->rx_ring[i].ring_size = new_rx_size;
2052 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
3012 struct ena_ring *rx_ring)
3014 if (likely(rx_ring->first_interrupt))
3017 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3020 rx_ring->no_interrupt_event_cnt++;
3022 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3024 "Queue = %d. Reset the device\n", rx_ring->qid);
3105 struct ena_ring *rx_ring;
3124 rx_ring = &adapter->rx_ring[i];
3130 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3159 struct ena_ring *rx_ring;
3169 rx_ring = &adapter->rx_ring[i];
3171 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3172 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3173 rx_ring->empty_rx_queue++;
3175 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3176 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3182 taskqueue_enqueue(rx_ring->que->cleanup_tq,
3183 &rx_ring->que->cleanup_task);
3184 rx_ring->empty_rx_queue = 0;
3187 rx_ring->empty_rx_queue = 0;
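
The final cluster (lines 3159-3187) is the empty-RX-ring watchdog: when the number of free submission-queue entries equals ring_size - 1 the ring has no buffers posted; a per-ring counter tracks how many consecutive watchdog passes that lasts, and once it reaches EMPTY_RX_REFILL the empty_rx_ring statistic is bumped, the queue's cleanup task is enqueued to refill the ring, and the counter is reset (it also resets whenever the ring is seen non-empty). A standalone sketch; free_q_entries() and kick_cleanup() are hypothetical stand-ins and the threshold value is assumed:

#include <stdint.h>

#define EMPTY_RX_REFILL_PASSES 2        /* assumed value for the sketch */

struct rx_empty_watch {
        uint16_t ring_size;
        int      empty_rx_queue;        /* consecutive passes seen empty */
        uint64_t empty_rx_ring_stat;    /* stands in for rx_stats.empty_rx_ring */
};

/* Hypothetical stand-ins for the SQ query and the cleanup taskqueue kick. */
static uint32_t free_q_entries(const struct rx_empty_watch *w) { return (w->ring_size - 1u); }
static void     kick_cleanup(struct rx_empty_watch *w) { (void)w; }

static void
check_empty_rx_ring(struct rx_empty_watch *w)
{
        if (free_q_entries(w) != (uint32_t)(w->ring_size - 1)) {
                w->empty_rx_queue = 0;  /* buffers are posted again */
                return;
        }

        if (++w->empty_rx_queue >= EMPTY_RX_REFILL_PASSES) {
                w->empty_rx_ring_stat++;
                kick_cleanup(w);        /* the cleanup task refills the ring */
                w->empty_rx_queue = 0;
        }
}
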