Lines Matching refs: rx_queue

38 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
40 struct efx_nic *efx = rx_queue->efx;
45 if (unlikely(!rx_queue->page_ring))
47 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
48 page = rx_queue->page_ring[index];
52 rx_queue->page_ring[index] = NULL;
54 if (rx_queue->page_remove != rx_queue->page_add)
55 ++rx_queue->page_remove;
59 ++rx_queue->page_recycle_count;
67 ++rx_queue->page_recycle_failed;
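These matches appear to be cross-reference output over the sfc "siena" driver's RX path (rx_common.c in recent kernels). Lines 38-67 are the consumer side of the page recycle ring. A sketch of how the matched lines fit together; the page_count() test and the unmap-and-drop failure path are reconstructions, not matched output:

    static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
    {
            struct efx_nic *efx = rx_queue->efx;
            struct efx_rx_page_state *state;
            unsigned int index;
            struct page *page;

            if (unlikely(!rx_queue->page_ring))
                    return NULL;

            /* page_ptr_mask is ring_size - 1, so & is index mod ring_size */
            index = rx_queue->page_remove & rx_queue->page_ptr_mask;
            page = rx_queue->page_ring[index];
            if (page == NULL)
                    return NULL;

            rx_queue->page_ring[index] = NULL;
            /* page_remove must never overtake page_add (line 54) */
            if (rx_queue->page_remove != rx_queue->page_add)
                    ++rx_queue->page_remove;

            /* Reuse the page only if we hold the last reference to it */
            if (page_count(page) == 1) {
                    ++rx_queue->page_recycle_count;
                    return page;
            }

            /* Still shared: unmap, drop our reference, count the failure */
            state = page_address(page);
            dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                           PAGE_SIZE << efx->rx_buffer_order,
                           DMA_FROM_DEVICE);
            put_page(page);
            ++rx_queue->page_recycle_failed;
            return NULL;
    }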
80 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
81 struct efx_nic *efx = rx_queue->efx;
89 index = rx_queue->page_add & rx_queue->page_ptr_mask;
90 if (rx_queue->page_ring[index] == NULL) {
91 unsigned int read_index = rx_queue->page_remove &
92 rx_queue->page_ptr_mask;
99 ++rx_queue->page_remove;
100 rx_queue->page_ring[index] = page;
101 ++rx_queue->page_add;
104 ++rx_queue->page_recycle_full;
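Lines 80-104 are the matching producer: a page coming back from a completed receive is offered to the same ring. A sketch built around the matched lines; the ring-full fallback that unmaps and releases the page is an assumption:

    static void efx_recycle_rx_page(struct efx_channel *channel,
                                    struct efx_rx_buffer *rx_buf)
    {
            struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
            struct efx_nic *efx = rx_queue->efx;
            struct page *page = rx_buf->page;
            unsigned int index;

            index = rx_queue->page_add & rx_queue->page_ptr_mask;
            if (rx_queue->page_ring[index] == NULL) {
                    unsigned int read_index = rx_queue->page_remove &
                                              rx_queue->page_ptr_mask;

                    /* If the read pointer sits on the slot we are about to
                     * fill, nudge it forward so it cannot be lapped (the
                     * ++page_remove at line 99). */
                    if (read_index == index)
                            ++rx_queue->page_remove;
                    rx_queue->page_ring[index] = page;
                    ++rx_queue->page_add;
                    return;
            }

            /* Ring full: count the miss and let the page go */
            ++rx_queue->page_recycle_full;
            efx_unmap_rx_buffer(efx, rx_buf);
            put_page(page);
    }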
114 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
116 if (unlikely(!rx_queue->page_ring))
121 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
129 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
133 efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
136 static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
139 struct efx_nic *efx = rx_queue->efx;
144 rx_queue->page_ring = kcalloc(page_ring_size,
145 sizeof(*rx_queue->page_ring), GFP_KERNEL);
146 if (!rx_queue->page_ring)
147 rx_queue->page_ptr_mask = 0;
149 rx_queue->page_ptr_mask = page_ring_size - 1;
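The ring is sized as a power of two so that page_ptr_mask can serve as the wrap mask; a failed allocation leaves the mask at 0, and the NULL page_ring pointer is what every other path tests. A sketch (the efx_rx_recycle_ring_size() helper and the roundup_pow_of_two() sizing are assumptions):

    static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
    {
            unsigned int bufs_in_recycle_ring, page_ring_size;
            struct efx_nic *efx = rx_queue->efx;

            bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
            page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                                efx->rx_bufs_per_page);
            rx_queue->page_ring = kcalloc(page_ring_size,
                                          sizeof(*rx_queue->page_ring),
                                          GFP_KERNEL);
            if (!rx_queue->page_ring)
                    rx_queue->page_ptr_mask = 0;
            else
                    rx_queue->page_ptr_mask = page_ring_size - 1;
    }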
152 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
154 struct efx_nic *efx = rx_queue->efx;
157 if (unlikely(!rx_queue->page_ring))
161 for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
162 struct page *page = rx_queue->page_ring[i];
174 kfree(rx_queue->page_ring);
175 rx_queue->page_ring = NULL;
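Teardown walks every slot up to the mask and releases whatever pages are still parked there before freeing the array; the unmap-and-put loop body is reconstructed from the consumer path above:

    static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
    {
            struct efx_nic *efx = rx_queue->efx;
            int i;

            if (unlikely(!rx_queue->page_ring))
                    return;

            /* Unmap and release any pages left in the recycle ring */
            for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                    struct page *page = rx_queue->page_ring[i];
                    struct efx_rx_page_state *state;

                    if (page == NULL)
                            continue;

                    state = page_address(page);
                    dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                                   PAGE_SIZE << efx->rx_buffer_order,
                                   DMA_FROM_DEVICE);
                    put_page(page);
            }

            kfree(rx_queue->page_ring);
            rx_queue->page_ring = NULL;
    }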
178 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
187 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
188 efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
193 int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
195 struct efx_nic *efx = rx_queue->efx;
202 rx_queue->ptr_mask = entries - 1;
206 efx_rx_queue_index(rx_queue), efx->rxq_entries,
207 rx_queue->ptr_mask);
210 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
212 if (!rx_queue->buffer)
215 rc = efx_nic_probe_rx(rx_queue);
217 kfree(rx_queue->buffer);
218 rx_queue->buffer = NULL;
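Probe rounds the descriptor count up to a power of two so ptr_mask can be entries - 1, allocates the software buffer array, and unwinds it if the NIC-level probe fails. A sketch; the roundup_pow_of_two() sizing and the -ENOMEM return are assumptions beyond the matched lines:

    int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
    {
            struct efx_nic *efx = rx_queue->efx;
            unsigned int entries;
            int rc;

            /* Smallest power-of-two ring that covers rxq_entries */
            entries = roundup_pow_of_two(efx->rxq_entries);
            rx_queue->ptr_mask = entries - 1;

            netif_dbg(efx, probe, efx->net_dev,
                      "creating RX queue %d size %#x mask %#x\n",
                      efx_rx_queue_index(rx_queue), efx->rxq_entries,
                      rx_queue->ptr_mask);

            rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                       GFP_KERNEL);
            if (!rx_queue->buffer)
                    return -ENOMEM;

            rc = efx_nic_probe_rx(rx_queue);
            if (rc) {
                    kfree(rx_queue->buffer);
                    rx_queue->buffer = NULL;
            }
            return rc;
    }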
224 void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
227 struct efx_nic *efx = rx_queue->efx;
230 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
231 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
234 rx_queue->added_count = 0;
235 rx_queue->notified_count = 0;
236 rx_queue->removed_count = 0;
237 rx_queue->min_fill = -1U;
238 efx_init_rx_recycle_ring(rx_queue);
240 rx_queue->page_remove = 0;
241 rx_queue->page_add = rx_queue->page_ptr_mask + 1;
242 rx_queue->page_recycle_count = 0;
243 rx_queue->page_recycle_failed = 0;
244 rx_queue->page_recycle_full = 0;
258 rx_queue->max_fill = max_fill;
259 rx_queue->fast_fill_trigger = trigger;
260 rx_queue->refill_enabled = true;
263 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
264 rx_queue->core_index, 0);
272 rx_queue->xdp_rxq_info_valid = true;
276 efx_nic_init_rx(rx_queue);
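Note line 241: page_add starts one full ring ahead of page_remove, so the recycle ring begins life logically full of empty slots. The elided lines between 244 and 258 derive max_fill and the refill trigger stored at lines 258-259; a sketch of that computation under assumed names (EFX_RXD_HEAD_ROOM and the rx_refill_threshold percentage parameter are assumptions):

    unsigned int max_fill, trigger, max_trigger;

    /* Leave head room so a refill can never overrun the ring */
    max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
    max_trigger = max_fill -
                  efx->rx_pages_per_batch * efx->rx_bufs_per_page;
    if (rx_refill_threshold != 0) {
            /* Trigger refill when the fill drops below the percentage */
            trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
            if (trigger > max_trigger)
                    trigger = max_trigger;
    } else {
            trigger = max_trigger;
    }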
279 void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
284 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
285 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
287 del_timer_sync(&rx_queue->slow_fill);
290 if (rx_queue->buffer) {
291 for (i = rx_queue->removed_count; i < rx_queue->added_count;
293 unsigned int index = i & rx_queue->ptr_mask;
295 rx_buf = efx_rx_buffer(rx_queue, index);
296 efx_fini_rx_buffer(rx_queue, rx_buf);
300 efx_fini_rx_recycle_ring(rx_queue);
302 if (rx_queue->xdp_rxq_info_valid)
303 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
305 rx_queue->xdp_rxq_info_valid = false;
308 void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
310 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
311 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
313 efx_nic_remove_rx(rx_queue);
315 kfree(rx_queue->buffer);
316 rx_queue->buffer = NULL;
337 void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
346 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
352 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
355 efx_nic_generate_fill_event(rx_queue);
356 ++rx_queue->slow_fill_count;
359 static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
361 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
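The slow-fill path (lines 352-361) is the fallback when buffer allocation fails: a 10 ms timer whose handler posts a fill event so NAPI will retry the refill. A sketch; the outer function name efx_siena_rx_slow_fill is inferred from the driver's naming and is not matched output:

    void efx_siena_rx_slow_fill(struct timer_list *t)
    {
            struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

            /* Post an event so NAPI runs and refills the queue */
            efx_nic_generate_fill_event(rx_queue);
            ++rx_queue->slow_fill_count;
    }

    static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
    {
            mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
    }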
366 * @rx_queue: Efx RX queue
373 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
376 struct efx_nic *efx = rx_queue->efx;
384 page = efx_reuse_page(rx_queue);
411 index = rx_queue->added_count & rx_queue->ptr_mask;
412 rx_buf = efx_rx_buffer(rx_queue, index);
420 ++rx_queue->added_count;
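efx_init_rx_buffers (lines 373-420) fills one batch: take a page from the recycle ring if possible, otherwise allocate and DMA-map a fresh one, then carve it into receive buffers indexed by added_count & ptr_mask. A condensed sketch; the allocation flags, the mapping step, and the per-buffer fields are assumptions beyond the matched lines:

    static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
    {
            struct efx_nic *efx = rx_queue->efx;
            struct efx_rx_buffer *rx_buf;
            unsigned int index;
            struct page *page;

            page = efx_reuse_page(rx_queue);
            if (page == NULL) {
                    page = alloc_pages(__GFP_COMP |
                                       (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                       efx->rx_buffer_order);
                    if (unlikely(page == NULL))
                            return -ENOMEM;
                    /* ... DMA-map the page and stash the address ... */
            }

            /* Carve the page into one or more RX buffers */
            index = rx_queue->added_count & rx_queue->ptr_mask;
            rx_buf = efx_rx_buffer(rx_queue, index);
            rx_buf->page = page;
            /* ... set dma_addr, page_offset, len ... */
            ++rx_queue->added_count;

            return 0;
    }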
447 * @rx_queue: RX descriptor queue
450 * @rx_queue->max_fill. If there is insufficient atomic
457 void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
460 struct efx_nic *efx = rx_queue->efx;
464 if (!rx_queue->refill_enabled)
468 fill_level = (rx_queue->added_count - rx_queue->removed_count);
469 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
470 if (fill_level >= rx_queue->fast_fill_trigger)
474 if (unlikely(fill_level < rx_queue->min_fill)) {
476 rx_queue->min_fill = fill_level;
480 space = rx_queue->max_fill - fill_level;
483 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
486 efx_rx_queue_index(rx_queue), fill_level,
487 rx_queue->max_fill);
490 rc = efx_init_rx_buffers(rx_queue, atomic);
493 efx_schedule_slow_fill(rx_queue);
498 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
500 "to level %d\n", efx_rx_queue_index(rx_queue),
501 rx_queue->added_count - rx_queue->removed_count);
504 if (rx_queue->notified_count != rx_queue->added_count)
505 efx_nic_notify_rx_desc(rx_queue);
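The fast-push path ties it together: compute the fill level from the added/removed counters, bail out if it is already above the trigger, otherwise add batches until max_fill is approached, falling back to the slow-fill timer on allocation failure, and finally notify the NIC of any new descriptors. A condensed sketch; the batch_size computation and the loop condition are assumptions:

    void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
                                            bool atomic)
    {
            struct efx_nic *efx = rx_queue->efx;
            unsigned int fill_level, batch_size;
            int space, rc = 0;

            if (!rx_queue->refill_enabled)
                    return;

            fill_level = rx_queue->added_count - rx_queue->removed_count;
            if (fill_level >= rx_queue->fast_fill_trigger)
                    goto out;

            /* Track the low-water mark for diagnostics */
            if (unlikely(fill_level < rx_queue->min_fill) && fill_level)
                    rx_queue->min_fill = fill_level;

            batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
            space = rx_queue->max_fill - fill_level;

            do {
                    rc = efx_init_rx_buffers(rx_queue, atomic);
                    if (unlikely(rc)) {
                            /* Don't leave the RX queue empty */
                            efx_schedule_slow_fill(rx_queue);
                            goto out;
                    }
            } while ((space -= batch_size) >= batch_size);

    out:
            if (rx_queue->notified_count != rx_queue->added_count)
                    efx_nic_notify_rx_desc(rx_queue);
    }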
522 struct efx_rx_queue *rx_queue;
524 rx_queue = efx_channel_get_rx_queue(channel);
525 efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
550 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
556 skb_record_rx_queue(skb, channel->rx_queue.core_index);