Lines Matching refs:rx_queue

35 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
37 struct efx_nic *efx = rx_queue->efx;
42 if (unlikely(!rx_queue->page_ring))
44 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
45 page = rx_queue->page_ring[index];
49 rx_queue->page_ring[index] = NULL;
51 if (rx_queue->page_remove != rx_queue->page_add)
52 ++rx_queue->page_remove;
56 ++rx_queue->page_recycle_count;
64 ++rx_queue->page_recycle_failed;
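
The fragments above (source lines 35-64) come from efx_reuse_page(), the consume side of the page recycle ring: a free-running page_remove counter is masked with page_ptr_mask to index a power-of-two ring, a hit clears the slot and advances the counter (but never past page_add), and the recycle/failed counters record the outcome. A minimal user-space C model of that lookup, with a hypothetical struct page_ring standing in for the stripped-out driver state:

    #include <stddef.h>

    /* Hypothetical stand-in for the driver's recycle ring state; the
     * field names mirror the rx_queue members in the listing. */
    struct page_ring {
            void **ring;               /* page_ring: power-of-two array */
            unsigned int ptr_mask;     /* page_ptr_mask: size - 1       */
            unsigned int remove, add;  /* page_remove / page_add        */
            unsigned int recycle_count;
            unsigned int recycle_failed;
            unsigned int recycle_full;
    };

    /* Model of the efx_reuse_page() lookup: take the entry under the
     * remove pointer, clear the slot, and advance the pointer only if
     * it has not caught up with the add pointer. "reusable" stands in
     * for the driver's page-refcount test, which the listing omits. */
    static void *ring_reuse(struct page_ring *pr, int reusable)
    {
            unsigned int index;
            void *page;

            if (!pr->ring)             /* no recycle ring allocated */
                    return NULL;

            index = pr->remove & pr->ptr_mask;
            page = pr->ring[index];
            if (!page)
                    return NULL;

            pr->ring[index] = NULL;
            if (pr->remove != pr->add) /* remove must never pass add */
                    ++pr->remove;

            if (reusable) {
                    ++pr->recycle_count;
                    return page;
            }
            ++pr->recycle_failed;      /* page still in use elsewhere */
            return NULL;
    }

Masking a free-running counter is only valid because the ring size is a power of two, which is exactly why the init path further down rounds the size up and stores size - 1 as the mask.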
77 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
78 struct efx_nic *efx = rx_queue->efx;
86 index = rx_queue->page_add & rx_queue->page_ptr_mask;
87 if (rx_queue->page_ring[index] == NULL) {
88 unsigned int read_index = rx_queue->page_remove &
89 rx_queue->page_ptr_mask;
96 ++rx_queue->page_remove;
97 rx_queue->page_ring[index] = page;
98 ++rx_queue->page_add;
101 ++rx_queue->page_recycle_full;
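
Source lines 77-101 are the matching produce side (efx_recycle_rx_page()): a page coming back from the stack is parked in the slot under page_add if that slot is free, otherwise page_recycle_full is bumped and the caller releases the page. Note the read_index check: if page_remove currently points at the slot being written, it is nudged forward first. A sketch continuing the model above:

    /* Model of the store side: returns 1 if the page was parked in
     * the ring, 0 if the ring was full and the caller must free it. */
    static int ring_recycle(struct page_ring *pr, void *page)
    {
            unsigned int index = pr->add & pr->ptr_mask;

            if (pr->ring[index] == NULL) {
                    unsigned int read_index = pr->remove & pr->ptr_mask;

                    /* Don't let the remove pointer re-read the entry
                     * we are about to overwrite. */
                    if (read_index == index)
                            ++pr->remove;
                    pr->ring[index] = page;
                    ++pr->add;
                    return 1;
            }
            ++pr->recycle_full;
            return 0;
    }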
111 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
113 if (unlikely(!rx_queue->page_ring))
118 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
126 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
130 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
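
Lines 111-130 show the per-packet wrappers: a received packet may span several RX buffers (n_frags), so the code steps buffer-to-buffer with efx_rx_buf_next() while recycling or freeing each one. The stepping itself is just a masked increment over the descriptor array, as in this hypothetical helper:

    /* Model of efx_rx_buf_next(): the descriptor array holds
     * ptr_mask + 1 entries, so advancing past the last slot wraps
     * back to index 0. */
    static unsigned int buf_next(unsigned int index, unsigned int ptr_mask)
    {
            return (index + 1) & ptr_mask;
    }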
133 static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
136 struct efx_nic *efx = rx_queue->efx;
141 rx_queue->page_ring = kcalloc(page_ring_size,
142 sizeof(*rx_queue->page_ring), GFP_KERNEL);
143 if (!rx_queue->page_ring)
144 rx_queue->page_ptr_mask = 0;
146 rx_queue->page_ptr_mask = page_ring_size - 1;
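
Lines 133-146 size the recycle ring: kcalloc() gives a zeroed array, and page_ptr_mask is set to size - 1 so a masked AND replaces a modulo; on allocation failure the mask is left 0 and the pointer NULL, which the lookup paths above treat as "no recycle ring". A user-space model continuing the sketch (the power-of-two rounding is an assumption inferred from the mask arithmetic, since the listing omits the sizing line):

    #include <stdlib.h>

    /* Round up to a power of two so "counter & (size - 1)" is a
     * valid ring index. */
    static unsigned int roundup_pow2(unsigned int v)
    {
            unsigned int p = 1;

            while (p < v)
                    p <<= 1;
            return p;
    }

    static void ring_init(struct page_ring *pr, unsigned int want)
    {
            unsigned int size = roundup_pow2(want);

            pr->ring = calloc(size, sizeof(*pr->ring));
            pr->ptr_mask = pr->ring ? size - 1 : 0;
    }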
149 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
151 struct efx_nic *efx = rx_queue->efx;
154 if (unlikely(!rx_queue->page_ring))
158 for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
159 struct page *page = rx_queue->page_ring[i];
171 kfree(rx_queue->page_ring);
172 rx_queue->page_ring = NULL;
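
Teardown (lines 149-172) walks every slot from 0 to page_ptr_mask, releases any page still parked there (the driver also unmaps its DMA state, which the listing elides), then frees the array and NULLs the pointer so later paths see the ring as gone:

    static void ring_fini(struct page_ring *pr)
    {
            unsigned int i;

            if (!pr->ring)
                    return;

            for (i = 0; i <= pr->ptr_mask; i++) {
                    if (pr->ring[i])
                            free(pr->ring[i]);  /* driver: unmap + put_page */
            }
            free(pr->ring);
            pr->ring = NULL;   /* later paths treat this as "no ring" */
    }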
175 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
184 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
185 efx_free_rx_buffers(rx_queue, rx_buf, 1);
190 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
192 struct efx_nic *efx = rx_queue->efx;
199 rx_queue->ptr_mask = entries - 1;
203 efx_rx_queue_index(rx_queue), efx->rxq_entries,
204 rx_queue->ptr_mask);
207 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
209 if (!rx_queue->buffer)
212 rc = efx_nic_probe_rx(rx_queue);
214 kfree(rx_queue->buffer);
215 rx_queue->buffer = NULL;
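
efx_probe_rx_queue() (lines 190-215) follows the usual probe pattern: derive ptr_mask from a power-of-two entry count, allocate the software descriptor array, then undo the allocation if the hardware-specific probe step fails. A sketch with a hypothetical struct rxq and hw_probe callback standing in for the driver types:

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical software-side queue state; field names mirror the
     * rx_queue members in the listing. */
    struct rxq {
            void **buffer;             /* one slot per descriptor */
            unsigned int ptr_mask;     /* entries - 1              */
            unsigned int added_count, removed_count;
            unsigned int min_fill, max_fill, fast_fill_trigger;
            int refill_enabled;
    };

    static int rxq_probe(struct rxq *q, unsigned int entries,
                         int (*hw_probe)(struct rxq *))
    {
            int rc;

            q->ptr_mask = entries - 1;  /* entries: power of two */
            q->buffer = calloc(entries, sizeof(*q->buffer));
            if (!q->buffer)
                    return -ENOMEM;

            rc = hw_probe(q);
            if (rc) {                   /* unwind on failure */
                    free(q->buffer);
                    q->buffer = NULL;
            }
            return rc;
    }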
221 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
224 struct efx_nic *efx = rx_queue->efx;
227 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
228 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
231 rx_queue->added_count = 0;
232 rx_queue->notified_count = 0;
233 rx_queue->granted_count = 0;
234 rx_queue->removed_count = 0;
235 rx_queue->min_fill = -1U;
236 efx_init_rx_recycle_ring(rx_queue);
238 rx_queue->page_remove = 0;
239 rx_queue->page_add = rx_queue->page_ptr_mask + 1;
240 rx_queue->page_recycle_count = 0;
241 rx_queue->page_recycle_failed = 0;
242 rx_queue->page_recycle_full = 0;
256 rx_queue->max_fill = max_fill;
257 rx_queue->fast_fill_trigger = trigger;
258 rx_queue->refill_enabled = true;
261 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
262 rx_queue->core_index, 0);
270 rx_queue->xdp_rxq_info_valid = true;
274 efx_nic_init_rx(rx_queue);
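
efx_init_rx_queue() (lines 221-274) resets the free-running counters, seeds min_fill with -1U as an "untouched" sentinel, and starts page_add one full lap ahead of page_remove (page_ptr_mask + 1). It also registers the queue with the XDP core via the stock xdp_rxq_info_reg() helper before handing the ring to the NIC. The counter reset, continuing the model:

    static void rxq_init(struct rxq *q, struct page_ring *pr)
    {
            q->added_count = 0;
            q->removed_count = 0;
            q->min_fill = -1U;          /* sentinel: no low-water mark yet */

            pr->remove = 0;
            pr->add = pr->ptr_mask + 1; /* one lap ahead of remove */
            pr->recycle_count = 0;
            pr->recycle_failed = 0;
            pr->recycle_full = 0;

            q->refill_enabled = 1;
    }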
277 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
282 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
283 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
285 del_timer_sync(&rx_queue->slow_fill);
286 if (rx_queue->grant_credits)
287 flush_work(&rx_queue->grant_work);
290 if (rx_queue->buffer) {
291 for (i = rx_queue->removed_count; i < rx_queue->added_count;
293 unsigned int index = i & rx_queue->ptr_mask;
295 rx_buf = efx_rx_buffer(rx_queue, index);
296 efx_fini_rx_buffer(rx_queue, rx_buf);
300 efx_fini_rx_recycle_ring(rx_queue);
302 if (rx_queue->xdp_rxq_info_valid)
303 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
305 rx_queue->xdp_rxq_info_valid = false;
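
Shutdown (lines 277-305) first quiesces the async paths (del_timer_sync() on the slow-fill timer, flush_work() on credit granting), then releases every in-flight buffer: each descriptor index between removed_count and added_count, taken through the mask, still owns one. A model of that release loop:

    static void rxq_fini(struct rxq *q, void (*fini_buf)(void *))
    {
            unsigned int i;

            if (!q->buffer)
                    return;

            /* Everything added but not yet removed is still live. */
            for (i = q->removed_count; i < q->added_count; i++)
                    fini_buf(q->buffer[i & q->ptr_mask]);
    }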
308 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
310 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
311 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
313 efx_nic_remove_rx(rx_queue);
315 kfree(rx_queue->buffer);
316 rx_queue->buffer = NULL;
337 void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
346 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
352 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
355 efx_nic_generate_fill_event(rx_queue);
356 ++rx_queue->slow_fill_count;
359 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
361 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
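
Lines 352-361 show the slow-fill fallback: when buffer allocation fails, a kernel timer retries roughly 10 ms later. from_timer() recovers the containing structure from the struct timer_list, and mod_timer() arms the timer; both are stock kernel APIs. A hedged kernel-style sketch (my_rxq and my_slow_fill are hypothetical names; the real handler also posts a fill event to the NIC):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_rxq {
            struct timer_list slow_fill;
            unsigned int slow_fill_count;
    };

    /* Timer callback: recover the queue from the embedded timer_list.
     * Pair with timer_setup(&q->slow_fill, my_slow_fill, 0) at init. */
    static void my_slow_fill(struct timer_list *t)
    {
            struct my_rxq *q = from_timer(q, t, slow_fill);

            /* real driver: efx_nic_generate_fill_event(rx_queue) */
            ++q->slow_fill_count;
    }

    static void my_schedule_slow_fill(struct my_rxq *q)
    {
            mod_timer(&q->slow_fill, jiffies + msecs_to_jiffies(10));
    }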
366 * @rx_queue: Efx RX queue
373 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
376 struct efx_nic *efx = rx_queue->efx;
384 page = efx_reuse_page(rx_queue);
411 index = rx_queue->added_count & rx_queue->ptr_mask;
412 rx_buf = efx_rx_buffer(rx_queue, index);
420 ++rx_queue->added_count;
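
efx_init_rx_buffers() (lines 366-420) first asks the recycle ring for a page via efx_reuse_page() and falls back to a fresh allocation; each buffer then lands at the slot given by added_count masked down to a ring index, after which the counter advances. The indexing step in the model:

    /* Model of the descriptor-fill step: added_count runs free and is
     * masked to a slot; added - removed is the current fill level. */
    static void rxq_add_buffer(struct rxq *q, void *buf)
    {
            q->buffer[q->added_count & q->ptr_mask] = buf;
            ++q->added_count;
    }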
447 * @rx_queue: RX descriptor queue
450 * @rx_queue->max_fill. If there is insufficient atomic
457 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
459 struct efx_nic *efx = rx_queue->efx;
463 if (!rx_queue->refill_enabled)
467 fill_level = (rx_queue->added_count - rx_queue->removed_count);
468 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
469 if (fill_level >= rx_queue->fast_fill_trigger)
473 if (unlikely(fill_level < rx_queue->min_fill)) {
475 rx_queue->min_fill = fill_level;
479 space = rx_queue->max_fill - fill_level;
482 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
485 efx_rx_queue_index(rx_queue), fill_level,
486 rx_queue->max_fill);
489 rc = efx_init_rx_buffers(rx_queue, atomic);
492 efx_schedule_slow_fill(rx_queue);
497 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
499 "to level %d\n", efx_rx_queue_index(rx_queue),
500 rx_queue->added_count - rx_queue->removed_count);
503 if (rx_queue->notified_count != rx_queue->added_count)
504 efx_nic_notify_rx_desc(rx_queue);
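
efx_fast_push_rx_descriptors() (lines 447-504) is the refill policy: the fill level is simply the distance between the free-running counters; nothing happens above fast_fill_trigger, the nonzero low-water mark is recorded in min_fill, and otherwise the queue is topped up toward max_fill, scheduling the slow fill on allocation failure and notifying the NIC of any new descriptors. A model tying the earlier sketches together:

    static void rxq_fast_push(struct rxq *q, void *(*alloc_buf)(void))
    {
            unsigned int fill_level, space;

            if (!q->refill_enabled)
                    return;

            fill_level = q->added_count - q->removed_count;
            if (fill_level >= q->fast_fill_trigger)
                    return;

            if (fill_level < q->min_fill && fill_level)
                    q->min_fill = fill_level;  /* track low-water mark */

            for (space = q->max_fill - fill_level; space; space--) {
                    void *buf = alloc_buf();

                    if (!buf)          /* driver: schedule slow fill */
                            break;
                    rxq_add_buffer(q, buf);
            }
            /* driver: efx_nic_notify_rx_desc() if anything was added */
    }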
520 struct efx_rx_queue *rx_queue;
522 rx_queue = efx_channel_get_rx_queue(channel);
523 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
549 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
555 skb_record_rx_queue(skb, channel->rx_queue.core_index);