Lines Matching refs:rx_queue

76 ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
78 if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
79 return ef4_rx_buffer(rx_queue, 0);
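
The ef4_rx_buf_next() hits above show the basic descriptor-ring wrap: ptr_mask is the ring size minus one (see line 695 below), so once a walk reaches the buffer at index ptr_mask it restarts at index 0. A minimal standalone sketch of that wrap check, using hypothetical demo types rather than the driver's:

    /* Hypothetical miniature ring; the driver keeps struct ef4_rx_buffer
     * entries in a power-of-two array indexed through ptr_mask. */
    struct demo_rx_ring {
        int buffer[8];          /* ring size must be a power of two */
        unsigned int ptr_mask;  /* ring size - 1, here 7 */
    };

    /* Advance to the next buffer, wrapping after the last slot, the same
     * compare-against-ptr_mask check ef4_rx_buf_next() performs. */
    int *demo_rx_buf_next(struct demo_rx_ring *ring, int *buf)
    {
        if (buf == &ring->buffer[ring->ptr_mask])
            return &ring->buffer[0];
        return buf + 1;
    }
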
106 static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
108 struct ef4_nic *efx = rx_queue->efx;
113 if (unlikely(!rx_queue->page_ring))
115 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
116 page = rx_queue->page_ring[index];
120 rx_queue->page_ring[index] = NULL;
122 if (rx_queue->page_remove != rx_queue->page_add)
123 ++rx_queue->page_remove;
127 ++rx_queue->page_recycle_count;
135 ++rx_queue->page_recycle_failed;
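
The ef4_reuse_page() hits show the consumer side of the page recycle ring: two free-running counters (page_remove, page_add) are masked with page_ptr_mask into a power-of-two array of recycled pages, with hit/miss counters for the recycle statistics. A hedged userspace sketch of that consumer logic, with hypothetical demo types standing in for the driver's:

    #include <stddef.h>

    /* Hypothetical recycle ring: free-running add/remove counters,
     * masked into a power-of-two array of recycled objects. */
    struct demo_recycle_ring {
        void **slots;               /* zero-initialised array, power-of-two size */
        unsigned int ptr_mask;      /* array size - 1 */
        unsigned int add, remove;   /* free-running producer/consumer counters */
        unsigned int hits, misses;  /* recycle_count / recycle_failed analogues */
    };

    /* Pop a previously recycled object, or NULL if the slot is empty,
     * mirroring the masked-index lookup in ef4_reuse_page(). */
    void *demo_reuse(struct demo_recycle_ring *ring)
    {
        unsigned int index = ring->remove & ring->ptr_mask;
        void *obj = ring->slots[index];

        if (obj) {
            ring->slots[index] = NULL;
            /* Only advance if the consumer has not caught the producer. */
            if (ring->remove != ring->add)
                ++ring->remove;
            ++ring->hits;
            return obj;
        }
        ++ring->misses;
        return NULL;
    }
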
144 * @rx_queue: Efx RX queue
152 static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
154 struct ef4_nic *efx = rx_queue->efx;
164 page = ef4_reuse_page(rx_queue);
191 index = rx_queue->added_count & rx_queue->ptr_mask;
192 rx_buf = ef4_rx_buffer(rx_queue, index);
198 ++rx_queue->added_count;
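
ef4_init_rx_buffers() is the refill step: it first tries ef4_reuse_page() for a recycled page, then places each new buffer at added_count & ptr_mask and bumps added_count. A small sketch of just that producer-side indexing, again with hypothetical names:

    /* Hypothetical descriptor ring filled through a free-running counter. */
    struct demo_desc_ring {
        void *buffer[8];            /* power-of-two ring of descriptors */
        unsigned int ptr_mask;      /* ring size - 1 */
        unsigned int added_count;   /* free-running producer counter */
    };

    /* Claim the next free slot, as ef4_init_rx_buffers() does with
     * rx_queue->added_count & rx_queue->ptr_mask. */
    void **demo_claim_slot(struct demo_desc_ring *ring)
    {
        unsigned int index = ring->added_count & ring->ptr_mask;

        ++ring->added_count;
        return &ring->buffer[index];
    }
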
227 static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
236 rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
248 struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
249 struct ef4_nic *efx = rx_queue->efx;
256 index = rx_queue->page_add & rx_queue->page_ptr_mask;
257 if (rx_queue->page_ring[index] == NULL) {
258 unsigned read_index = rx_queue->page_remove &
259 rx_queue->page_ptr_mask;
266 ++rx_queue->page_remove;
267 rx_queue->page_ring[index] = page;
268 ++rx_queue->page_add;
271 ++rx_queue->page_recycle_full;
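
ef4_recycle_rx_page() is the matching producer: a page goes into the slot at page_add & page_ptr_mask only if that slot is empty, with page_remove nudged forward when it points at the same slot; an occupied slot is counted as page_recycle_full. A hedged sketch of that store path, using the same demo ring layout as the ef4_reuse_page() sketch above:

    #include <stddef.h>

    /* Same hypothetical recycle ring as in the ef4_reuse_page() sketch. */
    struct demo_recycle_ring {
        void **slots;
        unsigned int ptr_mask;
        unsigned int add, remove;
        unsigned int hits, misses;
    };

    /* Push an object back into the recycle ring if there is room,
     * mirroring the empty-slot check in ef4_recycle_rx_page().
     * Returns 1 on success, 0 if the slot was already occupied. */
    int demo_recycle(struct demo_recycle_ring *ring, void *obj)
    {
        unsigned int index = ring->add & ring->ptr_mask;

        if (ring->slots[index] == NULL) {
            unsigned int read_index = ring->remove & ring->ptr_mask;

            /* The driver steps page_remove forward when the consumer
             * currently points at the slot being filled. */
            if (read_index == index)
                ++ring->remove;
            ring->slots[index] = obj;
            ++ring->add;
            return 1;
        }
        return 0;   /* ring full; the driver counts page_recycle_full here */
    }
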
276 static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
285 ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
286 ef4_free_rx_buffers(rx_queue, rx_buf, 1);
296 struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
298 if (unlikely(!rx_queue->page_ring))
303 rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
311 struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
315 ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
320 * @rx_queue: RX descriptor queue
323 * @rx_queue->@max_fill. If there is insufficient atomic
331 void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
333 struct ef4_nic *efx = rx_queue->efx;
337 if (!rx_queue->refill_enabled)
341 fill_level = (rx_queue->added_count - rx_queue->removed_count);
342 EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
343 if (fill_level >= rx_queue->fast_fill_trigger)
347 if (unlikely(fill_level < rx_queue->min_fill)) {
349 rx_queue->min_fill = fill_level;
353 space = rx_queue->max_fill - fill_level;
356 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
359 ef4_rx_queue_index(rx_queue), fill_level,
360 rx_queue->max_fill);
364 rc = ef4_init_rx_buffers(rx_queue, atomic);
367 if (rx_queue->added_count == rx_queue->removed_count)
368 ef4_schedule_slow_fill(rx_queue);
373 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
375 "to level %d\n", ef4_rx_queue_index(rx_queue),
376 rx_queue->added_count - rx_queue->removed_count);
379 if (rx_queue->notified_count != rx_queue->added_count)
380 ef4_nic_notify_rx_desc(rx_queue);
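
The ef4_fast_push_rx_descriptors() hits lay out the refill policy: the fill level is the difference between the free-running added_count and removed_count, nothing happens while that level is at or above fast_fill_trigger, otherwise the queue is topped up towards max_fill (falling back to a scheduled slow fill if nothing could be added), and the NIC is only notified when notified_count lags added_count. A standalone sketch of just the fill-level arithmetic, with made-up threshold values:

    #include <stdio.h>

    /* Hypothetical refill bookkeeping, mirroring the counters that
     * ef4_fast_push_rx_descriptors() works with. */
    struct demo_fill_state {
        unsigned int added_count;        /* buffers pushed so far */
        unsigned int removed_count;      /* buffers consumed so far */
        unsigned int max_fill;           /* target fill level */
        unsigned int fast_fill_trigger;  /* refill only below this level */
    };

    /* Return how many buffers a fast refill should add right now:
     * nothing while the queue is still at or above the trigger,
     * otherwise the distance from the current level up to max_fill. */
    unsigned int demo_refill_space(const struct demo_fill_state *st)
    {
        unsigned int fill_level = st->added_count - st->removed_count;

        if (fill_level >= st->fast_fill_trigger)
            return 0;
        return st->max_fill - fill_level;
    }

    int main(void)
    {
        struct demo_fill_state st = {
            .added_count = 1000, .removed_count = 900,
            .max_fill = 480, .fast_fill_trigger = 400,
        };

        /* fill_level is 100, below the trigger, so 380 buffers are wanted. */
        printf("refill %u buffers\n", demo_refill_space(&st));
        return 0;
    }
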
385 struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
388 ef4_nic_generate_fill_event(rx_queue);
389 ++rx_queue->slow_fill_count;
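
The slow-fill hit at line 385 uses the kernel's from_timer() helper, a container_of() wrapper: given the address of the timer embedded in struct ef4_rx_queue, it recovers the enclosing queue before generating the fill event. A standalone container_of-style sketch of the same recovery, with hypothetical userspace types:

    #include <stddef.h>
    #include <stdio.h>

    /* Recover a pointer to the enclosing structure from a pointer to one
     * of its members, as the kernel's container_of()/from_timer() do. */
    #define demo_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_timer { int expires; };

    struct demo_rx_queue {
        int id;
        struct demo_timer slow_fill;   /* embedded, like rx_queue->slow_fill */
    };

    /* A timer callback only receives the timer pointer; the queue is
     * recovered the way from_timer(rx_queue, t, slow_fill) does it. */
    void demo_slow_fill_callback(struct demo_timer *t)
    {
        struct demo_rx_queue *rxq =
            demo_container_of(t, struct demo_rx_queue, slow_fill);

        printf("slow fill on queue %d\n", rxq->id);
    }

    int main(void)
    {
        struct demo_rx_queue rxq = { .id = 3 };

        demo_slow_fill_callback(&rxq.slow_fill);
        return 0;
    }
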
392 static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
396 struct ef4_nic *efx = rx_queue->efx;
412 ef4_rx_queue_index(rx_queue), len, max_len,
420 ef4_rx_queue_index(rx_queue), len, max_len);
423 ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
439 struct ef4_rx_queue *rx_queue;
441 rx_queue = ef4_channel_get_rx_queue(channel);
442 ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
461 rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
467 skb_record_rx_queue(skb, channel->rx_queue.core_index);
512 rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
530 void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
533 struct ef4_nic *efx = rx_queue->efx;
534 struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
537 rx_queue->rx_packets++;
539 rx_buf = ef4_rx_buffer(rx_queue, index);
545 ef4_rx_packet__check_len(rx_queue, rx_buf, len);
559 ef4_rx_queue_index(rx_queue), index,
560 (index + n_frags - 1) & rx_queue->ptr_mask, len,
596 rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
606 rx_buf = ef4_rx_buffer(rx_queue, index);
626 struct ef4_rx_queue *rx_queue;
628 rx_queue = ef4_channel_get_rx_queue(channel);
629 ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
632 skb_record_rx_queue(skb, channel->rx_queue.core_index);
652 ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
666 struct ef4_rx_queue *rx_queue;
669 rx_queue = ef4_channel_get_rx_queue(channel);
670 ef4_free_rx_buffers(rx_queue, rx_buf,
686 int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
688 struct ef4_nic *efx = rx_queue->efx;
695 rx_queue->ptr_mask = entries - 1;
699 ef4_rx_queue_index(rx_queue), efx->rxq_entries,
700 rx_queue->ptr_mask);
703 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
705 if (!rx_queue->buffer)
708 rc = ef4_nic_probe_rx(rx_queue);
710 kfree(rx_queue->buffer);
711 rx_queue->buffer = NULL;
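
ef4_probe_rx_queue() sizes the descriptor ring and derives ptr_mask as entries - 1, which only yields a valid wrap mask when entries is a power of two; the buffer array is then allocated with kcalloc() and released again if the NIC-level probe fails. A hedged userspace sketch of the sizing step; the rounding helper is an assumption standing in for however the driver arrives at a power-of-two entry count:

    #include <stdlib.h>

    /* Hypothetical stand-in for the driver's descriptor bookkeeping. */
    struct demo_rx_queue {
        void **buffer;           /* one slot per descriptor */
        unsigned int ptr_mask;   /* entries - 1, valid only for power-of-two sizes */
    };

    /* Round a requested size up to the next power of two so that
     * "index & ptr_mask" wraps correctly. */
    static unsigned int demo_roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    /* Mirror the probe step: size the ring, derive the mask, allocate a
     * zeroed buffer array (kcalloc-style). Returns 0 on success. */
    int demo_probe_rx_queue(struct demo_rx_queue *rxq, unsigned int requested)
    {
        unsigned int entries = demo_roundup_pow_of_two(requested);

        rxq->ptr_mask = entries - 1;
        rxq->buffer = calloc(entries, sizeof(*rxq->buffer));
        return rxq->buffer ? 0 : -1;
    }
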
718 struct ef4_rx_queue *rx_queue)
736 rx_queue->page_ring = kcalloc(page_ring_size,
737 sizeof(*rx_queue->page_ring), GFP_KERNEL);
738 if (!rx_queue->page_ring)
739 rx_queue->page_ptr_mask = 0;
741 rx_queue->page_ptr_mask = page_ring_size - 1;
744 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
746 struct ef4_nic *efx = rx_queue->efx;
749 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
750 "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));
753 rx_queue->added_count = 0;
754 rx_queue->notified_count = 0;
755 rx_queue->removed_count = 0;
756 rx_queue->min_fill = -1U;
757 ef4_init_rx_recycle_ring(efx, rx_queue);
759 rx_queue->page_remove = 0;
760 rx_queue->page_add = rx_queue->page_ptr_mask + 1;
761 rx_queue->page_recycle_count = 0;
762 rx_queue->page_recycle_failed = 0;
763 rx_queue->page_recycle_full = 0;
777 rx_queue->max_fill = max_fill;
778 rx_queue->fast_fill_trigger = trigger;
779 rx_queue->refill_enabled = true;
782 ef4_nic_init_rx(rx_queue);
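
ef4_init_rx_queue() resets the free-running counters and seeds the recycle ring so that page_remove starts at 0 while page_add starts at page_ptr_mask + 1 (line 760), one full ring length ahead; it then records max_fill and fast_fill_trigger and re-enables refill before handing the queue to the NIC layer. A small sketch of that reset, with made-up fill thresholds since the values the driver computes are not part of these hits:

    /* Hypothetical queue state carrying the counters these hits reset. */
    struct demo_rx_queue_state {
        unsigned int added_count, notified_count, removed_count;
        unsigned int min_fill;
        unsigned int page_add, page_remove, page_ptr_mask;
        unsigned int max_fill, fast_fill_trigger;
        int refill_enabled;
    };

    /* Reset the bookkeeping the way ef4_init_rx_queue() does; max_fill
     * and the trigger are illustrative values, not the driver's formula. */
    void demo_init_rx_queue(struct demo_rx_queue_state *q)
    {
        q->added_count = 0;
        q->notified_count = 0;
        q->removed_count = 0;
        q->min_fill = -1U;

        /* Producer starts one whole ring ahead of the consumer. */
        q->page_remove = 0;
        q->page_add = q->page_ptr_mask + 1;

        q->max_fill = 480;            /* assumed value for illustration */
        q->fast_fill_trigger = 448;   /* assumed value for illustration */
        q->refill_enabled = 1;
    }
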
785 void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
788 struct ef4_nic *efx = rx_queue->efx;
791 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
792 "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
794 del_timer_sync(&rx_queue->slow_fill);
797 if (rx_queue->buffer) {
798 for (i = rx_queue->removed_count; i < rx_queue->added_count;
800 unsigned index = i & rx_queue->ptr_mask;
801 rx_buf = ef4_rx_buffer(rx_queue, index);
802 ef4_fini_rx_buffer(rx_queue, rx_buf);
807 for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
808 struct page *page = rx_queue->page_ring[i];
820 kfree(rx_queue->page_ring);
821 rx_queue->page_ring = NULL;
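
ef4_fini_rx_queue() drains whatever is still outstanding: every index from removed_count up to added_count is masked into the ring and its buffer torn down, after which the recycle ring's remaining pages are released and the page_ring array itself freed. A sketch of that drain loop over free-running counters, with hypothetical names:

    #include <stdlib.h>

    struct demo_rx_queue {
        void **buffer;               /* power-of-two descriptor ring */
        unsigned int ptr_mask;       /* ring size - 1 */
        unsigned int added_count;    /* free-running producer counter */
        unsigned int removed_count;  /* free-running consumer counter */
    };

    /* Free every buffer still sitting between the consumer and producer
     * counters, as ef4_fini_rx_queue() walks removed_count..added_count. */
    void demo_fini_rx_queue(struct demo_rx_queue *rxq)
    {
        unsigned int i;

        for (i = rxq->removed_count; i != rxq->added_count; i++) {
            unsigned int index = i & rxq->ptr_mask;

            free(rxq->buffer[index]);
            rxq->buffer[index] = NULL;
        }
    }
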
824 void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
826 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
827 "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));
829 ef4_nic_remove_rx(rx_queue);
831 kfree(rx_queue->buffer);
832 rx_queue->buffer = NULL;