Lines matching refs:rx_buf (judging by the helper names, ice_clean_rx_irq, ice_construct_skb, ice_put_rx_buf, these are matches from the Intel ice Ethernet driver's Rx path, apparently ice_txrx.c)

391 	if (!rx_ring->rx_buf)
406 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
408 if (!rx_buf->page)
414 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
415 rx_buf->page_offset,
420 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
422 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
424 rx_buf->page = NULL;
425 rx_buf->page_offset = 0;
432 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
464 kfree(rx_ring->rx_buf);
465 rx_ring->rx_buf = NULL;
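Lines 391-432 are the teardown loop (ice_clean_rx_ring()), and 464-465 free the software ring itself. Below is a condensed sketch of the per-buffer cleanup these fragments come from; the structure follows the upstream driver, and ICE_RX_DMA_ATTR is the unmap-attrs value that line 420 truncates:

for (i = 0; i < rx_ring->count; i++) {
	struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

	if (!rx_buf->page)
		continue;

	/* invalidate cache lines that may have been written by HW */
	dma_sync_single_range_for_cpu(dev, rx_buf->dma,
				      rx_buf->page_offset,
				      rx_ring->rx_buf_len,
				      DMA_FROM_DEVICE);

	/* free resources associated with the mapping */
	dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
			     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
	/* drop every reference the driver still holds on the page */
	__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

	rx_buf->page = NULL;
	rx_buf->page_offset = 0;
}
memset(rx_ring->rx_buf, 0,
       array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));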
492 WARN_ON(rx_ring->rx_buf);
493 rx_ring->rx_buf =
494 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
495 if (!rx_ring->rx_buf)
519 kfree(rx_ring->rx_buf);
520 rx_ring->rx_buf = NULL;
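The allocation side (lines 492-495, ice_setup_rx_ring()) is the mirror image: one kcalloc() sized by the descriptor count, with lines 519-520 unwinding it if a later step fails. A minimal sketch, with everything between allocation and unwind elided:

WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf),
			  GFP_KERNEL);
if (!rx_ring->rx_buf)
	return -ENOMEM;

/* ... allocate and map the descriptor ring; if that fails: */
kfree(rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
return -ENOMEM;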
554 * @rx_buf: Rx buffer to store the XDP action
562 struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
690 * @bi: rx_buf struct to modify
760 bi = &rx_ring->rx_buf[ntu];
783 bi = rx_ring->rx_buf;
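Lines 760-783 are the refill loop (ice_alloc_rx_bufs()): bi tracks the software buffer for the next-to-use descriptor and snaps back to the start of the rx_buf array when the ring wraps. The wrap pattern, with the page mapping and descriptor write omitted; cleaned_count here stands for the refill budget the caller passes in:

u16 ntu = rx_ring->next_to_use;
struct ice_rx_buf *bi = &rx_ring->rx_buf[ntu];

do {
	/* ... ensure bi has a mapped page, then write bi->dma +
	 * bi->page_offset into the corresponding Rx descriptor ...
	 */
	bi++;
	ntu++;
	if (unlikely(ntu == rx_ring->count)) {
		/* wrap the buffer cursor and the index together */
		bi = rx_ring->rx_buf;
		ntu = 0;
	}
} while (--cleaned_count);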
801 * @rx_buf: Rx buffer to adjust
810 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
814 rx_buf->page_offset ^= size;
817 rx_buf->page_offset += size;
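ice_rx_buf_adjust_pg_offset() (lines 810-817) carries both recycling strategies in one helper: with 4 KiB pages each buffer is half a page, so XOR-ing the offset with the buffer size flips between the two halves, while with larger pages the offset simply walks forward. Reconstructed around the two matched assignments:

static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to the other half-page buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next slot within the page */
	rx_buf->page_offset += size;
#endif
}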
823 * @rx_buf: buffer containing the page
831 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
833 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
834 struct page *page = rx_buf->page;
842 if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
847 if (rx_buf->page_offset > ICE_LAST_OFFSET)
857 rx_buf->pagecnt_bias = USHRT_MAX;
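ice_can_reuse_rx_page() (lines 831-857) is the recycling gate. The rx_buf->pgcnt - pagecnt_bias > 1 test asks whether anyone besides the driver still references the page (pgcnt was snapshotted in ice_get_rx_buf(), lines 952-954), and the USHRT_MAX reset restocks the driver's pre-charged references before the bias drains to zero. A sketch under those assumptions; dev_page_is_reusable() is the stock kernel helper for rejecting remote-node and pfmemalloc pages:

static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* never recycle remote-node or pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* reuse only if the driver is the page's sole owner */
	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
		return false;
#else
	/* larger pages: stop once the offset passes the last full buffer */
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif

	/* about to run out of pre-charged references: top them back up */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}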
867 * @rx_buf: buffer containing page to add
870 * This function will add the data contained in rx_buf->page to the xdp buf.
875 struct ice_rx_buf *rx_buf, const unsigned int size)
893 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
894 rx_buf->page_offset, size);
901 if (page_is_pfmemalloc(rx_buf->page))
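ice_add_xdp_frag() (lines 875-901) appends rx_buf->page to a multi-buffer xdp_buff: the fragment goes straight into the buff's skb_shared_info via __skb_fill_page_desc_noacc() (no data access, so no cache cost), and a single pfmemalloc page marks the whole buffer. The core of it, with the nr_frags overflow check elided:

struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

if (!xdp_buff_has_frags(xdp)) {
	sinfo->nr_frags = 0;
	sinfo->xdp_frags_size = 0;
	xdp_buff_set_frags_flag(xdp);
}

__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
			   rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;

/* one pfmemalloc page taints the whole multi-buffer frame */
if (page_is_pfmemalloc(rx_buf->page))
	xdp_buff_set_frag_pfmemalloc(xdp);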
920 new_buf = &rx_ring->rx_buf[nta];
949 struct ice_rx_buf *rx_buf;
951 rx_buf = &rx_ring->rx_buf[ntc];
952 rx_buf->pgcnt =
954 page_count(rx_buf->page);
958 prefetchw(rx_buf->page);
961 return rx_buf;
963 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
964 rx_buf->page_offset, size,
968 rx_buf->pagecnt_bias--;
970 return rx_buf;
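ice_get_rx_buf() (lines 949-970) fetches the buffer for the current descriptor. The split assignment at 952/954 is a PAGE_SIZE conditional: the ownership snapshot is only needed where pages are split in half. The helper then syncs just the bytes the NIC wrote and pre-pays one page reference by dropping pagecnt_bias. Reassembled from the matches:

static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       const u16 ntc)
{
	struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[ntc];

	/* snapshot the page refcount before handing the data out */
	rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;

	/* sync only the region the hardware actually wrote */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* the consumer now owns one of the driver's references */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}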
1040 struct ice_rx_buf *rx_buf;
1058 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
1080 skb_add_rx_frag(skb, 0, rx_buf->page,
1081 rx_buf->page_offset + headlen, size,
1089 rx_buf->act = ICE_SKB_CONSUMED;
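Lines 1040-1089 are skb construction (ice_construct_skb()): the first rx_buf of the frame sits at rx_ring->first_desc, the headers are copied into the skb's linear area, and any remaining payload is attached in place as a page fragment before the buffer is marked ICE_SKB_CONSUMED for the put path. The frag attach, roughly; headlen is the copied header length and truesize the driver's per-buffer accounting value (xdp->frame_sz in recent trees), both computed earlier in the function:

if (size > headlen) {
	size -= headlen;
	/* leave the payload in the page and attach it as frag 0 */
	skb_add_rx_frag(skb, 0, rx_buf->page,
			rx_buf->page_offset + headlen, size,
			truesize);
}
/* tell ice_put_rx_buf() the stack now owns a reference */
rx_buf->act = ICE_SKB_CONSUMED;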
1110 * @rx_buf: Rx buffer to pull data from
1112 * This function will clean up the contents of the rx_buf. It will either
1116 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
1118 if (!rx_buf)
1121 if (ice_can_reuse_rx_page(rx_buf)) {
1123 ice_reuse_rx_page(rx_ring, rx_buf);
1126 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1129 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1133 rx_buf->page = NULL;
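ice_put_rx_buf() (lines 1116-1133) closes the loop: a page that passes ice_can_reuse_rx_page() is flipped or advanced and queued for hardware again via ice_reuse_rx_page() (line 920 shows its new_buf target); anything else is unmapped and its remaining biased references dropped in one __page_frag_cache_drain() call. Sketch:

static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand the buffer back to hardware */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* the stack keeps the page: unmap it and drop our refs */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}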
1177 struct ice_rx_buf *rx_buf;
1218 rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
1223 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1231 } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
1241 ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
1242 if (rx_buf->act == ICE_XDP_PASS)
1259 rx_buf->act = ICE_XDP_CONSUMED;
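Lines 1177-1259 are the hot loop (ice_clean_rx_irq()): each completed descriptor either seeds a fresh xdp_buff, with hard_start pointing back past the headroom reserved in front of the data, or is appended as a fragment; after the EOP buffer the XDP program runs and its verdict lands in rx_buf->act (line 1259 is the error path forcing ICE_XDP_CONSUMED). A sketch reassembled from the matches; offset stands for the ring's configured headroom and construct_skb for the pass-to-stack label in the upstream loop:

rx_buf = ice_get_rx_buf(rx_ring, size, ntc);

if (!xdp->data) {
	void *hard_start;

	/* first buffer of the frame: point hard_start back past the
	 * reserved headroom and initialize the xdp_buff around it
	 */
	hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
		     offset;
	xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
	/* no room left in the frag table: bail out; the error path
	 * marks the gathered buffers ICE_XDP_CONSUMED
	 */
	break;
}

/* ... once EOP is seen, run the program and record the verdict ... */
ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
if (rx_buf->act == ICE_XDP_PASS)
	goto construct_skb;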
1301 struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];