Lines matching refs:rx_buf (cross-reference listing: each entry is a source line number followed by the line that references rx_buf; the ef4_* names identify the sfc Falcon driver's RX path)

76 ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
78 if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
81 return rx_buf + 1;
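Lines 76-81 are the ring-advance helper. A minimal reconstruction of the whole function, assuming the elided branch at line 80 wraps back to slot 0 as in the upstream sfc driver:

/* Return the next buffer in the ring, wrapping past the last slot.
 * ptr_mask is the ring size minus one, so ef4_rx_buffer(rx_queue,
 * rx_queue->ptr_mask) is the final descriptor.
 */
static struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return ef4_rx_buffer(rx_queue, 0);	/* assumed wrap target */
	else
		return rx_buf + 1;
}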
85 struct ef4_rx_buffer *rx_buf,
88 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
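Lines 85-88 are from the DMA-sync helper. A sketch of the enclosing function, assuming the sync direction is DMA_FROM_DEVICE (received data flowing to the CPU):

static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
				      struct ef4_rx_buffer *rx_buf,
				      unsigned int len)
{
	/* Make len bytes of received data visible to the CPU. */
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}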
155 struct ef4_rx_buffer *rx_buf;
192 rx_buf = ef4_rx_buffer(rx_queue, index);
193 rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
194 rx_buf->page = page;
195 rx_buf->page_offset = page_offset + efx->rx_ip_align;
196 rx_buf->len = efx->rx_dma_len;
197 rx_buf->flags = 0;
204 rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
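Line 155 declares rx_buf inside the queue-fill function; lines 192-204 initialise one descriptor per buffer carved out of a DMA-mapped page, and line 204 marks the final buffer of each page so recycling knows where the page reference lives. A condensed sketch of the inner loop, assuming the rx_page_buf_step stride and per-buffer get_page() of the upstream sfc driver:

	/* Carve the page into rx_dma_len buffers, reserving rx_ip_align
	 * so the IP header lands word-aligned after the Ethernet header.
	 */
	do {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = ef4_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
		rx_buf->page = page;
		rx_buf->page_offset = page_offset + efx->rx_ip_align;
		rx_buf->len = efx->rx_dma_len;
		rx_buf->flags = 0;
		++rx_queue->added_count;
		get_page(page);	/* one reference per buffer (assumed) */
		dma_addr += efx->rx_page_buf_step;
		page_offset += efx->rx_page_buf_step;
	} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

	/* Only the last buffer in the page carries this flag; unmap
	 * and recycle key off it.
	 */
	rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;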
214 struct ef4_rx_buffer *rx_buf)
216 struct page *page = rx_buf->page;
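Lines 214-216 open the unmap helper. A sketch of the rest of it, assuming the DMA handle is kept in a per-page ef4_rx_page_state at the start of the page, as the upstream driver does:

static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct ef4_rx_page_state *state = page_address(page);

		/* The whole (possibly compound) page was mapped once;
		 * individual buffers are just offsets into that mapping.
		 */
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}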
228 struct ef4_rx_buffer *rx_buf,
232 if (rx_buf->page) {
233 put_page(rx_buf->page);
234 rx_buf->page = NULL;
236 rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
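Lines 228-236 are the freeing loop: drop each buffer's page reference, clear the pointer, and advance with ef4_rx_buf_next(). Reassembled, with the countdown on the buffer count assumed:

static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
				struct ef4_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}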
245 struct ef4_rx_buffer *rx_buf)
247 struct page *page = rx_buf->page;
253 if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
272 ef4_unmap_rx_buffer(efx, rx_buf);
273 put_page(rx_buf->page);
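Lines 245-273 are the page-recycle path. Line 253 shows that only the buffer flagged EF4_RX_BUF_LAST_IN_PAGE may recycle its page, and lines 272-273 are the fallback when no recycle-ring slot is free. A simplified sketch; the page_ring bookkeeping (including a read-pointer adjustment the real code makes) is assumed from the upstream driver:

static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
	struct ef4_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned index;

	/* Only the final buffer in a page holds the recyclable ref. */
	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		/* Park the page for reuse by the next ring fill. */
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}

	/* Recycle ring full: unmap the page and drop the reference. */
	++rx_queue->page_recycle_full;
	ef4_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}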
277 struct ef4_rx_buffer *rx_buf)
280 if (rx_buf->page)
281 put_page(rx_buf->page);
284 if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
285 ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
286 ef4_free_rx_buffers(rx_queue, rx_buf, 1);
288 rx_buf->page = NULL;
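Lines 277-288 tear one buffer down at queue shutdown: a put_page() for the buffer's own reference, then, via the LAST_IN_PAGE buffer, the unmap and the final free. Reassembled:

static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{
	/* Drop the reference taken for this buffer at fill time. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* The last buffer in a page also owns the DMA mapping and the
	 * page's base reference.
	 */
	if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
		ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
		ef4_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}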
293 struct ef4_rx_buffer *rx_buf,
302 ef4_recycle_rx_page(channel, rx_buf);
303 rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
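Lines 293-303 apply ef4_recycle_rx_page() to every fragment of a scattered packet. Reassembled, with the n_frags countdown assumed:

static void ef4_recycle_rx_pages(struct ef4_channel *channel,
				 struct ef4_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	do {
		ef4_recycle_rx_page(channel, rx_buf);
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}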
308 struct ef4_rx_buffer *rx_buf,
313 ef4_recycle_rx_pages(channel, rx_buf, n_frags);
315 ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
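Lines 308-315 discard a packet in two steps: recycle the pages first (while the LAST_IN_PAGE flags are still meaningful), then release the buffer references. Reassembled:

static void ef4_discard_rx_packet(struct ef4_channel *channel,
				  struct ef4_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	ef4_recycle_rx_pages(channel, rx_buf, n_frags);
	ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}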
393 struct ef4_rx_buffer *rx_buf,
397 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
405 rx_buf->flags |= EF4_RX_PKT_DISCARD;
407 if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
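Lines 393-407 validate a completed length against the usable buffer size (buffer length minus the NIC's trailing padding, line 397). Oversize packets are flagged for discard (line 405), and a packet that overran the buffer itself trips workaround 8071 (line 407). A sketch; the logging and the reset in the overrun branch are assumptions based on the upstream driver:

static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
				     struct ef4_rx_buffer *rx_buf,
				     int len)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded; whether it is also a hardware
	 * fault depends on how far it overran.
	 */
	rx_buf->flags |= EF4_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
		/* DMA overran the buffer: assumed to log and schedule
		 * an RX recovery reset, as upstream does.
		 */
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d seriously overlength RX event (%#x > %#x)\n",
				  ef4_rx_queue_index(rx_queue), len, max_len);
		ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	}
}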
430 ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
442 ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
449 skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
454 rx_buf->page, rx_buf->page_offset,
455 rx_buf->len);
456 rx_buf->page = NULL;
457 skb->len += rx_buf->len;
461 rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
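Lines 430-461 are the GRO fast path: the packet is attached to a napi_get_frags() skb page by page, ownership of each page moves to the skb (line 456 clears rx_buf->page), and the buffers are freed only if no skb could be allocated (line 442). A condensed sketch, assuming napi_gro_frags() delivery and the usual truesize accounting:

static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct ef4_rx_queue *rx_queue =
			ef4_channel_get_rx_queue(channel);

		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		/* The skb takes over this fragment's page reference. */
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;
		rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}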
474 struct ef4_rx_buffer *rx_buf,
490 EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);
498 if (rx_buf->len > hdr_len) {
499 rx_buf->page_offset += hdr_len;
500 rx_buf->len -= hdr_len;
504 rx_buf->page, rx_buf->page_offset,
505 rx_buf->len);
506 rx_buf->page = NULL;
507 skb->len += rx_buf->len;
508 skb->data_len += rx_buf->len;
512 rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
515 __free_pages(rx_buf->page, efx->rx_buffer_order);
516 rx_buf->page = NULL;
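Lines 474-516 build an skb for the non-GRO path: hdr_len bytes of headers are copied into the linear area (the paranoid check at line 490 guards that memcpy), any remaining payload stays in the pages and is attached as fragments (lines 498-508), and if the whole packet fit in the header copy the page is freed outright (lines 515-516). A condensed sketch:

static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
				     struct ef4_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Linear room for alignment, the hardware prefix and headers. */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL))
		return NULL;

	EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	if (rx_buf->len > hdr_len) {
		/* Payload remains: hand the pages over as fragments. */
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;
			rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		/* Everything was copied; the page can go now. */
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}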
535 struct ef4_rx_buffer *rx_buf;
539 rx_buf = ef4_rx_buffer(rx_queue, index);
540 rx_buf->flags |= flags;
545 ef4_rx_packet__check_len(rx_queue, rx_buf, len);
553 WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
554 rx_buf->flags |= EF4_RX_PKT_DISCARD;
561 (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
562 (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");
567 if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
569 ef4_discard_rx_packet(channel, rx_buf, n_frags);
574 rx_buf->len = len;
579 ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
584 prefetch(ef4_rx_buf_va(rx_buf));
586 rx_buf->page_offset += efx->rx_prefix_size;
587 rx_buf->len -= efx->rx_prefix_size;
596 rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
599 ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
601 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
602 ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
606 rx_buf = ef4_rx_buffer(rx_queue, index);
607 ef4_recycle_rx_pages(channel, rx_buf, n_frags);
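Lines 535-607 are the RX completion handler: look the buffer up (539) and merge the event flags (540); validate single-fragment lengths (545) or sanity-check scatter totals (553-554); log the event (561-562); bail out early on a discard (567-569); otherwise record the length (574), DMA-sync every fragment (579, 599, 602), strip the hardware prefix (586-587) and recycle the pages (606-607) before parking the packet for __ef4_rx_packet(). A condensed sketch of that flow, with the ordering assumed from the upstream driver:

void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_rx_buffer *rx_buf;

	rx_buf = ef4_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the completed length against the fragment count. */
	if (n_frags == 1) {
		if (!(flags & EF4_RX_PKT_PREFIX_LEN))
			ef4_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len)) {
		/* Inconsistent scatter is only legitimate as an
		 * explicit zero-length discard request.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
		rx_buf->flags |= EF4_RX_PKT_DISCARD;
	}

	if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
		ef4_rx_flush_packet(channel);	/* finish the held packet */
		ef4_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Sync the first fragment and warm the cache with its headers. */
	ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	prefetch(ef4_rx_buf_va(rx_buf));

	/* The hardware prefix sits in front of the packet data. */
	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		unsigned int i;

		/* Middle fragments are full; only the last is partial. */
		for (i = 1; i < n_frags - 1; i++) {
			rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
			ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments synced, so the pages can be recycled now. */
	rx_buf = ef4_rx_buffer(rx_queue, index);
	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Hold the packet so headers prefetch before __ef4_rx_packet(). */
	ef4_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}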
618 struct ef4_rx_buffer *rx_buf,
622 u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);
624 skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
629 ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
636 if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
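Lines 618-636 are the non-GRO delivery path: cap the header copy at EF4_SKB_HEADERS (622), build the skb (624), free the buffers on allocation failure (629), and set CHECKSUM_UNNECESSARY only when the NIC vouched for the checksum (636). Reassembled as a sketch:

static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
			   struct ef4_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

	skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct ef4_rx_queue *rx_queue =
			ef4_channel_get_rx_queue(channel);

		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Claim a valid checksum only if the hardware verified it. */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up the stack. */
	netif_receive_skb(skb);
}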
651 struct ef4_rx_buffer *rx_buf =
653 u8 *eh = ef4_rx_buf_va(rx_buf);
658 if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
659 rx_buf->len = le16_to_cpup((__le16 *)
663 * loopback layer, and free the rx_buf here
668 ef4_loopback_rx_packet(efx, eh, rx_buf->len);
670 ef4_free_rx_buffers(rx_queue, rx_buf,
676 rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;
678 if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
679 ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
681 ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
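Lines 651-681 are __ef4_rx_packet(), which finishes the pipelined packet: read the true length from the hardware prefix if present (658-659), short-circuit into the loopback self-test and free the buffers there (the comment at line 663, plus 668-670), drop the checksum claim when RX checksumming is disabled (676), then route TCP packets through GRO (678-679) and everything else through ef4_rx_deliver() (681). A sketch, assuming rx_packet_len_offset locates the length field within the prefix:

void __ef4_rx_packet(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_buffer *rx_buf =
		ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = ef4_rx_buf_va(rx_buf);

	/* The prefix length already excludes the prefix itself. */
	if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
			(eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to
	 * the loopback layer, and free the rx_buf here.
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct ef4_rx_queue *rx_queue =
			ef4_channel_get_rx_queue(channel);

		ef4_loopback_rx_packet(efx, eh, rx_buf->len);
		ef4_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
		ef4_rx_packet_gro(channel, rx_buf,
				  channel->rx_pkt_n_frags, eh);
	else
		ef4_rx_deliver(channel, eh, rx_buf,
			       channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}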
789 struct ef4_rx_buffer *rx_buf;
801 rx_buf = ef4_rx_buffer(rx_queue, index);
802 ef4_fini_rx_buffer(rx_queue, rx_buf);
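Lines 789-802 come from queue teardown: every buffer between the read and write pointers is still owned by the driver and gets ef4_fini_rx_buffer(). A sketch of that loop; the removed_count/added_count walk is assumed from the upstream driver:

	/* Release RX buffers between the read and write pointers. */
	if (rx_queue->buffer) {
		unsigned int i;

		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;

			rx_buf = ef4_rx_buffer(rx_queue, index);
			ef4_fini_rx_buffer(rx_queue, rx_buf);
		}
	}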