Lines matching refs: buff

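The matches below come from the aQuantia Atlantic NIC driver's ring handling (aq_ring); the leading number on each line is the line number inside the source file.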
23 static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
38 page_ref_inc(buff->rxdata.page);
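The two matches above fall in aq_get_rxpages_xdp(), the helper that takes an extra reference on every RX page handed over to an XDP buffer so the pages outlive their ring slot. A minimal sketch of how the matched lines fit together (the walk over extra XDP fragments is an assumption about the surrounding code, not part of the matches):

    static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
                                   struct xdp_buff *xdp)
    {
            struct skb_shared_info *sinfo;
            int i;

            /* Pin any extra pages attached to a multi-buffer XDP frame. */
            if (xdp_buff_has_frags(xdp)) {
                    sinfo = xdp_get_shared_info_from_buff(xdp);
                    for (i = 0; i < sinfo->nr_frags; i++)
                            page_ref_inc(skb_frag_page(&sinfo->frags[i]));
            }

            /* Pin the head page owned by this descriptor (line 38 above). */
            page_ref_inc(buff->rxdata.page);
    }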
292 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
294 if (likely(buff->is_mapped)) {
295 if (unlikely(buff->is_sop)) {
296 if (!buff->is_eop &&
297 buff->eop_index != 0xffffU &&
299 buff->eop_index,
303 dma_unmap_single(dev, buff->pa, buff->len,
306 dma_unmap_page(dev, buff->pa, buff->len,
311 if (likely(!buff->is_eop))
314 if (buff->skb) {
317 self->stats.tx.bytes += buff->skb->len;
319 dev_kfree_skb_any(buff->skb);
320 } else if (buff->xdpf) {
323 self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
325 xdp_return_frame_rx_napi(buff->xdpf);
329 buff->skb = NULL;
330 buff->xdpf = NULL;
331 buff->pa = 0U;
332 buff->eop_index = 0xffffU;
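The matches at lines 292-332 are the per-descriptor TX completion step: the DMA mapping is released (dma_unmap_single() for the linear start-of-packet buffer, dma_unmap_page() for follow-on fragments), the packet's skb or XDP frame is freed once the end-of-packet descriptor is reached, and the slot is reset. A condensed sketch stitched together from those matches (the loop structure and the skipped eop_index range check at lines 296-299 are simplifications):

    struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
    struct device *dev = aq_nic_get_dev(self->aq_nic);

    if (likely(buff->is_mapped)) {
            if (unlikely(buff->is_sop))
                    dma_unmap_single(dev, buff->pa, buff->len, DMA_TO_DEVICE);
            else
                    dma_unmap_page(dev, buff->pa, buff->len, DMA_TO_DEVICE);
    }

    if (buff->is_eop) {
            /* Only the last descriptor of a packet owns the skb/xdp_frame. */
            if (buff->skb) {
                    self->stats.tx.bytes += buff->skb->len;
                    dev_kfree_skb_any(buff->skb);
            } else if (buff->xdpf) {
                    self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
                    xdp_return_frame_rx_napi(buff->xdpf);
            }
    }

    /* Reset the slot so it can be reused. */
    buff->skb = NULL;
    buff->xdpf = NULL;
    buff->pa = 0U;
    buff->eop_index = 0xffffU;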
340 struct aq_ring_buff_s *buff,
346 if (unlikely(buff->is_cso_err)) {
353 if (buff->is_ip_cso) {
359 if (buff->is_udp_cso || buff->is_tcp_cso)
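Lines 340-359 are the RX checksum helper: a descriptor flagged with is_cso_err leaves the skb at CHECKSUM_NONE, otherwise each layer the hardware validated (IP header, then TCP/UDP) raises the skb's checksum-unnecessary level. A sketch of that logic, assuming the helper first bails out when RX checksum offload is disabled (the NETIF_F_RXCSUM gate and the error statistics update are not among the matches):

    static void aq_rx_checksum(struct aq_ring_s *self,
                               struct aq_ring_buff_s *buff,
                               struct sk_buff *skb)
    {
            if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
                    return;

            if (unlikely(buff->is_cso_err)) {
                    skb->ip_summed = CHECKSUM_NONE; /* hardware reported a checksum error */
                    return;
            }

            if (buff->is_ip_cso)
                    __skb_incr_checksum_unnecessary(skb);
            else
                    skb->ip_summed = CHECKSUM_NONE;

            if (buff->is_udp_cso || buff->is_tcp_cso)
                    __skb_incr_checksum_unnecessary(skb);
    }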
388 struct aq_ring_buff_s *buff)
401 aq_get_rxpages_xdp(buff, xdp);
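Lines 388-401 are aq_xdp_build_skb(): when the XDP verdict lets a packet through, the xdp_buff is converted into an sk_buff, and aq_get_rxpages_xdp() then pins the pages the new skb refers to. A short sketch using the generic kernel helpers this conversion relies on (the exact error handling is assumed):

    static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
                                            struct net_device *dev,
                                            struct aq_ring_buff_s *buff)
    {
            struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
            struct sk_buff *skb;

            if (unlikely(!xdpf))
                    return NULL;

            skb = xdp_build_skb_from_frame(xdpf, dev);
            if (!skb)
                    return NULL;

            /* The skb now points at the RX pages; take references (line 401). */
            aq_get_rxpages_xdp(buff, xdp);
            return skb;
    }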
408 struct aq_ring_buff_s *buff)
424 return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
435 skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
453 aq_get_rxpages_xdp(buff, xdp);
462 aq_get_rxpages_xdp(buff, xdp);
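Lines 408-462 are the XDP program dispatch (aq_xdp_run_prog() is named at line 807 below): with no program attached, or on XDP_PASS, the buffer is turned into an skb via aq_xdp_build_skb() (lines 424 and 435); for verdicts that move the pages out of the RX ring's hands, most likely XDP_TX and XDP_REDIRECT, aq_get_rxpages_xdp() is called instead (lines 453 and 462) so the pages stay valid while the frame is transmitted or redirected.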
486 struct aq_ring_buff_s *buff,
490 struct aq_ring_buff_s *buff_ = buff;
513 buff->is_ip_cso &= buff_->is_ip_cso;
514 buff->is_udp_cso &= buff_->is_udp_cso;
515 buff->is_tcp_cso &= buff_->is_tcp_cso;
516 buff->is_cso_err |= buff_->is_cso_err;
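Lines 486-516 sit in the helper that folds the trailing descriptors of a multi-descriptor packet into the head descriptor (aq_add_rx_fragment() is named at line 797 below). The flag merging is worth noting: the good-checksum bits are AND-ed, so the packet only counts as hardware-validated if every fragment reports a good checksum, while the error bit is OR-ed, so a single bad fragment taints the whole packet.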
537 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
545 if (buff->is_cleaned)
548 if (!buff->is_eop) {
550 buff_ = buff;
573 buff->is_error |= buff_->is_error;
574 buff->is_cso_err |= buff_->is_cso_err;
578 if (buff->is_error ||
579 (buff->is_lro && buff->is_cso_err)) {
580 buff_ = buff;
599 if (buff->is_error) {
607 buff->rxdata.daddr,
608 buff->rxdata.pg_off,
609 buff->len, DMA_FROM_DEVICE);
620 buff->len -=
622 aq_buf_vaddr(&buff->rxdata),
623 buff->len);
625 hdr_len = buff->len;
628 aq_buf_vaddr(&buff->rxdata),
631 memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
634 if (buff->len - hdr_len > 0) {
635 skb_add_rx_frag(skb, i++, buff->rxdata.page,
636 buff->rxdata.pg_off + hdr_len,
637 buff->len - hdr_len,
639 page_ref_inc(buff->rxdata.page);
642 if (!buff->is_eop) {
643 buff_ = buff;
661 buff->is_ip_cso &= buff_->is_ip_cso;
662 buff->is_udp_cso &= buff_->is_udp_cso;
663 buff->is_tcp_cso &= buff_->is_tcp_cso;
664 buff->is_cso_err |= buff_->is_cso_err;
669 if (buff->is_vlan)
671 buff->vlan_rx_tag);
675 aq_rx_checksum(self, buff, skb);
677 skb_set_hash(skb, buff->rss_hash,
678 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
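The block at lines 537-678 is the non-XDP RX completion path: descriptors already cleaned are skipped, a packet's descriptors are chained until the end-of-packet one arrives (merging error and checksum flags along the way, lines 573-574 and 661-664), packets with errors or LRO aggregates with checksum errors are dropped, and the rest are assembled into an skb before the vlan tag, checksum status and RSS hash are applied. Lines 620-623 appear to subtract the length of an extracted hardware timestamp from the packet length. The header-split step at lines 625-639 copies just the protocol headers into the skb's linear area and attaches the remaining payload as a page fragment; a sketch of that step, assuming the header length is capped via eth_get_headlen():

    hdr_len = buff->len;
    if (hdr_len > AQ_CFG_RX_HDR_SIZE)
            hdr_len = eth_get_headlen(skb->dev, aq_buf_vaddr(&buff->rxdata),
                                      AQ_CFG_RX_HDR_SIZE);

    /* Copy the headers into the skb's linear area... */
    memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
           ALIGN(hdr_len, sizeof(long)));

    /* ...and hand the payload over as a page fragment, pinning the page. */
    if (buff->len - hdr_len > 0) {
            skb_add_rx_frag(skb, i++, buff->rxdata.page,
                            buff->rxdata.pg_off + hdr_len,
                            buff->len - hdr_len, self->frame_max);
            page_ref_inc(buff->rxdata.page);
    }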
713 struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
723 if (buff->is_cleaned)
726 if (!buff->is_eop) {
727 buff_ = buff;
743 buff->is_error |= buff_->is_error;
744 buff->is_cso_err |= buff_->is_cso_err;
751 if (buff->is_error ||
752 (buff->is_lro && buff->is_cso_err)) {
753 buff_ = buff;
772 if (buff->is_error) {
780 buff->rxdata.daddr,
781 buff->rxdata.pg_off,
782 buff->len, DMA_FROM_DEVICE);
783 hard_start = page_address(buff->rxdata.page) +
784 buff->rxdata.pg_off - rx_ring->page_offset;
788 aq_buf_vaddr(&buff->rxdata),
789 buff->len);
790 buff->len -= ptp_hwtstamp_len;
795 buff->len, false);
796 if (!buff->is_eop) {
797 if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
807 skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
814 if (buff->is_vlan)
816 buff->vlan_rx_tag);
818 aq_rx_checksum(rx_ring, buff, skb);
820 skb_set_hash(skb, buff->rss_hash,
821 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
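Lines 713-821 are the XDP RX completion path: the same cleaned/eop/error bookkeeping as the non-XDP loop, then a DMA sync of the page, construction of an xdp_buff whose hard start is the page address minus the reserved headroom, folding of any follow-on descriptors via aq_add_rx_fragment(), running the program through aq_xdp_run_prog(), and finally the usual vlan/checksum/hash handling on the resulting skb (line 790 again strips an appended hardware timestamp). A sketch of the xdp_buff setup around lines 783-797 (frame_sz and the drop path are assumptions):

    /* frame_sz: buffer space available to XDP, computed elsewhere. */
    hard_start = page_address(buff->rxdata.page) +
                 buff->rxdata.pg_off - rx_ring->page_offset;

    xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
    xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
                     buff->len, false);

    if (!buff->is_eop) {
            if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp))
                    goto next;      /* could not attach all fragments: drop */
    }

    skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);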
866 struct aq_ring_buff_s *buff = NULL;
876 buff = &self->buff_ring[self->sw_tail];
878 buff->flags = 0U;
879 buff->len = self->frame_max;
881 err = aq_get_rxpages(self, buff);
885 buff->pa = aq_buf_daddr(&buff->rxdata);
886 buff = NULL;
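Lines 866-886 are the RX refill loop: each free slot gets its flags cleared, its length set to the ring's frame_max, a fresh (or recycled) page from aq_get_rxpages(), and its DMA address taken from that page before the tail pointer moves on. A sketch of the loop body, assuming sw_tail advances through a ring-next helper:

    for (i = aq_ring_avail_dx(self); i--;
         self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
            buff = &self->buff_ring[self->sw_tail];

            buff->flags = 0U;
            buff->len = self->frame_max;

            err = aq_get_rxpages(self, buff);
            if (err)
                    goto err_exit;

            /* Descriptor DMA address points at the newly attached page. */
            buff->pa = aq_buf_daddr(&buff->rxdata);
            buff = NULL;
    }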
900 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
902 aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
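The final two matches (lines 900-902) are RX ring teardown: every descriptor between sw_head and sw_tail has its page returned through aq_free_rxpage(). A sketch of the loop, assuming the same ring-next helper as above:

    for (; self->sw_head != self->sw_tail;
         self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
            struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

            aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
    }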