Lines Matching defs:rx_q

716  * @rx_q: Rx completion queue
724 static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
751 idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
756 * @rx_q: Rx completion queue
764 static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
791 idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
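Lines 716-791 are the two checksum front-ends: idpf_rx_singleq_base_csum() and idpf_rx_singleq_flex_csum() each decode their own descriptor layout into a common set of checksum bits, then funnel into the shared idpf_rx_singleq_csum(). A rough sketch of that shape for the base variant, relying on the driver's internal headers; the csum_bits structure name and the elided bit extraction are assumptions, only the signature and the tail call appear in the matches above.

static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
				      struct sk_buff *skb,
				      union virtchnl2_rx_desc *rx_desc,
				      u16 ptype)
{
	/* Assumed name for the layout-independent checksum summary. */
	struct idpf_rx_csum_decoded csum_bits = { };

	/* ... pull the IP/L4 checksum status and error flags out of the
	 * 32-byte base descriptor into csum_bits (elided) ...
	 */

	/* Base and flex variants both end in the same shared helper. */
	idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
}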
796 * @rx_q: Rx completion queue
804 static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
811 if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
826 * @rx_q: Rx completion queue
834 static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
839 if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
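The hash helpers at lines 804 and 834 open with the same gate: if the netdev has not enabled NETIF_F_RXHASH, they return before touching the descriptor's hash words. A sketch of the base variant; only the gate comes from the matches, the hash read is left as a comment and any field names there would be assumptions.

static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
				      struct sk_buff *skb,
				      union virtchnl2_rx_desc *rx_desc,
				      struct idpf_rx_ptype_decoded *decoded)
{
	/* Respect the per-netdev RXHASH offload toggle before any work. */
	if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
		return;

	/* ... if the descriptor marks its RSS hash as valid, read the
	 * 32-bit hash and report it via skb_set_hash(), with the hash
	 * type derived from *decoded ...
	 */
}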
851 * @rx_q: Rx ring being processed
860 static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
866 rx_q->vport->rx_ptype_lkup[ptype];
869 skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);
872 if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
873 idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
874 idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
876 idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
877 idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
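Lines 860-877 show how idpf_rx_singleq_process_skb_fields() strings the pieces together: look up the decoded packet type, stamp skb->protocol, then let rx_q->rxdids choose between the base (32-byte) and flex descriptor parsers for hash and checksum. Reassembled around the matched lines; the third and fourth parameters are assumptions, everything else is taken from the listing.

static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
					       struct sk_buff *skb,
					       union virtchnl2_rx_desc *rx_desc,
					       u16 ptype)
{
	struct idpf_rx_ptype_decoded decoded =
		rx_q->vport->rx_ptype_lkup[ptype];

	/* Resolve the L2 protocol before filling in offload hints. */
	skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);

	/* rxdids records which writeback format the queue uses, so it
	 * also selects the matching hash/checksum parsers.
	 */
	if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
		idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
		idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
	} else {
		idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
		idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
	}
}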
883 * @rx_q: queue for which the hw buffers are allocated
888 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
892 u16 nta = rx_q->next_to_alloc;
898 desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
899 buf = &rx_q->rx_buf.buf[nta];
904 addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
917 if (unlikely(nta == rx_q->desc_count)) {
918 desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
919 buf = rx_q->rx_buf.buf;
926 if (rx_q->next_to_alloc != nta) {
927 idpf_rx_buf_hw_update(rx_q, nta);
928 rx_q->next_to_alloc = nta;
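The idpf_rx_singleq_buf_hw_alloc_all() matches (lines 888-928) trace a standard refill loop: walk next_to_alloc, post one buffer descriptor per slot, wrap both cursors at desc_count, and only bump the hardware tail (idpf_rx_buf_hw_update) when something was actually posted. A condensed sketch; the buffer-descriptor field names and the DMA_MAPPING_ERROR convention for idpf_alloc_page() are assumptions not visible in the matches.

bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
				      u16 cleaned_count)
{
	struct virtchnl2_singleq_rx_buf_desc *desc;
	u16 nta = rx_q->next_to_alloc;
	struct idpf_rx_buf *buf;

	if (!cleaned_count)
		return false;

	desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
	buf = &rx_q->rx_buf.buf[nta];

	do {
		dma_addr_t addr;

		addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
		if (unlikely(addr == DMA_MAPPING_ERROR))
			break;

		/* Hand the freshly mapped buffer to hardware. */
		desc->pkt_addr = cpu_to_le64(addr);
		desc->hdr_addr = 0;
		desc++;

		buf++;
		nta++;
		if (unlikely(nta == rx_q->desc_count)) {
			/* Wrap both the descriptor and buffer cursors. */
			desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
			buf = rx_q->rx_buf.buf;
			nta = 0;
		}

		cleaned_count--;
	} while (cleaned_count);

	if (rx_q->next_to_alloc != nta) {
		/* Publish the new tail only if progress was made. */
		idpf_rx_buf_hw_update(rx_q, nta);
		rx_q->next_to_alloc = nta;
	}

	/* Report failure if the ring could not be refilled completely. */
	return !!cleaned_count;
}

Deferring the tail write until after the loop keeps MMIO traffic to one write per refill batch rather than one per posted buffer.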
936 * @rx_q: Rx descriptor queue
946 static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
960 * @rx_q: Rx descriptor queue
970 static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
982 * @rx_q: Rx descriptor queue
987 static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
991 if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
992 idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
994 idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
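Lines 987-994 apply the same base/flex split to field extraction: idpf_rx_singleq_extract_fields() checks rx_q->rxdids once and delegates to the layout-specific helper, which fills a small structure (packet size and packet type) consumed by the clean loop below. A minimal sketch; struct idpf_rx_extracted is an assumed name for that structure.

static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
					   union virtchnl2_rx_desc *rx_desc,
					   struct idpf_rx_extracted *fields)
{
	if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
		idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
	else
		idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
}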
999 * @rx_q: rx queue to clean
1004 static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
1007 struct sk_buff *skb = rx_q->skb;
1008 u16 ntc = rx_q->next_to_clean;
1019 rx_desc = IDPF_RX_DESC(rx_q, ntc);
1037 idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
1039 rx_buf = &rx_q->rx_buf.buf[ntc];
1049 skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
1056 IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
1061 if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
1083 idpf_rx_singleq_process_skb_fields(rx_q, skb,
1087 napi_gro_receive(&rx_q->q_vector->napi, skb);
1094 rx_q->skb = skb;
1096 rx_q->next_to_clean = ntc;
1099 failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
1101 u64_stats_update_begin(&rx_q->stats_sync);
1102 u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
1103 u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
1104 u64_stats_update_end(&rx_q->stats_sync);
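The remaining matches belong to idpf_rx_singleq_clean() (lines 1004-1104), the NAPI poll body for a singleq Rx ring: extract fields from each descriptor, build or extend the skb from the matching buffer, bump the ring index, keep accumulating across non-EOP fragments, populate the skb fields, hand the frame to GRO, then refill the ring and fold the counters into the queue stats under u64_stats protection. A heavily trimmed skeleton built around the matched lines; the descriptor-done check, DMA syncs, the multi-buffer add-frag path, fields.rx_ptype and the exact return convention are assumptions or elisions.

static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	struct sk_buff *skb = rx_q->skb;
	u16 ntc = rx_q->next_to_clean;
	u16 cleaned_count = 0;
	bool failure = false;

	while (likely(total_rx_pkts < (unsigned int)budget)) {
		struct idpf_rx_extracted fields = { };
		union virtchnl2_rx_desc *rx_desc;
		struct idpf_rx_buf *rx_buf;

		rx_desc = IDPF_RX_DESC(rx_q, ntc);

		/* (Stop here unless hardware has written this descriptor
		 * back; that check is elided.)
		 */

		idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);

		rx_buf = &rx_q->rx_buf.buf[ntc];
		if (!skb)
			skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
		if (!skb)
			break;		/* allocation failed, retry next poll */

		cleaned_count++;
		IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);

		/* Multi-buffer frame: keep collecting until EOP. */
		if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
			continue;

		total_rx_bytes += skb->len;

		idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
						   fields.rx_ptype);

		napi_gro_receive(&rx_q->q_vector->napi, skb);
		skb = NULL;
		total_rx_pkts++;
	}

	/* Park a partially assembled frame for the next poll. */
	rx_q->skb = skb;
	rx_q->next_to_clean = ntc;

	failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);

	u64_stats_update_begin(&rx_q->stats_sync);
	u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
	u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
	u64_stats_update_end(&rx_q->stats_sync);

	/* Returning the full budget on a refill failure keeps NAPI polling. */
	return failure ? budget : (int)total_rx_pkts;
}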