Lines matching refs:rq (each hit below is prefixed with its line number in the source file)

64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
92 struct mlx5e_cq_decomp *cqd = &rq->cqd;
97 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
100 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
105 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
108 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
112 struct mlx5e_cq_decomp *cqd = &rq->cqd;
118 rq->stats->cqe_compress_blks++;
153 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
157 struct mlx5e_cq_decomp *cqd = &rq->cqd;
169 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
176 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
180 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
183 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
187 struct mlx5e_cq_decomp *cqd = &rq->cqd;
189 mlx5e_decompress_cqe(rq, wq, cqcc);
194 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
199 struct mlx5e_cq_decomp *cqd = &rq->cqd;
215 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
216 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
218 rq, &cqd->title);
221 rq->stats->cqe_compress_pkts += left;
226 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
231 struct mlx5e_cq_decomp *cqd = &rq->cqd;
243 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
244 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
246 rq, &cqd->title);
251 rq->stats->cqe_compress_pkts += cqe_count;
256 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
260 struct mlx5e_cq_decomp *cqd = &rq->cqd;
263 mlx5e_read_title_slot(rq, wq, cc);
265 mlx5e_decompress_cqe(rq, wq, cc);
266 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
268 rq, &cqd->title);
271 return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
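
The mlx5e_read_title_slot()/mlx5e_decompress_cqe*() hits above belong to the CQE-compression path: a compressed block carries one full "title" CQE followed by mini CQEs, and decompression rebuilds each full CQE from the title plus the per-packet fields before handing it to the normal handler. The stand-alone C sketch below only illustrates that expand-from-title idea; the structures and field names are hypothetical, not the driver's.

#include <stdio.h>

/* Hypothetical, simplified CQE layouts for the demo only. */
struct demo_title_cqe { unsigned int byte_cnt; unsigned int wqe_counter; };
struct demo_mini_cqe  { unsigned int byte_cnt; };

/* Expand a compressed block: every output CQE starts as a copy of the
 * title and gets its per-packet fields patched in. */
static void demo_expand_block(const struct demo_title_cqe *title,
			      const struct demo_mini_cqe *minis, int n)
{
	for (int i = 0; i < n; i++) {
		struct demo_title_cqe cqe = *title;

		cqe.byte_cnt = minis[i].byte_cnt;	  /* per-packet size  */
		cqe.wqe_counter = title->wqe_counter + i; /* consecutive WQEs */
		printf("cqe %d: bytes=%u wqe=%u\n", i, cqe.byte_cnt, cqe.wqe_counter);
	}
}

int main(void)
{
	struct demo_title_cqe title = { .byte_cnt = 64, .wqe_counter = 10 };
	struct demo_mini_cqe minis[3] = { { 64 }, { 128 }, { 1500 } };

	demo_expand_block(&title, minis, 3);
	return 0;
}
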
276 static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
281 page = page_pool_dev_alloc_pages(rq->page_pool);
295 static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
302 page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
305 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
316 err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
331 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
335 mlx5e_page_release_fragmented(rq, frag->frag_page);
338 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
340 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
343 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
346 struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
350 for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
354 err = mlx5e_get_rx_frag(rq, frag);
360 headroom = i == 0 ? rq->buff.headroom : 0;
369 mlx5e_put_rx_frag(rq, --frag);
374 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
379 for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
380 mlx5e_put_rx_frag(rq, wi);
389 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
391 struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
393 if (rq->xsk_pool) {
396 mlx5e_free_rx_wqe(rq, wi);
402 for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
407 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
409 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
416 wi = get_frag(rq, j);
425 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
427 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
434 wi = get_frag(rq, j);
435 mlx5e_free_rx_wqe(rq, wi);
439 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
441 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
450 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
457 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
469 refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
471 mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
472 refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
483 mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
486 int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
489 frag = get_frag(rq, j);
490 for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
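
get_frag() above indexes a flat fragment array with ix << log_num_frags, and mlx5e_refill_rx_wqes() frees and re-allocates WQEs in refill_unit-sized chunks. A minimal user-space sketch of that arithmetic follows; the fragment count and refill unit are picked arbitrarily for the demo.

#include <stdio.h>

#define LOG_NUM_FRAGS	2	/* 4 fragments per WQE, assumed for the demo */
#define REFILL_UNIT	8	/* WQEs refilled per chunk, assumed          */

/* Analogue of get_frag(): first fragment slot for a given WQE index. */
static int first_frag_slot(int wqe_ix)
{
	return wqe_ix << LOG_NUM_FRAGS;
}

int main(void)
{
	int wqe_bulk = 21, done = 0;

	printf("WQE 5 starts at fragment slot %d\n", first_frag_slot(5));

	/* Analogue of the mlx5e_refill_rx_wqes() loop: work in chunks. */
	while (done < wqe_bulk) {
		int remaining = wqe_bulk - done;
		int refill = remaining < REFILL_UNIT ? remaining : REFILL_UNIT;

		printf("refill %d WQEs at offset %d\n", refill, done);
		done += refill;
	}
	return 0;
}
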
498 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
506 dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
525 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
531 dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
532 rq->buff.map_dir);
538 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
546 dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
547 rq->buff.map_dir);
552 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
558 if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
561 no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
563 if (rq->xsk_pool) {
570 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
574 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
579 mlx5e_page_release_fragmented(rq, frag_page);
585 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
587 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
638 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
642 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
644 u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
652 headroom = rq->buff.headroom;
673 err = mlx5e_page_alloc_fragmented(rq, frag_page);
713 mlx5e_page_release_fragmented(rq, dma_info->frag_page);
716 rq->stats->buff_alloc_err++;
720 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
722 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
724 struct mlx5e_icosq *sq = rq->icosq;
727 max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
749 err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
752 index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
759 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
761 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
762 struct mlx5e_icosq *sq = rq->icosq;
771 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
772 err = mlx5e_alloc_rx_hd_mpwqe(rq);
777 pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
779 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
783 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
786 err = mlx5e_page_alloc_fragmented(rq, frag_page);
798 if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
799 int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
800 rq->mpwqe.pages_per_wqe;
802 memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
806 bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
813 offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
818 .num_wqebbs = rq->mpwqe.umr_wqebbs,
819 .umr.rq = rq,
822 sq->pc += rq->mpwqe.umr_wqebbs;
831 mlx5e_page_release_fragmented(rq, frag_page);
834 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
837 rq->stats->buff_alloc_err++;
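
Lines 798-802 above pad the UMR WQE's MTT array up to the hardware alignment. Below is a small stand-alone demo of the ALIGN() round-up involved; the alignment of 8 entries is an assumption for the demo, not taken from the driver headers.

#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two). */
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define DEMO_MTT_ALIGNMENT	8	/* assumed alignment, in MTT entries */

int main(void)
{
	for (int pages = 1; pages <= 10; pages++) {
		int pad = DEMO_ALIGN(pages, DEMO_MTT_ALIGNMENT) - pages;

		if (pages & (DEMO_MTT_ALIGNMENT - 1))
			printf("pages_per_wqe=%2d -> zero-pad %d tail entries\n",
			       pages, pad);
		else
			printf("pages_per_wqe=%2d -> already aligned\n", pages);
	}
	return 0;
}
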
848 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
850 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
867 mlx5e_page_release_fragmented(rq, hd_info->frag_page);
882 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
884 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
885 /* This function is called on rq/netdev close. */
886 mlx5e_free_rx_mpwqe(rq, wi);
891 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
894 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
896 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
901 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
904 if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
907 if (rq->page_pool)
908 page_pool_nid_changed(rq->page_pool, numa_mem_id());
916 wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
918 if (!rq->xsk_pool) {
919 count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
920 } else if (likely(!dma_dev_need_sync(rq->pdev))) {
921 mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
922 count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
924 mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
930 count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
935 rq->stats->buff_alloc_err++;
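
Line 916 above trims wqe_bulk so that head + wqe_bulk ends on a wqe_index_mask boundary, i.e. a refill bulk never stops partway through a group of WQEs covered by the mask. A tiny demo of the mask arithmetic, with an assumed mask of 3 (groups of 4 WQEs):

#include <stdio.h>

#define DEMO_WQE_INDEX_MASK	3	/* assumed: groups of 4 WQEs */

int main(void)
{
	unsigned int head = 6, wqe_bulk = 13;

	/* Same trim as line 916: drop the WQEs that would spill past the
	 * last full group, so head + wqe_bulk ends on a group boundary. */
	wqe_bulk -= (head + wqe_bulk) & DEMO_WQE_INDEX_MASK;

	printf("trimmed bulk = %u, bulk ends at index %u\n",
	       wqe_bulk, head + wqe_bulk);	/* 10 and 16 */
	return 0;
}
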
980 struct mlx5e_rq *rq = &c->rq;
983 shampo = rq->mpwqe.shampo;
1048 wi->umr.rq->mpwqe.umr_completed++;
1080 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
1082 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
1083 u8 umr_completed = rq->mpwqe.umr_completed;
1084 struct mlx5e_icosq *sq = rq->icosq;
1089 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1093 mlx5e_post_rx_mpwqe(rq, umr_completed);
1094 rq->mpwqe.umr_in_progress -= umr_completed;
1095 rq->mpwqe.umr_completed = 0;
1098 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
1100 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
1101 rq->stats->congst_umr++;
1103 if (likely(missing < rq->mpwqe.min_wqe_bulk))
1106 if (rq->page_pool)
1107 page_pool_nid_changed(rq->page_pool, numa_mem_id());
1109 head = rq->mpwqe.actual_wq_head;
1112 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
1115 mlx5e_free_rx_mpwqe(rq, wi);
1117 alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
1118 mlx5e_alloc_rx_mpwqe(rq, head);
1125 rq->mpwqe.umr_last_bulk = missing - i;
1131 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
1132 rq->mpwqe.actual_wq_head = head;
1140 if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
1216 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
1218 struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
1219 u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
1224 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
1226 int udp_off = rq->hw_gro_data->fk.control.thoff;
1227 struct sk_buff *skb = rq->hw_gro_data->skb;
1243 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
1245 int udp_off = rq->hw_gro_data->fk.control.thoff;
1246 struct sk_buff *skb = rq->hw_gro_data->skb;
1262 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1265 u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
1269 last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
1270 last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
1274 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
1277 int tcp_off = rq->hw_gro_data->fk.control.thoff;
1278 struct sk_buff *skb = rq->hw_gro_data->skb;
1283 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1288 if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
1298 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
1301 int tcp_off = rq->hw_gro_data->fk.control.thoff;
1302 struct sk_buff *skb = rq->hw_gro_data->skb;
1307 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1319 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
1321 bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
1322 struct sk_buff *skb = rq->hw_gro_data->skb;
1328 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
1336 mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
1338 mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
1340 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
1346 mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
1348 mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
1377 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
1391 rq->stats->ecn_mark += !!rc;
1479 struct mlx5e_rq *rq,
1483 struct mlx5e_rq_stats *stats = rq->stats;
1497 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1520 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1550 struct mlx5e_rq *rq,
1554 struct mlx5e_rq_stats *stats = rq->stats;
1555 struct net_device *netdev = rq->netdev;
1560 mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1580 if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1581 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1582 rq->clock, get_cqe_ts(cqe));
1583 skb_record_rx_queue(skb, rq->ix);
1596 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1599 mlx5e_enable_ecn(rq, skb);
1607 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1612 struct mlx5e_rq_stats *stats = rq->stats;
1620 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1622 if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1623 napi_gro_receive(rq->cq.napi, skb);
1624 rq->hw_gro_data->skb = NULL;
1628 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1633 struct mlx5e_rq_stats *stats = rq->stats;
1637 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1641 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1648 rq->stats->buff_alloc_err++;
1661 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1665 xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1668 mxbuf->rq = rq;
1672 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1676 u16 rx_headroom = rq->buff.headroom;
1689 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1690 frag_size, rq->buff.map_dir);
1693 prog = rcu_dereference(rq->xdp_prog);
1698 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1700 if (mlx5e_xdp_handle(rq, prog, &mxbuf))
1708 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1720 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1723 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1725 u16 rx_headroom = rq->buff.headroom;
1742 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1743 rq->buff.frame0_sz, rq->buff.map_dir);
1747 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1761 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
1770 prog = rcu_dereference(rq->xdp_prog);
1771 if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
1772 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1781 skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
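
The skb_from_cqe_linear/nonlinear hits above show the order of operations on the fast path: sync the buffer for the CPU, fill an xdp_buff-style descriptor (mlx5e_fill_mxbuf), give an attached XDP program the chance to consume the packet (mlx5e_xdp_handle), and only then build a linear skb. A hedged user-space sketch of that control flow; the verdicts and helper names below are invented for the demo.

#include <stdbool.h>
#include <stdio.h>

enum demo_verdict { DEMO_PASS, DEMO_CONSUMED };

/* Stand-in for the XDP hook: a drop or redirect verdict means the
 * buffer is consumed and no skb is built. */
static enum demo_verdict demo_run_xdp(bool prog_attached, int len)
{
	if (prog_attached && len < 64)
		return DEMO_CONSUMED;	/* pretend the program drops runts */
	return DEMO_PASS;
}

static void demo_rx_one(bool prog_attached, int len)
{
	/* (1) buffer already synced for the CPU, descriptor filled in */
	/* (2) offer the packet to XDP first                           */
	if (demo_run_xdp(prog_attached, len) == DEMO_CONSUMED) {
		printf("len %4d: consumed by XDP, no skb\n", len);
		return;
	}
	/* (3) only now build the skb and pass it up */
	printf("len %4d: build linear skb, hand to GRO\n", len);
}

int main(void)
{
	demo_rx_one(true, 32);
	demo_rx_one(true, 1500);
	return 0;
}
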
1804 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1807 struct mlx5e_priv *priv = rq->priv;
1810 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1811 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1812 queue_work(priv->wq, &rq->recover_work);
1816 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1818 trigger_report(rq, cqe);
1819 rq->stats->wqe_err++;
1822 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1824 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1831 wi = get_frag(rq, ci);
1835 mlx5e_handle_rx_err_cqe(rq, cqe);
1839 skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1843 rq, wi, cqe, cqe_bcnt);
1846 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1851 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1859 napi_gro_receive(rq->cq.napi, skb);
1866 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1868 struct net_device *netdev = rq->netdev;
1872 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1879 wi = get_frag(rq, ci);
1883 mlx5e_handle_rx_err_cqe(rq, cqe);
1887 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1890 rq, wi, cqe, cqe_bcnt);
1893 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1898 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1903 mlx5e_rep_tc_receive(cqe, rq, skb);
1909 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1913 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1915 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
1916 u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1917 u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
1926 mlx5e_handle_rx_err_cqe(rq, cqe);
1931 struct mlx5e_rq_stats *stats = rq->stats;
1940 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1943 rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1947 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1949 mlx5e_rep_tc_receive(cqe, rq, skb);
1952 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1955 wq = &rq->mpwqe.wq;
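
The MPWQE handlers above (e.g. lines 1915-1917) convert a CQE's stride index into a byte offset within the multi-packet WQE and then split it into a page index plus an in-page offset. Stand-alone demo of that arithmetic; the stride size and page shift values are assumptions for the demo.

#include <stdio.h>

#define DEMO_LOG_STRIDE_SZ	11	/* 2 KB strides, assumed */
#define DEMO_PAGE_SHIFT		12	/* 4 KB pages, assumed   */

int main(void)
{
	for (unsigned int stride_ix = 0; stride_ix < 6; stride_ix++) {
		unsigned int wqe_offset  = stride_ix << DEMO_LOG_STRIDE_SZ;
		unsigned int head_offset = wqe_offset & ((1u << DEMO_PAGE_SHIFT) - 1);
		unsigned int page_idx    = wqe_offset >> DEMO_PAGE_SHIFT;

		printf("stride %u -> page %u, offset %u\n",
		       stride_ix, page_idx, head_offset);
	}
	return 0;
}
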
1967 mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
1978 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1981 truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1984 mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
1994 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2013 prog = rcu_dereference(rq->xdp_prog);
2018 if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
2019 rq->stats->buff_alloc_err++;
2028 skb = napi_alloc_skb(rq->cq.napi,
2031 rq->stats->buff_alloc_err++;
2050 mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
2058 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
2061 truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
2063 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
2071 if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2072 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
2080 mlx5e_page_release_fragmented(rq, &wi->linear_page);
2084 skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
2089 mlx5e_page_release_fragmented(rq, &wi->linear_page);
2095 mlx5e_page_release_fragmented(rq, &wi->linear_page);
2128 mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
2139 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2144 u16 rx_headroom = rq->buff.headroom;
2153 if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2154 rq->stats->oversize_pkts_sw_drop++;
2163 dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2164 frag_size, rq->buff.map_dir);
2167 prog = rcu_dereference(rq->xdp_prog);
2172 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2174 if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2175 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2185 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2197 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2200 struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
2203 u16 rx_headroom = rq->buff.headroom;
2214 dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
2217 skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
2225 rq->stats->gro_large_hds++;
2226 skb = napi_alloc_skb(rq->cq.napi,
2229 rq->stats->buff_alloc_err++;
2234 mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
2260 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2262 struct sk_buff *skb = rq->hw_gro_data->skb;
2263 struct mlx5e_rq_stats *stats = rq->stats;
2267 mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2269 mlx5e_shampo_update_hdr(rq, cqe, match);
2270 napi_gro_receive(rq->cq.napi, skb);
2271 rq->hw_gro_data->skb = NULL;
2283 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
2285 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
2292 mlx5e_page_release_fragmented(rq, dma_info->frag_page);
2297 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2300 u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
2308 struct sk_buff **skb = &rq->hw_gro_data->skb;
2311 struct mlx5e_rq_stats *stats = rq->stats;
2316 wi = mlx5e_get_mpw_info(rq, wqe_id);
2320 mlx5e_handle_rx_err_cqe(rq, cqe);
2334 mlx5e_shampo_flush_skb(rq, cqe, match);
2339 *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2341 *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
2351 rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2352 void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
2353 int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
2357 rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2365 mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2368 mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
2370 mlx5e_shampo_flush_skb(rq, cqe, match);
2372 mlx5e_free_rx_shampo_hd_entry(rq, header_index);
2374 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2377 wq = &rq->mpwqe.wq;
2382 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2386 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2388 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
2389 u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2390 u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
2399 mlx5e_handle_rx_err_cqe(rq, cqe);
2404 struct mlx5e_rq_stats *stats = rq->stats;
2413 skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2417 rq, wi, cqe, cqe_bcnt, head_offset,
2422 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2430 napi_gro_receive(rq->cq.napi, skb);
2433 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2436 wq = &rq->mpwqe.wq;
2441 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2446 struct mlx5e_cq_decomp *cqd = &rq->cqd;
2455 rq->stats->cqe_compress_blks++;
2462 mlx5e_read_enhanced_title_slot(rq, title_cqe);
2464 rq->stats->cqe_compress_blks++;
2467 mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2474 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2476 rq, cqe);
2483 mlx5e_read_enhanced_title_slot(rq, title_cqe);
2490 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2497 if (rq->cqd.left)
2498 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2503 mlx5e_decompress_cqes_start(rq, cqwq,
2509 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2511 rq, cqe);
2520 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2524 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2527 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2528 work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2531 work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2537 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2538 mlx5e_shampo_flush_skb(rq, NULL, false);
2540 if (rcu_access_pointer(rq->xdp_prog))
2541 mlx5e_xdp_rx_poll_complete(rq);
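
Several hits above dispatch through INDIRECT_CALL_3(rq->handle_rx_cqe, ...), which compares the function pointer against a short list of expected handlers and calls the match directly, keeping the true indirect call only as a fallback. A user-space analogue with hypothetical handler names:

#include <stdio.h>

typedef void (*rx_handler_t)(int cqe);

static void handle_basic(int cqe)  { printf("basic  cqe %d\n", cqe); }
static void handle_mpwrq(int cqe)  { printf("mpwrq  cqe %d\n", cqe); }
static void handle_shampo(int cqe) { printf("shampo cqe %d\n", cqe); }

/* Analogue of INDIRECT_CALL_3(): direct calls for the expected targets,
 * indirect call only as the fallback. */
static void dispatch(rx_handler_t h, int cqe)
{
	if (h == handle_mpwrq)
		handle_mpwrq(cqe);
	else if (h == handle_shampo)
		handle_shampo(cqe);
	else if (h == handle_basic)
		handle_basic(cqe);
	else
		h(cqe);
}

int main(void)
{
	rx_handler_t handle_rx_cqe = handle_mpwrq;	/* picked at RQ setup */

	for (int cqe = 0; cqe < 3; cqe++)
		dispatch(handle_rx_cqe, cqe);
	return 0;
}
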
2557 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2573 netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2587 stats = &priv->channel_stats[rq->ix]->rq;
2623 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2624 rq->clock, get_cqe_ts(cqe));
2625 skb_record_rx_queue(skb, rq->ix);
2642 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2644 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2651 wi = get_frag(rq, ci);
2655 rq->stats->wqe_err++;
2659 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2662 rq, wi, cqe, cqe_bcnt);
2666 mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2671 napi_gro_receive(rq->cq.napi, skb);
2683 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2685 struct net_device *netdev = rq->netdev;
2686 struct mlx5_core_dev *mdev = rq->mdev;
2687 struct mlx5e_priv *priv = rq->priv;
2689 switch (rq->wq_type) {
2691 rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2696 rq->post_wqes = mlx5e_post_rx_mpwqes;
2697 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2700 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2701 if (!rq->handle_rx_cqe) {
2706 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2707 if (!rq->handle_rx_cqe) {
2715 rq->wqe.skb_from_cqe = xsk ?
2720 rq->post_wqes = mlx5e_post_rx_wqes;
2721 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2722 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2723 if (!rq->handle_rx_cqe) {
2732 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2734 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2743 wi = get_frag(rq, ci);
2747 rq->stats->wqe_err++;
2751 skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2755 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2758 mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2759 rq->netdev->devlink_port);
2766 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2768 rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2771 rq->post_wqes = mlx5e_post_rx_wqes;
2772 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2773 rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
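
mlx5e_rq_set_handlers() and mlx5e_rq_set_trap_handlers() above pick the skb-building and CQE-handling callbacks once, at RQ setup, based on the WQ type and whether XSK or SHAMPO is in use; the polling loop then only ever calls through rq->handle_rx_cqe. A rough user-space sketch of that selection, with placeholder strings standing in for the real callbacks:

#include <stdbool.h>
#include <stdio.h>

enum demo_wq_type { DEMO_WQ_CYCLIC, DEMO_WQ_STRIDING };

struct demo_rq_ops {
	const char *skb_from_cqe;
	const char *handle_rx_cqe;
};

static struct demo_rq_ops demo_pick_ops(enum demo_wq_type type, bool xsk, bool shampo)
{
	struct demo_rq_ops ops;

	if (type == DEMO_WQ_STRIDING) {
		ops.skb_from_cqe  = xsk ? "xsk_skb_from_cqe_mpwrq"
					: "skb_from_cqe_mpwrq";
		ops.handle_rx_cqe = shampo ? "handle_rx_cqe_mpwrq_shampo"
					   : "handle_rx_cqe_mpwrq";
	} else {
		ops.skb_from_cqe  = xsk ? "xsk_skb_from_cqe" : "skb_from_cqe";
		ops.handle_rx_cqe = "handle_rx_cqe";
	}
	return ops;
}

int main(void)
{
	struct demo_rq_ops ops = demo_pick_ops(DEMO_WQ_STRIDING, false, true);

	printf("skb builder: %s, CQE handler: %s\n",
	       ops.skb_from_cqe, ops.handle_rx_cqe);
	return 0;
}
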