Lines Matching refs:rq

288 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
297 ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
298 rq->mpwqe.umr_mode),
303 cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
306 octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
311 static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
313 rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
315 if (!rq->mpwqe.shampo)
320 static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
322 kvfree(rq->mpwqe.shampo);
325 static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
327 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
350 static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
352 kvfree(rq->mpwqe.shampo->bitmap);
353 kvfree(rq->mpwqe.shampo->info);
354 kvfree(rq->mpwqe.shampo->pages);
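
The SHAMPO (header/data split) matches above show the per-RQ header bookkeeping being allocated with kvzalloc_node() on the channel's NUMA node and released with kvfree(). Below is a minimal sketch of that alloc/free pairing, assuming the driver's private en.h definitions are in scope; the array sizing from hd_per_wq, the helper name and the -ENOMEM unwinding are assumptions, not the driver's exact code.

static int rq_shampo_hd_info_alloc_sketch(struct mlx5e_rq *rq, int node)
{
        struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;

        /* Sketch: one bit, one info slot and one page slot per header entry. */
        shampo->bitmap = kvzalloc_node(BITS_TO_LONGS(shampo->hd_per_wq) *
                                       sizeof(long), GFP_KERNEL, node);
        shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
                                                sizeof(*shampo->info)),
                                     GFP_KERNEL, node);
        shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
                                                 sizeof(*shampo->pages)),
                                      GFP_KERNEL, node);
        if (!shampo->bitmap || !shampo->info || !shampo->pages) {
                kvfree(shampo->pages);          /* kvfree(NULL) is a no-op */
                kvfree(shampo->info);
                kvfree(shampo->bitmap);
                return -ENOMEM;                 /* assumed error code */
        }
        return 0;
}
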
357 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
359 int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
362 alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info,
364 rq->mpwqe.pages_per_wqe));
366 rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
367 if (!rq->mpwqe.info)
374 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);
376 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
379 mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
540 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
542 u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
543 u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
548 max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
551 if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
555 __func__, wq_size, rq->mpwqe.mtts_per_wqe,
558 err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
559 &umr_mkey, rq->wqe_overflow.addr,
560 rq->mpwqe.umr_mode, xsk_chunk_size);
561 rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
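
The mlx5e_create_rq_umr_mkey() matches show the wq_size * mtts_per_wqe product being guarded with check_mul_overflow() and bounded by mlx5e_mpwrq_max_num_entries() before the UMR MKey is created. A hedged sketch of just that guard follows; the helper name, the -EINVAL code and the log wording are assumptions.

static int rq_umr_num_entries_sketch(struct mlx5_core_dev *mdev,
                                     struct mlx5e_rq *rq, u32 wq_size,
                                     u32 *num_entries)
{
        u32 max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);

        /* check_mul_overflow() is true if wq_size * mtts_per_wqe wrapped u32. */
        if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
                                            num_entries) ||
                         *num_entries > max_num_entries)) {
                mlx5_core_err(mdev, "%s: overflow: %u * %u > %u\n", __func__,
                              wq_size, rq->mpwqe.mtts_per_wqe, max_num_entries);
                return -EINVAL;                 /* assumed error code */
        }
        return 0;
}
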
566 struct mlx5e_rq *rq)
570 if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
572 max_klm_size, rq->mpwqe.shampo->hd_per_wq);
575 return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
576 &rq->mpwqe.shampo->mkey);
579 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
585 WARN_ON(rq->xsk_pool);
587 next_frag.frag_page = &rq->wqe.alloc_units->frag_pages[0];
592 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
593 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
595 &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
598 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
618 static void mlx5e_init_xsk_buffs(struct mlx5e_rq *rq)
623 WARN_ON(rq->wqe.info.num_frags != 1);
624 WARN_ON(rq->wqe.info.log_num_frags != 0);
625 WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
630 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
631 rq->wqe.frags[i].xskp = &rq->wqe.alloc_units->xsk_buffs[i];
636 rq->wqe.frags[i].flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
640 static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
642 int wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
643 int len = wq_sz << rq->wqe.info.log_num_frags;
648 if (rq->xsk_pool)
663 rq->wqe.alloc_units = aus;
664 rq->wqe.frags = frags;
666 if (rq->xsk_pool)
667 mlx5e_init_xsk_buffs(rq);
669 mlx5e_init_frags_partition(rq);
674 static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq)
676 kvfree(rq->wqe.frags);
677 kvfree(rq->wqe.alloc_units);
682 struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
684 mlx5e_reporter_rq_cqe_err(rq);
687 static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
689 rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
690 if (!rq->wqe_overflow.page)
693 rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
694 PAGE_SIZE, rq->buff.map_dir);
695 if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
696 __free_page(rq->wqe_overflow.page);
702 static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
704 dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
705 rq->buff.map_dir);
706 __free_page(rq->wqe_overflow.page);
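
The wqe_overflow "drop page" matches give away nearly the whole pairing: one page is allocated, DMA-mapped in the RQ's mapping direction, and unwound if the mapping fails; the free path unmaps and releases it. A sketch filling in only the return values (the -ENOMEM codes are assumptions):

static int alloc_mpwqe_rq_drop_page_sketch(struct mlx5e_rq *rq)
{
        rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
        if (!rq->wqe_overflow.page)
                return -ENOMEM;                       /* assumed error code */

        rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
                                             PAGE_SIZE, rq->buff.map_dir);
        if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
                __free_page(rq->wqe_overflow.page);   /* undo the allocation */
                return -ENOMEM;                       /* assumed error code */
        }
        return 0;
}

static void free_mpwqe_rq_drop_page_sketch(struct mlx5e_rq *rq)
{
        dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
                       rq->buff.map_dir);
        __free_page(rq->wqe_overflow.page);
}
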
710 u32 xdp_frag_size, struct mlx5e_rq *rq)
715 rq->wq_type = params->rq_wq_type;
716 rq->pdev = c->pdev;
717 rq->netdev = c->netdev;
718 rq->priv = c->priv;
719 rq->tstamp = c->tstamp;
720 rq->clock = &mdev->clock;
721 rq->icosq = &c->icosq;
722 rq->ix = c->ix;
723 rq->channel = c;
724 rq->mdev = mdev;
725 rq->hw_mtu =
727 rq->xdpsq = &c->rq_xdpsq;
728 rq->stats = &c->priv->channel_stats[c->ix]->rq;
729 rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
730 err = mlx5e_rq_set_handlers(rq, params, NULL);
734 return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id,
741 struct mlx5e_rq *rq,
749 if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
751 err = mlx5e_rq_shampo_hd_alloc(rq, node);
754 rq->mpwqe.shampo->hd_per_wq =
756 err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
759 err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
762 rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
763 if (!rq->hw_gro_data) {
767 rq->mpwqe.shampo->key =
768 cpu_to_be32(rq->mpwqe.shampo->mkey);
769 rq->mpwqe.shampo->hd_per_wqe =
772 *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
777 mlx5e_rq_shampo_hd_info_free(rq);
779 mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
781 mlx5e_rq_shampo_hd_free(rq);
786 static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
788 if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
791 kvfree(rq->hw_gro_data);
792 mlx5e_rq_shampo_hd_info_free(rq);
793 mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
794 mlx5e_rq_shampo_hd_free(rq);
800 int node, struct mlx5e_rq *rq)
802 struct mlx5_core_dev *mdev = rq->mdev;
811 INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
815 RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
817 rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
818 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
821 rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
823 switch (rq->wq_type) {
825 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
826 &rq->wq_ctrl);
830 err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
834 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
836 wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
838 rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
839 rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
840 rq->mpwqe.pages_per_wqe =
841 mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
842 rq->mpwqe.umr_mode);
843 rq->mpwqe.umr_wqebbs =
844 mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
845 rq->mpwqe.umr_mode);
846 rq->mpwqe.mtts_per_wqe =
847 mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
848 rq->mpwqe.umr_mode);
850 pool_size = rq->mpwqe.pages_per_wqe <<
856 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
857 rq->mpwqe.num_strides =
859 rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
861 rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
863 err = mlx5e_create_rq_umr_mkey(mdev, rq);
867 err = mlx5e_rq_alloc_mpwqe_info(rq, node);
871 err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
877 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
878 &rq->wq_ctrl);
882 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
884 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
886 rq->wqe.info = rqp->frags_info;
887 rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
889 err = mlx5e_init_wqe_alloc_info(rq, node);
895 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
897 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
906 pp_params.dev = rq->pdev;
907 pp_params.napi = rq->cq.napi;
908 pp_params.netdev = rq->netdev;
909 pp_params.dma_dir = rq->buff.map_dir;
912 /* page_pool can be used even when there is no rq->xdp_prog,
917 rq->page_pool = page_pool_create(&pp_params);
918 if (IS_ERR(rq->page_pool)) {
919 err = PTR_ERR(rq->page_pool);
920 rq->page_pool = NULL;
923 if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
924 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
925 MEM_TYPE_PAGE_POOL, rq->page_pool);
931 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
933 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
935 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
936 u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
937 rq->mpwqe.page_shift;
938 u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
939 0 : rq->buff.headroom;
943 wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
946 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
949 for (f = 0; f < rq->wqe.info.num_frags; f++) {
950 u32 frag_size = rq->wqe.info.arr[f].frag_size |
954 wqe->data[f].lkey = rq->mkey_be;
957 if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
968 page_pool_destroy(rq->page_pool);
970 switch (rq->wq_type) {
972 mlx5e_rq_free_shampo(rq);
974 kvfree(rq->mpwqe.info);
976 mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
978 mlx5e_free_mpwqe_rq_drop_page(rq);
981 mlx5e_free_wqe_alloc_info(rq);
984 mlx5_wq_destroy(&rq->wq_ctrl);
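
The page_pool matches inside mlx5e_alloc_rq() show the pool being created from the RQ's device, NAPI context and DMA direction, then registered as the memory model of the already-registered xdp_rxq (and, per the matched comment at 912, the pool is useful even without an XDP program). A sketch of that sequence, with an assumed helper name, illustrative order/pool_size handling, and the driver's remaining pp_params fields omitted:

static int rq_create_page_pool_sketch(struct mlx5e_rq *rq, u32 pool_size, int node)
{
        struct page_pool_params pp_params = {};
        int err = 0;

        pp_params.order     = 0;                 /* illustrative */
        pp_params.pool_size = pool_size;
        pp_params.nid       = node;
        pp_params.dev       = rq->pdev;
        pp_params.napi      = rq->cq.napi;
        pp_params.netdev    = rq->netdev;
        pp_params.dma_dir   = rq->buff.map_dir;

        rq->page_pool = page_pool_create(&pp_params);
        if (IS_ERR(rq->page_pool)) {
                err = PTR_ERR(rq->page_pool);
                rq->page_pool = NULL;
                return err;                      /* real code unwinds via goto */
        }

        /* Tie the pool to the xdp_rxq so the XDP core recycles pages into it. */
        if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
                err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                 MEM_TYPE_PAGE_POOL,
                                                 rq->page_pool);
        return err;
}
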
992 static void mlx5e_free_rq(struct mlx5e_rq *rq)
996 if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
997 old_prog = rcu_dereference_protected(rq->xdp_prog,
998 lockdep_is_held(&rq->priv->state_lock));
1003 switch (rq->wq_type) {
1005 kvfree(rq->mpwqe.info);
1006 mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
1007 mlx5e_free_mpwqe_rq_drop_page(rq);
1008 mlx5e_rq_free_shampo(rq);
1011 mlx5e_free_wqe_alloc_info(rq);
1014 kvfree(rq->dim);
1015 xdp_rxq_info_unreg(&rq->xdp_rxq);
1016 page_pool_destroy(rq->page_pool);
1017 mlx5_wq_destroy(&rq->wq_ctrl);
1020 int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
1022 struct mlx5_core_dev *mdev = rq->mdev;
1031 sizeof(u64) * rq->wq_ctrl.buf.npages;
1044 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
1048 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
1050 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
1052 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
1054 order_base_2(rq->mpwqe.shampo->hd_per_wq));
1055 MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
1058 mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
1061 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
1068 static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
1070 struct mlx5_core_dev *mdev = rq->mdev;
1083 mlx5e_rqwq_reset(rq);
1090 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
1097 static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
1099 struct mlx5_cqwq *cqwq = &rq->cq.wq;
1102 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
1113 int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
1115 struct net_device *dev = rq->netdev;
1118 err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
1120 netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
1124 mlx5e_free_rx_descs(rq);
1125 mlx5e_flush_rq_cq(rq);
1127 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
1129 netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
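
mlx5e_flush_rq() reads from the matches as a four-step recovery: move the RQ to RST, drop posted RX descriptors, drain the CQ, then move back to RDY. A sketch of that flow built only from the calls visible above; the exact control flow is an assumption.

int flush_rq_sketch(struct mlx5e_rq *rq, int curr_state)
{
        struct net_device *dev = rq->netdev;
        int err;

        err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
        if (err) {
                netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
                return err;
        }

        /* With the HW queue in RST, software-owned descriptors can be freed
         * and stale completions flushed before re-arming.
         */
        mlx5e_free_rx_descs(rq);
        mlx5e_flush_rq_cq(rq);

        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err) {
                netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
                return err;
        }
        return 0;
}
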
1136 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
1138 struct mlx5_core_dev *mdev = rq->mdev;
1157 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
1164 void mlx5e_destroy_rq(struct mlx5e_rq *rq)
1166 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
1169 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
1173 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
1176 if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
1182 netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
1183 rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
1185 mlx5e_reporter_rx_timeout(rq);
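
The mlx5e_wait_for_min_rx_wqes() matches show a bounded poll: the RQ is considered primed once its current fill level reaches mlx5_min_rx_wqes(), otherwise the warning above is logged and the RX health reporter is notified. A sketch of the polling loop, assuming a jiffies deadline, a 20 ms sleep between samples, and an -ETIMEDOUT return (all assumptions):

int wait_for_min_rx_wqes_sketch(struct mlx5e_rq *rq, int wait_time)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
        u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

        do {
                if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
                        return 0;
                msleep(20);                    /* poll interval is an assumption */
        } while (time_before(jiffies, exp_time));

        netdev_warn(rq->netdev,
                    "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
                    rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
        mlx5e_reporter_rx_timeout(rq);
        return -ETIMEDOUT;                     /* assumed error code */
}
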
1189 void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
1195 if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
1198 wq = &rq->mpwqe.wq;
1207 rq->dealloc_wqe(rq, head);
1211 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
1214 len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
1215 (rq->mpwqe.shampo->hd_per_wq - 1);
1216 mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
1217 rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
1220 rq->mpwqe.actual_wq_head = wq->head;
1221 rq->mpwqe.umr_in_progress = 0;
1222 rq->mpwqe.umr_completed = 0;
1225 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
1230 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
1231 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
1233 mlx5e_free_rx_missing_descs(rq);
1241 rq->dealloc_wqe(rq, wqe_ix);
1246 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1247 mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
1250 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1256 rq->dealloc_wqe(rq, wqe_ix);
1264 rq->dealloc_wqe(rq, wqe_ix);
1272 struct mlx5e_rq *rq)
1274 struct mlx5_core_dev *mdev = rq->mdev;
1278 __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
1280 err = mlx5e_alloc_rq(params, xsk, param, node, rq);
1284 err = mlx5e_create_rq(rq, param, q_counter);
1288 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
1293 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
1295 if (rq->channel && !params->rx_dim_enabled) {
1296 rq->channel->rx_cq_moder = params->rx_cq_moderation;
1297 } else if (rq->channel) {
1303 mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
1306 err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
1316 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);
1323 __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
1330 __set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);
1335 mlx5e_destroy_rq(rq);
1337 mlx5e_free_rq(rq);
1342 void mlx5e_activate_rq(struct mlx5e_rq *rq)
1344 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
1347 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
1349 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
1353 void mlx5e_close_rq(struct mlx5e_rq *rq)
1355 if (rq->dim)
1356 cancel_work_sync(&rq->dim->work);
1357 cancel_work_sync(&rq->recover_work);
1358 mlx5e_destroy_rq(rq);
1359 mlx5e_free_rx_descs(rq);
1360 mlx5e_free_rq(rq);
2444 err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
2448 return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
2480 err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
2481 &c->rq.cq);
2508 err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
2530 mlx5e_close_rq(&c->rq);
2546 mlx5e_close_cq(&c->rq.cq);
2570 mlx5e_close_rq(&c->rq);
2577 mlx5e_close_cq(&c->rq.cq);
2719 mlx5e_activate_rq(&c->rq);
2733 mlx5e_deactivate_rq(&c->rq);
2844 err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
3410 static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
3412 mlx5_wq_destroy(&rq->wq_ctrl);
3416 struct mlx5e_rq *rq,
3425 err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
3426 &rq->wq_ctrl);
3431 xdp_rxq_info_unused(&rq->xdp_rxq);
3433 rq->mdev = mdev;
3519 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3524 return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
3821 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3846 struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
4091 dim_enabled = !!chs->c[i]->rq.dim;
4975 static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
4979 old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
4980 lockdep_is_held(&rq->priv->state_lock));
5027 mlx5e_rq_replace_xdp_prog(&c->rq, prog);
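
mlx5e_rq_replace_xdp_prog() swaps the per-RQ XDP program under priv->state_lock with rcu_replace_pointer(), as the matches at 4979-4980 show. A sketch of the swap; the matches only show the pointer exchange, so dropping the old program's reference with bpf_prog_put() is an assumption.

static void rq_replace_xdp_prog_sketch(struct mlx5e_rq *rq, struct bpf_prog *prog)
{
        struct bpf_prog *old_prog;

        /* Publish the new program; the datapath dereferences rq->xdp_prog under RCU. */
        old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
                                       lockdep_is_held(&rq->priv->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);        /* assumed: release the displaced program */
}
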
5532 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);