Lines Matching defs:mdev

13 static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
15 u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
20 u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
23 u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
33 mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
44 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
102 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
110 max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
120 u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
123 u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
143 u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
146 u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
158 u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
161 return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
165 u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
168 u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
179 u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
184 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
202 static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
205 u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
206 u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);
211 u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
214 return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
215 mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
256 static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
268 return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
270 no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
284 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
288 u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
289 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
290 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
292 return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
296 bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
320 static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
326 mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
336 if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
342 bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
346 u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
347 u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
348 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
349 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
351 return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
356 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
360 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
361 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
366 if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
369 log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
370 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
377 return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
382 u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
386 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
389 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
390 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
391 max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
410 u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
416 u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
422 u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
425 u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
431 u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
435 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
436 return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
442 return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
445 u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
449 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
450 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
453 log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
454 log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
467 u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
476 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
485 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
490 stop_room = mlx5e_ktls_get_stop_room(mdev, params);
491 stop_room += mlx5e_stop_room_for_max_wqe(mdev);
497 stop_room += mlx5e_stop_room_for_mpwqe(mdev);
502 int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
507 stop_room = mlx5e_calc_sq_stop_room(mdev, params);
509 mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
517 bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
522 mlx5_port_max_linkspeed(mdev, &link_speed);
523 pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
524 mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
533 int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
535 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
536 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
538 if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
544 int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
547 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
548 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
551 if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
552 mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
557 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
558 mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
566 mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
568 mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
576 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
584 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
591 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
602 MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
603 !mlx5e_mpwrq_validate_regular(mdev, params) &&
604 (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
605 !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
607 mlx5e_set_rq_type(mdev, params);
608 mlx5e_init_rq_type_params(mdev, params);
679 static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
693 if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
696 frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
724 mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
791 mlx5_core_dbg(mdev, "%s: wqe_bulk = %u, wqe_bulk_refill_unit = %u\n",
816 static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
821 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
822 if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
826 static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
830 int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
831 u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
832 int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
833 u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
834 int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
843 static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
854 hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
856 log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
858 log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
859 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
870 MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
876 mlx5e_build_common_cq_param(mdev, param);
880 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
883 bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
889 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
901 u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
902 u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
903 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
904 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
906 if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
909 mlx5_core_err(mdev,
920 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
924 mlx5e_shampo_get_log_rsrv_size(mdev, params));
927 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
929 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
941 err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
949 MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
952 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
956 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
957 mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
962 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
972 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
975 void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
983 mlx5e_build_common_cq_param(mdev, param);
987 void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
994 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
996 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
999 void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
1007 allow_swp = mlx5_geneve_tx_allowed(mdev) ||
1008 (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
1009 mlx5e_build_sq_param_common(mdev, param);
1013 param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
1014 mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
1017 static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
1025 mlx5e_build_common_cq_param(mdev, param);
1035 u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
1039 int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
1040 u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
1041 int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
1042 u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
1048 mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
1057 u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
1065 hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
1070 static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
1079 max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
1080 max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
1090 static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
1094 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
1095 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
1098 umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
1100 return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
1103 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
1114 wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
1140 mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1145 mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1150 mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1155 mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1162 wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
1172 useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
1179 static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
1181 if (mlx5e_is_ktls_rx(mdev))
1187 static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
1194 mlx5e_build_sq_param_common(mdev, param);
1197 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
1198 mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1201 static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
1208 mlx5e_build_sq_param_common(mdev, param);
1209 param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
1210 param->is_tls = mlx5e_is_ktls_rx(mdev);
1212 param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
1213 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
1215 mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1218 void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
1226 mlx5e_build_sq_param_common(mdev, param);
1229 param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
1230 mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
1233 int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
1240 err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
1244 icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
1245 async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
1247 mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
1248 mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
1249 mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
1250 mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
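
Because the listing only shows the lines that mention `mdev`, the arithmetic that ties the helpers together is easy to miss. As a rough illustration only (not the driver's code), the sketch below mirrors the relationships hinted at above: a multi-packet WQE of 2^log_wqe_sz bytes is backed by whole pages of 2^page_shift bytes (cf. lines 120-123), and the byte size of the UMR WQE that maps those pages is rounded up to whole send-WQE basic blocks (cf. line 161, DIV_ROUND_UP against MLX5_SEND_WQE_BB). The basic-block size, the per-page entry size, and the "one page if the WQE fits in a page" fallback are assumptions made for the example, not values taken from the source.

/*
 * Illustrative sketch only -- not the mlx5e implementation.
 * EXAMPLE_SEND_WQE_BB, EXAMPLE_UMR_ENTRY_SZ and the fallback to one page
 * are assumed values/behaviour for demonstration purposes.
 */
#include <stdio.h>

#define EXAMPLE_SEND_WQE_BB	64u	/* assumed WQE basic-block size in bytes */
#define EXAMPLE_UMR_ENTRY_SZ	16u	/* assumed bytes per page-mapping entry */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Pages needed to back one 2^log_wqe_sz-byte WQE with 2^page_shift-byte pages. */
static unsigned int example_pages_per_wqe(unsigned int log_wqe_sz,
					  unsigned int page_shift)
{
	return log_wqe_sz > page_shift ? 1u << (log_wqe_sz - page_shift) : 1u;
}

int main(void)
{
	unsigned int page_shift = 12;	/* 4 KiB pages */
	unsigned int log_wqe_sz = 18;	/* 256 KiB multi-packet WQE */
	unsigned int pages = example_pages_per_wqe(log_wqe_sz, page_shift);
	unsigned int umr_bytes = pages * EXAMPLE_UMR_ENTRY_SZ;
	unsigned int umr_wqebbs = DIV_ROUND_UP(umr_bytes, EXAMPLE_SEND_WQE_BB);

	/* With the assumed sizes: 64 pages per WQE, 16 WQEBBs per UMR WQE. */
	printf("pages_per_wqe=%u umr_wqebbs=%u\n", pages, umr_wqebbs);
	return 0;
}

With these assumed constants the example prints "pages_per_wqe=64 umr_wqebbs=16"; the real helpers additionally consult device capabilities through mdev (e.g. log_min_mkey_entity_size at line 15 and the max SQ WQEBB count at line 110), which is exactly why every function in the listing takes the mlx5_core_dev pointer.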