Lines Matching defs:mdev

77 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
83 striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
84 MLX5_CAP_ETH(mdev, reg_umr_sq);
88 umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
89 max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
101 struct mlx5_core_dev *mdev = priv->mdev;
105 port_state = mlx5_query_vport_state(mdev,
177 mlx5_notifier_register(priv->mdev, &priv->events_nb);
182 mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
209 priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
217 if (mlx5_core_is_mp_master(priv->mdev)) {
231 if (mlx5_core_is_mp_master(priv->mdev)) {
270 mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
275 mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
297 ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
401 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
419 !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
420 mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
426 inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
442 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
444 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
466 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
471 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
479 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
494 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
501 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
507 static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
529 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
531 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
534 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
540 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
548 max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
554 mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
558 err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
565 static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
568 u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
571 mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
575 return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
712 struct mlx5_core_dev *mdev = c->mdev;
720 rq->clock = &mdev->clock;
724 rq->mdev = mdev;
729 rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
738 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
755 mlx5e_shampo_hd_per_wq(mdev, params, rqp);
756 err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
770 mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
779 mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
793 mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
802 struct mlx5_core_dev *mdev = rq->mdev;
818 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
821 rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
825 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
838 rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
839 rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
841 mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
844 mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
847 mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
851 mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
853 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
856 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
858 BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
863 err = mlx5e_create_rq_umr_mkey(mdev, rq);
871 err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
877 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
976 mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
1006 mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
1022 struct mlx5_core_dev *mdev = rq->mdev;
1036 ts_format = mlx5_is_real_time_rq(mdev) ?
1061 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
1070 struct mlx5_core_dev *mdev = rq->mdev;
1090 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
1138 struct mlx5_core_dev *mdev = rq->mdev;
1157 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
1166 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
1274 struct mlx5_core_dev *mdev = rq->mdev;
1292 if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
1322 MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
1329 MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
1363 u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
1369 return profile->get_tisn(mdev, priv, lag_port, tc);
1371 return mdev->mlx5e_res.hw_objs.tisn[lag_port][tc];
1431 struct mlx5_core_dev *mdev = c->mdev;
1438 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1448 sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
1449 mlx5e_stop_room_for_max_wqe(mdev);
1450 sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
1453 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1518 struct mlx5_core_dev *mdev = c->mdev;
1523 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1527 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1595 struct mlx5_core_dev *mdev = c->mdev;
1600 sq->clock = &mdev->clock;
1603 sq->mdev = c->mdev;
1608 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
1611 sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
1613 if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
1615 if (mlx5_ipsec_device_caps(c->priv->mdev))
1620 sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
1623 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1647 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1665 ts_format = mlx5_is_real_time_sq(mdev) ?
1679 if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1686 MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
1694 err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1701 int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1729 err = mlx5_core_modify_sq(mdev, sqn, in);
1736 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1738 mlx5_core_destroy_sq(mdev, sqn);
1741 int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1750 err = mlx5e_create_sq(mdev, param, csp, sqn);
1760 err = mlx5e_modify_sq(mdev, *sqn, &msp);
1762 mlx5e_destroy_sq(mdev, *sqn);
1790 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
1818 mlx5e_destroy_sq(c->mdev, sq->sqn);
1867 struct mlx5_core_dev *mdev = sq->mdev;
1873 mlx5e_destroy_sq(mdev, sq->sqn);
1876 mlx5_rl_remove_rate(mdev, &rl);
1947 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
1961 mlx5e_destroy_sq(c->mdev, sq->sqn);
1985 mlx5e_destroy_sq(c->mdev, sq->sqn);
2002 csp.tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
2012 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
2060 mlx5e_destroy_sq(c->mdev, sq->sqn);
2065 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
2075 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
2096 cq->mdev = mdev;
2103 static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
2114 err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
2130 struct mlx5_core_dev *mdev = cq->mdev;
2139 err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
2159 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
2164 err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
2178 mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
2181 int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
2187 err = mlx5e_alloc_cq(mdev, param, ccp, cq);
2195 if (MLX5_CAP_GEN(mdev, cq_moderation) &&
2196 MLX5_CAP_GEN(mdev, cq_period_mode_modify))
2197 mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts,
2256 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp,
2326 tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
2361 struct mlx5_core_dev *mdev = priv->mdev;
2374 mlx5_rl_remove_rate(mdev, &rl);
2381 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
2393 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
2399 mlx5_rl_remove_rate(mdev, &rl);
2410 struct mlx5_core_dev *mdev = priv->mdev;
2414 if (!mlx5_rl_is_supported(mdev)) {
2423 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
2461 err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
2466 err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
2475 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
2480 err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
2485 err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
2584 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
2586 u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
2588 return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
2636 struct mlx5_core_dev *mdev;
2644 mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
2645 vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
2646 cpu = mlx5_comp_vector_get_cpu(mdev, vec_ix);
2648 err = mlx5_comp_irqn_get(mdev, vec_ix, &irq);
2661 c->mdev = mdev;
2665 c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix);
2667 c->pdev = mlx5_core_dma_dev(mdev);
2669 c->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
2674 c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
2769 err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
2890 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2896 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2901 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2905 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2911 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2913 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2922 struct mlx5_core_dev *mdev = priv->mdev;
2926 err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2930 mlx5e_query_mtu(mdev, params, &mtu);
2945 struct mlx5_core_dev *mdev = priv->mdev;
2951 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
3008 struct mlx5_core_dev *mdev = priv->mdev;
3011 num_comp_vectors = mlx5_comp_vectors_max(mdev);
3017 int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
3306 static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
3309 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3312 mlx5_set_port_admin_status(mdev, state);
3314 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
3315 !MLX5_CAP_GEN(mdev, uplink_follow))
3368 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
3403 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
3415 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3425 err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
3433 rq->mdev = mdev;
3442 struct mlx5_core_dev *mdev = priv->mdev;
3444 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3445 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3447 return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
3453 struct mlx5_core_dev *mdev = priv->mdev;
3459 mlx5e_build_drop_rq_param(mdev, &rq_param);
3469 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3479 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3664 err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
3695 static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
3708 err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
3728 rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
3950 struct mlx5_core_dev *mdev = priv->mdev;
3971 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
3972 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
4054 struct mlx5_core_dev *mdev = priv->mdev;
4056 return mlx5_set_port_fcs(mdev, !enable);
4100 static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
4106 if (!MLX5_CAP_GEN(mdev, ports_check))
4109 err = mlx5_query_ports_check(mdev, in, sizeof(in));
4122 return mlx5_set_ports_check(mdev, in, sizeof(in));
4127 struct mlx5_core_dev *mdev = priv->mdev;
4130 return mlx5e_set_rx_port_ts(mdev, enable);
4396 struct mlx5_core_dev *mdev)
4413 if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
4436 struct mlx5_core_dev *mdev,
4445 mlx5e_rx_is_linear_skb(mdev, params, NULL) :
4446 mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL);
4456 !mlx5e_verify_params_rx_mpwqe_strides(mdev, params, NULL)) {
4482 err = mlx5e_validate_params(priv->mdev, &new_params);
4486 if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
4494 &new_params, priv->mdev)) {
4504 bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
4505 bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
4507 u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
4508 u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
4581 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
4582 (mlx5_clock_get_ptp_index(priv->mdev) == -1))
4656 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
4680 struct mlx5_core_dev *mdev = priv->mdev;
4682 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
4689 struct mlx5_core_dev *mdev = priv->mdev;
4694 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
4701 struct mlx5_core_dev *mdev = priv->mdev;
4703 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
4709 struct mlx5_core_dev *mdev = priv->mdev;
4711 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
4718 struct mlx5_core_dev *mdev = priv->mdev;
4720 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
4750 struct mlx5_core_dev *mdev = priv->mdev;
4755 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4763 struct mlx5_core_dev *mdev = priv->mdev;
4769 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
4780 struct mlx5_core_dev *mdev = priv->mdev;
4782 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
4813 static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
4817 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
4820 return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
4821 MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
4827 static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
4837 return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
4864 if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
4869 if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
4877 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
4882 if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
4960 static int mlx5e_xdp_allowed(struct net_device *netdev, struct mlx5_core_dev *mdev,
4968 if (!mlx5e_params_validate_xdp(netdev, mdev, params))
4999 err = mlx5e_xdp_allowed(netdev, priv->mdev, &new_params);
5062 struct mlx5_core_dev *mdev = priv->mdev;
5066 err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
5079 struct mlx5_core_dev *mdev = priv->mdev;
5101 return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
5147 static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
5153 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
5156 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
5162 struct mlx5_core_dev *mdev = priv->mdev;
5174 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
5177 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
5181 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
5182 MLX5_CAP_GEN(mdev, vport_group_manager))
5183 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
5189 mlx5e_build_rq_params(mdev, params);
5191 params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);
5193 params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
5196 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
5197 MLX5_CAP_GEN(mdev, cq_period_mode_modify);
5198 params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
5199 MLX5_CAP_GEN(mdev, cq_period_mode_modify);
5200 params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
5208 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
5224 mlx5_query_mac_address(priv->mdev, addr);
5226 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
5228 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
5240 return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
5248 return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
5253 if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
5263 mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
5268 static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
5273 if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt)))
5276 return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
5282 struct mlx5_core_dev *mdev = priv->mdev;
5286 SET_NETDEV_DEV(netdev, mdev->device);
5321 if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
5322 !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
5323 !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
5324 mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
5334 if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
5341 if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
5351 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
5360 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
5372 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
5377 if (MLX5_CAP_ETH(mdev, scatter_fcs))
5380 if (mlx5_qos_is_supported(mdev))
5392 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
5422 struct mlx5_core_dev *mdev = priv->mdev;
5428 mlx5_sd_for_each_dev(i, mdev, pos) {
5435 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5449 mlx5_sd_for_each_dev(i, priv->mdev, pos) {
5460 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5464 static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
5478 mlx5_debugfs_get_dev_root(mdev));
5480 fs = mlx5e_fs_init(priv->profile, mdev,
5485 mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
5493 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
5523 struct mlx5_core_dev *mdev = priv->mdev;
5531 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
5536 if (mlx5_tunnel_inner_ft_supported(mdev))
5538 if (mlx5_get_sd(priv->mdev))
5541 priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
5547 mlx5_core_err(mdev, "create rx resources failed, %d\n", err);
5554 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
5567 priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
5608 rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
5632 struct mlx5_core_dev *mdev = priv->mdev;
5640 mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
5644 mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
5649 mlx5_lag_add_netdev(mdev, netdev);
5673 struct mlx5_core_dev *mdev = priv->mdev;
5697 mlx5_lag_remove_netdev(mdev, priv->netdev);
5698 mlx5_vxlan_reset_to_default(mdev->vxlan);
5731 static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
5736 nch = mlx5e_get_max_num_channels(mdev);
5739 nch = min_t(int, nch, profile->max_nch_limit(mdev));
5744 mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
5751 max_nch = mlx5e_profile_max_num_channels(mdev, profile);
5758 if (mlx5_qos_is_supported(mdev))
5759 tmp -= mlx5e_qos_max_leaf_nodes(mdev);
5760 if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
5768 int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
5774 + mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
5789 struct mlx5_core_dev *mdev)
5795 nch = mlx5e_calc_max_nch(mdev, netdev, profile);
5796 node = dev_to_node(mlx5_core_dma_dev(mdev));
5799 priv->mdev = mdev;
5855 if (!priv->mdev)
5874 static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
5879 nch = mlx5e_profile_max_num_channels(mdev, profile);
5881 ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
5885 qos_txqs = mlx5_qos_is_supported(mdev) &&
5887 mlx5e_qos_max_leaf_nodes(mdev) : 0;
5892 static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
5895 return mlx5e_profile_max_num_channels(mdev, profile);
5899 mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
5905 txqs = mlx5e_get_max_num_txqs(mdev, profile);
5906 rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
5910 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
5914 err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
5916 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
5922 dev_net_set(netdev, mlx5_core_net(mdev));
5960 if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
5961 mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %u\n",
5962 mlx5e_get_max_sq_wqebbs(priv->mdev), (unsigned int)MLX5E_MAX_TX_WQEBBS);
5967 max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
5969 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
5976 mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
5981 mlx5_core_warn(priv->mdev,
6051 mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
6057 err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
6059 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
6065 err = new_profile->init(priv->mdev, priv->netdev);
6077 mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
6083 err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
6103 struct mlx5_core_dev *mdev = priv->mdev;
6112 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6113 mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
6118 err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
6127 rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
6153 struct mlx5_core_dev *mdev = edev->mdev;
6160 mlx5_sd_for_each_dev(i, mdev, pos) {
6174 mlx5_sd_for_each_dev_to(i, mdev, to, pos)
6182 struct mlx5_core_dev *mdev = edev->mdev;
6186 err = mlx5_sd_init(mdev);
6190 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6201 struct mlx5_core_dev *mdev = priv->mdev;
6207 mlx5_sd_for_each_dev(i, mdev, pos)
6213 mlx5_sd_for_each_dev(i, mdev, pos)
6222 struct mlx5_core_dev *mdev = edev->mdev;
6226 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6230 mlx5_sd_cleanup(mdev);
6238 struct mlx5_core_dev *mdev = edev->mdev;
6244 mlx5e_dev = mlx5e_create_devlink(&adev->dev, mdev);
6249 err = mlx5e_devlink_port_register(mlx5e_dev, mdev);
6251 mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
6255 netdev = mlx5e_create_netdev(mdev, profile);
6257 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
6271 err = profile->init(mdev, netdev);
6273 mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
6279 mlx5_core_err(mdev, "_mlx5e_resume failed, %d\n", err);
6285 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
6290 mlx5_core_uplink_netdev_set(mdev, netdev);
6291 mlx5e_params_print_info(mdev, &priv->channels.params);
6311 struct mlx5_core_dev *mdev = edev->mdev;
6315 err = mlx5_sd_init(mdev);
6319 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6330 struct mlx5_core_dev *mdev = edev->mdev;
6332 mlx5_core_uplink_netdev_set(mdev, NULL);
6345 struct mlx5_core_dev *mdev = edev->mdev;
6348 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6352 mlx5_sd_cleanup(mdev);
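
The matches above follow one recurring pattern: each mlx5e object (priv, channel, rq, sq, cq) caches a pointer to its owning struct mlx5_core_dev (priv->mdev, c->mdev, rq->mdev, sq->mdev, cq->mdev), and feature decisions are gated on MLX5_CAP_GEN()/MLX5_CAP_ETH() capability bits queried through that pointer before any hardware object is created or modified. The fragment below is a minimal illustrative sketch of that pattern, not code from the file: the helper name mlx5e_cq_moder_supported_sketch is hypothetical, the usual mlx5 core/en driver headers are assumed to be in scope, and it only reuses identifiers that appear verbatim in the matches (priv->mdev, MLX5_CAP_GEN(mdev, cq_moderation), MLX5_CAP_GEN(mdev, cq_period_mode_modify), mlx5_core_warn()).

	/* Hypothetical helper; a sketch of the capability-gating pattern seen in
	 * the matches around source lines 2195-2197 and 5196-5199. Not part of
	 * the listed file.
	 */
	static bool mlx5e_cq_moder_supported_sketch(struct mlx5e_priv *priv)
	{
		struct mlx5_core_dev *mdev = priv->mdev;	/* same access path as above */

		/* Check firmware capability bits before relying on CQ moderation. */
		if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
		    !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) {
			mlx5_core_warn(mdev, "CQ moderation not fully supported by firmware\n");
			return false;
		}

		return true;
	}

Caching the mdev pointer in each datapath object (as in the matches at source lines 724, 1603, 2096 and 3433) keeps the ring structures self-contained, so fast-path and teardown code can reach device capabilities and resources without walking back through priv.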