Lines Matching refs:esw

41 #include "esw/indir_table.h"
42 #include "esw/acl/ofld.h"
56 #define mlx5_esw_for_each_rep(esw, i, rep) \
57 xa_for_each(&((esw)->offloads.vport_reps), i, rep)
76 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
79 return xa_load(&esw->offloads.vport_reps, vport_num);
83 mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
87 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
105 mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
107 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
122 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
136 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
159 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
166 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
175 esw_setup_decap_indir(struct mlx5_eswitch *esw,
183 ft = mlx5_esw_indir_table_get(esw, attr,
189 esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
193 mlx5_esw_indir_table_put(esw,
229 struct mlx5_eswitch *esw,
238 return esw_setup_decap_indir(esw, attr);
254 struct mlx5_eswitch *esw, int i)
256 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
259 dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
281 static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
285 struct mlx5_fs_chains *chains = esw_chains(esw);
291 else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
293 mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
297 esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
310 struct mlx5_eswitch *esw,
340 static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
345 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
349 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
362 mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
376 struct mlx5_eswitch *esw,
390 dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
399 err = esw_setup_decap_indir(esw, attr);
407 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
411 static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
415 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
416 esw_cleanup_decap_indir(esw, attr);
430 static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
434 if (esw->offloads.ft_ipsec_tx_pol &&
440 esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
446 static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
451 if (!esw->offloads.ft_ipsec_tx_pol)
455 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
459 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
468 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
473 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
478 mlx5_lag_is_mpesw(esw->dev))
493 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
496 dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
507 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
510 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
511 esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
514 esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
520 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
526 esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
531 esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
533 return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
534 mlx5_eswitch_vport_match_metadata_enabled(esw) &&
535 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
568 struct mlx5_eswitch *esw,
574 struct mlx5_fs_chains *chains = esw_chains(esw);
577 if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
578 esw_src_port_rewrite_supported(esw))
582 esw_setup_slow_path_dest(dest, flow_act, esw, *i);
596 } else if (esw_is_indir_table(esw, attr)) {
597 err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
598 } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
599 err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
601 *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
604 err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
618 esw_cleanup_dests(struct mlx5_eswitch *esw,
622 struct mlx5_fs_chains *chains = esw_chains(esw);
625 esw_cleanup_decap_indir(esw, attr);
629 else if (esw_is_indir_table(esw, attr))
630 esw_cleanup_indir_table(esw, attr);
631 else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
632 esw_cleanup_chain_src_port_rewrite(esw, attr);
651 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
657 struct mlx5_fs_chains *chains = esw_chains(esw);
665 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
668 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
671 if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
691 mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
696 err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
705 esw_warn(esw->dev,
739 fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
748 mlx5_eswitch_set_rule_source_port(esw, spec, attr,
762 if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
763 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
770 atomic64_inc(&esw->offloads.num_flows);
777 mlx5_esw_vporttbl_put(esw, &fwd_attr);
781 esw_cleanup_dests(esw, attr);
788 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
794 struct mlx5_fs_chains *chains = esw_chains(esw);
816 fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
830 esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
841 mlx5_eswitch_set_rule_source_port(esw, spec, attr,
856 atomic64_inc(&esw->offloads.num_flows);
861 mlx5_esw_vporttbl_put(esw, &fwd_attr);
870 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
876 struct mlx5_fs_chains *chains = esw_chains(esw);
887 mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
891 atomic64_dec(&esw->offloads.num_flows);
901 mlx5_esw_vporttbl_put(esw, &fwd_attr);
905 mlx5_esw_vporttbl_put(esw, &fwd_attr);
908 esw_cleanup_dests(esw, attr);
913 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
917 __mlx5_eswitch_del_rule(esw, rule, attr, false);
921 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
925 __mlx5_eswitch_del_rule(esw, rule, attr, true);
955 /* source vport is the esw manager */
988 dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
1000 dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
1031 mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
1054 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
1057 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1060 esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
1067 static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
1069 return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
1073 static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
1081 if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
1082 !mlx5_eswitch_vport_match_metadata_enabled(esw))
1087 err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
1094 if (mlx5_eswitch_reg_c1_loopback_supported(esw))
1107 err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
1110 esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
1112 esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
1118 static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
1125 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1154 static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
1161 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1174 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
1181 int nvports = esw->total_vports;
1189 if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
1196 peer_miss_rules_setup(esw, peer_dev, spec, &dest);
1208 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1209 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1210 esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1213 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1222 if (mlx5_ecpf_vport_exists(esw->dev)) {
1223 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1225 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1234 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1235 esw_set_peer_miss_rule_source_port(esw,
1239 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1248 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1249 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1252 esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1254 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
1266 esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
1271 esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
1277 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1283 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1288 if (mlx5_ecpf_vport_exists(esw->dev)) {
1289 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1293 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1294 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1298 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
1305 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
1313 flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
1317 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1318 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1328 mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
1331 if (mlx5_ecpf_vport_exists(esw->dev)) {
1332 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1336 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1337 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1342 esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
1345 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
1371 dest.vport.num = esw->manager_vport;
1374 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1378 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
1382 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
1389 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
1393 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
1394 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1398 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
1406 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
1409 struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
1416 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1433 flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
1439 dest.ft = esw->offloads.ft_offloads;
1445 esw_warn(esw->dev,
1456 mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
1464 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1483 static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
1491 mlx5_esw_for_each_vport(esw, i, vport) {
1494 mlx5_esw_vporttbl_put(esw, &attr);
1498 static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
1507 mlx5_esw_for_each_vport(esw, i, vport) {
1510 fdb = mlx5_esw_vporttbl_get(esw, &attr);
1517 esw_vport_tbl_put(esw);
1521 #define fdb_modify_header_fwd_to_table_supported(esw) \
1522 (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
1523 static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
1525 struct mlx5_core_dev *dev = esw->dev;
1531 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
1534 } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
1537 } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
1549 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1554 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1556 struct mlx5_core_dev *dev = esw->dev;
1562 esw_init_chains_offload_flags(esw, &attr.flags);
1564 attr.max_grp_num = esw->params.large_group_num;
1566 attr.mapping = esw->offloads.reg_c0_obj_pool;
1576 esw->fdb_table.offloads.esw_chains_priv = chains;
1595 err = esw_vport_tbl_get(esw);
1610 esw->fdb_table.offloads.esw_chains_priv = NULL;
1616 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1619 esw_vport_tbl_put(esw);
1628 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
1632 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
1638 esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
1650 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);
1655 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
1656 MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
1664 count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
1672 esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1675 esw->fdb_table.offloads.send_to_vport_grp = g;
1682 esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
1692 if (!esw_src_port_rewrite_supported(esw))
1710 end_flow_index, *ix + esw->total_vports - 1);
1711 *ix += esw->total_vports;
1716 esw_warn(esw->dev,
1720 esw->fdb_table.offloads.send_to_vport_meta_grp = g;
1729 esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
1734 int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
1740 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1745 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
1747 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1767 esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
1770 esw->fdb_table.offloads.peer_miss_grp = g;
1777 esw_create_miss_group(struct mlx5_eswitch *esw,
1805 esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
1808 esw->fdb_table.offloads.miss_grp = g;
1810 err = esw_add_fdb_miss_rule(esw);
1817 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1822 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
1826 struct mlx5_core_dev *dev = esw->dev;
1832 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1844 esw->fdb_table.offloads.ns = root_ns;
1846 esw->dev->priv.steering->mode);
1853 * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
1855 * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
1861 * total vports of the peer (currently also uses esw->total_vports).
1863 table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
1864 esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;
1869 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1883 esw->fdb_table.offloads.slow_fdb = fdb;
1892 esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
1893 if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
1894 err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
1899 err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
1905 err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
1909 err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
1913 err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
1917 err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
1925 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1926 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1928 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1929 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1931 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1933 esw_chains_destroy(esw, esw_chains(esw));
1935 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1937 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
1946 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1948 if (!mlx5_eswitch_get_slow_fdb(esw))
1951 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1952 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1953 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1954 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1955 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1956 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1957 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1958 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1959 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1961 esw_chains_destroy(esw, esw_chains(esw));
1963 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1964 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
1966 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1968 atomic64_set(&esw->user_count, 0);
1971 static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
1975 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
1976 if (mlx5e_tc_int_port_supported(esw))
1982 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
1985 struct mlx5_core_dev *dev = esw->dev;
1992 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1996 ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
2003 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
2007 esw->offloads.ft_offloads = ft_offloads;
2011 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
2013 struct mlx5_esw_offload *offloads = &esw->offloads;
2018 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
2026 nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
2031 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
2036 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
2040 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
2044 esw->offloads.vport_rx_group = g;
2050 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
2052 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
2055 static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
2061 return esw_get_nr_ft_offloads_steering_src_ports(esw);
2064 static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
2072 flow_index = esw_create_vport_rx_drop_rule_index(esw);
2081 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
2085 mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
2089 esw->offloads.vport_rx_drop_group = g;
2095 static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
2097 if (esw->offloads.vport_rx_drop_group)
2098 mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
2102 mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
2108 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2111 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
2130 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
2143 mlx5_esw_set_spec_source_port(esw, vport, spec);
2146 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
2149 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
2158 static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2164 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
2167 esw_warn(esw->dev,
2173 esw->offloads.vport_rx_drop_rule = flow_rule;
2178 static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
2180 if (esw->offloads.vport_rx_drop_rule)
2181 mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
2184 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
2187 struct mlx5_core_dev *dev = esw->dev;
2194 if (!mlx5_esw_is_fdb_created(esw))
2209 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2210 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
2222 static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
2224 struct mlx5_esw_offload *offloads = &esw->offloads;
2226 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2229 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
2234 static int esw_create_restore_table(struct mlx5_eswitch *esw)
2239 struct mlx5_core_dev *dev = esw->dev;
2248 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2253 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
2267 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
2297 mod_hdr = mlx5_modify_header_alloc(esw->dev,
2307 esw->offloads.ft_offloads_restore = ft;
2308 esw->offloads.restore_group = g;
2309 esw->offloads.restore_copy_hdr_id = mod_hdr;
2325 static int esw_offloads_start(struct mlx5_eswitch *esw,
2330 esw->mode = MLX5_ESWITCH_OFFLOADS;
2331 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
2335 esw->mode = MLX5_ESWITCH_LEGACY;
2336 mlx5_rescan_drivers(esw->dev);
2339 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
2340 if (mlx5_eswitch_inline_mode_get(esw,
2341 &esw->offloads.inline_mode)) {
2342 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
2350 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
2365 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
2376 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
2379 xa_erase(&esw->offloads.vport_reps, rep->vport);
2383 static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
2388 mlx5_esw_for_each_rep(esw, i, rep)
2389 mlx5_esw_offloads_rep_cleanup(esw, rep);
2390 xa_destroy(&esw->offloads.vport_reps);
2393 static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
2399 xa_init(&esw->offloads.vport_reps);
2401 mlx5_esw_for_each_vport(esw, i, vport) {
2402 err = mlx5_esw_offloads_rep_init(esw, vport);
2409 esw_offloads_cleanup_reps(esw);
2417 struct mlx5_eswitch *esw = dev->priv.eswitch;
2420 down_write(&esw->mode_lock);
2421 if (mlx5_esw_is_fdb_created(esw)) {
2425 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2430 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2432 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2434 up_write(&esw->mode_lock);
2472 int esw_offloads_init(struct mlx5_eswitch *esw)
2476 err = esw_offloads_init_reps(esw);
2480 if (MLX5_ESWITCH_MANAGER(esw->dev) &&
2481 mlx5_esw_vport_match_metadata_supported(esw))
2482 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2484 err = devl_params_register(priv_to_devlink(esw->dev),
2493 esw_offloads_cleanup_reps(esw);
2497 void esw_offloads_cleanup(struct mlx5_eswitch *esw)
2499 devl_params_unregister(priv_to_devlink(esw->dev),
2502 esw_offloads_cleanup_reps(esw);
2505 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
2510 esw->offloads.rep_ops[rep_type]->unload(rep);
2513 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
2518 mlx5_esw_for_each_rep(esw, i, rep)
2519 __esw_offloads_unload_rep(esw, rep, rep_type);
2522 static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
2528 rep = mlx5_eswitch_get_rep(esw, vport_num);
2532 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2542 __esw_offloads_unload_rep(esw, rep, rep_type);
2546 static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
2551 rep = mlx5_eswitch_get_rep(esw, vport_num);
2553 __esw_offloads_unload_rep(esw, rep, rep_type);
2556 int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2558 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2561 return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
2564 void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2566 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2569 mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
2572 int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
2576 return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
2579 void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2581 mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
2584 int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2588 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2591 err = mlx5_esw_offloads_devlink_port_register(esw, vport);
2595 err = mlx5_esw_offloads_rep_load(esw, vport->vport);
2601 mlx5_esw_offloads_devlink_port_unregister(esw, vport);
2605 void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
2607 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2610 mlx5_esw_offloads_rep_unload(esw, vport->vport);
2612 mlx5_esw_offloads_devlink_port_unregister(esw, vport);
2705 static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
2726 if (vport->vport || mlx5_core_is_ecpf(esw->dev))
2784 struct mlx5_eswitch *esw = master->priv.eswitch;
2790 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
2803 err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
2869 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
2877 mlx5_esw_for_each_rep(esw, i, rep) {
2880 ops = esw->offloads.rep_ops[rep_type];
2883 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
2888 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
2892 mlx5e_tc_clean_fdb_peer_flows(esw);
2894 mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
2895 esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
2898 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2907 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2911 mlx5_esw_for_each_rep(esw, i, rep) {
2913 ops = esw->offloads.rep_ops[rep_type];
2916 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
2926 mlx5_esw_offloads_unpair(esw, peer_esw);
2930 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2935 u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
2941 ns = esw->dev->priv.steering->fdb_root_ns;
2965 struct mlx5_eswitch *esw = my_data;
2972 esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
2973 esw_paired = !!xa_load(&esw->paired, peer_esw_i);
2977 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2984 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
2988 err = mlx5_esw_offloads_pair(esw, peer_esw);
2992 err = mlx5_esw_offloads_pair(peer_esw, esw);
2996 err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
3000 err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
3004 esw->num_peers++;
3006 mlx5_devcom_comp_set_ready(esw->devcom, true);
3014 esw->num_peers--;
3015 if (!esw->num_peers && !peer_esw->num_peers)
3016 mlx5_devcom_comp_set_ready(esw->devcom, false);
3018 xa_erase(&esw->paired, peer_esw_i);
3019 mlx5_esw_offloads_unpair(peer_esw, esw);
3020 mlx5_esw_offloads_unpair(esw, peer_esw);
3021 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
3028 xa_erase(&esw->paired, peer_esw_i);
3030 mlx5_esw_offloads_unpair(peer_esw, esw);
3032 mlx5_esw_offloads_unpair(esw, peer_esw);
3034 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
3036 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
3041 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
3046 INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
3047 mutex_init(&esw->offloads.peer_mutex);
3049 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
3052 if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
3053 !mlx5_lag_is_supported(esw->dev))
3056 xa_init(&esw->paired);
3057 esw->num_peers = 0;
3058 esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
3062 esw);
3063 if (IS_ERR(esw->devcom))
3066 mlx5_devcom_send_event(esw->devcom,
3069 esw);
3072 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
3074 if (IS_ERR_OR_NULL(esw->devcom))
3077 mlx5_devcom_send_event(esw->devcom,
3080 esw);
3082 mlx5_devcom_unregister_component(esw->devcom);
3083 xa_destroy(&esw->paired);
3084 esw->devcom = NULL;
3087 bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
3089 return mlx5_devcom_comp_is_ready(esw->devcom);
3092 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
3094 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
3097 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
3112 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
3117 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
3126 pf_num = mlx5_get_dev_index(esw->dev);
3132 id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
3141 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
3146 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
3149 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
3153 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
3155 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
3161 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
3171 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
3174 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
3179 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3182 mlx5_esw_for_each_vport(esw, i, vport)
3183 esw_offloads_vport_metadata_cleanup(esw, vport);
3186 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
3192 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
3195 mlx5_esw_for_each_vport(esw, i, vport) {
3196 err = esw_offloads_vport_metadata_setup(esw, vport);
3204 esw_offloads_metadata_uninit(esw);
3209 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
3214 err = esw_acl_ingress_ofld_setup(esw, vport);
3218 err = esw_acl_egress_ofld_setup(esw, vport);
3225 esw_acl_ingress_ofld_cleanup(esw, vport);
3230 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
3234 esw_acl_ingress_ofld_cleanup(esw, vport);
3237 static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
3242 uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3246 ret = esw_vport_create_offloads_acl_tables(esw, uplink);
3250 manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
3256 ret = esw_vport_create_offloads_acl_tables(esw, manager);
3263 esw_vport_destroy_offloads_acl_tables(esw, uplink);
3267 static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
3271 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
3273 esw_vport_destroy_offloads_acl_tables(esw, vport);
3275 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3277 esw_vport_destroy_offloads_acl_tables(esw, vport);
3280 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
3286 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
3289 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3293 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3297 mlx5_esw_for_each_rep(esw, i, rep) {
3299 mlx5_esw_offloads_rep_load(esw, rep->vport);
3305 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3310 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
3311 mutex_init(&esw->fdb_table.offloads.vports.lock);
3312 hash_init(esw->fdb_table.offloads.vports.table);
3313 atomic64_set(&esw->user_count, 0);
3320 esw->fdb_table.offloads.indir = indir;
3322 err = esw_create_offloads_acl_tables(esw);
3326 err = esw_create_offloads_table(esw);
3330 err = esw_create_restore_table(esw);
3334 err = esw_create_offloads_fdb_tables(esw);
3338 err = esw_create_vport_rx_group(esw);
3342 err = esw_create_vport_rx_drop_group(esw);
3346 err = esw_create_vport_rx_drop_rule(esw);
3353 esw_destroy_vport_rx_drop_group(esw);
3355 esw_destroy_vport_rx_group(esw);
3357 esw_destroy_offloads_fdb_tables(esw);
3359 esw_destroy_restore_table(esw);
3361 esw_destroy_offloads_table(esw);
3363 esw_destroy_offloads_acl_tables(esw);
3365 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3367 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3371 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3373 esw_destroy_vport_rx_drop_rule(esw);
3374 esw_destroy_vport_rx_drop_group(esw);
3375 esw_destroy_vport_rx_group(esw);
3376 esw_destroy_offloads_fdb_tables(esw);
3377 esw_destroy_restore_table(esw);
3378 esw_destroy_offloads_table(esw);
3379 esw_destroy_offloads_acl_tables(esw);
3380 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3381 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3385 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
3396 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3399 devlink = priv_to_devlink(esw->dev);
3402 if (esw->esw_funcs.num_vfs > 0) {
3403 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
3407 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3414 esw->esw_funcs.num_vfs = new_num_vfs;
3421 struct mlx5_eswitch *esw;
3425 esw = host_work->esw;
3427 out = mlx5_esw_query_functions(esw->dev);
3431 esw_vfs_changed_event_handler(esw, out);
3441 struct mlx5_eswitch *esw;
3448 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3450 host_work->esw = esw;
3453 queue_work(esw->work_queue, &host_work->work);
3458 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3462 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3465 query_host_out = mlx5_esw_query_functions(esw->dev);
3470 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3476 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3482 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3486 return (controller == esw->offloads.host_number + 1);
3489 int esw_offloads_enable(struct mlx5_eswitch *esw)
3497 mutex_init(&esw->offloads.termtbl_mutex);
3498 mlx5_rdma_enable_roce(esw->dev);
3500 err = mlx5_esw_host_number_init(esw);
3504 err = esw_offloads_metadata_init(esw);
3508 err = esw_set_passing_vport_metadata(esw, true);
3512 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
3523 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3525 err = esw_offloads_steering_init(esw);
3530 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3532 if (mlx5_core_ec_sriov_enabled(esw->dev))
3533 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
3537 err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3541 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3548 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
3550 esw_offloads_steering_cleanup(esw);
3554 esw_set_passing_vport_metadata(esw, false);
3556 esw_offloads_metadata_uninit(esw);
3558 mlx5_rdma_disable_roce(esw->dev);
3559 mutex_destroy(&esw->offloads.termtbl_mutex);
3563 static int esw_offloads_stop(struct mlx5_eswitch *esw,
3568 esw->mode = MLX5_ESWITCH_LEGACY;
3573 if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
3576 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3583 void esw_offloads_disable(struct mlx5_eswitch *esw)
3585 mlx5_eswitch_disable_pf_vf_vports(esw);
3586 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
3587 esw_set_passing_vport_metadata(esw, false);
3588 esw_offloads_steering_cleanup(esw);
3589 mapping_destroy(esw->offloads.reg_c0_obj_pool);
3590 esw_offloads_metadata_uninit(esw);
3591 mlx5_rdma_disable_roce(esw->dev);
3592 mutex_destroy(&esw->offloads.termtbl_mutex);
3673 struct mlx5_eswitch *esw = dev->priv.eswitch;
3676 if (!mlx5_esw_allowed(esw))
3680 err = mlx5_esw_try_lock(esw);
3684 esw->offloads.num_block_mode++;
3685 mlx5_esw_unlock(esw);
3691 struct mlx5_eswitch *esw = dev->priv.eswitch;
3693 if (!mlx5_esw_allowed(esw))
3696 down_write(&esw->mode_lock);
3697 esw->offloads.num_block_mode--;
3698 up_write(&esw->mode_lock);
3705 struct mlx5_eswitch *esw;
3708 esw = mlx5_devlink_eswitch_get(devlink);
3709 if (IS_ERR(esw))
3710 return PTR_ERR(esw);
3715 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
3721 mlx5_lag_disable_change(esw->dev);
3722 err = mlx5_esw_try_lock(esw);
3733 if (esw->offloads.num_block_mode) {
3740 esw->eswitch_operation_in_progress = true;
3741 up_write(&esw->mode_lock);
3743 mlx5_eswitch_disable_locked(esw);
3745 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
3751 err = esw_offloads_start(esw, extack);
3753 err = esw_offloads_stop(esw, extack);
3754 mlx5_rescan_drivers(esw->dev);
3760 down_write(&esw->mode_lock);
3761 esw->eswitch_operation_in_progress = false;
3763 mlx5_esw_unlock(esw);
3765 mlx5_lag_enable_change(esw->dev);
3771 struct mlx5_eswitch *esw;
3773 esw = mlx5_devlink_eswitch_get(devlink);
3774 if (IS_ERR(esw))
3775 return PTR_ERR(esw);
3777 return esw_mode_to_devlink(esw->mode, mode);
3780 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
3783 struct mlx5_core_dev *dev = esw->dev;
3789 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3798 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
3799 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3812 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
3817 esw->offloads.inline_mode);
3820 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3825 esw->offloads.inline_mode);
3834 struct mlx5_eswitch *esw;
3838 esw = mlx5_devlink_eswitch_get(devlink);
3839 if (IS_ERR(esw))
3840 return PTR_ERR(esw);
3842 down_write(&esw->mode_lock);
3860 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3871 esw->eswitch_operation_in_progress = true;
3872 up_write(&esw->mode_lock);
3874 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
3876 esw->offloads.inline_mode = mlx5_mode;
3878 down_write(&esw->mode_lock);
3879 esw->eswitch_operation_in_progress = false;
3880 up_write(&esw->mode_lock);
3884 up_write(&esw->mode_lock);
3890 struct mlx5_eswitch *esw;
3892 esw = mlx5_devlink_eswitch_get(devlink);
3893 if (IS_ERR(esw))
3894 return PTR_ERR(esw);
3896 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
3901 struct mlx5_eswitch *esw = dev->priv.eswitch;
3903 if (!mlx5_esw_allowed(esw))
3906 down_write(&esw->mode_lock);
3907 if (esw->mode != MLX5_ESWITCH_LEGACY &&
3908 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
3909 up_write(&esw->mode_lock);
3913 esw->offloads.num_block_encap++;
3914 up_write(&esw->mode_lock);
3920 struct mlx5_eswitch *esw = dev->priv.eswitch;
3922 if (!mlx5_esw_allowed(esw))
3925 down_write(&esw->mode_lock);
3926 esw->offloads.num_block_encap--;
3927 up_write(&esw->mode_lock);
3935 struct mlx5_eswitch *esw;
3938 esw = mlx5_devlink_eswitch_get(devlink);
3939 if (IS_ERR(esw))
3940 return PTR_ERR(esw);
3942 down_write(&esw->mode_lock);
3956 if (esw->mode == MLX5_ESWITCH_LEGACY) {
3957 esw->offloads.encap = encap;
3961 if (esw->offloads.encap == encap)
3964 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3971 if (esw->offloads.num_block_encap) {
3978 esw->eswitch_operation_in_progress = true;
3979 up_write(&esw->mode_lock);
3981 esw_destroy_offloads_fdb_tables(esw);
3983 esw->offloads.encap = encap;
3985 err = esw_create_offloads_fdb_tables(esw);
3990 esw->offloads.encap = !encap;
3991 (void)esw_create_offloads_fdb_tables(esw);
3994 down_write(&esw->mode_lock);
3995 esw->eswitch_operation_in_progress = false;
3998 up_write(&esw->mode_lock);
4005 struct mlx5_eswitch *esw;
4007 esw = mlx5_devlink_eswitch_get(devlink);
4008 if (IS_ERR(esw))
4009 return PTR_ERR(esw);
4011 *encap = esw->offloads.encap;
4016 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
4020 !mlx5_core_is_ecpf_esw_manager(esw->dev))
4024 !mlx5_ecpf_vport_exists(esw->dev))
4030 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
4038 esw->offloads.rep_ops[rep_type] = ops;
4039 mlx5_esw_for_each_rep(esw, i, rep) {
4040 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
4041 rep->esw = esw;
4049 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
4054 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
4055 __unload_reps_all_vport(esw, rep_type);
4057 mlx5_esw_for_each_rep(esw, i, rep)
4062 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
4066 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
4070 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
4076 rep = mlx5_eswitch_get_rep(esw, vport);
4079 esw->offloads.rep_ops[rep_type]->get_proto_dev)
4080 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
4085 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
4087 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
4091 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
4094 return mlx5_eswitch_get_rep(esw, vport);
4098 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
4100 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
4104 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
4106 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
4110 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
4113 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4122 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
4135 err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
4147 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
4152 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
4154 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
4164 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
4173 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
4178 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
4180 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
4183 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
4187 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
4189 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
4198 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
4201 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
4214 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4217 mutex_lock(&esw->state_lock);
4220 mutex_unlock(&esw->state_lock);
4228 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4231 return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
4237 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4240 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4245 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4250 mutex_lock(&esw->state_lock);
4252 mutex_unlock(&esw->state_lock);
4259 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4266 if (!MLX5_CAP_GEN(esw->dev, migration)) {
4271 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4276 mutex_lock(&esw->state_lock);
4289 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
4299 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
4311 mutex_unlock(&esw->state_lock);
4318 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4321 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4326 mutex_lock(&esw->state_lock);
4328 mutex_unlock(&esw->state_lock);
4335 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
4343 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
4348 mutex_lock(&esw->state_lock);
4361 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
4371 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
4383 mutex_unlock(&esw->state_lock);
4388 mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
4394 if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
4397 esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4398 esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
4407 struct mlx5_eswitch *esw;
4411 esw = mlx5_devlink_eswitch_get(port->devlink);
4412 if (IS_ERR(esw))
4413 return PTR_ERR(esw);
4415 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
4422 mutex_lock(&esw->state_lock);
4430 mutex_unlock(&esw->state_lock);
4437 struct mlx5_eswitch *esw;
4442 esw = mlx5_devlink_eswitch_get(port->devlink);
4443 if (IS_ERR(esw))
4444 return PTR_ERR(esw);
4447 err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
4456 mutex_lock(&esw->state_lock);
4466 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
4471 err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
4479 esw->enabled_ipsec_vf_count++;
4481 esw->enabled_ipsec_vf_count--;
4483 mutex_unlock(&esw->state_lock);
4490 struct mlx5_eswitch *esw;
4494 esw = mlx5_devlink_eswitch_get(port->devlink);
4495 if (IS_ERR(esw))
4496 return PTR_ERR(esw);
4498 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
4505 mutex_lock(&esw->state_lock);
4513 mutex_unlock(&esw->state_lock);
4521 struct mlx5_eswitch *esw;
4526 esw = mlx5_devlink_eswitch_get(port->devlink);
4527 if (IS_ERR(esw))
4528 return PTR_ERR(esw);
4531 err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
4539 mutex_lock(&esw->state_lock);
4549 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
4554 err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
4563 esw->enabled_ipsec_vf_count++;
4565 esw->enabled_ipsec_vf_count--;
4567 mutex_unlock(&esw->state_lock);
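
For reference, a minimal usage sketch of the add/del offloaded-rule pair listed above (mlx5_eswitch_add_offloaded_rule / mlx5_eswitch_del_offloaded_rule). The esw, spec and attr objects are assumed to be prepared by the caller and are not shown; this is an illustrative sketch, not the driver's actual TC offload path.

	struct mlx5_flow_handle *rule;

	/* add an offloaded FDB rule; spec and attr are set up by the caller */
	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	/* ... later, tear the rule down with the same attr used to add it ... */
	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);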