• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /freebsd-12-stable/sys/dev/mlx5/mlx5_core/

Lines Matching defs:esw

218 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
246 esw_debug(esw->dev,
250 mlx5_add_flow_rule(esw->fdb_table.fdb,
266 static int esw_create_fdb_table(struct mlx5_eswitch *esw)
269 struct mlx5_core_dev *dev = esw->dev;
318 esw->fdb_table.addr_grp = g;
319 esw->fdb_table.fdb = fdb;
327 static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
329 if (!esw->fdb_table.fdb)
332 esw_debug(esw->dev, "Destroy FDB Table\n");
333 mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
334 mlx5_destroy_flow_table(esw->fdb_table.fdb);
335 esw->fdb_table.fdb = NULL;
336 esw->fdb_table.addr_grp = NULL;
340 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
343 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
345 struct hlist_head *hash = esw->l2_table.l2_hash;
353 esw_warn(esw->dev,
364 err = mlx5_mpfs_add_mac(esw->dev, &esw_uc->table_index, mac, 0, 0);
368 if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
369 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
371 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
379 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
381 struct hlist_head *hash = esw->l2_table.l2_hash;
388 esw_debug(esw->dev,
393 esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
396 mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);
406 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
408 struct hlist_head *hash = esw->mc_table;
413 if (!esw->fdb_table.fdb)
425 esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
429 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
430 esw_debug(esw->dev,
437 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
439 struct hlist_head *hash = esw->mc_table;
444 if (!esw->fdb_table.fdb)
449 esw_warn(esw->dev,
454 esw_debug(esw->dev,
474 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
477 struct mlx5_vport *vport = &esw->vports[vport_num];
497 vport_addr_add(esw, addr);
501 vport_addr_del(esw, addr);
509 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
512 struct mlx5_vport *vport = &esw->vports[vport_num];
524 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
525 MLX5_MAX_MC_PER_VPORT(esw->dev);
538 err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
542 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
561 esw_warn(esw->dev,
577 struct mlx5_eswitch *esw = dev->priv.eswitch;
585 esw_update_vport_addr_list(esw, vport->vport,
587 esw_apply_vport_addr_list(esw, vport->vport,
592 esw_update_vport_addr_list(esw, vport->vport,
594 esw_apply_vport_addr_list(esw, vport->vport,
598 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
604 static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
610 struct mlx5_core_dev *dev = esw->dev;
681 static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
694 static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
700 esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
702 esw_vport_cleanup_egress_rules(esw, vport);
711 static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
715 struct mlx5_core_dev *dev = esw->dev;
772 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
780 static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
786 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
788 esw_vport_cleanup_ingress_rules(esw, vport);
795 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
804 esw_warn(esw->dev,
810 esw_vport_cleanup_ingress_rules(esw, vport);
815 esw_debug(esw->dev,
823 esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
851 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
860 esw_warn(esw->dev, "vport[%d] configure egress rules failed, egress acl is not initialized!\n",
865 esw_vport_cleanup_egress_rules(esw, vport);
870 esw_debug(esw->dev,
878 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
927 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
930 struct mlx5_vport *vport = &esw->vports[vport_num];
936 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
939 esw_vport_enable_ingress_acl(esw, vport);
940 esw_vport_enable_egress_acl(esw, vport);
941 esw_vport_ingress_config(esw, vport);
942 esw_vport_egress_config(esw, vport);
945 mlx5_modify_vport_admin_state(esw->dev,
958 arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
960 esw->enabled_vports++;
961 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
965 static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
967 struct mlx5_vport *vport = &esw->vports[vport_num];
977 esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_UC);
983 esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_MC);
986 static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
988 struct mlx5_vport *vport = &esw->vports[vport_num];
997 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1004 mlx5_modify_vport_admin_state(esw->dev,
1009 flush_workqueue(esw->work_queue);
1011 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1013 esw_cleanup_vport(esw, vport_num);
1015 esw_vport_disable_egress_acl(esw, vport);
1016 esw_vport_disable_ingress_acl(esw, vport);
1018 esw->enabled_vports--;
1023 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
1028 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1029 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1032 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1033 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1034 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1038 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1039 esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1041 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1042 esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
1044 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
1046 esw_disable_vport(esw, 0);
1048 err = esw_create_fdb_table(esw);
1053 esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);
1055 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1056 esw->enabled_vports);
1060 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1064 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1068 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1069 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1072 esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
1073 esw->enabled_vports);
1075 for (i = 0; i < esw->total_vports; i++)
1076 esw_disable_vport(esw, i);
1078 esw_destroy_fdb_table(esw);
1081 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1087 struct mlx5_eswitch *esw;
1101 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1102 if (!esw)
1105 esw->dev = dev;
1107 esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
1109 if (!esw->l2_table.bitmap) {
1113 esw->l2_table.size = l2_table_size;
1115 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1116 if (!esw->work_queue) {
1121 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1123 if (!esw->vports) {
1129 struct mlx5_vport *vport = &esw->vports[vport_num];
1139 esw->total_vports = total_vports;
1140 esw->enabled_vports = 0;
1142 dev->priv.eswitch = esw;
1143 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1147 if (esw->work_queue)
1148 destroy_workqueue(esw->work_queue);
1149 kfree(esw->l2_table.bitmap);
1150 kfree(esw->vports);
1151 kfree(esw);
1155 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1157 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1158 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1161 esw_info(esw->dev, "cleanup\n");
1162 esw_disable_vport(esw, 0);
1164 esw->dev->priv.eswitch = NULL;
1165 destroy_workqueue(esw->work_queue);
1166 kfree(esw->l2_table.bitmap);
1167 kfree(esw->vports);
1168 kfree(esw);
1171 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1177 if (!esw) {
1182 vport = &esw->vports[vport_num];
1185 queue_work(esw->work_queue, &vport->vport_change_handler);
1190 #define ESW_ALLOWED(esw) \
1191 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
1192 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
1206 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1212 if (!ESW_ALLOWED(esw))
1214 if (!LEGAL_VPORT(esw, vport))
1217 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1219 mlx5_core_warn(esw->dev,
1226 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1228 mlx5_core_warn(esw->dev,
1237 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1240 if (!ESW_ALLOWED(esw))
1242 if (!LEGAL_VPORT(esw, vport))
1245 return mlx5_modify_vport_admin_state(esw->dev,
1250 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1256 if (!ESW_ALLOWED(esw))
1258 if (!LEGAL_VPORT(esw, vport))
1264 mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
1265 ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
1268 query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
1276 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1283 if (!ESW_ALLOWED(esw))
1285 if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
1291 evport = &esw->vports[vport];
1293 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
1301 esw_vport_ingress_config(esw, evport);
1302 esw_vport_egress_config(esw, evport);