Lines Matching refs:ldev (two short illustrative sketches follow the listing)

68 static u8 lag_active_port_bits(struct mlx5_lag *ldev)
75 mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
181 struct mlx5_lag *ldev,
195 mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
206 for (i = 0; i < ldev->ports; i++) {
207 for (j = 0; j < ldev->buckets; j++) {
208 idx = i * ldev->buckets + j;
210 " port %d:%d", i + 1, ldev->v2p_map[idx]);
226 struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
228 if (ldev->nb.notifier_call)
229 unregister_netdevice_notifier_net(&init_net, &ldev->nb);
230 mlx5_lag_mp_cleanup(ldev);
231 cancel_delayed_work_sync(&ldev->bond_work);
232 destroy_workqueue(ldev->wq);
233 mutex_destroy(&ldev->lock);
234 kfree(ldev);
237 static void mlx5_ldev_put(struct mlx5_lag *ldev)
239 kref_put(&ldev->ref, mlx5_ldev_free);
242 static void mlx5_ldev_get(struct mlx5_lag *ldev)
244 kref_get(&ldev->ref);
249 struct mlx5_lag *ldev;
252 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
253 if (!ldev)
256 ldev->wq = create_singlethread_workqueue("mlx5_lag");
257 if (!ldev->wq) {
258 kfree(ldev);
262 kref_init(&ldev->ref);
263 mutex_init(&ldev->lock);
264 INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
266 ldev->nb.notifier_call = mlx5_lag_netdev_event;
267 if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
268 ldev->nb.notifier_call = NULL;
271 ldev->mode = MLX5_LAG_MODE_NONE;
273 err = mlx5_lag_mp_init(ldev);
278 ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
279 ldev->buckets = 1;
281 return ldev;
284 int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
289 for (i = 0; i < ldev->ports; i++)
290 if (ldev->pf[i].netdev == ndev)
296 static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
298 return ldev->mode == MLX5_LAG_MODE_ROCE;
301 static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
303 return ldev->mode == MLX5_LAG_MODE_SRIOV;
307 * As we have ldev->buckets slots per port first assume the native
357 static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
361 for (i = 0; i < ldev->ports; i++)
362 if (ldev->pf[i].has_drop)
367 static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
371 for (i = 0; i < ldev->ports; i++) {
372 if (!ldev->pf[i].has_drop)
375 mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
377 ldev->pf[i].has_drop = false;
381 static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
394 mlx5_lag_drop_rule_cleanup(ldev);
396 if (!ldev->tracker.has_inactive)
399 mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
403 dev = ldev->pf[disabled_index].dev;
407 ldev->pf[disabled_index].has_drop = true;
429 static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
431 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
435 if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
436 ret = mlx5_lag_port_sel_modify(ldev, ports);
441 active_ports = lag_active_port_bits(ldev);
445 return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
448 void mlx5_modify_lag(struct mlx5_lag *ldev,
452 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
458 mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
460 for (i = 0; i < ldev->ports; i++) {
461 for (j = 0; j < ldev->buckets; j++) {
462 idx = i * ldev->buckets + j;
463 if (ports[idx] == ldev->v2p_map[idx])
465 err = _mlx5_modify_lag(ldev, ports);
472 memcpy(ldev->v2p_map, ports, sizeof(ports));
474 mlx5_lag_print_mapping(dev0, ldev, tracker,
475 ldev->mode_flags);
481 !(ldev->mode == MLX5_LAG_MODE_ROCE))
482 mlx5_lag_drop_rule_setup(ldev, tracker);
485 static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
488 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
491 if (ldev->ports > 2)
496 if (ldev->ports > 2)
497 ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
504 static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
509 struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
516 if (ldev->ports > 2)
517 ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
522 static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
538 return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
540 mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
556 static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
558 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
563 for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
564 struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
567 slave_esw, ldev->ports);
575 ldev->pf[i].dev->priv.eswitch);
579 static int mlx5_create_lag(struct mlx5_lag *ldev,
585 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
590 mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
594 err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
603 err = mlx5_lag_create_single_fdb(ldev);
620 int mlx5_activate_lag(struct mlx5_lag *ldev,
626 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
630 err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
635 mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
637 err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
638 ldev->v2p_map);
648 err = mlx5_create_lag(ldev, tracker, mode, flags);
651 mlx5_lag_port_sel_destroy(ldev);
664 mlx5_lag_drop_rule_setup(ldev, tracker);
666 ldev->mode = mode;
667 ldev->mode_flags = flags;
671 int mlx5_deactivate_lag(struct mlx5_lag *ldev)
673 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
676 bool roce_lag = __mlx5_lag_is_roce(ldev);
677 unsigned long flags = ldev->mode_flags;
681 ldev->mode = MLX5_LAG_MODE_NONE;
682 ldev->mode_flags = 0;
683 mlx5_lag_mp_reset(ldev);
686 for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
688 ldev->pf[i].dev->priv.eswitch);
707 mlx5_lag_port_sel_destroy(ldev);
708 ldev->buckets = 1;
710 if (mlx5_lag_has_drop_rule(ldev))
711 mlx5_lag_drop_rule_cleanup(ldev);
716 bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
724 for (i = 0; i < ldev->ports; i++)
725 if (!ldev->pf[i].dev)
729 for (i = 0; i < ldev->ports; i++) {
730 dev = ldev->pf[i].dev;
735 dev = ldev->pf[MLX5_LAG_P1].dev;
737 for (i = 0; i < ldev->ports; i++)
738 if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
742 for (i = 0; i < ldev->ports; i++)
743 if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
749 void mlx5_lag_add_devices(struct mlx5_lag *ldev)
753 for (i = 0; i < ldev->ports; i++) {
754 if (!ldev->pf[i].dev)
757 if (ldev->pf[i].dev->priv.flags &
761 ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
762 mlx5_rescan_drivers_locked(ldev->pf[i].dev);
766 void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
770 for (i = 0; i < ldev->ports; i++) {
771 if (!ldev->pf[i].dev)
774 if (ldev->pf[i].dev->priv.flags &
778 ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
779 mlx5_rescan_drivers_locked(ldev->pf[i].dev);
783 void mlx5_disable_lag(struct mlx5_lag *ldev)
785 bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
786 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
791 roce_lag = __mlx5_lag_is_roce(ldev);
794 mlx5_lag_remove_devices(ldev);
800 for (i = 1; i < ldev->ports; i++)
801 mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
804 err = mlx5_deactivate_lag(ldev);
809 mlx5_lag_add_devices(ldev);
812 for (i = 0; i < ldev->ports; i++)
813 if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
814 mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
817 static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
822 for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
823 dev = ldev->pf[i].dev;
834 dev = ldev->pf[MLX5_LAG_P1].dev;
845 static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
850 for (i = 0; i < ldev->ports; i++)
851 roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
854 for (i = 0; i < ldev->ports; i++)
855 roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
861 static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond)
863 return do_bond && __mlx5_lag_is_active(ldev) &&
864 ldev->mode != MLX5_LAG_MODE_MPESW;
867 static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
869 return !do_bond && __mlx5_lag_is_active(ldev) &&
870 ldev->mode != MLX5_LAG_MODE_MPESW;
873 static void mlx5_do_bond(struct mlx5_lag *ldev)
875 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
881 if (!mlx5_lag_is_ready(ldev)) {
888 tracker = ldev->tracker;
890 do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
893 if (do_bond && !__mlx5_lag_is_active(ldev)) {
894 bool shared_fdb = mlx5_shared_fdb_supported(ldev);
896 roce_lag = mlx5_lag_is_roce_lag(ldev);
899 mlx5_lag_remove_devices(ldev);
901 err = mlx5_activate_lag(ldev, &tracker,
907 mlx5_lag_add_devices(ldev);
913 for (i = 1; i < ldev->ports; i++)
914 mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
921 for (i = 0; i < ldev->ports; i++) {
922 err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
930 mlx5_deactivate_lag(ldev);
931 mlx5_lag_add_devices(ldev);
932 for (i = 0; i < ldev->ports; i++)
933 mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
938 } else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
939 mlx5_modify_lag(ldev, &tracker);
940 } else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
941 mlx5_disable_lag(ldev);
949 struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
954 mutex_lock(&ldev->lock);
955 for (i = 0; i < ldev->ports; i++) {
956 if (ldev->pf[i].dev) {
957 devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
961 mutex_unlock(&ldev->lock);
965 static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
967 queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
973 struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
978 devcom = mlx5_lag_get_devcom_comp(ldev);
984 mlx5_queue_bond_work(ldev, HZ);
988 mutex_lock(&ldev->lock);
989 if (ldev->mode_changes_in_progress) {
990 mutex_unlock(&ldev->lock);
992 mlx5_queue_bond_work(ldev, HZ);
996 mlx5_do_bond(ldev);
997 mutex_unlock(&ldev->lock);
1001 static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
1028 idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
1041 if (!(bond_status & GENMASK(ldev->ports - 1, 0)))
1054 is_in_lag = num_slaves == ldev->ports &&
1055 bond_status == GENMASK(ldev->ports - 1, 0);
1070 if (!mlx5_lag_is_ready(ldev))
1080 static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
1091 idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
1107 static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
1121 idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
1145 struct mlx5_lag *ldev;
1153 ldev = container_of(this, struct mlx5_lag, nb);
1155 tracker = ldev->tracker;
1159 changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr);
1162 changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
1166 changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
1170 ldev->tracker = tracker;
1173 mlx5_queue_bond_work(ldev, 0);
1178 static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
1185 if (fn >= ldev->ports)
1189 ldev->pf[fn].netdev = netdev;
1190 ldev->tracker.netdev_state[fn].link_up = 0;
1191 ldev->tracker.netdev_state[fn].tx_enabled = 0;
1195 static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
1202 for (i = 0; i < ldev->ports; i++) {
1203 if (ldev->pf[i].netdev == netdev) {
1204 ldev->pf[i].netdev = NULL;
1211 static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
1216 if (fn >= ldev->ports)
1219 ldev->pf[fn].dev = dev;
1220 dev->priv.lag = ldev;
1223 static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
1228 for (i = 0; i < ldev->ports; i++)
1229 if (ldev->pf[i].dev == dev)
1232 if (i == ldev->ports)
1235 ldev->pf[i].dev = NULL;
1243 struct mlx5_lag *ldev = NULL;
1248 ldev = mlx5_lag_dev(tmp_dev);
1250 if (!ldev) {
1251 ldev = mlx5_lag_dev_alloc(dev);
1252 if (!ldev) {
1256 mlx5_ldev_add_mdev(ldev, dev);
1260 mutex_lock(&ldev->lock);
1261 if (ldev->mode_changes_in_progress) {
1262 mutex_unlock(&ldev->lock);
1265 mlx5_ldev_get(ldev);
1266 mlx5_ldev_add_mdev(ldev, dev);
1267 mutex_unlock(&ldev->lock);
1274 struct mlx5_lag *ldev;
1276 ldev = mlx5_lag_dev(dev);
1277 if (!ldev)
1285 mutex_lock(&ldev->lock);
1286 if (ldev->mode_changes_in_progress) {
1287 mutex_unlock(&ldev->lock);
1291 mlx5_ldev_remove_mdev(ldev, dev);
1292 mutex_unlock(&ldev->lock);
1293 mlx5_ldev_put(ldev);
1321 struct mlx5_lag *ldev;
1324 ldev = mlx5_lag_dev(dev);
1325 if (!ldev)
1328 mutex_lock(&ldev->lock);
1329 mlx5_ldev_remove_netdev(ldev, netdev);
1330 clear_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
1332 lag_is_active = __mlx5_lag_is_active(ldev);
1333 mutex_unlock(&ldev->lock);
1336 mlx5_queue_bond_work(ldev, 0);
1342 struct mlx5_lag *ldev;
1345 ldev = mlx5_lag_dev(dev);
1346 if (!ldev)
1349 mutex_lock(&ldev->lock);
1350 mlx5_ldev_add_netdev(ldev, dev, netdev);
1352 for (i = 0; i < ldev->ports; i++)
1353 if (!ldev->pf[i].netdev)
1356 if (i >= ldev->ports)
1357 set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
1358 mutex_unlock(&ldev->lock);
1359 mlx5_queue_bond_work(ldev, 0);
1364 struct mlx5_lag *ldev;
1369 ldev = mlx5_lag_dev(dev);
1370 res = ldev && __mlx5_lag_is_roce(ldev);
1379 struct mlx5_lag *ldev;
1384 ldev = mlx5_lag_dev(dev);
1385 res = ldev && __mlx5_lag_is_active(ldev);
1394 struct mlx5_lag *ldev;
1399 ldev = mlx5_lag_dev(dev);
1400 if (ldev)
1401 res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags);
1410 struct mlx5_lag *ldev;
1415 ldev = mlx5_lag_dev(dev);
1416 res = ldev && __mlx5_lag_is_active(ldev) &&
1417 dev == ldev->pf[MLX5_LAG_P1].dev;
1426 struct mlx5_lag *ldev;
1431 ldev = mlx5_lag_dev(dev);
1432 res = ldev && __mlx5_lag_is_sriov(ldev);
1441 struct mlx5_lag *ldev;
1446 ldev = mlx5_lag_dev(dev);
1447 res = ldev && test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
1456 struct mlx5_lag *ldev;
1458 ldev = mlx5_lag_dev(dev);
1459 if (!ldev)
1463 mutex_lock(&ldev->lock);
1465 ldev->mode_changes_in_progress++;
1466 if (__mlx5_lag_is_active(ldev))
1467 mlx5_disable_lag(ldev);
1469 mutex_unlock(&ldev->lock);
1475 struct mlx5_lag *ldev;
1477 ldev = mlx5_lag_dev(dev);
1478 if (!ldev)
1481 mutex_lock(&ldev->lock);
1482 ldev->mode_changes_in_progress--;
1483 mutex_unlock(&ldev->lock);
1484 mlx5_queue_bond_work(ldev, 0);
1490 struct mlx5_lag *ldev;
1495 ldev = mlx5_lag_dev(dev);
1497 if (!(ldev && __mlx5_lag_is_roce(ldev)))
1500 if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
1501 for (i = 0; i < ldev->ports; i++)
1502 if (ldev->tracker.netdev_state[i].tx_enabled)
1503 ndev = ldev->pf[i].netdev;
1505 ndev = ldev->pf[ldev->ports - 1].netdev;
1507 ndev = ldev->pf[MLX5_LAG_P1].netdev;
1522 struct mlx5_lag *ldev;
1528 ldev = mlx5_lag_dev(dev);
1529 if (!(ldev && __mlx5_lag_is_roce(ldev)))
1532 for (i = 0; i < ldev->ports; i++) {
1533 if (ldev->pf[i].netdev == slave) {
1539 port = ldev->v2p_map[port * ldev->buckets];
1549 struct mlx5_lag *ldev;
1551 ldev = mlx5_lag_dev(dev);
1552 if (!ldev)
1555 return ldev->ports;
1562 struct mlx5_lag *ldev;
1567 ldev = mlx5_lag_dev(dev);
1568 if (!ldev)
1571 if (*i == ldev->ports)
1573 for (idx = *i; idx < ldev->ports; idx++)
1574 if (ldev->pf[idx].dev != dev)
1577 if (idx == ldev->ports) {
1583 peer_dev = ldev->pf[idx].dev;
1598 struct mlx5_lag *ldev;
1617 ldev = mlx5_lag_dev(dev);
1618 if (ldev && __mlx5_lag_is_active(ldev)) {
1619 num_ports = ldev->ports;
1620 for (i = 0; i < ldev->ports; i++)
1621 mdev[i] = ldev->pf[i].dev;
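
The matches around source lines 226-281 above show the ldev lifecycle: kzalloc plus kref_init in the allocation path, kref_get/kref_put in mlx5_ldev_get/mlx5_ldev_put, and mlx5_ldev_free as the release callback. The following is a minimal userspace sketch of that reference-counting pattern, assuming a plain C11 atomic counter in place of the kernel's struct kref; the fake_* names, field layout, and port count are illustrative assumptions, not the driver's implementation.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_ldev {
	atomic_int ref;   /* stands in for the kref used by mlx5_ldev_get/put */
	int ports;        /* the driver fills this from MLX5_CAP_GEN(dev, num_lag_ports) */
	int buckets;      /* initialized to 1, as at source line 279 above */
};

static struct fake_ldev *fake_ldev_alloc(int num_lag_ports)
{
	struct fake_ldev *ldev = calloc(1, sizeof(*ldev));

	if (!ldev)
		return NULL;
	atomic_init(&ldev->ref, 1);   /* kref_init(): first user holds one reference */
	ldev->ports = num_lag_ports;
	ldev->buckets = 1;
	return ldev;
}

static void fake_ldev_get(struct fake_ldev *ldev)
{
	atomic_fetch_add(&ldev->ref, 1);           /* kref_get() */
}

static void fake_ldev_put(struct fake_ldev *ldev)
{
	if (atomic_fetch_sub(&ldev->ref, 1) == 1)  /* last put runs the release */
		free(ldev);                        /* mlx5_ldev_free() frees the struct */
}

int main(void)
{
	struct fake_ldev *ldev = fake_ldev_alloc(2);

	if (!ldev)
		return 1;
	fake_ldev_get(ldev);  /* a second function joins the same lag device */
	fake_ldev_put(ldev);  /* one function leaves */
	fake_ldev_put(ldev);  /* last reference drops and the struct is freed */
	printf("ldev lifecycle done\n");
	return 0;
}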
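
Several matches above (source lines 206-210, 458-475, and 1539) revolve around the flat v2p_map, which holds one tx-affinity entry per (port, bucket) pair and is indexed as idx = port * buckets + bucket. Below is a small, self-contained sketch of that indexing under assumed sizes; NUM_PORTS, NUM_BUCKETS, and the fail-over values are made up for illustration and do not come from the driver.

#include <stdio.h>

#define NUM_PORTS   2   /* assumption; the driver uses ldev->ports */
#define NUM_BUCKETS 4   /* assumption; the driver uses ldev->buckets */

int main(void)
{
	/* one affinity entry per (port, bucket) pair, stored flat */
	unsigned char v2p_map[NUM_PORTS * NUM_BUCKETS];
	int i, j;

	/* native mapping: every bucket of port i points at physical port i + 1 */
	for (i = 0; i < NUM_PORTS; i++)
		for (j = 0; j < NUM_BUCKETS; j++)
			v2p_map[i * NUM_BUCKETS + j] = i + 1;

	/* example fail-over: steer all of port 2's buckets to physical port 1 */
	for (j = 0; j < NUM_BUCKETS; j++)
		v2p_map[1 * NUM_BUCKETS + j] = 1;

	/* print in the same "port %d:%d" spirit as the mapping dump at line 210 */
	for (i = 0; i < NUM_PORTS; i++)
		for (j = 0; j < NUM_BUCKETS; j++)
			printf("port %d:%d\n", i + 1, v2p_map[i * NUM_BUCKETS + j]);
	return 0;
}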