Lines Matching defs:ldev

 11  static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
 18          for (i = 0; i < ldev->ports; i++) {
 19                  dev = ldev->pf[i].dev;
 21                  pf_metadata = ldev->lag_mpesw.pf_metadata[i];
 28                  ldev->lag_mpesw.pf_metadata[i] = 0;
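Lines 11-28 walk every PF in the bond, look up its mlx5_core_dev and the match metadata value that was reserved for it, and clear the per-port slot. A minimal sketch of how the matched lines fit together follows; the local declarations, the skip of empty slots and the mlx5_esw_match_metadata_free() call are assumptions bridging the unmatched lines, not text taken from the listing.

/* Sketch only -- reconstructed around the matched lines; the eswitch
 * lookup and the metadata-free helper are assumed, not shown above.
 */
static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev;
        struct mlx5_eswitch *esw;
        u32 pf_metadata;
        int i;

        for (i = 0; i < ldev->ports; i++) {
                dev = ldev->pf[i].dev;
                esw = dev->priv.eswitch;
                pf_metadata = ldev->lag_mpesw.pf_metadata[i];
                if (!pf_metadata)
                        continue;       /* nothing was allocated for this port */
                /* per-device teardown (source lines 22-27) is not in the listing */
                mlx5_esw_match_metadata_free(esw, pf_metadata);
                ldev->lag_mpesw.pf_metadata[i] = 0;
        }
}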
 32  static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
 39          for (i = 0; i < ldev->ports; i++) {
 40                  dev = ldev->pf[i].dev;
 48                  ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
 55          for (i = 0; i < ldev->ports; i++) {
 56                  dev = ldev->pf[i].dev;
 64          mlx5_mpesw_metadata_cleanup(ldev);
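Lines 32-64 are the allocation-side mirror of the cleanup above: the first loop reserves a metadata value per port and stores it in ldev->lag_mpesw.pf_metadata[i], a second loop revisits every port once all allocations succeeded, and line 64 sits on the error path, undoing partial work through mlx5_mpesw_metadata_cleanup(). A hedged sketch, with the allocation helper, the error handling and the body of the second loop all assumed:

/* Sketch only -- error labels, mlx5_esw_match_metadata_alloc() and the
 * second-loop body are assumptions bridging the unmatched lines.
 */
static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev;
        u32 pf_metadata;
        int i, err;

        for (i = 0; i < ldev->ports; i++) {
                dev = ldev->pf[i].dev;
                pf_metadata = mlx5_esw_match_metadata_alloc(dev->priv.eswitch);
                if (!pf_metadata) {
                        err = -ENOSPC;
                        goto err_metadata;
                }
                ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
        }

        for (i = 0; i < ldev->ports; i++) {
                dev = ldev->pf[i].dev;
                /* per-device programming/notification, not in the listing */
        }

        return 0;

err_metadata:
        mlx5_mpesw_metadata_cleanup(ldev);
        return err;
}

Allocating for every port first and only then touching the devices appears to keep the error path simple: any failure lands in a single cleanup call, and the cleanup sketch above tolerates partially filled pf_metadata slots by skipping zero entries.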
 69  static int enable_mpesw(struct mlx5_lag *ldev)
 71          struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
 75          if (ldev->mode != MLX5_LAG_MODE_NONE)
 78          if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
 84              !mlx5_lag_check_prereq(ldev))
 87          err = mlx5_mpesw_metadata_set(ldev);
 91          mlx5_lag_remove_devices(ldev);
 93          err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
101          for (i = 0; i < ldev->ports; i++) {
102                  err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
112          mlx5_deactivate_lag(ldev);
114          mlx5_lag_add_devices(ldev);
115          for (i = 0; i < ldev->ports; i++)
116                  mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
117          mlx5_mpesw_metadata_cleanup(ldev);
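Lines 69-117 trace the whole enable flow: refuse to switch if a LAG mode is already active, if the port count exceeds MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS, or if the prerequisite checks around line 84 fail; program the per-PF metadata; detach the devices from the lag layer; activate the LAG in MLX5_LAG_MODE_MPESW; reload the IB representors on every port; and unwind on failure (lines 112-117). The sketch below stitches the matched lines together; the condition feeding line 84, the warning message, the goto labels and the error codes are assumptions:

/* Sketch only -- the full prerequisite condition, the labels and the
 * error codes are assumed; the call order follows the matched lines.
 */
static int enable_mpesw(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        int err;
        int i;

        if (ldev->mode != MLX5_LAG_MODE_NONE)
                return -EINVAL;

        if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
                return -EOPNOTSUPP;

        if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
            !mlx5_lag_check_prereq(ldev))
                return -EOPNOTSUPP;

        err = mlx5_mpesw_metadata_set(ldev);
        if (err)
                return err;

        mlx5_lag_remove_devices(ldev);

        err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
        if (err) {
                mlx5_core_warn(dev0, "Failed to create LAG in MPESW mode (%d)\n", err);
                goto err_add_devices;
        }

        for (i = 0; i < ldev->ports; i++) {
                err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
                if (err)
                        goto err_deactivate;
        }

        return 0;

err_deactivate:
        mlx5_deactivate_lag(ldev);
err_add_devices:
        mlx5_lag_add_devices(ldev);
        for (i = 0; i < ldev->ports; i++)
                mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
        mlx5_mpesw_metadata_cleanup(ldev);
        return err;
}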
121  static void disable_mpesw(struct mlx5_lag *ldev)
123          if (ldev->mode == MLX5_LAG_MODE_MPESW) {
124                  mlx5_mpesw_metadata_cleanup(ldev);
125                  mlx5_disable_lag(ldev);
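Lines 121-125 are essentially the entire teardown path: it only acts when MPESW is the currently active mode, and it releases the per-PF metadata before dropping the LAG itself, mirroring the enable order. Completed with nothing more than the closing braces:

static void disable_mpesw(struct mlx5_lag *ldev)
{
        if (ldev->mode == MLX5_LAG_MODE_MPESW) {
                mlx5_mpesw_metadata_cleanup(ldev);
                mlx5_disable_lag(ldev);
        }
}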
133          struct mlx5_lag *ldev = mpesww->lag;
135          devcom = mlx5_lag_get_devcom_comp(ldev);
140          mutex_lock(&ldev->lock);
141          if (ldev->mode_changes_in_progress) {
147                  mpesww->result = enable_mpesw(ldev);
149                  disable_mpesw(ldev);
151          mutex_unlock(&ldev->lock);
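Lines 133-151 belong to the deferred worker that actually performs the mode change: it recovers the LAG from the queued work item, serializes against concurrent mode changes by taking the devcom component lock and ldev->lock, bails out early if another mode change is already in progress, and otherwise calls enable_mpesw() or disable_mpesw() depending on the requested operation. The sketch below assumes the work-item layout, the op constants, the devcom lock/unlock helpers, the completion and the function name, none of which appear in the listing:

enum mpesw_op {                         /* names assumed */
        MLX5_MPESW_OP_ENABLE,
        MLX5_MPESW_OP_DISABLE,
};

/* Assumed layout of the queued request; only ->lag and ->result are
 * visible in the matched lines.
 */
struct mlx5_mpesw_work_st {
        struct work_struct work;
        struct mlx5_lag *lag;
        enum mpesw_op op;
        struct completion comp;
        int result;
};

static void mlx5_mpesw_work(struct work_struct *work)  /* name assumed */
{
        struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
        struct mlx5_devcom_comp_dev *devcom;
        struct mlx5_lag *ldev = mpesww->lag;

        devcom = mlx5_lag_get_devcom_comp(ldev);
        if (!devcom)
                return;

        mlx5_devcom_comp_lock(devcom);          /* lock helpers assumed */
        mutex_lock(&ldev->lock);
        if (ldev->mode_changes_in_progress) {
                mpesww->result = -EAGAIN;       /* assumed result */
                goto unlock;
        }

        if (mpesww->op == MLX5_MPESW_OP_ENABLE)
                mpesww->result = enable_mpesw(ldev);
        else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
                disable_mpesw(ldev);
unlock:
        mutex_unlock(&ldev->lock);
        mlx5_devcom_comp_unlock(devcom);
        complete(&mpesww->comp);
}

Deferring the change to a worker lets the requesting side simply wait on a completion while all the locking happens in one place.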
159          struct mlx5_lag *ldev = mlx5_lag_dev(dev);
163          if (!ldev)
173          work->lag = ldev;
175          if (!queue_work(ldev->wq, &work->work)) {
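Lines 159-175 show the requesting side: resolve the LAG for the device (a missing LAG is treated as nothing to do), fill in a heap-allocated work item, point it at the LAG (work->lag = ldev) and push it onto the LAG's workqueue; queue_work() returning false means the item was already pending, which is treated as a failure. A sketch of the surrounding function, with its name, the allocation, the completion wait and the error codes all assumed:

/* Sketch only -- the function name and everything not on a matched
 * line is an assumption.
 */
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev, enum mpesw_op op)
{
        struct mlx5_lag *ldev = mlx5_lag_dev(dev);
        struct mlx5_mpesw_work_st *work;
        int err = 0;

        if (!ldev)
                return 0;

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        INIT_WORK(&work->work, mlx5_mpesw_work);
        init_completion(&work->comp);
        work->op = op;
        work->lag = ldev;

        if (!queue_work(ldev->wq, &work->work)) {
                mlx5_core_warn(dev, "failed to queue mpesw work\n");
                err = -EINVAL;
                goto out;
        }
        wait_for_completion(&work->comp);
        err = work->result;
out:
        kfree(work);
        return err;
}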
201          struct mlx5_lag *ldev = mlx5_lag_dev(mdev);
203          if (!netif_is_bond_master(out_dev) || !ldev)
206          if (ldev->mode != MLX5_LAG_MODE_MPESW)
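Lines 201-206 check a candidate egress netdev: the test only matters when out_dev is a bond master and the device actually belongs to a LAG, and only when that LAG is running in MPESW mode; in every other case the caller is allowed to proceed. A sketch of the complete check; the function name, the rejection message and the return code of the failing branch are assumptions:

/* Sketch only -- name, message and error code assumed. */
int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
                             struct net_device *out_dev,
                             struct netlink_ext_ack *extack)
{
        struct mlx5_lag *ldev = mlx5_lag_dev(mdev);

        if (!netif_is_bond_master(out_dev) || !ldev)
                return 0;

        if (ldev->mode != MLX5_LAG_MODE_MPESW)
                return 0;

        NL_SET_ERR_MSG_MOD(extack, "can't forward to a bond in MPESW mode");
        return -EOPNOTSUPP;
}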
215          struct mlx5_lag *ldev = mlx5_lag_dev(dev);
217          return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
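Lines 215-217 are the query helper in full: a device counts as running MPESW only if it belongs to a LAG whose mode is MLX5_LAG_MODE_MPESW. Completed with just a signature and braces (the signature assumes the exported mlx5_lag_is_mpesw() helper; the name itself is not in the listing):

bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev = mlx5_lag_dev(dev);

        return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}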