Lines Matching defs:tracker

75 mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
151 static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
158 if (!tracker->netdev_state[i].tx_enabled ||
159 !tracker->netdev_state[i].link_up)
164 void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
171 if (tracker->netdev_state[i].tx_enabled &&
172 tracker->netdev_state[i].link_up)
177 mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
182 struct lag_tracker *tracker,
195 mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
312 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
327 if (tracker->netdev_state[i].tx_enabled &&
328 tracker->netdev_state[i].link_up)
382 struct lag_tracker *tracker)
396 if (!ldev->tracker.has_inactive)
399 mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
449 struct lag_tracker *tracker)
458 mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
474 mlx5_lag_print_mapping(dev0, ldev, tracker,
480 if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
482 mlx5_lag_drop_rule_setup(ldev, tracker);
505 struct lag_tracker *tracker,
515 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
523 struct lag_tracker *tracker, bool shared_fdb,
540 mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
580 struct lag_tracker *tracker,
589 if (tracker)
590 mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
621 struct lag_tracker *tracker,
630 err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
635 mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
637 err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
648 err = mlx5_create_lag(ldev, tracker, mode, flags);
662 if (tracker && tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
664 mlx5_lag_drop_rule_setup(ldev, tracker);
876 struct lag_tracker tracker = { };
888 tracker = ldev->tracker;
890 do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
901 err = mlx5_activate_lag(ldev, &tracker,
939 mlx5_modify_lag(ldev, &tracker);
1002 struct lag_tracker *tracker,
1045 tracker->tx_type = lag_upper_info->tx_type;
1046 tracker->hash_type = lag_upper_info->hash_type;
1049 tracker->has_inactive = has_inactive;
1058 mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
1059 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
1062 if (tracker->is_bonded != is_bonded) {
1063 tracker->is_bonded = is_bonded;
1081 struct lag_tracker *tracker,
1102 tracker->netdev_state[idx] = *lag_lower_info;
1108 struct lag_tracker *tracker,
1131 if (tracker->has_inactive == has_inactive)
1134 tracker->has_inactive = has_inactive;
1144 struct lag_tracker tracker;
1155 tracker = ldev->tracker;
1159 changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr);
1162 changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
1166 changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
1170 ldev->tracker = tracker;
1190 ldev->tracker.netdev_state[fn].link_up = 0;
1191 ldev->tracker.netdev_state[fn].tx_enabled = 0;
1500 if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
1502 if (ldev->tracker.netdev_state[i].tx_enabled)
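
Taken together, these matches outline the shape of struct lag_tracker and how mlx5_infer_tx_enabled() consumes it. The sketch below is a reconstruction from the listing only: the field order, the parameter names not visible above, the fallback condition in the helper, and the types netdev_lag_lower_state_info, netdev_lag_tx_type, netdev_lag_hash (from linux/netdevice.h) and MLX5_MAX_PORTS are assumptions, not confirmed by these lines.

/* Sketch of the tracked bonding state implied by the matches above. */
struct lag_tracker {
	enum netdev_lag_tx_type tx_type;                                  /* lines 1045, 1058-1059 */
	struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];  /* lines 158-172, 1102, 1190-1191 */
	unsigned int is_bonded:1;                                         /* lines 890, 1062-1063 */
	unsigned int has_inactive:1;                                      /* lines 396, 1049, 1131-1134 */
	enum netdev_lag_hash hash_type;                                   /* lines 637, 1046 */
};

/* Reconstruction of the helper spanning lines 164-177: a port is counted as
 * active only when the bond reports it both tx-enabled and link-up; the
 * fall-back to the disabled set (line 177) is assumed to trigger when no
 * port qualifies. */
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
			   u8 *ports, int *num_enabled)
{
	int i;

	*num_enabled = 0;
	for (i = 0; i < num_ports; i++) {
		if (tracker->netdev_state[i].tx_enabled &&
		    tracker->netdev_state[i].link_up)
			ports[(*num_enabled)++] = i;
	}

	if (*num_enabled == 0)
		mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
}

The same tx_enabled/link_up pair drives the other consumers in the listing: mlx5_infer_tx_disabled() (lines 151-159) collects the complementary set, and mlx5_infer_tx_affinity_mapping() (lines 312-328) uses it to build the port-to-port map passed to mlx5_lag_port_sel_create() and mlx5_create_lag().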