Lines Matching defs:flow (struct mlx5e_tc_flow handling in the mlx5 TC offload code)

171  * last flow from a group and then deleting a group, we get into del_sw_flow_group()
178 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
179 static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
180 static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
452 mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
603 struct mlx5e_tc_flow *flow);
605 struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
607 if (!flow || !refcount_inc_not_zero(&flow->refcnt))
609 return flow;
612 void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
614 if (refcount_dec_and_test(&flow->refcnt)) {
615 mlx5e_tc_del_flow(priv, flow);
616 kfree_rcu(flow, rcu_head);
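
The get/put pair above (lines 605-616) is the core of flow lifetime management: a lookup can take a reference only while the refcount is still non-zero, and the final put frees the flow via kfree_rcu(). Below is a minimal userspace analogue of that pattern, illustrative only; flow_get() and flow_put() here are stand-ins for, not copies of, the driver functions.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct flow {
            atomic_int refcnt;
    };

    /* Mirrors refcount_inc_not_zero(): succeed only while refcnt > 0,
     * so a flow racing with its final put can never be resurrected. */
    static bool flow_get(struct flow *f)
    {
            int old = atomic_load(&f->refcnt);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&f->refcnt, &old, old + 1))
                            return true;    /* reference taken */
            }
            return false;   /* refcnt hit zero: flow is already dying */
    }

    static void flow_put(struct flow *f)
    {
            if (atomic_fetch_sub(&f->refcnt, 1) == 1)
                    free(f);        /* last reference; kfree_rcu() in the driver */
    }

    int main(void)
    {
            struct flow *f = malloc(sizeof(*f));

            if (!f)
                    return 1;
            atomic_init(&f->refcnt, 1);
            printf("get while alive: %d\n", flow_get(f));   /* 1 */
            flow_put(f);    /* back down to 1 */
            flow_put(f);    /* hits 0: freed */
            return 0;
    }
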
620 bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
622 return flow_flag_test(flow, ESWITCH);
625 bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
627 return flow_flag_test(flow, FT);
630 bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
632 return flow_flag_test(flow, OFFLOADED);
635 int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
637 return mlx5e_is_eswitch_flow(flow) ?
642 get_flow_counter_dev(struct mlx5e_tc_flow *flow)
644 return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
648 get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
653 return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
659 struct mlx5e_tc_flow *flow,
664 mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
665 mlx5e_get_flow_namespace(flow),
678 struct mlx5e_tc_flow *flow,
681 /* flow wasn't fully initialized */
685 mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
707 * after dev_put(netdev), while we're in the context of adding a tc flow.
1092 struct mlx5e_tc_flow *flow,
1196 flow_flag_set(flow, HAIRPIN_RSS);
1197 flow->attr->nic_attr->hairpin_ft =
1200 flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
1203 flow->hpe = hpe;
1205 list_add(&flow->hairpin, &hpe->flows);
1216 struct mlx5e_tc_flow *flow)
1218 /* flow wasn't fully initialized */
1219 if (!flow->hpe)
1222 spin_lock(&flow->hpe->flows_lock);
1223 list_del(&flow->hairpin);
1224 spin_unlock(&flow->hpe->flows_lock);
1226 mlx5e_hairpin_put(priv, flow->hpe);
1227 flow->hpe = NULL;
1361 struct mlx5e_tc_flow *flow,
1365 struct mlx5_flow_attr *attr = flow->attr;
1371 if (flow_flag_test(flow, HAIRPIN)) {
1372 err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
1384 err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
1389 flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
1390 return PTR_ERR_OR_ZERO(flow->rule[0]);
1413 struct mlx5e_tc_flow *flow)
1416 struct mlx5_flow_attr *attr = flow->attr;
1418 flow_flag_clear(flow, OFFLOADED);
1420 if (!IS_ERR_OR_NULL(flow->rule[0]))
1421 mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
1436 mlx5e_tc_detach_mod_hdr(priv, flow, attr);
1442 if (flow_flag_test(flow, HAIRPIN))
1443 mlx5e_hairpin_flow_del(priv, flow);
1445 free_flow_post_acts(flow);
1446 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
1449 kfree(flow->attr);
1454 struct mlx5e_tc_flow *flow,
1463 rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
1469 flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
1470 if (IS_ERR(flow->rule[1]))
1477 mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
1478 return flow->rule[1];
1482 struct mlx5e_tc_flow *flow,
1485 flow_flag_clear(flow, OFFLOADED);
1488 return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
1491 mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
1493 mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
1498 struct mlx5e_tc_flow *flow,
1513 memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1522 err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
1531 mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
1542 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
1548 flow->attr->slow_mh = mh;
1549 flow->chain_mapping = chain_mapping;
1550 flow_flag_set(flow, SLOW);
1559 mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
1571 struct mlx5e_tc_flow *flow)
1573 struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
1578 mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
1582 memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1590 mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
1592 mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
1593 mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
1594 flow->chain_mapping = 0;
1595 flow->attr->slow_mh = NULL;
1597 flow_flag_clear(flow, SLOW);
1604 static void unready_flow_add(struct mlx5e_tc_flow *flow,
1607 flow_flag_set(flow, NOT_READY);
1608 list_add_tail(&flow->unready, unready_flows);
1614 static void unready_flow_del(struct mlx5e_tc_flow *flow)
1616 list_del(&flow->unready);
1617 flow_flag_clear(flow, NOT_READY);
1620 static void add_unready_flow(struct mlx5e_tc_flow *flow)
1626 esw = flow->priv->mdev->priv.eswitch;
1631 unready_flow_add(flow, &uplink_priv->unready_flows);
1635 static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1641 esw = flow->priv->mdev->priv.eswitch;
1646 if (flow_flag_test(flow, NOT_READY))
1647 unready_flow_del(flow);
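
The unready-flow helpers above (lines 1604-1647) park flows whose FDB offload cannot complete yet on a per-uplink list, flagged NOT_READY, so they can be replayed later (compare the replay loop at lines 5406-5411 at the end of this listing). A rough userspace sketch of the same park-and-replay idea, with a plain singly-linked list standing in for the kernel's list_head:

    #include <stdbool.h>
    #include <stdio.h>

    struct flow {
            bool not_ready;
            struct flow *next;      /* parking list; list_head in the driver */
    };

    static struct flow *unready_flows;

    static void unready_flow_add(struct flow *f)
    {
            f->not_ready = true;
            f->next = unready_flows;
            unready_flows = f;
    }

    /* Called once whatever blocked the offload goes away: drain the
     * list and retry each parked flow. */
    static void reoffload_unready_flows(void)
    {
            struct flow *f;

            while ((f = unready_flows) != NULL) {
                    unready_flows = f->next;
                    f->not_ready = false;   /* unready_flow_del() equivalent */
                    printf("offloading parked flow %p\n", (void *)f);
            }
    }

    int main(void)
    {
            struct flow a = { 0 }, b = { 0 };

            unready_flow_add(&a);
            unready_flow_add(&b);
            reoffload_unready_flows();
            return 0;
    }
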
1743 post_process_attr(struct mlx5e_tc_flow *flow,
1754 if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) {
1755 err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
1761 err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
1768 err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
1775 err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
1781 err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
1792 struct mlx5e_tc_flow *flow,
1797 struct mlx5_flow_attr *attr = flow->attr;
1811 if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
1826 if (flow_flag_test(flow, TUN_RX)) {
1827 err = mlx5e_attach_decap_route(priv, flow);
1853 if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
1854 err = mlx5e_attach_decap(priv, flow, extack);
1878 flow_flag_test(flow, EGRESS) ?
1889 err = post_process_attr(flow, attr, extack);
1893 err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
1901 if (flow_flag_test(flow, SLOW))
1902 flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
1904 flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
1906 if (IS_ERR(flow->rule[0])) {
1907 err = PTR_ERR(flow->rule[0]);
1910 flow_flag_set(flow, OFFLOADED);
1915 flow_flag_set(flow, FAILED);
1919 static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
1921 struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
1932 static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
1937 mlx5_free_flow_attr_actions(flow, attr);
1943 struct mlx5e_tc_flow *flow)
1946 struct mlx5_flow_attr *attr = flow->attr;
1948 mlx5e_put_flow_tunnel_id(flow);
1950 remove_unready_flow(flow);
1952 if (mlx5e_is_offloaded_flow(flow)) {
1953 if (flow_flag_test(flow, SLOW))
1954 mlx5e_tc_unoffload_from_slow_path(esw, flow);
1956 mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
1958 complete_all(&flow->del_hw_done);
1960 if (mlx5_flow_has_geneve_opt(flow))
1963 if (flow->decap_route)
1964 mlx5e_detach_decap_route(priv, flow);
1966 mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
1968 if (flow_flag_test(flow, L3_TO_L2_DECAP))
1969 mlx5e_detach_decap(priv, flow);
1971 mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
1973 free_flow_post_acts(flow);
1974 mlx5_free_flow_attr_actions(flow, attr);
1978 kfree(flow->attr);
1981 struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1985 attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
1992 struct mlx5e_tc_flow *flow, *tmp;
1994 list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
1995 mlx5e_flow_put(priv, flow);
1998 static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
2001 struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
2005 if (!flow_flag_test(flow, ESWITCH) ||
2006 !flow_flag_test(flow, DUP))
2010 list_del(&flow->peer[peer_index]);
2013 list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
2024 if (list_empty(&flow->peer_flows))
2025 flow_flag_clear(flow, DUP);
2028 static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
2033 if (i == mlx5_get_dev_index(flow->priv->mdev))
2035 mlx5e_tc_del_fdb_peer_flow(flow, i);
2040 struct mlx5e_tc_flow *flow)
2042 if (mlx5e_is_eswitch_flow(flow)) {
2043 struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
2046 mlx5e_tc_del_fdb_flow(priv, flow);
2050 mlx5e_tc_del_fdb_peers_flow(flow);
2052 mlx5e_tc_del_fdb_flow(priv, flow);
2054 mlx5e_tc_del_nic_flow(priv, flow);
2129 struct mlx5e_tc_flow *flow,
2139 struct mlx5_flow_attr *attr = flow->attr;
2211 flow->attr->tunnel_id = value;
2223 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
2225 u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
2226 u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
2231 esw = flow->priv->mdev->priv.eswitch;
2397 struct mlx5e_tc_flow *flow,
2410 if (!mlx5e_is_eswitch_flow(flow)) {
2415 needs_mapping = !!flow->attr->chain;
2416 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
2428 if (!flow->attr->chain) {
2443 flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2444 err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2465 err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
2474 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2573 struct mlx5e_tc_flow *flow,
2601 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2635 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
3031 struct mlx5e_tc_flow *flow,
3048 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
3053 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
3068 flow->attr->inner_match_level = inner_match_level;
3069 flow->attr->outer_match_level = outer_match_level;
3452 struct mlx5e_tc_flow *flow,
3455 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3473 struct mlx5e_tc_flow *flow,
3481 if (mlx5e_is_eswitch_flow(flow) &&
3482 !actions_match_supported_fdb(priv, flow, extack))
3509 struct mlx5e_tc_flow *flow,
3522 ns_type = mlx5e_get_flow_namespace(flow);
3586 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
3592 list_for_each_entry(attr, &flow->attrs, list) {
3604 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
3606 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3609 list_for_each_entry(attr, &flow->attrs, list) {
3610 if (list_is_last(&attr->list, &flow->attrs))
3618 free_flow_post_acts(struct mlx5e_tc_flow *flow)
3622 list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
3623 if (list_is_last(&attr->list, &flow->attrs))
3626 mlx5_free_flow_attr_actions(flow, attr);
3635 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
3637 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3641 list_for_each_entry(attr, &flow->attrs, list) {
3642 if (list_is_last(&attr->list, &flow->attrs))
3674 alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
3676 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3684 list_for_each_entry(attr, &flow->attrs, list) {
3691 err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
3699 if (list_is_last(&attr->list, &flow->attrs))
3702 err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
3706 err = post_process_attr(flow, attr, extack);
3719 err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
3727 if (flow_flag_test(flow, SLOW))
3730 err = mlx5e_tc_offload_flow_post_acts(flow);
3738 free_flow_post_acts(flow);
3757 alloc_branch_attr(struct mlx5e_tc_flow *flow,
3766 *cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
3767 mlx5e_get_flow_namespace(flow));
3779 err = set_branch_dest_ft(flow->priv, attr);
3790 err = set_branch_dest_ft(flow->priv, attr);
3852 struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
3865 err = alloc_branch_attr(flow, &cond_true,
3873 err = alloc_branch_attr(flow, &cond_false,
3885 flow_flag_set(flow, USE_ACT_STATS);
3890 free_branch_attr(flow, attr->branch_true);
3900 struct mlx5e_tc_flow *flow = parse_state->flow;
3902 struct mlx5_flow_attr *attr = flow->attr;
3904 struct mlx5e_priv *priv = flow->priv;
3911 ns_type = mlx5e_get_flow_namespace(flow);
3912 list_add(&attr->list, &flow->attrs);
3937 err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
3955 attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
3963 list_add(&attr->list, &flow->attrs);
3969 flow_flag_set(flow, USE_ACT_STATS);
3981 err = alloc_flow_post_acts(flow, extack);
3988 free_flow_post_acts(flow);
4014 struct mlx5e_tc_flow *flow,
4019 struct mlx5_flow_attr *attr = flow->attr;
4029 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4036 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4045 parse_attr, flow, extack))
4148 struct mlx5e_tc_flow *flow,
4153 struct mlx5_flow_attr *attr = flow->attr;
4166 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4189 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4194 parse_attr, flow, extack))
4239 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4241 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4242 struct mlx5_flow_attr *attr = flow->attr;
4244 flow_flag_test(flow, INGRESS);
4280 mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
4282 struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
4289 mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
4291 mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr);
4298 mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
4301 if (mlx5e_is_eswitch_flow(flow)) {
4305 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
4309 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
4313 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
4315 free_branch_attr(flow, attr->branch_true);
4316 free_branch_attr(flow, attr->branch_false);
4327 struct mlx5e_tc_flow *flow;
4331 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4333 if (!parse_attr || !flow)
4336 flow->flags = flow_flags;
4337 flow->cookie = f->cookie;
4338 flow->priv = priv;
4340 attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
4344 flow->attr = attr;
4347 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4348 INIT_LIST_HEAD(&flow->hairpin);
4349 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4350 INIT_LIST_HEAD(&flow->attrs);
4351 INIT_LIST_HEAD(&flow->peer_flows);
4352 refcount_set(&flow->refcnt, 1);
4353 init_completion(&flow->init_done);
4354 init_completion(&flow->del_hw_done);
4356 *__flow = flow;
4362 kfree(flow);
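
The allocation path above (lines 4327-4362) grabs the flow and its parse_attr up front and funnels every failure through a shared error label; kfree(NULL) being a no-op is what keeps that label simple. A compact sketch of the same idiom in plain C, with hypothetical flow_alloc() and struct names:

    #include <errno.h>
    #include <stdlib.h>

    struct parse_attr { int placeholder; };
    struct flow { struct parse_attr *parse_attr; };

    static int flow_alloc(struct flow **out)
    {
            struct parse_attr *pa = calloc(1, sizeof(*pa));
            struct flow *f = calloc(1, sizeof(*f));

            if (!pa || !f)
                    goto err_free;

            f->parse_attr = pa;
            *out = f;
            return 0;

    err_free:
            free(pa);       /* free(NULL) is a no-op, like kfree(NULL) */
            free(f);
            return -ENOMEM;
    }

    int main(void)
    {
            struct flow *f;

            if (flow_alloc(&f) == 0) {
                    free(f->parse_attr);
                    free(f);
            }
            return 0;
    }
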
4411 struct mlx5e_tc_flow *flow;
4417 &parse_attr, &flow);
4422 mlx5e_flow_esw_attr_init(flow->attr,
4426 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4433 &flow->attr->ct_attr, extack);
4437 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
4441 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4442 complete_all(&flow->init_done);
4447 add_unready_flow(flow);
4450 return flow;
4453 mlx5e_flow_put(priv, flow);
4459 struct mlx5e_tc_flow *flow,
4463 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4465 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4478 * original flow and packets redirected from uplink use the
4488 parse_attr = flow->attr->parse_attr;
4497 list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
4498 flow_flag_set(flow, DUP);
4500 list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
4519 struct mlx5e_tc_flow *flow;
4522 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4524 if (IS_ERR(flow))
4525 return PTR_ERR(flow);
4527 if (!is_peer_flow_needed(flow)) {
4528 *__flow = flow;
4538 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
4545 *__flow = flow;
4549 mlx5e_tc_del_fdb_peers_flow(flow);
4552 mlx5e_tc_del_fdb_flow(priv, flow);
4566 struct mlx5e_tc_flow *flow;
4579 &parse_attr, &flow);
4584 mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4586 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4592 &flow->attr->ct_attr, extack);
4596 err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
4600 err = mlx5e_tc_add_nic_flow(priv, flow, extack);
4604 flow_flag_set(flow, OFFLOADED);
4605 *__flow = flow;
4610 flow_flag_set(flow, FAILED);
4612 mlx5e_flow_put(priv, flow);
4622 struct mlx5e_tc_flow **flow)
4635 filter_dev, flow);
4638 filter_dev, flow);
4646 /* Offloaded flow rule is allowed to duplicate on non-uplink representor
4699 struct mlx5e_tc_flow *flow;
4712 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4713 if (flow) {
4714 /* Same flow rule offloaded to non-uplink representor sharing tc block,
4717 if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
4721 "flow cookie already exists, ignoring");
4723 "flow cookie %lx already exists, ignoring\n",
4730 if (flow)
4734 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4739 * set the flow's owner dev.
4742 flow->orig_dev = dev;
4744 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4752 mlx5e_flow_put(priv, flow);
4761 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
4766 return flow_flag_test(flow, INGRESS) == dir_ingress &&
4767 flow_flag_test(flow, EGRESS) == dir_egress;
4774 struct mlx5e_tc_flow *flow;
4778 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4779 if (!flow || !same_flow_direction(flow, flags)) {
4784 /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
4787 if (flow_flag_test_and_set(flow, DELETED)) {
4791 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4795 mlx5e_flow_put(priv, flow);
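
The delete path above guards against double deletion with the test-and-set at line 4787: only the first caller wins the race and goes on to remove the flow from the hashtable and drop its reference. A minimal sketch of that guard using a C11 atomic_flag:

    #include <stdatomic.h>
    #include <stdio.h>

    struct flow {
            atomic_flag deleted;
    };

    static int delete_flow(struct flow *f)
    {
            /* Mirrors flow_flag_test_and_set(flow, DELETED). */
            if (atomic_flag_test_and_set(&f->deleted))
                    return -1;      /* lost the race: already being deleted */

            /* winner: remove from the table and drop its reference here */
            return 0;
    }

    int main(void)
    {
            struct flow f = { ATOMIC_FLAG_INIT };

            printf("first delete:  %d\n", delete_flow(&f)); /* 0 */
            printf("second delete: %d\n", delete_flow(&f)); /* -1 */
            return 0;
    }
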
4817 struct mlx5e_tc_flow *flow;
4825 flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
4828 if (IS_ERR(flow))
4829 return PTR_ERR(flow);
4831 if (!same_flow_direction(flow, flags)) {
4836 if (mlx5e_is_offloaded_flow(flow)) {
4837 if (flow_flag_test(flow, USE_ACT_STATS)) {
4840 counter = mlx5e_tc_get_counter(flow);
4854 if (flow_flag_test(flow, DUP)) {
4857 list_for_each_entry(peer_flow, &flow->peer_flows, peer_flows) {
4864 if (flow_flag_test(flow, USE_ACT_STATS)) {
4889 mlx5e_flow_put(priv, flow);
5214 struct mlx5e_tc_flow *flow = ptr;
5215 struct mlx5e_priv *priv = flow->priv;
5217 mlx5e_tc_del_flow(priv, flow);
5218 kfree(flow);
5390 struct mlx5e_tc_flow *flow, *tmp;
5396 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
5397 mlx5e_tc_del_fdb_peers_flow(flow);
5406 struct mlx5e_tc_flow *flow, *tmp;
5409 list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
5410 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
5411 unready_flow_del(flow);