/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include <net/dst_metadata.h>
#include "devlink.h"
#include "en.h"
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
#include "lag/mp.h"

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)

struct mlx5e_tc_table {
	/* Protects the dynamic assignment of the t parameter
	 * which is the nic tc root table.
	 */
	struct mutex t_lock;
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *t;
	struct mlx5_flow_table *miss_t;
	struct mlx5_fs_chains *chains;
	struct mlx5e_post_act *post_act;

	struct rhashtable ht;

	struct mod_hdr_tbl mod_hdr;
	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
	DECLARE_HASHTABLE(hairpin_tbl, 8);

	struct notifier_block netdevice_nb;
	struct netdev_net_notifier netdevice_nn;

	struct mlx5_tc_ct_priv *ct;
	struct mapping_ctx *mapping;
	struct dentry *dfs_root;

	/* tc action stats */
	struct mlx5e_tc_act_stats_handle *action_stats_handle;
};

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
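/* Example of the layout above: MAPPED_OBJ_TO_REG and VPORT_TO_REG split
 * reg_c_0 in half, so a 16-bit vport metadata value written via
 * VPORT_TO_REG (moffset 16, mlen 16) occupies bits 31..16 of reg_c_0 and
 * can coexist with the 16-bit mapped-object id in bits 15..0.
 */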
struct mlx5e_tc_jump_state {
	u32 jump_count;
	bool jump_target;
	struct mlx5_flow_attr *jumping_attr;

	enum flow_action_id last_id;
	u32 last_index;
};

struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
	struct mlx5e_tc_table *tc;

	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
	return tc ? tc : ERR_PTR(-ENOMEM);
}

void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
	kvfree(tc);
}

struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
	return tc->chains;
}
/* Set the tc_ht lock class different from the lock class of the ht being
 * used, to avoid a false lock dependency warning: when deleting the last
 * flow from a group and then deleting the group, we get into
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash.
 * That takes an ht->mutex, but it is a different ht->mutex than the one
 * here.
 */
static struct lock_class_key tc_ht_lock_key;
static struct lock_class_key tc_ht_wq_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
					struct mlx5_flow_attr *attr);

void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* Move to the correct offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* Zero out the current val and mask */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* Add the new val and mask */
	curr_mask |= mask;
	curr_val |= val;

	/* Back to be32 and write */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}

void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}

int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has a 5-bit length field, where 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
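/* The id returned above is the index of the SET action inside
 * mod_hdr_acts; callers can stash it and later rewrite that same
 * action's data via mlx5e_tc_match_to_reg_mod_hdr_change() below.
 */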
static struct mlx5e_tc_act_stats_handle *
get_act_stats_handle(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->action_stats_handle;
	}

	return tc->action_stats_handle;
}

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}

static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return tc->ct;
}

static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return tc->post_act;
}
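/* In switchdev mode rules are programmed into the eswitch FDB;
 * otherwise they go to the NIC RX steering tables.
 */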
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		 (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
		attr->flags & MLX5_ATTR_FLAG_MTU);
}

static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;
	enum mlx5e_post_meter_type type;

	if (IS_ERR(post_act))
		return PTR_ERR(post_act);

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
					   type,
					   meter->act_counter, meter->drop_counter,
					   attr->branch_true, attr->branch_false);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}

static void
mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}

struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(esw, attr);
}
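/* Convenience wrapper around mlx5e_tc_match_to_reg_set_and_get_id() for
 * callers that don't care about the action index: 0 on success.
 */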
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}

void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has a 5-bit length field, where 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	u8 log_num_packets;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
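/* Once the refcount hits zero, mlx5e_flow_get() above fails with -EINVAL,
 * so concurrent readers cannot revive a flow that is being freed.
 */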
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mlx5_core_dev *
get_flow_counter_dev(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		flow->attr->esw_attr->counter_dev : flow->priv->mdev;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&tc->mod_hdr;
}

int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_attr *attr)
{
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &attr->parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
	attr->mh = mh;

	return 0;
}

void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_attr *attr)
{
	/* flow wasn't fully initialized */
	if (!attr->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     attr->mh);
	attr->mh = NULL;
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows that refer to it are
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir indir;
	int err;

	err = mlx5e_rss_params_indir_init(&indir, mdev,
					  mlx5e_rqt_size(mdev, hp->num_channels),
					  mlx5e_rqt_size(mdev, hp->num_channels));
	if (err)
		return err;

	mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, NULL, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   &indir);

	mlx5e_rss_params_indir_cleanup(&indir);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
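/* Steer each traffic type to its indirect (RSS) TIR; MLX5_TT_ANY falls
 * back to the single direct TIR.
 */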
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	struct mlx5_ttc_table *ttc;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	ttc = mlx5e_fs_get_ttc(priv->fs, false);
	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;
	hp->log_num_packets = params->log_num_packets;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(tc->hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
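/* Drop a reference taken by mlx5e_hairpin_get() or at creation time; the
 * last put unhashes the entry and tears down the hairpin pair.
 */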
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&tc->hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int debugfs_hairpin_num_active_get(void *data, u64 *val)
{
	struct mlx5e_tc_table *tc = data;
	struct mlx5e_hairpin_entry *hpe;
	u32 cnt = 0;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		cnt++;
	mutex_unlock(&tc->hairpin_tbl_lock);

	*val = cnt;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
			 debugfs_hairpin_num_active_get, NULL, "%llu\n");

static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
{
	struct mlx5e_tc_table *tc = file->private;
	struct mlx5e_hairpin_entry *hpe;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		seq_printf(file,
			   "Hairpin peer_vhca_id %u prio %u refcnt %u num_channels %u num_packets %lu\n",
			   hpe->peer_vhca_id, hpe->prio,
			   refcount_read(&hpe->refcnt), hpe->hp->num_channels,
			   BIT(hpe->hp->log_num_packets));
	mutex_unlock(&tc->hairpin_tbl_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);

static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tc->dfs_root = debugfs_create_dir("tc", dfs_root);

	debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
			    &fops_hairpin_num_active);
	debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
			    &debugfs_hairpin_table_dump_fops);
}
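/* Both files land under a "tc/" directory relative to whatever dfs_root
 * the caller passed in; the exact debugfs path depends on where that root
 * was created.
 */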
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct devlink *devlink = priv_to_devlink(priv->mdev);
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	union devlink_param_value val = {};
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&tc->hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&tc->hairpin_tbl_lock);

	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.log_num_packets = ilog2(val.vu32);
	params.log_data_size =
		clamp_t(u32,
			params.log_num_packets +
				MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
			MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
			MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));

	params.q_counter = priv->q_counter[0];
	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.num_channels = val.vu32;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
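/* A NIC rule carries at most two destinations: a forward target (flow
 * table or TIR) and, when counting is enabled, a flow counter.
 */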
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_fs_chains *nic_chains;
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	nic_chains = mlx5e_nic_chains(tc);
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(tc->t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}

static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;
	return 0;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
		if (err)
			return err;
	}

	flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_fs_chains *nic_chains;

	nic_chains = mlx5e_nic_chains(tc);
	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_attr *attr = flow->attr;

	flow_flag_clear(flow, OFFLOADED);

	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&tc->t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_unlock(&tc->t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);
	mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);

	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
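/* Offload the flow to the slow path table so packets reach software.
 * When the FW can forward after a header rewrite
 * (fdb_modify_header_fwd_to_table), also store a chain mapping in the
 * restore register so software resumes processing from the right chain
 * on a miss.
 */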
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5e_mod_hdr_handle *mh = NULL;
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;
	bool fwd_and_modify_cap;
	u32 chain_mapping = 0;
	int err;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
	if (!fwd_and_modify_cap)
		goto skip_restore;

	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
	if (err)
		goto err_get_chain;

	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					MAPPED_OBJ_TO_REG, chain_mapping);
	if (err)
		goto err_reg_set;

	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
	if (IS_ERR(mh)) {
		err = PTR_ERR(mh);
		goto err_attach;
	}

	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);

skip_restore:
	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_offload;
	}

	flow->attr->slow_mh = mh;
	flow->chain_mapping = chain_mapping;
	flow_flag_set(flow, SLOW);

	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return rule;

err_offload:
	if (fwd_and_modify_cap)
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
	if (fwd_and_modify_cap)
		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);
	return ERR_PTR(err);
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	if (slow_mh) {
		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
	}
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	if (slow_mh) {
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
		flow->chain_mapping = 0;
		flow->attr->slow_mh = NULL;
	}
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	if (flow_flag_test(flow, NOT_READY))
		unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
		return false;

	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
	    route_mdev->coredev_type != MLX5_COREDEV_SF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (!err)
		return err;

	if (!mlx5_lag_is_active(out_priv->mdev))
		return err;

	rcu_read_lock();
	err = -ENODEV;
	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (!err)
			break;
	}
	rcu_read_unlock();

	return err;
}

static int
verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
{
	if (!(actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (!(~actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
		return -EOPNOTSUPP;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool
has_encap_dests(struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int out_index;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			return true;

	return false;
}

static int
post_process_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  struct netlink_ext_ack *extack)
{
	bool vf_tun;
	int err = 0;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		goto err_out;

	if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) {
		err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
		if (err)
			goto err_out;
	}

	if (attr->branch_true &&
	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
		if (err)
			goto err_out;
	}

	if (attr->branch_false &&
	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
		if (err)
			goto err_out;
	}

err_out:
	return err;
}
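/* Install an eswitch (FDB) flow: validate the requested chain/prio range,
 * set up decap and internal-port state as needed, then offload either to
 * the regular FDB tables or, while the flow is marked SLOW (e.g. an encap
 * destination without a resolved neighbour), to the slow path.
 */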
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									       esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = post_process_attr(flow, attr, extack);
	if (err)
		goto err_out;

	err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
	if (err)
		goto err_out;

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}

static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	if (!attr)
		return;

	mlx5_free_flow_attr_actions(flow, attr);
	kvfree(attr->parse_attr);
	kfree(attr);
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	if (!attr)
		return;

	mlx5_free_flow_attr_actions(flow, attr);
	kvfree(attr->parse_attr);
	kfree(attr);
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;

	mlx5e_put_flow_tunnel_id(flow);

	remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);

	free_flow_post_acts(flow);
	mlx5_free_flow_attr_actions(flow, attr);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
	return attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
				       int peer_index)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_tc_flow *tmp;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer[peer_index]);
	mutex_unlock(&esw->offloads.peer_mutex);

	list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
		if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
			continue;

		list_del(&peer_flow->peer_flows);
		if (refcount_dec_and_test(&peer_flow->refcnt)) {
			mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
			kfree(peer_flow);
		}
	}

	if (list_empty(&flow->peer_flows))
		flow_flag_clear(flow, DUP);
}

static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (i == mlx5_get_dev_index(flow->priv->mdev))
			continue;
		mlx5e_tc_del_fdb_peer_flow(flow, i);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;

		if (!mlx5_devcom_for_each_peer_begin(devcom)) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			return;
		}

		mlx5e_tc_del_fdb_peers_flow(flow);
		mlx5_devcom_for_each_peer_end(devcom);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}

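/* On chain > 0 tunnel options are restored from a mapping id, so a rule may
 * either ignore the options entirely (all-zero mask) or match them in full;
 * partial masks can't be represented.
 */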
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})

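/* Allocate mapping ids for the tunnel match (and, if matched, the encap
 * options), pack them as tun_id << ENC_OPTS_BITS | enc_opts_id, and either
 * match on that register value (chain > 0) or program a modify-header action
 * that sets it (chain 0).
 */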
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->attr->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* Return ip_version converted from ethertype anyway */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}

/* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
 * And changes inner ip_ecn depending on inner and outer ip_ecn as follows:
 *      +---------+----------------------------------------+
 *      |Arriving |         Arriving Outer Header          |
 *      |   Inner +---------+---------+---------+----------+
 *      |  Header | Not-ECT | ECT(0)  | ECT(1)  |   CE     |
 *      +---------+---------+---------+---------+----------+
 *      | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop>   |
 *      |  ECT(0) |  ECT(0) | ECT(0)  | ECT(1)  |   CE*    |
 *      |  ECT(1) |  ECT(1) | ECT(1)  | ECT(1)* |   CE*    |
 *      |    CE   |   CE    |  CE     |   CE    |   CE     |
 *      +---------+---------+---------+---------+----------+
 *
 * Tc matches on inner after decapsulation on tunnel device, but hw offload matches
 * the inner ip_ecn value before hardware decap action.
 *
 * Cells marked with * are changed from the original inner packet ip_ecn value
 * during decap, so matching those values on inner ip_ecn before decap will fail.
 *
 * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
 * except for outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
 * and as such we can drop the inner ip_ecn = CE match.
 */
static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
				      struct flow_cls_offload *f,
				      bool *match_inner_ecn)
{
	u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_ip match;

	*match_inner_ecn = true;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		flow_rule_match_enc_ip(rule, &match);
		outer_ecn_key = match.key->tos & INET_ECN_MASK;
		outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		flow_rule_match_ip(rule, &match);
		inner_ecn_key = match.key->tos & INET_ECN_MASK;
		inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!outer_ecn_mask) {
		if (!inner_ecn_mask)
			return 0;

		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!inner_ecn_mask)
		return 0;

	/* Both inner and outer have full mask on ecn */

	if (outer_ecn_key == INET_ECN_ECT_1) {
		/* inner ecn might change by DECAP action */
		NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
		netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
		return -EOPNOTSUPP;
	}

	if (outer_ecn_key != INET_ECN_CE)
		return 0;

	if (inner_ecn_key != INET_ECN_CE) {
		/* Can't happen in software, as packet ecn will be changed to CE after decap */
		NL_SET_ERR_MSG_MOD(extack,
				   "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		netdev_warn(priv->netdev,
			    "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		return -EOPNOTSUPP;
	}

	/* outer ecn = CE, inner ecn = CE. As decap will change inner ecn to CE in any case,
	 * drop the match on inner ecn.
	 */
	*match_inner_ecn = false;

	return 0;
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
		return -EOPNOTSUPP;
	}

	needs_mapping = !!flow->attr->chain;
	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * object
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel) {
		struct mlx5_flow_spec *tmp_spec;

		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}

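/* Header buffer helpers: when the rule performs a decap, the headers the HW
 * matches after offload are the packet's inner headers, so callers pick the
 * inner vs outer match buffers based on the DECAP action flag.
 */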
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}

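/* Validate the flower meta key: l2_miss matching is rejected and only an
 * exact match on the filter device's own ingress ifindex is supported.
 */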
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);

	if (match.mask->l2_miss) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\"");
		return -EOPNOTSUPP;
	}

	if (!match.mask->ingress_ifindex)
		return 0;

	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool skip_key_basic(struct net_device *filter_dev,
			   struct flow_cls_offload *f)
{
	/* When doing mpls over udp decap, the user needs to provide
	 * MPLS_UC as the protocol in order to be able to match on mpls
	 * label fields. However, the actual ethertype is IP so we want to
	 * avoid matching on this, otherwise we'll fail the match.
	 */
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}

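/* Translate the flower dissector keys into an mlx5_flow_spec, tracking the
 * deepest header layer matched (L2/L3/L4) separately for the inner and outer
 * headers.
 */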
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				      misc_parameters_3);
	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				      misc_parameters_3);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	enum fs_flow_table_type fs_type;
	bool match_inner_ecn = true;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;
	int err;

	fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_dbg(priv->netdev, "Unsupported key used: 0x%llx\n",
			   dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}

		err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
		if (err)
			return err;
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
	    !skip_key_basic(filter_dev, f)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		mlx5e_tc_set_ethertype(priv->mdev, &match,
				       match_level == outer_match_level,
				       headers_c, headers_v);

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;

			if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
			    match.mask->vlan_eth_type &&
			    MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
						    ft_field_support.outer_second_vid,
						    fs_type)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				spec->match_criteria_enable |=
					MLX5_MATCH_MISC_PARAMETERS;
			}
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

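	/* QinQ: the second VLAN tag is matched via misc parameters and
	 * requires outer_second_vid support in the device.
	 */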
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
						     fs_type)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Matching on CVLAN is not supported");
				return -EOPNOTSUPP;
			}

			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match_inner_ecn) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
				 match.mask->tos & 0x3);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
				 match.key->tos & 0x3);
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->tos || match.mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(match.key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(match.key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(match.mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(match.key->flags));

		if (match.mask->flags)
			*match_level = MLX5_MATCH_L4;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);
		switch (ip_proto) {
		case IPPROTO_ICMP:
			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
			      MLX5_FLEX_PROTO_ICMP)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Match on Flex protocols for ICMP is not supported");
				return -EOPNOTSUPP;
			}
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
				 match.mask->type);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
				 match.key->type);
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
				 match.mask->code);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
				 match.key->code);
			break;
		case IPPROTO_ICMPV6:
			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
			      MLX5_FLEX_PROTO_ICMPV6)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Match on Flex protocols for ICMPV6 is not supported");
				return -EOPNOTSUPP;
			}
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
				 match.mask->type);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
				 match.key->type);
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
				 match.mask->code);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
				 match.key->code);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Code and type matching only with ICMP and ICMPv6");
			netdev_err(priv->netdev,
				   "Code and type matching only with ICMP and ICMPv6\n");
			return -EINVAL;
		}
		if (match.mask->code || match.mask->type) {
			*match_level = MLX5_MATCH_L4;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
		}
	}
	/* Currently supported only for MPLS over UDP */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
	    !netif_is_bareudp(filter_dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on MPLS is supported only for MPLS over UDP");
		netdev_err(priv->netdev,
			   "Matching on MPLS is supported only for MPLS over UDP\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

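/* Wrapper around __parse_cls_flower() that also enforces the eswitch min
 * inline mode: a non-uplink vport must be able to send enough headers inline
 * to cover the requested (non-tunnel) match level.
 */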
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct flow_cls_offload *f,
			    struct net_device *filter_dev)
{
	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	bool is_eswitch_flow;
	int err;

	inner_match_level = MLX5_MATCH_NONE;
	outer_match_level = MLX5_MATCH_NONE;

	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
				 &inner_match_level, &outer_match_level);
	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
		outer_match_level : inner_match_level;

	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
	if (!err && is_eswitch_flow) {
		rep = rpriv->rep;
		if (rep->vport != MLX5_VPORT_UPLINK &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < non_tunnel_match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    non_tunnel_match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	flow->attr->inner_match_level = inner_match_level;
	flow->attr->outer_match_level = outer_match_level;

	return err;
}

struct mlx5_fields {
	u8  field;
	u8  field_bsize;
	u32 field_mask;
	u32 offset;
	u32 match_offset;
};

#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
	{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
	 offsetof(struct pedit_headers, field) + (off), \
	 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}

/* masked values are the same and there are no rewrites that do not have a
 * match.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})

static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
			 void *matchmaskp, u8 bsize)
{
	bool same = false;

	switch (bsize) {
	case 8:
		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
		break;
	case 16:
		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
		break;
	case 32:
		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
		break;
	}

	return same;
}

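/* Map pedit offsets in struct pedit_headers to HW modify-header fields,
 * together with the match offsets used to skip rewrites that are already
 * guaranteed by the rule's match value.
 */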
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),

	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
	OFFLOAD(IP_DSCP, 16,  0x0fc0, ip6, 0, ip_dscp),

	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
	/* in linux iphdr tcp_flags is 8 bits long */
	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
};

static u32 mask_field_get(void *mask, struct mlx5_fields *f)
{
	switch (f->field_bsize) {
	case 32:
		return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
	case 16:
		return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
	default:
		return *(u8 *)mask & (u8)f->field_mask;
	}
}

static void mask_field_clear(void *mask, struct mlx5_fields *f)
{
	switch (f->field_bsize) {
	case 32:
		*(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
		break;
	case 16:
		*(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
		break;
	default:
		*(u8 *)mask &= ~(u8)f->field_mask;
		break;
	}
}

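/* Convert the accumulated pedit set/add masks into mlx5 modify-header
 * actions. A field is skipped when the rule already matches exactly the
 * value being set, or when adding zero; rewrites of non-contiguous
 * sub-fields are rejected.
 */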
static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	struct pedit_headers_action *hdrs = parse_attr->hdrs;
	void *headers_c, *headers_v, *action, *vals_p;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	void *s_masks_p, *a_masks_p;
	int i, first, last, next_z;
	struct mlx5_fields *f;
	unsigned long mask;
	u32 s_mask, a_mask;
	u8 cmd;

	mod_acts = &parse_attr->mod_hdr_acts;
	headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);

	set_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].masks;
	add_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].masks;
	set_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].vals;
	add_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].vals;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = mask_field_get(s_masks_p, f);
		a_mask = mask_field_get(a_masks_p, f);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			netdev_warn(priv->netdev,
				    "mlx5: can't set and add to the same HW field (%x)\n",
				    f->field);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
				skip = true;
			/* clear to denote we consumed this field */
			mask_field_clear(s_masks_p, f);
		} else {
			cmd = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if (!mask_field_get(vals_p, f))
				skip = true;
			/* clear to denote we consumed this field */
			mask_field_clear(a_masks_p, f);
		}
		if (skip)
			continue;

		first = find_first_bit(&mask, f->field_bsize);
		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
		last = find_last_bit(&mask, f->field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			netdev_warn(priv->netdev,
				    "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
				    mask);
			return -EOPNOTSUPP;
		}

		action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
		if (IS_ERR(action)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			mlx5_core_warn(priv->mdev,
				       "mlx5: parsed %d pedit actions, can't do more\n",
				       mod_acts->num_actions);
			return PTR_ERR(action);
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			unsigned long field_mask = f->field_mask;
			int start;

			/* if field is bit sized it can start not from first bit */
			start = find_first_bit(&field_mask, f->field_bsize);

			MLX5_SET(set_action_in, action, offset, first - start);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (f->field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (f->field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (f->field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		++mod_acts->num_actions;
	}

	return 0;
}

static const struct pedit_headers zero_masks = {};

static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
				       struct mlx5e_tc_flow_parse_attr *parse_attr,
				       struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	u8 cmd;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &parse_attr->hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 u32 *action_flags,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
	if (err)
		goto out_dealloc_parsed_actions;

	err = verify_offload_pedit_fields(priv, parse_attr, extack);
	if (err)
		goto out_dealloc_parsed_actions;

	return 0;

out_dealloc_parsed_actions:
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	return err;
}

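/* Overlays on the 32-bit mangle mask word, used to tell whether an IP
 * header rewrite touches anything besides ttl/hop_limit.
 */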
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};

static bool
is_flow_action_modify_ip_header(struct flow_action *flow_action)
{
	const struct flow_action_entry *act;
	u32 mask, offset;
	u8 htype;
	int i;

	/* For IPv4 & IPv6 header check 4 byte word,
	 * to determine that modified fields
	 * are NOT ttl & hop_limit only.
	 */
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		htype = act->mangle.htype;
		offset = act->mangle.offset;
		mask = ~act->mangle.mask;

		if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
			struct ip_ttl_word *ttl_word =
				(struct ip_ttl_word *)&mask;

			if (offset != offsetof(struct iphdr, ttl) ||
			    ttl_word->protocol ||
			    ttl_word->check)
				return true;
		} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
			struct ipv6_hoplimit_word *hoplimit_word =
				(struct ipv6_hoplimit_word *)&mask;

			if (offset != offsetof(struct ipv6hdr, payload_len) ||
			    hoplimit_word->payload_len ||
			    hoplimit_word->nexthdr)
				return true;
		}
	}

	return false;
}

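/* Rewrites that modify the IP header beyond ttl/hop_limit are only offloaded
 * when the flow is known to be TCP, UDP or ICMP.
 */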
static bool modify_header_match_supported(struct mlx5e_priv *priv,
					  struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions,
					  struct netlink_ext_ack *extack)
{
	bool modify_ip_header;
	void *headers_c;
	void *headers_v;
	u16 ethertype;
	u8 ip_proto;

	headers_c = mlx5e_get_match_headers_criteria(actions, spec);
	headers_v = mlx5e_get_match_headers_value(actions, spec);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
	    ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = is_flow_action_modify_ip_header(flow_action);
	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
			    ip_proto);
		return false;
	}

out_ok:
	return true;
}

static bool
actions_match_supported_fdb(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct netlink_ext_ack *extack)
{
	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;

	if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev,
				 "current firmware doesn't support split rule for port mirroring\n");
		return false;
	}

	return true;
}

static bool
actions_match_supported(struct mlx5e_priv *priv,
			struct flow_action *flow_action,
			u32 actions,
			struct mlx5e_tc_flow_parse_attr *parse_attr,
			struct mlx5e_tc_flow *flow,
			struct netlink_ext_ack *extack)
{
	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions,
					   extack))
		return false;

	if (mlx5e_is_eswitch_flow(flow) &&
	    !actions_match_supported_fdb(priv, flow, extack))
		return false;

	return true;
}

static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	return priv->mdev == peer_priv->mdev;
}

bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}

static int
actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5_flow_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct pedit_headers_action *hdrs = parse_attr->hdrs;
	enum mlx5_flow_namespace_type ns_type;
	int err;

	if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
	    !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
		return 0;

	ns_type = mlx5e_get_flow_namespace(flow);

	err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
	if (err)
		return err;

	if (parse_attr->mod_hdr_acts.num_actions > 0)
		return 0;

	/* In case all pedit actions are skipped, remove the MOD_HDR flag. */
	attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);

	if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
		return 0;

	if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
	      (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
		attr->esw_attr->split_count = 0;

	return 0;
}

static struct mlx5_flow_attr*
mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
				   enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	u32 attr_sz = ns_to_attr_sz(ns_type);
	struct mlx5_flow_attr *attr2;

	attr2 = mlx5_alloc_flow_attr(ns_type);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!attr2 || !parse_attr) {
		kvfree(parse_attr);
		kfree(attr2);
		return NULL;
	}

	memcpy(attr2, attr, attr_sz);
	INIT_LIST_HEAD(&attr2->list);
	parse_attr->filter_dev = attr->parse_attr->filter_dev;
	attr2->action = 0;
	attr2->counter = NULL;
	attr2->tc_act_cookies_count = 0;
	attr2->flags = 0;
	attr2->parse_attr = parse_attr;
	attr2->dest_chain = 0;
	attr2->dest_ft = NULL;
	attr2->act_id_restore_rule = NULL;
	memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr));

	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
		attr2->esw_attr->out_count = 0;
		attr2->esw_attr->split_count = 0;
	}

	attr2->branch_true = NULL;
	attr2->branch_false = NULL;
	attr2->jumping_attr = NULL;
	return attr2;
}

struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_attr *attr;
	int i;

	list_for_each_entry(attr, &flow->attrs, list) {
		esw_attr = attr->esw_attr;
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
				return attr;
		}
	}

	return NULL;
}

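/* Post-action handling: every attribute on flow->attrs except the last one
 * owns a rule in the post-action table. The helpers below offload, unoffload
 * and free those per-attribute rules.
 */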
void
mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
{
	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
	struct mlx5_flow_attr *attr;

	list_for_each_entry(attr, &flow->attrs, list) {
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
	}
}

static void
free_flow_post_acts(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr, *tmp;

	list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		mlx5_free_flow_attr_actions(flow, attr);

		list_del(&attr->list);
		kvfree(attr->parse_attr);
		kfree(attr);
	}
}

int
mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
{
	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
	struct mlx5_flow_attr *attr;
	int err = 0;

	list_for_each_entry(attr, &flow->attrs, list) {
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
		if (err)
			break;
	}

	return err;
}

/* TC filter rule HW translation:
 *
 *        +---------------------+
 *        + ft prio (tc chain)  +
 *        + original match      +
 *        +---------------------+
 *                   |
 *                   | if multi table action
 *                   |
 *                   v
 *        +---------------------+
 *        + post act ft         |<----.
 *        + match fte id        |     | split on multi table action
 *        + do actions          |-----'
 *        +---------------------+
 *                   |
 *                   |
 *                   v
 * Do rest of the actions after last multi table action.
 */
static int
alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
{
	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
	struct mlx5_flow_attr *attr, *next_attr = NULL;
	struct mlx5e_post_act_handle *handle;
	int err;

	/* This is going in reverse order as needed.
	 * The first entry is the last attribute.
	 */
	list_for_each_entry(attr, &flow->attrs, list) {
		if (!next_attr) {
			/* Set counter action on last post act rule. */
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		}

		if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
			err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
			if (err)
				goto out_free;
		}

		/* Don't add post_act rule for first attr (last in the list).
		 * It's being handled by the caller.
		 */
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
		if (err)
			goto out_free;

		err = post_process_attr(flow, attr, extack);
		if (err)
			goto out_free;

		handle = mlx5e_tc_post_act_add(post_act, attr);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out_free;
		}

		attr->post_act_handle = handle;

		if (attr->jumping_attr) {
			err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
			if (err)
				goto out_free;
		}

		next_attr = attr;
	}

	if (flow_flag_test(flow, SLOW))
		goto out;

	err = mlx5e_tc_offload_flow_post_acts(flow);
	if (err)
		goto out_free;

out:
	return 0;

out_free:
	free_flow_post_acts(flow);
	return err;
}

static int
set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);

	if (IS_ERR(post_act))
		return PTR_ERR(post_act);

	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);

	return 0;
}

static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5e_tc_act_branch_ctrl *cond,
		  struct mlx5_flow_attr **cond_attr,
		  u32 *jump_count,
		  struct netlink_ext_ack *extack)
{
	struct mlx5_flow_attr *attr;
	int err = 0;

	*cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
							mlx5e_get_flow_namespace(flow));
	if (!(*cond_attr))
		return -ENOMEM;

	attr = *cond_attr;

	switch (cond->act_id) {
	case FLOW_ACTION_DROP:
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
		break;
	case FLOW_ACTION_ACCEPT:
	case FLOW_ACTION_PIPE:
		err = set_branch_dest_ft(flow->priv, attr);
		if (err)
			goto out_err;
		break;
	case FLOW_ACTION_JUMP:
		if (*jump_count) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
			err = -EOPNOTSUPP;
			goto out_err;
		}
		*jump_count = cond->extval;
		err = set_branch_dest_ft(flow->priv, attr);
		if (err)
			goto out_err;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out_err;
	}

	return err;
out_err:
	kfree(*cond_attr);
	*cond_attr = NULL;
	return err;
}

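/* Track how many tc actions remain inside a branch ("jump") list. On the
 * last jumped-over action mark that a new attribute must be split off; when
 * the count reaches zero, wire the stored jumping attribute to this one.
 */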
pedit) 3818 * Jump only over a tc action 3819 */ 3820 if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index) 3821 return; 3822 3823 jump_state->last_id = act->id; 3824 jump_state->last_index = act->hw_index; 3825 3826 /* nothing to do for intermediate actions */ 3827 if (--jump_state->jump_count > 1) 3828 return; 3829 3830 if (jump_state->jump_count == 1) { /* last action in the jump action list */ 3831 3832 /* create a new attribute after this action */ 3833 jump_state->jump_target = true; 3834 3835 if (tc_act->is_terminating_action) { /* the branch ends here */ 3836 attr->flags |= MLX5_ATTR_FLAG_TERMINATING; 3837 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 3838 } else { /* the branch continues executing the rest of the actions */ 3839 struct mlx5e_post_act *post_act; 3840 3841 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3842 post_act = get_post_action(priv); 3843 attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act); 3844 } 3845 } else if (jump_state->jump_count == 0) { /* first attr after the jump action list */ 3846 /* This is the post action for the jumping attribute (either red or green) 3847 * Use the stored jumping_attr to set the post act id on the jumping attribute 3848 */ 3849 attr->jumping_attr = jump_state->jumping_attr; 3850 } 3851} 3852 3853static int 3854parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act, 3855 struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr, 3856 struct mlx5e_tc_jump_state *jump_state, 3857 struct netlink_ext_ack *extack) 3858{ 3859 struct mlx5e_tc_act_branch_ctrl cond_true, cond_false; 3860 u32 jump_count = jump_state->jump_count; 3861 int err; 3862 3863 if (!tc_act->get_branch_ctrl) 3864 return 0; 3865 3866 tc_act->get_branch_ctrl(act, &cond_true, &cond_false); 3867 3868 err = alloc_branch_attr(flow, &cond_true, 3869 &attr->branch_true, &jump_count, extack); 3870 if (err) 3871 goto out_err; 3872 3873 if (jump_count) 3874 jump_state->jumping_attr = attr->branch_true; 3875 3876 err = alloc_branch_attr(flow, &cond_false, 3877 &attr->branch_false, &jump_count, extack); 3878 if (err) 3879 goto err_branch_false; 3880 3881 if (jump_count && !jump_state->jumping_attr) 3882 jump_state->jumping_attr = attr->branch_false; 3883 3884 jump_state->jump_count = jump_count; 3885 3886 /* branching action requires its own counter */ 3887 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 3888 flow_flag_set(flow, USE_ACT_STATS); 3889 3890 return 0; 3891 3892err_branch_false: 3893 free_branch_attr(flow, attr->branch_true); 3894out_err: 3895 return err; 3896} 3897 3898static int 3899parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, 3900 struct flow_action *flow_action) 3901{ 3902 struct netlink_ext_ack *extack = parse_state->extack; 3903 struct mlx5e_tc_flow *flow = parse_state->flow; 3904 struct mlx5e_tc_jump_state jump_state = {}; 3905 struct mlx5_flow_attr *attr = flow->attr; 3906 enum mlx5_flow_namespace_type ns_type; 3907 struct mlx5e_priv *priv = flow->priv; 3908 struct mlx5_flow_attr *prev_attr; 3909 struct flow_action_entry *act; 3910 struct mlx5e_tc_act *tc_act; 3911 int err, i, i_split = 0; 3912 bool is_missable; 3913 3914 ns_type = mlx5e_get_flow_namespace(flow); 3915 list_add(&attr->list, &flow->attrs); 3916 3917 flow_action_for_each(i, act, flow_action) { 3918 jump_state.jump_target = false; 3919 is_missable = false; 3920 prev_attr = attr; 3921 3922 tc_act = mlx5e_tc_act_get(act->id, ns_type); 3923 if (!tc_act) { 3924 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action"); 3925 err = 
-EOPNOTSUPP; 3926 goto out_free_post_acts; 3927 } 3928 3929 if (tc_act->can_offload && !tc_act->can_offload(parse_state, act, i, attr)) { 3930 err = -EOPNOTSUPP; 3931 goto out_free_post_acts; 3932 } 3933 3934 err = tc_act->parse_action(parse_state, act, priv, attr); 3935 if (err) 3936 goto out_free_post_acts; 3937 3938 dec_jump_count(act, tc_act, attr, priv, &jump_state); 3939 3940 err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack); 3941 if (err) 3942 goto out_free_post_acts; 3943 3944 parse_state->actions |= attr->action; 3945 3946 /* Split attr for multi table act if not the last act. */ 3947 if (jump_state.jump_target || 3948 (tc_act->is_multi_table_act && 3949 tc_act->is_multi_table_act(priv, act, attr) && 3950 i < flow_action->num_entries - 1)) { 3951 is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false; 3952 3953 err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, 3954 ns_type); 3955 if (err) 3956 goto out_free_post_acts; 3957 3958 attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type); 3959 if (!attr) { 3960 err = -ENOMEM; 3961 goto out_free_post_acts; 3962 } 3963 3964 i_split = i + 1; 3965 parse_state->if_count = 0; 3966 list_add(&attr->list, &flow->attrs); 3967 } 3968 3969 if (is_missable) { 3970 /* Add counter to prev, and assign act to new (next) attr */ 3971 prev_attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 3972 flow_flag_set(flow, USE_ACT_STATS); 3973 3974 attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie; 3975 } else if (!tc_act->stats_action) { 3976 prev_attr->tc_act_cookies[prev_attr->tc_act_cookies_count++] = act->cookie; 3977 } 3978 } 3979 3980 err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type); 3981 if (err) 3982 goto out_free_post_acts; 3983 3984 err = alloc_flow_post_acts(flow, extack); 3985 if (err) 3986 goto out_free_post_acts; 3987 3988 return 0; 3989 3990out_free_post_acts: 3991 free_flow_post_acts(flow); 3992 3993 return err; 3994} 3995 3996static int 3997flow_action_supported(struct flow_action *flow_action, 3998 struct netlink_ext_ack *extack) 3999{ 4000 if (!flow_action_has_entries(flow_action)) { 4001 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); 4002 return -EINVAL; 4003 } 4004 4005 if (!flow_action_hw_stats_check(flow_action, extack, 4006 FLOW_ACTION_HW_STATS_DELAYED_BIT)) { 4007 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); 4008 return -EOPNOTSUPP; 4009 } 4010 4011 return 0; 4012} 4013 4014static int 4015parse_tc_nic_actions(struct mlx5e_priv *priv, 4016 struct flow_action *flow_action, 4017 struct mlx5e_tc_flow *flow, 4018 struct netlink_ext_ack *extack) 4019{ 4020 struct mlx5e_tc_act_parse_state *parse_state; 4021 struct mlx5e_tc_flow_parse_attr *parse_attr; 4022 struct mlx5_flow_attr *attr = flow->attr; 4023 int err; 4024 4025 err = flow_action_supported(flow_action, extack); 4026 if (err) 4027 return err; 4028 4029 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 4030 parse_attr = attr->parse_attr; 4031 parse_state = &parse_attr->parse_state; 4032 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); 4033 parse_state->ct_priv = get_ct_priv(priv); 4034 4035 err = parse_tc_actions(parse_state, flow_action); 4036 if (err) 4037 return err; 4038 4039 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack); 4040 if (err) 4041 return err; 4042 4043 err = verify_attr_actions(attr->action, extack); 4044 if (err) 4045 return err; 4046 4047 if 
(!actions_match_supported(priv, flow_action, parse_state->actions, 4048 parse_attr, flow, extack)) 4049 return -EOPNOTSUPP; 4050 4051 return 0; 4052} 4053 4054static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, 4055 struct net_device *peer_netdev) 4056{ 4057 struct mlx5e_priv *peer_priv; 4058 4059 peer_priv = netdev_priv(peer_netdev); 4060 4061 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && 4062 mlx5e_eswitch_vf_rep(priv->netdev) && 4063 mlx5e_eswitch_vf_rep(peer_netdev) && 4064 mlx5e_same_hw_devs(priv, peer_priv)); 4065} 4066 4067static bool same_hw_reps(struct mlx5e_priv *priv, 4068 struct net_device *peer_netdev) 4069{ 4070 struct mlx5e_priv *peer_priv; 4071 4072 peer_priv = netdev_priv(peer_netdev); 4073 4074 return mlx5e_eswitch_rep(priv->netdev) && 4075 mlx5e_eswitch_rep(peer_netdev) && 4076 mlx5e_same_hw_devs(priv, peer_priv); 4077} 4078 4079static bool is_lag_dev(struct mlx5e_priv *priv, 4080 struct net_device *peer_netdev) 4081{ 4082 return ((mlx5_lag_is_sriov(priv->mdev) || 4083 mlx5_lag_is_multipath(priv->mdev)) && 4084 same_hw_reps(priv, peer_netdev)); 4085} 4086 4087static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev) 4088{ 4089 return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev); 4090} 4091 4092bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 4093 struct net_device *out_dev) 4094{ 4095 if (is_merged_eswitch_vfs(priv, out_dev)) 4096 return true; 4097 4098 if (is_multiport_eligible(priv, out_dev)) 4099 return true; 4100 4101 if (is_lag_dev(priv, out_dev)) 4102 return true; 4103 4104 return mlx5e_eswitch_rep(out_dev) && 4105 same_port_devs(priv, netdev_priv(out_dev)); 4106} 4107 4108int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, 4109 struct mlx5_flow_attr *attr, 4110 int ifindex, 4111 enum mlx5e_tc_int_port_type type, 4112 u32 *action, 4113 int out_index) 4114{ 4115 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 4116 struct mlx5e_tc_int_port_priv *int_port_priv; 4117 struct mlx5e_tc_flow_parse_attr *parse_attr; 4118 struct mlx5e_tc_int_port *dest_int_port; 4119 int err; 4120 4121 parse_attr = attr->parse_attr; 4122 int_port_priv = mlx5e_get_int_port_priv(priv); 4123 4124 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type); 4125 if (IS_ERR(dest_int_port)) 4126 return PTR_ERR(dest_int_port); 4127 4128 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts, 4129 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG, 4130 mlx5e_tc_int_port_get_metadata(dest_int_port)); 4131 if (err) { 4132 mlx5e_tc_int_port_put(int_port_priv, dest_int_port); 4133 return err; 4134 } 4135 4136 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 4137 4138 esw_attr->dest_int_port = dest_int_port; 4139 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE; 4140 esw_attr->split_count = out_index; 4141 4142 /* Forward to root fdb for matching against the new source vport */ 4143 attr->dest_chain = 0; 4144 4145 return 0; 4146} 4147 4148static int 4149parse_tc_fdb_actions(struct mlx5e_priv *priv, 4150 struct flow_action *flow_action, 4151 struct mlx5e_tc_flow *flow, 4152 struct netlink_ext_ack *extack) 4153{ 4154 struct mlx5e_tc_act_parse_state *parse_state; 4155 struct mlx5e_tc_flow_parse_attr *parse_attr; 4156 struct mlx5_flow_attr *attr = flow->attr; 4157 struct mlx5_esw_flow_attr *esw_attr; 4158 struct net_device *filter_dev; 4159 int err; 4160 4161 err = flow_action_supported(flow_action, extack); 4162 if (err) 4163 return err; 4164 4165 esw_attr = attr->esw_attr; 4166 
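/* filter_dev is the device the classifier is attached to; it is checked below to reject unsupported internal-port forwarding combinations. */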
parse_attr = attr->parse_attr; 4167 filter_dev = parse_attr->filter_dev; 4168 parse_state = &parse_attr->parse_state; 4169 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); 4170 parse_state->ct_priv = get_ct_priv(priv); 4171 4172 err = parse_tc_actions(parse_state, flow_action); 4173 if (err) 4174 return err; 4175 4176 /* Forward to/from internal port can only have 1 dest */ 4177 if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) && 4178 esw_attr->out_count > 1) { 4179 NL_SET_ERR_MSG_MOD(extack, 4180 "Rules with internal port can have only one destination"); 4181 return -EOPNOTSUPP; 4182 } 4183 4184 /* Forward from tunnel/internal port to internal port is not supported */ 4185 if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) && 4186 esw_attr->dest_int_port) { 4187 NL_SET_ERR_MSG_MOD(extack, 4188 "Forwarding from tunnel/internal port to internal port is not supported"); 4189 return -EOPNOTSUPP; 4190 } 4191 4192 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack); 4193 if (err) 4194 return err; 4195 4196 if (!actions_match_supported(priv, flow_action, parse_state->actions, 4197 parse_attr, flow, extack)) 4198 return -EOPNOTSUPP; 4199 4200 return 0; 4201} 4202 4203static void get_flags(int flags, unsigned long *flow_flags) 4204{ 4205 unsigned long __flow_flags = 0; 4206 4207 if (flags & MLX5_TC_FLAG(INGRESS)) 4208 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS); 4209 if (flags & MLX5_TC_FLAG(EGRESS)) 4210 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS); 4211 4212 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) 4213 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); 4214 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD)) 4215 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); 4216 if (flags & MLX5_TC_FLAG(FT_OFFLOAD)) 4217 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT); 4218 4219 *flow_flags = __flow_flags; 4220} 4221 4222static const struct rhashtable_params tc_ht_params = { 4223 .head_offset = offsetof(struct mlx5e_tc_flow, node), 4224 .key_offset = offsetof(struct mlx5e_tc_flow, cookie), 4225 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), 4226 .automatic_shrinking = true, 4227}; 4228 4229static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, 4230 unsigned long flags) 4231{ 4232 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); 4233 struct mlx5e_rep_priv *rpriv; 4234 4235 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) { 4236 rpriv = priv->ppriv; 4237 return &rpriv->tc_ht; 4238 } else /* NIC offload */ 4239 return &tc->ht; 4240} 4241 4242static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) 4243{ 4244 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; 4245 struct mlx5_flow_attr *attr = flow->attr; 4246 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK && 4247 flow_flag_test(flow, INGRESS); 4248 bool act_is_encap = !!(attr->action & 4249 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); 4250 bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom); 4251 4252 if (!esw_paired) 4253 return false; 4254 4255 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) || 4256 mlx5_lag_is_multipath(esw_attr->in_mdev)) && 4257 (is_rep_ingress || act_is_encap)) 4258 return true; 4259 4260 if (mlx5_lag_is_mpesw(esw_attr->in_mdev)) 4261 return true; 4262 4263 return false; 4264} 4265 4266struct mlx5_flow_attr * 4267mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type) 4268{ 4269 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ? 
4270 sizeof(struct mlx5_esw_flow_attr) : 4271 sizeof(struct mlx5_nic_flow_attr); 4272 struct mlx5_flow_attr *attr; 4273 4274 attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL); 4275 if (!attr) 4276 return attr; 4277 4278 INIT_LIST_HEAD(&attr->list); 4279 return attr; 4280} 4281 4282static void 4283mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr) 4284{ 4285 struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow); 4286 struct mlx5_esw_flow_attr *esw_attr; 4287 4288 if (!attr) 4289 return; 4290 4291 if (attr->post_act_handle) 4292 mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle); 4293 4294 mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr); 4295 4296 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 4297 mlx5_fc_destroy(counter_dev, attr->counter); 4298 4299 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 4300 mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); 4301 mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr); 4302 } 4303 4304 if (mlx5e_is_eswitch_flow(flow)) { 4305 esw_attr = attr->esw_attr; 4306 4307 if (esw_attr->int_port) 4308 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv), 4309 esw_attr->int_port); 4310 4311 if (esw_attr->dest_int_port) 4312 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv), 4313 esw_attr->dest_int_port); 4314 } 4315 4316 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr); 4317 4318 free_branch_attr(flow, attr->branch_true); 4319 free_branch_attr(flow, attr->branch_false); 4320} 4321 4322static int 4323mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, 4324 struct flow_cls_offload *f, unsigned long flow_flags, 4325 struct mlx5e_tc_flow_parse_attr **__parse_attr, 4326 struct mlx5e_tc_flow **__flow) 4327{ 4328 struct mlx5e_tc_flow_parse_attr *parse_attr; 4329 struct mlx5_flow_attr *attr; 4330 struct mlx5e_tc_flow *flow; 4331 int err = -ENOMEM; 4332 int out_index; 4333 4334 flow = kzalloc(sizeof(*flow), GFP_KERNEL); 4335 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); 4336 if (!parse_attr || !flow) 4337 goto err_free; 4338 4339 flow->flags = flow_flags; 4340 flow->cookie = f->cookie; 4341 flow->priv = priv; 4342 4343 attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow)); 4344 if (!attr) 4345 goto err_free; 4346 4347 flow->attr = attr; 4348 4349 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) 4350 INIT_LIST_HEAD(&flow->encaps[out_index].list); 4351 INIT_LIST_HEAD(&flow->hairpin); 4352 INIT_LIST_HEAD(&flow->l3_to_l2_reformat); 4353 INIT_LIST_HEAD(&flow->attrs); 4354 INIT_LIST_HEAD(&flow->peer_flows); 4355 refcount_set(&flow->refcnt, 1); 4356 init_completion(&flow->init_done); 4357 init_completion(&flow->del_hw_done); 4358 4359 *__flow = flow; 4360 *__parse_attr = parse_attr; 4361 4362 return 0; 4363 4364err_free: 4365 kfree(flow); 4366 kvfree(parse_attr); 4367 return err; 4368} 4369 4370static void 4371mlx5e_flow_attr_init(struct mlx5_flow_attr *attr, 4372 struct mlx5e_tc_flow_parse_attr *parse_attr, 4373 struct flow_cls_offload *f) 4374{ 4375 attr->parse_attr = parse_attr; 4376 attr->chain = f->common.chain_index; 4377 attr->prio = f->common.prio; 4378} 4379 4380static void 4381mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr, 4382 struct mlx5e_priv *priv, 4383 struct mlx5e_tc_flow_parse_attr *parse_attr, 4384 struct flow_cls_offload *f, 4385 struct mlx5_eswitch_rep *in_rep, 4386 struct mlx5_core_dev *in_mdev) 4387{ 4388 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 4389 struct mlx5_esw_flow_attr *esw_attr = 
attr->esw_attr; 4390 4391 mlx5e_flow_attr_init(attr, parse_attr, f); 4392 4393 esw_attr->in_rep = in_rep; 4394 esw_attr->in_mdev = in_mdev; 4395 4396 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) == 4397 MLX5_COUNTER_SOURCE_ESWITCH) 4398 esw_attr->counter_dev = in_mdev; 4399 else 4400 esw_attr->counter_dev = priv->mdev; 4401} 4402 4403static struct mlx5e_tc_flow * 4404__mlx5e_add_fdb_flow(struct mlx5e_priv *priv, 4405 struct flow_cls_offload *f, 4406 unsigned long flow_flags, 4407 struct net_device *filter_dev, 4408 struct mlx5_eswitch_rep *in_rep, 4409 struct mlx5_core_dev *in_mdev) 4410{ 4411 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 4412 struct netlink_ext_ack *extack = f->common.extack; 4413 struct mlx5e_tc_flow_parse_attr *parse_attr; 4414 struct mlx5e_tc_flow *flow; 4415 int attr_size, err; 4416 4417 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); 4418 attr_size = sizeof(struct mlx5_esw_flow_attr); 4419 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, 4420 &parse_attr, &flow); 4421 if (err) 4422 goto out; 4423 4424 parse_attr->filter_dev = filter_dev; 4425 mlx5e_flow_esw_attr_init(flow->attr, 4426 priv, parse_attr, 4427 f, in_rep, in_mdev); 4428 4429 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, 4430 f, filter_dev); 4431 if (err) 4432 goto err_free; 4433 4434 /* actions validation depends on parsing the ct matches first */ 4435 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f, 4436 &flow->attr->ct_attr, extack); 4437 if (err) 4438 goto err_free; 4439 4440 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); 4441 if (err) 4442 goto err_free; 4443 4444 err = mlx5e_tc_add_fdb_flow(priv, flow, extack); 4445 complete_all(&flow->init_done); 4446 if (err) { 4447 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev))) 4448 goto err_free; 4449 4450 add_unready_flow(flow); 4451 } 4452 4453 return flow; 4454 4455err_free: 4456 mlx5e_flow_put(priv, flow); 4457out: 4458 return ERR_PTR(err); 4459} 4460 4461static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f, 4462 struct mlx5e_tc_flow *flow, 4463 unsigned long flow_flags, 4464 struct mlx5_eswitch *peer_esw) 4465{ 4466 struct mlx5e_priv *priv = flow->priv, *peer_priv; 4467 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 4468 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; 4469 struct mlx5e_tc_flow_parse_attr *parse_attr; 4470 int i = mlx5_get_dev_index(peer_esw->dev); 4471 struct mlx5e_rep_priv *peer_urpriv; 4472 struct mlx5e_tc_flow *peer_flow; 4473 struct mlx5_core_dev *in_mdev; 4474 int err = 0; 4475 4476 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH); 4477 peer_priv = netdev_priv(peer_urpriv->netdev); 4478 4479 /* in_mdev is assigned the mdev that the packet originated from. 4480 * So packets redirected to uplink use the same mdev as the 4481 * original flow and packets redirected from uplink use the 4482 * peer mdev. 4483 * Multiport eswitch is a special case where we need to 4484 * keep the original mdev.
4485 */ 4486 if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev)) 4487 in_mdev = peer_priv->mdev; 4488 else 4489 in_mdev = priv->mdev; 4490 4491 parse_attr = flow->attr->parse_attr; 4492 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags, 4493 parse_attr->filter_dev, 4494 attr->in_rep, in_mdev); 4495 if (IS_ERR(peer_flow)) { 4496 err = PTR_ERR(peer_flow); 4497 goto out; 4498 } 4499 4500 list_add_tail(&peer_flow->peer_flows, &flow->peer_flows); 4501 flow_flag_set(flow, DUP); 4502 mutex_lock(&esw->offloads.peer_mutex); 4503 list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]); 4504 mutex_unlock(&esw->offloads.peer_mutex); 4505 4506out: 4507 return err; 4508} 4509 4510static int 4511mlx5e_add_fdb_flow(struct mlx5e_priv *priv, 4512 struct flow_cls_offload *f, 4513 unsigned long flow_flags, 4514 struct net_device *filter_dev, 4515 struct mlx5e_tc_flow **__flow) 4516{ 4517 struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos; 4518 struct mlx5e_rep_priv *rpriv = priv->ppriv; 4519 struct mlx5_eswitch_rep *in_rep = rpriv->rep; 4520 struct mlx5_core_dev *in_mdev = priv->mdev; 4521 struct mlx5_eswitch *peer_esw; 4522 struct mlx5e_tc_flow *flow; 4523 int err; 4524 4525 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep, 4526 in_mdev); 4527 if (IS_ERR(flow)) 4528 return PTR_ERR(flow); 4529 4530 if (!is_peer_flow_needed(flow)) { 4531 *__flow = flow; 4532 return 0; 4533 } 4534 4535 if (!mlx5_devcom_for_each_peer_begin(devcom)) { 4536 err = -ENODEV; 4537 goto clean_flow; 4538 } 4539 4540 mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) { 4541 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw); 4542 if (err) 4543 goto peer_clean; 4544 } 4545 4546 mlx5_devcom_for_each_peer_end(devcom); 4547 4548 *__flow = flow; 4549 return 0; 4550 4551peer_clean: 4552 mlx5e_tc_del_fdb_peers_flow(flow); 4553 mlx5_devcom_for_each_peer_end(devcom); 4554clean_flow: 4555 mlx5e_tc_del_fdb_flow(priv, flow); 4556 return err; 4557} 4558 4559static int 4560mlx5e_add_nic_flow(struct mlx5e_priv *priv, 4561 struct flow_cls_offload *f, 4562 unsigned long flow_flags, 4563 struct net_device *filter_dev, 4564 struct mlx5e_tc_flow **__flow) 4565{ 4566 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 4567 struct netlink_ext_ack *extack = f->common.extack; 4568 struct mlx5e_tc_flow_parse_attr *parse_attr; 4569 struct mlx5e_tc_flow *flow; 4570 int attr_size, err; 4571 4572 if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { 4573 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common)) 4574 return -EOPNOTSUPP; 4575 } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) { 4576 return -EOPNOTSUPP; 4577 } 4578 4579 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); 4580 attr_size = sizeof(struct mlx5_nic_flow_attr); 4581 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, 4582 &parse_attr, &flow); 4583 if (err) 4584 goto out; 4585 4586 parse_attr->filter_dev = filter_dev; 4587 mlx5e_flow_attr_init(flow->attr, parse_attr, f); 4588 4589 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, 4590 f, filter_dev); 4591 if (err) 4592 goto err_free; 4593 4594 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f, 4595 &flow->attr->ct_attr, extack); 4596 if (err) 4597 goto err_free; 4598 4599 err = parse_tc_nic_actions(priv, &rule->action, flow, extack); 4600 if (err) 4601 goto err_free; 4602 4603 err = mlx5e_tc_add_nic_flow(priv, flow, extack); 4604 if (err) 4605 goto err_free; 4606 4607 flow_flag_set(flow, 
OFFLOADED); 4608 *__flow = flow; 4609 4610 return 0; 4611 4612err_free: 4613 flow_flag_set(flow, FAILED); 4614 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 4615 mlx5e_flow_put(priv, flow); 4616out: 4617 return err; 4618} 4619 4620static int 4621mlx5e_tc_add_flow(struct mlx5e_priv *priv, 4622 struct flow_cls_offload *f, 4623 unsigned long flags, 4624 struct net_device *filter_dev, 4625 struct mlx5e_tc_flow **flow) 4626{ 4627 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 4628 unsigned long flow_flags; 4629 int err; 4630 4631 get_flags(flags, &flow_flags); 4632 4633 if (!tc_can_offload_extack(priv->netdev, f->common.extack)) 4634 return -EOPNOTSUPP; 4635 4636 if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) 4637 err = mlx5e_add_fdb_flow(priv, f, flow_flags, 4638 filter_dev, flow); 4639 else 4640 err = mlx5e_add_nic_flow(priv, f, flow_flags, 4641 filter_dev, flow); 4642 4643 return err; 4644} 4645 4646static bool is_flow_rule_duplicate_allowed(struct net_device *dev, 4647 struct mlx5e_rep_priv *rpriv) 4648{ 4649 /* An offloaded flow rule may be duplicated on a non-uplink representor 4650 * sharing its tc block with other slaves of a lag device. rpriv can be NULL 4651 * if this function is called from NIC mode. 4652 */ 4653 return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK; 4654} 4655 4656/* As the ordering of IPsec and TC is not aligned between software and 4657 * hardware offload, either IPsec offload or TC offload, but not both, is allowed on a given interface. 4658 */ 4659static bool is_tc_ipsec_order_check_needed(struct net_device *filter, struct mlx5e_priv *priv) 4660{ 4661 if (!IS_ENABLED(CONFIG_MLX5_EN_IPSEC)) 4662 return false; 4663 4664 if (filter != priv->netdev) 4665 return false; 4666 4667 if (mlx5e_eswitch_vf_rep(priv->netdev)) 4668 return false; 4669 4670 return true; 4671} 4672 4673static int mlx5e_tc_block_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv) 4674{ 4675 struct mlx5_core_dev *mdev = priv->mdev; 4676 4677 if (!is_tc_ipsec_order_check_needed(filter, priv)) 4678 return 0; 4679 4680 if (mdev->num_block_tc) 4681 return -EBUSY; 4682 4683 mdev->num_block_ipsec++; 4684 4685 return 0; 4686} 4687 4688static void mlx5e_tc_unblock_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv) 4689{ 4690 if (!is_tc_ipsec_order_check_needed(filter, priv)) 4691 return; 4692 4693 priv->mdev->num_block_ipsec--; 4694} 4695 4696int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, 4697 struct flow_cls_offload *f, unsigned long flags) 4698{ 4699 struct netlink_ext_ack *extack = f->common.extack; 4700 struct rhashtable *tc_ht = get_tc_ht(priv, flags); 4701 struct mlx5e_rep_priv *rpriv = priv->ppriv; 4702 struct mlx5e_tc_flow *flow; 4703 int err = 0; 4704 4705 if (!mlx5_esw_hold(priv->mdev)) 4706 return -EBUSY; 4707 4708 err = mlx5e_tc_block_ipsec_offload(dev, priv); 4709 if (err) 4710 goto esw_release; 4711 4712 mlx5_esw_get(priv->mdev); 4713 4714 rcu_read_lock(); 4715 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); 4716 if (flow) { 4717 /* Same flow rule offloaded to non-uplink representor sharing tc block, 4718 * just return 0.
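* Slaves of a lag device share one tc block, so each slave's representor is offered the same rule; the duplicate add is expected and treated as success.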
4719 */ 4720 if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev) 4721 goto rcu_unlock; 4722 4723 NL_SET_ERR_MSG_MOD(extack, 4724 "flow cookie already exists, ignoring"); 4725 netdev_warn_once(priv->netdev, 4726 "flow cookie %lx already exists, ignoring\n", 4727 f->cookie); 4728 err = -EEXIST; 4729 goto rcu_unlock; 4730 } 4731rcu_unlock: 4732 rcu_read_unlock(); 4733 if (flow) 4734 goto out; 4735 4736 trace_mlx5e_configure_flower(f); 4737 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); 4738 if (err) 4739 goto out; 4740 4741 /* Flow rule offloaded to non-uplink representor sharing tc block, 4742 * set the flow's owner dev. 4743 */ 4744 if (is_flow_rule_duplicate_allowed(dev, rpriv)) 4745 flow->orig_dev = dev; 4746 4747 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params); 4748 if (err) 4749 goto err_free; 4750 4751 mlx5_esw_release(priv->mdev); 4752 return 0; 4753 4754err_free: 4755 mlx5e_flow_put(priv, flow); 4756out: 4757 mlx5e_tc_unblock_ipsec_offload(dev, priv); 4758 mlx5_esw_put(priv->mdev); 4759esw_release: 4760 mlx5_esw_release(priv->mdev); 4761 return err; 4762} 4763 4764static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags) 4765{ 4766 bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS)); 4767 bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS)); 4768 4769 return flow_flag_test(flow, INGRESS) == dir_ingress && 4770 flow_flag_test(flow, EGRESS) == dir_egress; 4771} 4772 4773int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, 4774 struct flow_cls_offload *f, unsigned long flags) 4775{ 4776 struct rhashtable *tc_ht = get_tc_ht(priv, flags); 4777 struct mlx5e_tc_flow *flow; 4778 int err; 4779 4780 rcu_read_lock(); 4781 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); 4782 if (!flow || !same_flow_direction(flow, flags)) { 4783 err = -EINVAL; 4784 goto errout; 4785 } 4786 4787 /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag 4788 * set. 
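* flow_flag_test_and_set() makes the check-and-mark atomic, so concurrent delete requests cannot both proceed to remove the flow.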
4789 */ 4790 if (flow_flag_test_and_set(flow, DELETED)) { 4791 err = -EINVAL; 4792 goto errout; 4793 } 4794 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); 4795 rcu_read_unlock(); 4796 4797 trace_mlx5e_delete_flower(f); 4798 mlx5e_flow_put(priv, flow); 4799 4800 mlx5e_tc_unblock_ipsec_offload(dev, priv); 4801 mlx5_esw_put(priv->mdev); 4802 return 0; 4803 4804errout: 4805 rcu_read_unlock(); 4806 return err; 4807} 4808 4809int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv, 4810 struct flow_offload_action *fl_act) 4811{ 4812 return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act); 4813} 4814 4815int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, 4816 struct flow_cls_offload *f, unsigned long flags) 4817{ 4818 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 4819 struct rhashtable *tc_ht = get_tc_ht(priv, flags); 4820 struct mlx5e_tc_flow *flow; 4821 struct mlx5_fc *counter; 4822 u64 lastuse = 0; 4823 u64 packets = 0; 4824 u64 bytes = 0; 4825 int err = 0; 4826 4827 rcu_read_lock(); 4828 flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie, 4829 tc_ht_params)); 4830 rcu_read_unlock(); 4831 if (IS_ERR(flow)) 4832 return PTR_ERR(flow); 4833 4834 if (!same_flow_direction(flow, flags)) { 4835 err = -EINVAL; 4836 goto errout; 4837 } 4838 4839 if (mlx5e_is_offloaded_flow(flow)) { 4840 if (flow_flag_test(flow, USE_ACT_STATS)) { 4841 f->use_act_stats = true; 4842 } else { 4843 counter = mlx5e_tc_get_counter(flow); 4844 if (!counter) 4845 goto errout; 4846 4847 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); 4848 } 4849 } 4850 4851 /* Under multipath it's possible for one rule to be currently 4852 * un-offloaded while the other rule is offloaded. 4853 */ 4854 if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom)) 4855 goto out; 4856 4857 if (flow_flag_test(flow, DUP)) { 4858 struct mlx5e_tc_flow *peer_flow; 4859 4860 list_for_each_entry(peer_flow, &flow->peer_flows, peer_flows) { 4861 u64 packets2; 4862 u64 lastuse2; 4863 u64 bytes2; 4864 4865 if (!flow_flag_test(peer_flow, OFFLOADED)) 4866 continue; 4867 if (flow_flag_test(flow, USE_ACT_STATS)) { 4868 f->use_act_stats = true; 4869 break; 4870 } 4871 4872 counter = mlx5e_tc_get_counter(peer_flow); 4873 if (!counter) 4874 goto no_peer_counter; 4875 mlx5_fc_query_cached(counter, &bytes2, &packets2, 4876 &lastuse2); 4877 4878 bytes += bytes2; 4879 packets += packets2; 4880 lastuse = max_t(u64, lastuse, lastuse2); 4881 } 4882 } 4883 4884no_peer_counter: 4885 if (esw) 4886 mlx5_devcom_for_each_peer_end(esw->devcom); 4887out: 4888 flow_stats_update(&f->stats, bytes, packets, 0, lastuse, 4889 FLOW_ACTION_HW_STATS_DELAYED); 4890 trace_mlx5e_stats_flower(f); 4891errout: 4892 mlx5e_flow_put(priv, flow); 4893 return err; 4894} 4895 4896static int apply_police_params(struct mlx5e_priv *priv, u64 rate, 4897 struct netlink_ext_ack *extack) 4898{ 4899 struct mlx5e_rep_priv *rpriv = priv->ppriv; 4900 struct mlx5_eswitch *esw; 4901 u32 rate_mbps = 0; 4902 u16 vport_num; 4903 int err; 4904 4905 vport_num = rpriv->rep->vport; 4906 if (vport_num >= MLX5_VPORT_ECPF) { 4907 NL_SET_ERR_MSG_MOD(extack, 4908 "Ingress rate limit is supported only for Eswitch ports connected to VFs"); 4909 return -EOPNOTSUPP; 4910 } 4911 4912 esw = priv->mdev->priv.eswitch; 4913 /* rate is given in bytes/sec. 4914 * First convert to bits/sec and then round to the nearest mbit/sec 4915 * (mbit meaning million bits). 4916 * Moreover, if rate is non-zero we configure a minimum of 4917 * 1 mbit/sec.
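* e.g. rate = 1000000 bytes/sec -> 8000000 bits/sec; (8000000 + 500000) / 1000000 = 8, so rate_mbps = 8. A small non-zero rate such as 10000 bytes/sec rounds down to 0 and is clamped to 1 mbit/sec.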
4918 */ 4919 if (rate) { 4920 rate = (rate * BITS_PER_BYTE) + 500000; 4921 do_div(rate, 1000000); 4922 rate_mbps = max_t(u32, rate, 1); 4923 } 4924 4925 err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps); 4926 if (err) 4927 NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); 4928 4929 return err; 4930} 4931 4932static int 4933tc_matchall_police_validate(const struct flow_action *action, 4934 const struct flow_action_entry *act, 4935 struct netlink_ext_ack *extack) 4936{ 4937 if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) { 4938 NL_SET_ERR_MSG_MOD(extack, 4939 "Offload not supported when conform action is not continue"); 4940 return -EOPNOTSUPP; 4941 } 4942 4943 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 4944 NL_SET_ERR_MSG_MOD(extack, 4945 "Offload not supported when exceed action is not drop"); 4946 return -EOPNOTSUPP; 4947 } 4948 4949 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 4950 !flow_action_is_last_entry(action, act)) { 4951 NL_SET_ERR_MSG_MOD(extack, 4952 "Offload not supported when conform action is ok, but action is not last"); 4953 return -EOPNOTSUPP; 4954 } 4955 4956 if (act->police.peakrate_bytes_ps || 4957 act->police.avrate || act->police.overhead) { 4958 NL_SET_ERR_MSG_MOD(extack, 4959 "Offload not supported when peakrate/avrate/overhead is configured"); 4960 return -EOPNOTSUPP; 4961 } 4962 4963 return 0; 4964} 4965 4966static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, 4967 struct flow_action *flow_action, 4968 struct netlink_ext_ack *extack) 4969{ 4970 struct mlx5e_rep_priv *rpriv = priv->ppriv; 4971 const struct flow_action_entry *act; 4972 int err; 4973 int i; 4974 4975 if (!flow_action_has_entries(flow_action)) { 4976 NL_SET_ERR_MSG_MOD(extack, "matchall called with no action"); 4977 return -EINVAL; 4978 } 4979 4980 if (!flow_offload_has_one_action(flow_action)) { 4981 NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action"); 4982 return -EOPNOTSUPP; 4983 } 4984 4985 if (!flow_action_basic_hw_stats_check(flow_action, extack)) { 4986 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); 4987 return -EOPNOTSUPP; 4988 } 4989 4990 flow_action_for_each(i, act, flow_action) { 4991 switch (act->id) { 4992 case FLOW_ACTION_POLICE: 4993 err = tc_matchall_police_validate(flow_action, act, extack); 4994 if (err) 4995 return err; 4996 4997 err = apply_police_params(priv, act->police.rate_bytes_ps, extack); 4998 if (err) 4999 return err; 5000 5001 mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, 5002 &priv->stats.rep_stats); 5003 break; 5004 default: 5005 NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); 5006 return -EOPNOTSUPP; 5007 } 5008 } 5009 5010 return 0; 5011} 5012 5013int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, 5014 struct tc_cls_matchall_offload *ma) 5015{ 5016 struct netlink_ext_ack *extack = ma->common.extack; 5017 5018 if (ma->common.prio != 1) { 5019 NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); 5020 return -EINVAL; 5021 } 5022 5023 return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack); 5024} 5025 5026int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, 5027 struct tc_cls_matchall_offload *ma) 5028{ 5029 struct netlink_ext_ack *extack = ma->common.extack; 5030 5031 return apply_police_params(priv, 0, extack); 5032} 5033 5034static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, 5035 struct mlx5e_priv *peer_priv) 5036{ 5037 struct mlx5e_tc_table *tc =
mlx5e_fs_get_tc(priv->fs); 5038 struct mlx5_core_dev *peer_mdev = peer_priv->mdev; 5039 struct mlx5e_hairpin_entry *hpe, *tmp; 5040 LIST_HEAD(init_wait_list); 5041 u16 peer_vhca_id; 5042 int bkt; 5043 5044 if (!mlx5e_same_hw_devs(priv, peer_priv)) 5045 return; 5046 5047 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); 5048 5049 mutex_lock(&tc->hairpin_tbl_lock); 5050 hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist) 5051 if (refcount_inc_not_zero(&hpe->refcnt)) 5052 list_add(&hpe->dead_peer_wait_list, &init_wait_list); 5053 mutex_unlock(&tc->hairpin_tbl_lock); 5054 5055 list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) { 5056 wait_for_completion(&hpe->res_ready); 5057 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id) 5058 mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair); 5059 5060 mlx5e_hairpin_put(priv, hpe); 5061 } 5062} 5063 5064static int mlx5e_tc_netdev_event(struct notifier_block *this, 5065 unsigned long event, void *ptr) 5066{ 5067 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 5068 struct mlx5e_priv *peer_priv; 5069 struct mlx5e_tc_table *tc; 5070 struct mlx5e_priv *priv; 5071 5072 if (ndev->netdev_ops != &mlx5e_netdev_ops || 5073 event != NETDEV_UNREGISTER || 5074 ndev->reg_state == NETREG_REGISTERED) 5075 return NOTIFY_DONE; 5076 5077 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb); 5078 priv = tc->priv; 5079 peer_priv = netdev_priv(ndev); 5080 if (priv == peer_priv || 5081 !(priv->netdev->features & NETIF_F_HW_TC)) 5082 return NOTIFY_DONE; 5083 5084 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv); 5085 5086 return NOTIFY_DONE; 5087} 5088 5089static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv) 5090{ 5091 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); 5092 struct mlx5_flow_table **ft = &tc->miss_t; 5093 struct mlx5_flow_table_attr ft_attr = {}; 5094 struct mlx5_flow_namespace *ns; 5095 int err = 0; 5096 5097 ft_attr.max_fte = 1; 5098 ft_attr.autogroup.max_num_groups = 1; 5099 ft_attr.level = MLX5E_TC_MISS_LEVEL; 5100 ft_attr.prio = 0; 5101 ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); 5102 5103 *ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); 5104 if (IS_ERR(*ft)) { 5105 err = PTR_ERR(*ft); 5106 netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err); 5107 } 5108 5109 return err; 5110} 5111 5112static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv) 5113{ 5114 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); 5115 5116 mlx5_destroy_flow_table(tc->miss_t); 5117} 5118 5119int mlx5e_tc_nic_init(struct mlx5e_priv *priv) 5120{ 5121 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); 5122 struct mlx5_core_dev *dev = priv->mdev; 5123 struct mapping_ctx *chains_mapping; 5124 struct mlx5_chains_attr attr = {}; 5125 u64 mapping_id; 5126 int err; 5127 5128 mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); 5129 mutex_init(&tc->t_lock); 5130 mutex_init(&tc->hairpin_tbl_lock); 5131 hash_init(tc->hairpin_tbl); 5132 tc->priv = priv; 5133 5134 err = rhashtable_init(&tc->ht, &tc_ht_params); 5135 if (err) 5136 return err; 5137 5138 lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); 5139 lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); 5140 5141 mapping_id = mlx5_query_nic_system_image_guid(dev); 5142 5143 chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, 5144 sizeof(struct mlx5_mapped_obj), 5145 MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); 5146 5147 if (IS_ERR(chains_mapping)) { 5148 err = 
PTR_ERR(chains_mapping); 5149 goto err_mapping; 5150 } 5151 tc->mapping = chains_mapping; 5152 5153 err = mlx5e_tc_nic_create_miss_table(priv); 5154 if (err) 5155 goto err_chains; 5156 5157 if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) 5158 attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED | 5159 MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; 5160 attr.ns = MLX5_FLOW_NAMESPACE_KERNEL; 5161 attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS; 5162 attr.default_ft = tc->miss_t; 5163 attr.mapping = chains_mapping; 5164 attr.fs_base_prio = MLX5E_TC_PRIO; 5165 5166 tc->chains = mlx5_chains_create(dev, &attr); 5167 if (IS_ERR(tc->chains)) { 5168 err = PTR_ERR(tc->chains); 5169 goto err_miss; 5170 } 5171 5172 mlx5_chains_print_info(tc->chains); 5173 5174 tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL); 5175 tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr, 5176 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); 5177 5178 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; 5179 err = register_netdevice_notifier_dev_net(priv->netdev, 5180 &tc->netdevice_nb, 5181 &tc->netdevice_nn); 5182 if (err) { 5183 tc->netdevice_nb.notifier_call = NULL; 5184 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n"); 5185 goto err_reg; 5186 } 5187 5188 mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs)); 5189 5190 tc->action_stats_handle = mlx5e_tc_act_stats_create(); 5191 if (IS_ERR(tc->action_stats_handle)) { 5192 err = PTR_ERR(tc->action_stats_handle); 5193 goto err_act_stats; 5194 } 5195 5196 return 0; 5197 5198err_act_stats: 5199 unregister_netdevice_notifier_dev_net(priv->netdev, 5200 &tc->netdevice_nb, 5201 &tc->netdevice_nn); 5202err_reg: 5203 mlx5_tc_ct_clean(tc->ct); 5204 mlx5e_tc_post_act_destroy(tc->post_act); 5205 mlx5_chains_destroy(tc->chains); 5206err_miss: 5207 mlx5e_tc_nic_destroy_miss_table(priv); 5208err_chains: 5209 mapping_destroy(chains_mapping); 5210err_mapping: 5211 rhashtable_destroy(&tc->ht); 5212 return err; 5213} 5214 5215static void _mlx5e_tc_del_flow(void *ptr, void *arg) 5216{ 5217 struct mlx5e_tc_flow *flow = ptr; 5218 struct mlx5e_priv *priv = flow->priv; 5219 5220 mlx5e_tc_del_flow(priv, flow); 5221 kfree(flow); 5222} 5223 5224void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) 5225{ 5226 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); 5227 5228 debugfs_remove_recursive(tc->dfs_root); 5229 5230 if (tc->netdevice_nb.notifier_call) 5231 unregister_netdevice_notifier_dev_net(priv->netdev, 5232 &tc->netdevice_nb, 5233 &tc->netdevice_nn); 5234 5235 mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr); 5236 mutex_destroy(&tc->hairpin_tbl_lock); 5237 5238 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL); 5239 5240 if (!IS_ERR_OR_NULL(tc->t)) { 5241 mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL); 5242 tc->t = NULL; 5243 } 5244 mutex_destroy(&tc->t_lock); 5245 5246 mlx5_tc_ct_clean(tc->ct); 5247 mlx5e_tc_post_act_destroy(tc->post_act); 5248 mapping_destroy(tc->mapping); 5249 mlx5_chains_destroy(tc->chains); 5250 mlx5e_tc_nic_destroy_miss_table(priv); 5251 mlx5e_tc_act_stats_free(tc->action_stats_handle); 5252} 5253 5254int mlx5e_tc_ht_init(struct rhashtable *tc_ht) 5255{ 5256 int err; 5257 5258 err = rhashtable_init(tc_ht, &tc_ht_params); 5259 if (err) 5260 return err; 5261 5262 lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); 5263 lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); 5264 5265 return 0; 5266} 5267 5268void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) 5269{ 5270 
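/* Delete any flows still in the table via the _mlx5e_tc_del_flow callback. */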
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); 5271} 5272 5273int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) 5274{ 5275 const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts); 5276 struct netdev_phys_item_id ppid; 5277 struct mlx5e_rep_priv *rpriv; 5278 struct mapping_ctx *mapping; 5279 struct mlx5_eswitch *esw; 5280 struct mlx5e_priv *priv; 5281 u64 mapping_id, key; 5282 int err = 0; 5283 5284 rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); 5285 priv = netdev_priv(rpriv->netdev); 5286 esw = priv->mdev->priv.eswitch; 5287 5288 uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw), 5289 MLX5_FLOW_NAMESPACE_FDB); 5290 uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev), 5291 esw_chains(esw), 5292 &esw->offloads.mod_hdr, 5293 MLX5_FLOW_NAMESPACE_FDB, 5294 uplink_priv->post_act); 5295 5296 uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev)); 5297 5298 uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); 5299 5300 mapping_id = mlx5_query_nic_system_image_guid(esw->dev); 5301 5302 mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL, 5303 sizeof(struct tunnel_match_key), 5304 TUNNEL_INFO_BITS_MASK, true); 5305 5306 if (IS_ERR(mapping)) { 5307 err = PTR_ERR(mapping); 5308 goto err_tun_mapping; 5309 } 5310 uplink_priv->tunnel_mapping = mapping; 5311 5312 /* The last two values are reserved for the stack devices' slow path table 5313 * mark and the bridge ingress push mark. 5314 */ 5315 mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, 5316 sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true); 5317 if (IS_ERR(mapping)) { 5318 err = PTR_ERR(mapping); 5319 goto err_enc_opts_mapping; 5320 } 5321 uplink_priv->tunnel_enc_opts_mapping = mapping; 5322 5323 uplink_priv->encap = mlx5e_tc_tun_init(priv); 5324 if (IS_ERR(uplink_priv->encap)) { 5325 err = PTR_ERR(uplink_priv->encap); 5326 goto err_register_fib_notifier; 5327 } 5328 5329 uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create(); 5330 if (IS_ERR(uplink_priv->action_stats_handle)) { 5331 err = PTR_ERR(uplink_priv->action_stats_handle); 5332 goto err_action_counter; 5333 } 5334 5335 err = dev_get_port_parent_id(priv->netdev, &ppid, false); 5336 if (!err) { 5337 memcpy(&key, &ppid.id, sizeof(key)); 5338 mlx5_esw_offloads_devcom_init(esw, key); 5339 } 5340 5341 return 0; 5342 5343err_action_counter: 5344 mlx5e_tc_tun_cleanup(uplink_priv->encap); 5345err_register_fib_notifier: 5346 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 5347err_enc_opts_mapping: 5348 mapping_destroy(uplink_priv->tunnel_mapping); 5349err_tun_mapping: 5350 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); 5351 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); 5352 mlx5_tc_ct_clean(uplink_priv->ct_priv); 5353 netdev_warn(priv->netdev, 5354 "Failed to initialize tc (eswitch), err: %d\n", err); 5355 mlx5e_tc_post_act_destroy(uplink_priv->post_act); 5356 return err; 5357} 5358 5359void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) 5360{ 5361 struct mlx5e_rep_priv *rpriv; 5362 struct mlx5_eswitch *esw; 5363 struct mlx5e_priv *priv; 5364 5365 rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); 5366 priv = netdev_priv(rpriv->netdev); 5367 esw = priv->mdev->priv.eswitch; 5368 5369 mlx5_esw_offloads_devcom_cleanup(esw); 5370 5371 mlx5e_tc_tun_cleanup(uplink_priv->encap); 5372 5373 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); 5374 mapping_destroy(uplink_priv->tunnel_mapping);
5375 5376 mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); 5377 mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); 5378 mlx5_tc_ct_clean(uplink_priv->ct_priv); 5379 mlx5e_flow_meters_cleanup(uplink_priv->flow_meters); 5380 mlx5e_tc_post_act_destroy(uplink_priv->post_act); 5381 mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle); 5382} 5383 5384int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) 5385{ 5386 struct rhashtable *tc_ht = get_tc_ht(priv, flags); 5387 5388 return atomic_read(&tc_ht->nelems); 5389} 5390 5391void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) 5392{ 5393 struct mlx5e_tc_flow *flow, *tmp; 5394 int i; 5395 5396 for (i = 0; i < MLX5_MAX_PORTS; i++) { 5397 if (i == mlx5_get_dev_index(esw->dev)) 5398 continue; 5399 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i]) 5400 mlx5e_tc_del_fdb_peers_flow(flow); 5401 } 5402} 5403 5404void mlx5e_tc_reoffload_flows_work(struct work_struct *work) 5405{ 5406 struct mlx5_rep_uplink_priv *rpriv = 5407 container_of(work, struct mlx5_rep_uplink_priv, 5408 reoffload_flows_work); 5409 struct mlx5e_tc_flow *flow, *tmp; 5410 5411 mutex_lock(&rpriv->unready_flows_lock); 5412 list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { 5413 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) 5414 unready_flow_del(flow); 5415 } 5416 mutex_unlock(&rpriv->unready_flows_lock); 5417} 5418 5419static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, 5420 struct flow_cls_offload *cls_flower, 5421 unsigned long flags) 5422{ 5423 switch (cls_flower->command) { 5424 case FLOW_CLS_REPLACE: 5425 return mlx5e_configure_flower(priv->netdev, priv, cls_flower, 5426 flags); 5427 case FLOW_CLS_DESTROY: 5428 return mlx5e_delete_flower(priv->netdev, priv, cls_flower, 5429 flags); 5430 case FLOW_CLS_STATS: 5431 return mlx5e_stats_flower(priv->netdev, priv, cls_flower, 5432 flags); 5433 default: 5434 return -EOPNOTSUPP; 5435 } 5436} 5437 5438int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 5439 void *cb_priv) 5440{ 5441 unsigned long flags = MLX5_TC_FLAG(INGRESS); 5442 struct mlx5e_priv *priv = cb_priv; 5443 5444 if (!priv->netdev || !netif_device_present(priv->netdev)) 5445 return -EOPNOTSUPP; 5446 5447 if (mlx5e_is_uplink_rep(priv)) 5448 flags |= MLX5_TC_FLAG(ESW_OFFLOAD); 5449 else 5450 flags |= MLX5_TC_FLAG(NIC_OFFLOAD); 5451 5452 switch (type) { 5453 case TC_SETUP_CLSFLOWER: 5454 return mlx5e_setup_tc_cls_flower(priv, type_data, flags); 5455 default: 5456 return -EOPNOTSUPP; 5457 } 5458} 5459 5460static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, 5461 struct mlx5e_tc_update_priv *tc_priv, 5462 u32 tunnel_id) 5463{ 5464 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 5465 struct tunnel_match_enc_opts enc_opts = {}; 5466 struct mlx5_rep_uplink_priv *uplink_priv; 5467 struct mlx5e_rep_priv *uplink_rpriv; 5468 struct metadata_dst *tun_dst; 5469 struct tunnel_match_key key; 5470 u32 tun_id, enc_opts_id; 5471 struct net_device *dev; 5472 int err; 5473 5474 enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK; 5475 tun_id = tunnel_id >> ENC_OPTS_BITS; 5476 5477 if (!tun_id) 5478 return true; 5479 5480 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 5481 uplink_priv = &uplink_rpriv->uplink_priv; 5482 5483 err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key); 5484 if (err) { 5485 netdev_dbg(priv->netdev, 5486 "Couldn't find tunnel for tun_id: %d, err: %d\n", 5487 tun_id, err); 5488 return false; 5489 } 5490 5491 if 
(enc_opts_id) { 5492 err = mapping_find(uplink_priv->tunnel_enc_opts_mapping, 5493 enc_opts_id, &enc_opts); 5494 if (err) { 5495 netdev_dbg(priv->netdev, 5496 "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n", 5497 enc_opts_id, err); 5498 return false; 5499 } 5500 } 5501 5502 switch (key.enc_control.addr_type) { 5503 case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 5504 tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst, 5505 key.enc_ip.tos, key.enc_ip.ttl, 5506 key.enc_tp.dst, TUNNEL_KEY, 5507 key32_to_tunnel_id(key.enc_key_id.keyid), 5508 enc_opts.key.len); 5509 break; 5510 case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 5511 tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst, 5512 key.enc_ip.tos, key.enc_ip.ttl, 5513 key.enc_tp.dst, 0, TUNNEL_KEY, 5514 key32_to_tunnel_id(key.enc_key_id.keyid), 5515 enc_opts.key.len); 5516 break; 5517 default: 5518 netdev_dbg(priv->netdev, 5519 "Couldn't restore tunnel, unsupported addr_type: %d\n", 5520 key.enc_control.addr_type); 5521 return false; 5522 } 5523 5524 if (!tun_dst) { 5525 netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n"); 5526 return false; 5527 } 5528 5529 tun_dst->u.tun_info.key.tp_src = key.enc_tp.src; 5530 5531 if (enc_opts.key.len) 5532 ip_tunnel_info_opts_set(&tun_dst->u.tun_info, 5533 enc_opts.key.data, 5534 enc_opts.key.len, 5535 enc_opts.key.dst_opt_type); 5536 5537 skb_dst_set(skb, (struct dst_entry *)tun_dst); 5538 dev = dev_get_by_index(&init_net, key.filter_ifindex); 5539 if (!dev) { 5540 netdev_dbg(priv->netdev, 5541 "Couldn't find tunnel device with ifindex: %d\n", 5542 key.filter_ifindex); 5543 return false; 5544 } 5545 5546 /* Set fwd_dev so we do dev_put() after datapath */ 5547 tc_priv->fwd_dev = dev; 5548 5549 skb->dev = dev; 5550 5551 return true; 5552} 5553 5554static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv, 5555 struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id, 5556 u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv) 5557{ 5558 struct mlx5e_priv *priv = netdev_priv(skb->dev); 5559 struct tc_skb_ext *tc_skb_ext; 5560 u64 act_miss_cookie; 5561 u32 chain; 5562 5563 chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0; 5564 act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ? 
5565 mapped_obj->act_miss_cookie : 0; 5566 if (chain || act_miss_cookie) { 5567 if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id)) 5568 return false; 5569 5570 tc_skb_ext = tc_skb_ext_alloc(skb); 5571 if (!tc_skb_ext) { 5572 WARN_ON(1); 5573 return false; 5574 } 5575 5576 if (act_miss_cookie) { 5577 tc_skb_ext->act_miss_cookie = act_miss_cookie; 5578 tc_skb_ext->act_miss = 1; 5579 } else { 5580 tc_skb_ext->chain = chain; 5581 } 5582 } 5583 5584 if (tc_priv) 5585 return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id); 5586 5587 return true; 5588} 5589 5590static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb, 5591 struct mlx5_mapped_obj *mapped_obj, 5592 struct mlx5e_tc_update_priv *tc_priv) 5593{ 5594 if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) { 5595 netdev_dbg(priv->netdev, 5596 "Failed to restore tunnel info for sampled packet\n"); 5597 return; 5598 } 5599 mlx5e_tc_sample_skb(skb, mapped_obj); 5600} 5601 5602static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb, 5603 struct mlx5_mapped_obj *mapped_obj, 5604 struct mlx5e_tc_update_priv *tc_priv, 5605 u32 tunnel_id) 5606{ 5607 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 5608 struct mlx5_rep_uplink_priv *uplink_priv; 5609 struct mlx5e_rep_priv *uplink_rpriv; 5610 bool forward_tx = false; 5611 5612 /* Tunnel restore takes precedence over int port restore */ 5613 if (tunnel_id) 5614 return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id); 5615 5616 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); 5617 uplink_priv = &uplink_rpriv->uplink_priv; 5618 5619 if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb, 5620 mapped_obj->int_port_metadata, &forward_tx)) { 5621 /* Set fwd_dev for future dev_put */ 5622 tc_priv->fwd_dev = skb->dev; 5623 tc_priv->forward_tx = forward_tx; 5624 5625 return true; 5626 } 5627 5628 return false; 5629} 5630 5631bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, 5632 struct mapping_ctx *mapping_ctx, u32 mapped_obj_id, 5633 struct mlx5_tc_ct_priv *ct_priv, 5634 u32 zone_restore_id, u32 tunnel_id, 5635 struct mlx5e_tc_update_priv *tc_priv) 5636{ 5637 struct mlx5e_priv *priv = netdev_priv(skb->dev); 5638 struct mlx5_mapped_obj mapped_obj; 5639 int err; 5640 5641 err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj); 5642 if (err) { 5643 netdev_dbg(skb->dev, 5644 "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n", 5645 mapped_obj_id, err); 5646 return false; 5647 } 5648 5649 switch (mapped_obj.type) { 5650 case MLX5_MAPPED_OBJ_CHAIN: 5651 case MLX5_MAPPED_OBJ_ACT_MISS: 5652 return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id, 5653 tunnel_id, tc_priv); 5654 case MLX5_MAPPED_OBJ_SAMPLE: 5655 mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv); 5656 tc_priv->skb_done = true; 5657 return true; 5658 case MLX5_MAPPED_OBJ_INT_PORT_METADATA: 5659 return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id); 5660 default: 5661 netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); 5662 return false; 5663 } 5664 5665 return false; 5666} 5667 5668bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb) 5669{ 5670 struct mlx5e_priv *priv = netdev_priv(skb->dev); 5671 u32 mapped_obj_id, reg_b, zone_restore_id; 5672 struct mlx5_tc_ct_priv *ct_priv; 5673 struct mapping_ctx *mapping_ctx; 5674 struct mlx5e_tc_table *tc; 5675 5676 reg_b = 
be32_to_cpu(cqe->ft_metadata); 5677 tc = mlx5e_fs_get_tc(priv->fs); 5678 mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; 5679 zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) & 5680 ESW_ZONE_ID_MASK; 5681 ct_priv = tc->ct; 5682 mapping_ctx = tc->mapping; 5683 5684 return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id, 5685 0, NULL); 5686} 5687 5688static struct mapping_ctx * 5689mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv) 5690{ 5691 struct mlx5e_tc_table *tc; 5692 struct mlx5_eswitch *esw; 5693 struct mapping_ctx *ctx; 5694 5695 if (is_mdev_switchdev_mode(priv->mdev)) { 5696 esw = priv->mdev->priv.eswitch; 5697 ctx = esw->offloads.reg_c0_obj_pool; 5698 } else { 5699 tc = mlx5e_fs_get_tc(priv->fs); 5700 ctx = tc->mapping; 5701 } 5702 5703 return ctx; 5704} 5705 5706int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, 5707 u64 act_miss_cookie, u32 *act_miss_mapping) 5708{ 5709 struct mlx5_mapped_obj mapped_obj = {}; 5710 struct mlx5_eswitch *esw; 5711 struct mapping_ctx *ctx; 5712 int err; 5713 5714 ctx = mlx5e_get_priv_obj_mapping(priv); 5715 mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS; 5716 mapped_obj.act_miss_cookie = act_miss_cookie; 5717 err = mapping_add(ctx, &mapped_obj, act_miss_mapping); 5718 if (err) 5719 return err; 5720 5721 if (!is_mdev_switchdev_mode(priv->mdev)) 5722 return 0; 5723 5724 esw = priv->mdev->priv.eswitch; 5725 attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping); 5726 if (IS_ERR(attr->act_id_restore_rule)) { 5727 err = PTR_ERR(attr->act_id_restore_rule); 5728 goto err_rule; 5729 } 5730 5731 return 0; 5732 5733err_rule: 5734 mapping_remove(ctx, *act_miss_mapping); 5735 return err; 5736} 5737 5738void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, 5739 u32 act_miss_mapping) 5740{ 5741 struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv); 5742 5743 if (is_mdev_switchdev_mode(priv->mdev)) 5744 mlx5_del_flow_rules(attr->act_id_restore_rule); 5745 mapping_remove(ctx, act_miss_mapping); 5746} 5747