1/*- 2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>

/* Shorthand for setting a field in a create_flow_group_in command layout. */
#define	MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

/* Match scope requested when installing an L2 steering rule. */
enum {
	MLX5E_FULLMATCH = 0,	/* match one specific destination MAC */
	MLX5E_ALLMULTI = 1,	/* match all multicast destinations */
	MLX5E_PROMISC = 2,	/* match everything (promiscuous) */
};

/* Classification of an Ethernet destination address. */
enum {
	MLX5E_UC = 0,		/* unicast */
	MLX5E_MC_IPV4 = 1,	/* IPv4 multicast (01:00:5e, bit 0x80 of byte 3 clear) */
	MLX5E_MC_IPV6 = 2,	/* IPv6 multicast (33:33 prefix) */
	MLX5E_MC_OTHER = 3,	/* any other multicast */
};

/* Pending operation recorded on a hash node, executed later in batch. */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

/*
 * One tracked L2 address: hash linkage, the pending action, the MPFS
 * table index for the address, and the per-traffic-type flow rule state.
 */
struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	u32	mpfs_index;	/* -1U means no MPFS entry was allocated */
	struct mlx5e_eth_addr_info ai;
};

/* Hash an Ethernet address by its least significant octet. */
static inline int
mlx5e_hash_eth_addr(const u8 * addr)
{
	return (addr[5]);
}

/*
 * Insert "hn_new" into the address hash unless the same address is
 * already present.  On a duplicate, a pending delete on the existing
 * node is cancelled and "hn_new" is freed.  Returns true if the new
 * node was inserted, false if it was a duplicate.
 */
static bool
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    struct mlx5e_eth_addr_hash_node *hn_new)
{
	struct mlx5e_eth_addr_hash_node *hn;
	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
			/* Address already tracked: keep it alive. */
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			free(hn_new, M_MLX5EN);
			return (false);
		}
	}
	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
	return (true);
}

/* Unlink a hash node from its list and release its memory. */
static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

/*
 * Delete every hardware flow rule recorded in "ai->tt_vec", then clear
 * the vector so a second call becomes a no-op.
 */
static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);

	/* ensure the rules are not freed again */
	ai->tt_vec = 0;
}

/* Classify an Ethernet address into one of the MLX5E_UC / MLX5E_MC_* types. */
static int
mlx5e_get_eth_addr_type(const u8 * addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

/*
 * Compute the bit vector of traffic types (MLX5E_TT_*) that need a flow
 * rule for the given address / match-type combination.
 */
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			/* Unicast: steer every traffic type. */
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

/*
 * Install the set of flow rules implied by "type" and the address in
 * "ai" into the main flow table, steering each traffic type to its TIR.
 * "mc"/"mv" are caller-provided zeroed fte_match_param buffers used as
 * the match criteria and match value.  On failure all rules installed so
 * far are removed.  Returns 0 on success or a negative errno.
 */
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
	    outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
	    outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		/* Match the full 48-bit destination MAC. */
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* Match only the multicast bit of the destination MAC. */
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* No L2 criteria: match everything. */
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	/* Catch-all rule first; later rules refine by ethertype/protocol. */
	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	/* From here on the ethertype is part of the match criteria. */
	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	/* Add the IP protocol to the criteria; first the UDP rules. */
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	/* TCP rules reuse the same criteria with a new protocol value. */
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	/* IPsec AH rules. */
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	/* IPsec ESP rules. */
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	/*
	 * NOTE(review): the failing rule may be NULL rather than an
	 * ERR_PTR (IS_ERR_OR_NULL above), in which case PTR_ERR() yields
	 * 0 and the caller would see success — confirm against the
	 * mlx5_add_flow_rule() contract.
	 */
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}

/*
 * Allocate scratch match buffers and install the flow rules for "ai".
 * Returns 0 on success or a negative errno.
 */
static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	/* kvfree() accepts NULL, so partial allocation is handled. */
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

/*
 * Push the set of active VLAN IDs to the NIC vport context, truncating
 * to the device limit (log_max_vlan_list) if necessary.
 */
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_en_err(ifp,
		    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
		    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
		    err);

	kfree(vlans);
	return err;
}

/* Kinds of VLAN steering rules installed in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

/*
 * Build the match criteria for "rule_type" (and "vid" when matching a
 * specific VLAN) and install a rule in the VLAN table forwarding to the
 * main flow table.  Returns 0 on success or a negative errno.
 */
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	/* All VLAN rules forward into the main flow table. */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* Criteria bit set, value left 0: matches untagged frames. */
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default:			/* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		/* Keep the vport VLAN list in sync with the new VID. */
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	    MLX5_FS_ETH_FLOW_TAG,
	    &dest);

	/*
	 * NOTE(review): this path tests IS_ERR() while the eth-addr path
	 * uses IS_ERR_OR_NULL() — confirm mlx5_add_flow_rule() cannot
	 * return NULL here.
	 */
	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

/*
 * Allocate scratch match buffers and install one VLAN rule.
 * Returns 0 on success or a negative errno.
 */
static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
	    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

/*
 * Remove the VLAN rule of the given type (and VID, when applicable),
 * clearing the cached pointer so a repeat delete is a no-op.
 */
static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		/* Keep the vport VLAN list in sync with the removed VID. */
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

/* Remove the "accept any C-tag / any S-tag" rule pair. */
static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

/*
 * Install the "accept any C-tag / any S-tag" rule pair.  On failure of
 * the second rule the first is rolled back.  Returns 0 or an errno.
 */
static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

/*
 * Re-enable VLAN filtering by removing the any-VID rules, unless the
 * interface is promiscuous (promisc keeps them installed).
 */
void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

/*
 * Disable VLAN filtering by installing the any-VID rules, unless the
 * interface is promiscuous (promisc already installed them).
 */
void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

/*
 * Ifnet callback: a VLAN was configured; mark it active and, if not
 * already present and the device is open, install its steering rule.
 */
void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if
(!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

/*
 * Ifnet callback: a VLAN was unconfigured; clear its active bit and,
 * if the device is open, remove its steering rule.
 */
void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

/*
 * Install rules for every active VLAN (VID 0 is always forced active),
 * the untagged rule, and — when filtering is disabled — the any-VID
 * rules.  On any failure all VLAN rules are torn down again.
 */
int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
		    i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

/* Remove every VLAN rule installed by mlx5e_add_all_vlan_rules(). */
void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

/* Iterate (safely) over every node in every bucket of an address hash. */
#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

/*
 * Execute the pending action recorded on a hash node: install flow
 * rules for an ADD, or tear down rules/MPFS entry and free the node
 * for a DEL.
 */
static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		if (hn->mpfs_index != -1U)
			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

/*
 * Pop the first node off list "fh" and push it onto list "uh".
 * Returns the moved node, or NULL if "fh" was empty.
 */
static struct mlx5e_eth_addr_hash_node *
mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL) {
		LIST_REMOVE(hn, hlist);
		LIST_INSERT_HEAD(uh, hn, hlist);
	}
	return (hn);
}

/* Pop and return the first node of list "fh", or NULL if empty. */
static struct mlx5e_eth_addr_hash_node *
mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL)
		LIST_REMOVE(hn, hlist);
	return (hn);
}

/*
 * Snapshot the interface's unicast and multicast address lists into the
 * driver hash tables.  Node memory is preallocated (placeholders) so no
 * allocation happens while the ifnet address locks are held; if the
 * lists grew between counting and copying, everything is freed and the
 * whole procedure retries.  New unicast entries get an MPFS entry.
 */
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_head head_free;
	struct mlx5e_eth_addr_hash_head head_uc;
	struct mlx5e_eth_addr_hash_head head_mc;
	struct mlx5e_eth_addr_hash_node *hn;
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;
	bool success = false;
	size_t x;
	size_t num;

	PRIV_ASSERT_LOCKED(priv);

	LIST_INIT(&head_free);
	LIST_INIT(&head_uc);
	LIST_INIT(&head_mc);
retry:
	/* Start at one to account for the primary hardware address. */
	num = 1;

	/* Count link-level unicast addresses. */
	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		num++;
	}
	if_addr_runlock(ifp);

	/* Count link-level multicast addresses. */
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		num++;
	}
	if_maddr_runlock(ifp);

	/* allocate place holders */
	for (x = 0; x != num; x++) {
		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
		hn->action = MLX5E_ACTION_ADD;
		hn->mpfs_index = -1U;
		LIST_INSERT_HEAD(&head_free, hn, hlist);
	}

	/* The primary hardware address always goes first. */
	hn = mlx5e_move_hn(&head_free, &head_uc);
	if (hn == NULL)
		goto cleanup;

	ether_addr_copy(hn->ai.addr,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		hn = mlx5e_move_hn(&head_free, &head_uc);
		if (hn == NULL)
			break;
		ether_addr_copy(hn->ai.addr,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);
	/* Non-NULL iterator means we ran out of placeholders: retry. */
	if (ifa != NULL)
		goto cleanup;

	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		hn = mlx5e_move_hn(&head_free, &head_mc);
		if (hn == NULL)
			break;
		ether_addr_copy(hn->ai.addr,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
	if (ifma != NULL)
		goto cleanup;

	/* insert L2 unicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
			continue;
		if (hn->mpfs_index == -1U)
			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
			    hn->ai.addr, 0, 0);
	}

	/* insert L2 multicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
			continue;
	}

	success = true;

cleanup:
	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
		free(hn, M_MLX5EN);

	if (success == false)
		goto retry;
}

/*
 * Copy up to "size" addresses of the requested list type (UC or MC)
 * into "addr_array" for a vport-context update.  The interface's own
 * address (UC) or the broadcast address (MC, when enabled) is placed
 * first; the own address is skipped when walking the hash.
 */
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type ==
MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc)		/* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		/* Skip the own address already placed at slot 0. */
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

/*
 * Push the UC or MC address list to the NIC vport context, truncating
 * to the device limit (log_max_current_{uc,mc}_list) with a warning.
 */
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
    int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	/* MC list reserves one extra slot for broadcast when enabled. */
	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_en_err(priv->ifp,
		    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
		    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		mlx5_en_err(priv->ifp,
		    "Failed to modify vport %s list err(%d)\n",
		    is_uc ? "UC" : "MC", err);
	/* kfree(NULL) is a no-op. */
	kfree(addr_array);
}

/* Push UC/MC lists and promisc/allmulti flags to the NIC vport context. */
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
	    ea->allmulti_enabled,
	    ea->promisc_enabled);
}

/* Execute the pending ADD/DEL action on every tracked UC and MC address. */
static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
		mlx5e_execute_action(priv, hn);
}

/*
 * Reconcile the driver's address hashes with the interface lists:
 * mark everything for deletion, re-sync from the ifnet (which cancels
 * the delete for addresses still present), then apply the remaining
 * actions.
 */
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

/*
 * Bring the hardware receive filters (promisc, allmulti, broadcast,
 * per-address rules) in line with the current ifnet flags and address
 * lists, then push the result to the NIC vport context.
 */
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* Edge-detect each mode against the previously applied state. */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		/* Promisc implies accepting any VLAN as well. */
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

/* Deferred-work entry point for mlx5e_set_rx_mode_core(). */
void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED,
&priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}

/* Destroy every created flow group of "ft" and reset the group count. */
static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

/* Tear down a flow table: its groups, the group array, and the table. */
static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

/*
 * Main flow-table layout: one group per distinct match-criteria set,
 * sized for the number of rules that can share those criteria.
 */
#define	MLX5E_NUM_MAIN_GROUPS	10
#define	MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define	MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define	MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define	MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define	MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define	MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define	MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define	MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define	MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define	MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define	MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

/*
 * Create the flow groups of the main table, one per criteria set, in
 * decreasing specificity.  (Definition continues beyond this chunk.)
 */
static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
	    match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	/* Group 1: ethertype + IP protocol. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;

	/* Group 2: ethertype only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;

	/* Group 3: no criteria (catch-all). */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;

	/* Group 4: full DMAC + ethertype + IP protocol. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;

	/* Group 5: full DMAC + ethertype. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;

	/* Group 6: full DMAC only. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;

	/* Group 7: multicast bit of DMAC + ethertype + IP protocol. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destory_groups;
	ft->num_groups++;

	/* Group 8: multicast bit of DMAC + ethertype. */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 
MLX5E_MAIN_GROUP8_SIZE; 1241 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1242 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1243 if (IS_ERR(ft->g[ft->num_groups])) 1244 goto err_destory_groups; 1245 ft->num_groups++; 1246 1247 memset(in, 0, inlen); 1248 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1249 dmac[0] = 0x01; 1250 MLX5_SET_CFG(in, start_flow_index, ix); 1251 ix += MLX5E_MAIN_GROUP9_SIZE; 1252 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1253 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1254 if (IS_ERR(ft->g[ft->num_groups])) 1255 goto err_destory_groups; 1256 ft->num_groups++; 1257 1258 return (0); 1259 1260err_destory_groups: 1261 err = PTR_ERR(ft->g[ft->num_groups]); 1262 ft->g[ft->num_groups] = NULL; 1263 mlx5e_destroy_groups(ft); 1264 1265 return (err); 1266} 1267 1268static int 1269mlx5e_create_main_groups(struct mlx5e_flow_table *ft) 1270{ 1271 u32 *in; 1272 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1273 int err; 1274 1275 in = mlx5_vzalloc(inlen); 1276 if (!in) 1277 return (-ENOMEM); 1278 1279 err = mlx5e_create_main_groups_sub(ft, in, inlen); 1280 1281 kvfree(in); 1282 return (err); 1283} 1284 1285static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) 1286{ 1287 struct mlx5e_flow_table *ft = &priv->fts.main; 1288 int err; 1289 1290 ft->num_groups = 0; 1291 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main", 1292 MLX5E_MAIN_TABLE_SIZE); 1293 1294 if (IS_ERR(ft->t)) { 1295 err = PTR_ERR(ft->t); 1296 ft->t = NULL; 1297 return (err); 1298 } 1299 ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1300 if (!ft->g) { 1301 err = -ENOMEM; 1302 goto err_destroy_main_flow_table; 1303 } 1304 1305 err = mlx5e_create_main_groups(ft); 1306 if (err) 1307 goto err_free_g; 1308 return (0); 1309 1310err_free_g: 1311 kfree(ft->g); 1312 1313err_destroy_main_flow_table: 1314 mlx5_destroy_flow_table(ft->t); 1315 ft->t = NULL; 1316 1317 return (err); 1318} 1319 1320static void 
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) 1321{ 1322 mlx5e_destroy_flow_table(&priv->fts.main); 1323} 1324 1325#define MLX5E_NUM_VLAN_GROUPS 3 1326#define MLX5E_VLAN_GROUP0_SIZE BIT(12) 1327#define MLX5E_VLAN_GROUP1_SIZE BIT(1) 1328#define MLX5E_VLAN_GROUP2_SIZE BIT(0) 1329#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ 1330 MLX5E_VLAN_GROUP1_SIZE +\ 1331 MLX5E_VLAN_GROUP2_SIZE +\ 1332 0) 1333 1334static int 1335mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 1336 int inlen) 1337{ 1338 int err; 1339 int ix = 0; 1340 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1341 1342 memset(in, 0, inlen); 1343 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1344 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1345 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 1346 MLX5_SET_CFG(in, start_flow_index, ix); 1347 ix += MLX5E_VLAN_GROUP0_SIZE; 1348 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1349 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1350 if (IS_ERR(ft->g[ft->num_groups])) 1351 goto err_destory_groups; 1352 ft->num_groups++; 1353 1354 memset(in, 0, inlen); 1355 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1356 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1357 MLX5_SET_CFG(in, start_flow_index, ix); 1358 ix += MLX5E_VLAN_GROUP1_SIZE; 1359 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1360 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1361 if (IS_ERR(ft->g[ft->num_groups])) 1362 goto err_destory_groups; 1363 ft->num_groups++; 1364 1365 memset(in, 0, inlen); 1366 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1367 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 1368 MLX5_SET_CFG(in, start_flow_index, ix); 1369 ix += MLX5E_VLAN_GROUP2_SIZE; 1370 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1371 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1372 if 
(IS_ERR(ft->g[ft->num_groups])) 1373 goto err_destory_groups; 1374 ft->num_groups++; 1375 1376 return (0); 1377 1378err_destory_groups: 1379 err = PTR_ERR(ft->g[ft->num_groups]); 1380 ft->g[ft->num_groups] = NULL; 1381 mlx5e_destroy_groups(ft); 1382 1383 return (err); 1384} 1385 1386static int 1387mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) 1388{ 1389 u32 *in; 1390 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1391 int err; 1392 1393 in = mlx5_vzalloc(inlen); 1394 if (!in) 1395 return (-ENOMEM); 1396 1397 err = mlx5e_create_vlan_groups_sub(ft, in, inlen); 1398 1399 kvfree(in); 1400 return (err); 1401} 1402 1403static int 1404mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) 1405{ 1406 struct mlx5e_flow_table *ft = &priv->fts.vlan; 1407 int err; 1408 1409 ft->num_groups = 0; 1410 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan", 1411 MLX5E_VLAN_TABLE_SIZE); 1412 1413 if (IS_ERR(ft->t)) { 1414 err = PTR_ERR(ft->t); 1415 ft->t = NULL; 1416 return (err); 1417 } 1418 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1419 if (!ft->g) { 1420 err = -ENOMEM; 1421 goto err_destroy_vlan_flow_table; 1422 } 1423 1424 err = mlx5e_create_vlan_groups(ft); 1425 if (err) 1426 goto err_free_g; 1427 1428 return (0); 1429 1430err_free_g: 1431 kfree(ft->g); 1432 1433err_destroy_vlan_flow_table: 1434 mlx5_destroy_flow_table(ft->t); 1435 ft->t = NULL; 1436 1437 return (err); 1438} 1439 1440static void 1441mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) 1442{ 1443 mlx5e_destroy_flow_table(&priv->fts.vlan); 1444} 1445 1446#define MLX5E_NUM_INNER_RSS_GROUPS 3 1447#define MLX5E_INNER_RSS_GROUP0_SIZE BIT(3) 1448#define MLX5E_INNER_RSS_GROUP1_SIZE BIT(1) 1449#define MLX5E_INNER_RSS_GROUP2_SIZE BIT(0) 1450#define MLX5E_INNER_RSS_TABLE_SIZE (MLX5E_INNER_RSS_GROUP0_SIZE +\ 1451 MLX5E_INNER_RSS_GROUP1_SIZE +\ 1452 MLX5E_INNER_RSS_GROUP2_SIZE +\ 1453 0) 1454 1455static int 1456mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 
1457 int inlen) 1458{ 1459 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1460 int err; 1461 int ix = 0; 1462 1463 memset(in, 0, inlen); 1464 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1465 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype); 1466 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); 1467 MLX5_SET_CFG(in, start_flow_index, ix); 1468 ix += MLX5E_INNER_RSS_GROUP0_SIZE; 1469 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1470 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1471 if (IS_ERR(ft->g[ft->num_groups])) 1472 goto err_destory_groups; 1473 ft->num_groups++; 1474 1475 memset(in, 0, inlen); 1476 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1477 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype); 1478 MLX5_SET_CFG(in, start_flow_index, ix); 1479 ix += MLX5E_INNER_RSS_GROUP1_SIZE; 1480 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1481 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1482 if (IS_ERR(ft->g[ft->num_groups])) 1483 goto err_destory_groups; 1484 ft->num_groups++; 1485 1486 memset(in, 0, inlen); 1487 MLX5_SET_CFG(in, start_flow_index, ix); 1488 ix += MLX5E_INNER_RSS_GROUP2_SIZE; 1489 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1490 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1491 if (IS_ERR(ft->g[ft->num_groups])) 1492 goto err_destory_groups; 1493 ft->num_groups++; 1494 1495 return (0); 1496 1497err_destory_groups: 1498 err = PTR_ERR(ft->g[ft->num_groups]); 1499 ft->g[ft->num_groups] = NULL; 1500 mlx5e_destroy_groups(ft); 1501 1502 return (err); 1503} 1504 1505static int 1506mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft) 1507{ 1508 u32 *in; 1509 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1510 int err; 1511 1512 in = mlx5_vzalloc(inlen); 1513 if (!in) 1514 return (-ENOMEM); 1515 1516 err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen); 1517 1518 kvfree(in); 1519 return (err); 1520} 1521 
1522static int 1523mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv) 1524{ 1525 struct mlx5e_flow_table *ft = &priv->fts.inner_rss; 1526 int err; 1527 1528 ft->num_groups = 0; 1529 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss", 1530 MLX5E_INNER_RSS_TABLE_SIZE); 1531 1532 if (IS_ERR(ft->t)) { 1533 err = PTR_ERR(ft->t); 1534 ft->t = NULL; 1535 return (err); 1536 } 1537 ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g), 1538 GFP_KERNEL); 1539 if (!ft->g) { 1540 err = -ENOMEM; 1541 goto err_destroy_inner_rss_flow_table; 1542 } 1543 1544 err = mlx5e_create_inner_rss_groups(ft); 1545 if (err) 1546 goto err_free_g; 1547 1548 return (0); 1549 1550err_free_g: 1551 kfree(ft->g); 1552 1553err_destroy_inner_rss_flow_table: 1554 mlx5_destroy_flow_table(ft->t); 1555 ft->t = NULL; 1556 1557 return (err); 1558} 1559 1560static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv) 1561{ 1562 mlx5e_destroy_flow_table(&priv->fts.inner_rss); 1563} 1564 1565int 1566mlx5e_open_flow_table(struct mlx5e_priv *priv) 1567{ 1568 int err; 1569 1570 priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, 1571 MLX5_FLOW_NAMESPACE_KERNEL); 1572 1573 err = mlx5e_create_vlan_flow_table(priv); 1574 if (err) 1575 return (err); 1576 1577 err = mlx5e_create_main_flow_table(priv); 1578 if (err) 1579 goto err_destroy_vlan_flow_table; 1580 1581 err = mlx5e_create_inner_rss_flow_table(priv); 1582 if (err) 1583 goto err_destroy_main_flow_table; 1584 1585 return (0); 1586 1587err_destroy_main_flow_table: 1588 mlx5e_destroy_main_flow_table(priv); 1589err_destroy_vlan_flow_table: 1590 mlx5e_destroy_vlan_flow_table(priv); 1591 1592 return (err); 1593} 1594 1595void 1596mlx5e_close_flow_table(struct mlx5e_priv *priv) 1597{ 1598 1599 mlx5e_handle_ifp_addr(priv); 1600 mlx5e_destroy_inner_rss_flow_table(priv); 1601 mlx5e_destroy_main_flow_table(priv); 1602 mlx5e_destroy_vlan_flow_table(priv); 1603} 1604