// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/types.h>
#include <linux/crc32.h>
#include "dr_ste.h"

/* In-memory layout of a full-sized HW STE: control bits followed by
 * the match tag and the per-bit match mask.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};

/* CRC32 of @input_data with the result bytes swapped, matching the
 * byte order the device uses for its hash computation.
 */
static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
{
	u32 crc = crc32(0, input_data, length);

	return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
		((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
}

/* TTL modify with checksum recalculation needs a steering format newer
 * than ConnectX-5.
 */
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}

/* Hash the byte-masked STE tag into an index inside @htbl.
 * Returns 0 without computing the CRC when the table has a single
 * entry or an empty byte mask.
 */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
	u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 masked[DR_STE_SIZE_TAG] = {};
	u32 crc32, index;
	u16 bit;
	int i;

	/* Don't calculate CRC if the result is predicted */
	if (num_entries == 1 || htbl->byte_mask == 0)
		return 0;

	/* Mask tag using byte mask, bit per byte */
	bit = 1 << (DR_STE_SIZE_TAG - 1);
	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
		if (htbl->byte_mask & bit)
			masked[i] = hw_ste->tag[i];

		bit = bit >> 1;
	}

	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
	/* num_entries is a power of two, so this is a cheap modulo */
	index = crc32 & (num_entries - 1);

	return index;
}

/* Compress a per-bit tag mask into one bit per tag byte: a byte whose
 * mask is fully set (0xff) contributes a 1 bit.
 */
u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
	u16 byte_mask = 0;
	int i;

	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
		byte_mask = byte_mask << 1;
		if (bit_mask[i] == 0xff)
			byte_mask |= 1;
	}
	return byte_mask;
}

/* Return a pointer to the tag area inside a full-sized HW STE */
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	return hw_ste->tag;
}

/* Copy @bit_mask into the mask area of a full-sized HW STE */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}

/* Zeroed tag and mask make the STE match (hit on) every packet */
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
}

/* A non-zero tag byte with a zeroed mask can never match - the STE
 * misses every packet.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}

bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
				 u8 *hw_ste_p)
{
	/* Not all STE formats provide this query */
	if (!ste_ctx->is_miss_addr_set)
		return false;

	/* check if miss address is already set for this type of STE */
	return ste_ctx->is_miss_addr_set(hw_ste_p);
}

void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}

/* Turn @hw_ste into an always-miss STE whose miss path goes straight
 * to @miss_addr.
 */
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    u8 *hw_ste, u64 miss_addr)
{
	ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}

void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}

/* Device (ICM) address of @ste, derived from its index within the
 * owning chunk.
 */
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
	u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return base_icm_addr + DR_STE_SIZE * index;
}

/* MR-mapped address of @ste within the owning chunk */
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
}

/* Host copy of the STE; only DR_STE_SIZE_REDUCED bytes are kept per
 * STE on the host side (no mask area).
 */
u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
{
	u64 index = ste - ste->htbl->chunk->ste_arr;

	return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
}

struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste
*ste) 147{ 148 u32 index = ste - ste->htbl->chunk->ste_arr; 149 150 return &ste->htbl->chunk->miss_list[index]; 151} 152 153static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx, 154 u8 *hw_ste, 155 struct mlx5dr_ste_htbl *next_htbl) 156{ 157 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk; 158 159 ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask); 160 ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type); 161 ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk), 162 mlx5dr_icm_pool_get_chunk_num_of_entries(chunk)); 163 164 dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste); 165} 166 167bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher, 168 u8 ste_location) 169{ 170 return ste_location == nic_matcher->num_of_builders; 171} 172 173/* Replace relevant fields, except of: 174 * htbl - keep the origin htbl 175 * miss_list + list - already took the src from the list. 176 * icm_addr/mr_addr - depends on the hosting table. 177 * 178 * Before: 179 * | a | -> | b | -> | c | -> 180 * 181 * After: 182 * | a | -> | c | -> 183 * While the data that was in b copied to a. 
184 */ 185static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) 186{ 187 memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src), 188 DR_STE_SIZE_REDUCED); 189 dst->next_htbl = src->next_htbl; 190 if (dst->next_htbl) 191 dst->next_htbl->pointing_ste = dst; 192 193 dst->refcount = src->refcount; 194} 195 196/* Free ste which is the head and the only one in miss_list */ 197static void 198dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx, 199 struct mlx5dr_ste *ste, 200 struct mlx5dr_matcher_rx_tx *nic_matcher, 201 struct mlx5dr_ste_send_info *ste_info_head, 202 struct list_head *send_ste_list, 203 struct mlx5dr_ste_htbl *stats_tbl) 204{ 205 u8 tmp_data_ste[DR_STE_SIZE] = {}; 206 u64 miss_addr; 207 208 miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk); 209 210 /* Use temp ste because dr_ste_always_miss_addr 211 * touches bit_mask area which doesn't exist at ste->hw_ste. 212 * Need to use a full-sized (DR_STE_SIZE) hw_ste. 213 */ 214 memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED); 215 dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr); 216 memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED); 217 218 list_del_init(&ste->miss_list_node); 219 220 /* Write full STE size in order to have "always_miss" */ 221 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 222 0, tmp_data_ste, 223 ste_info_head, 224 send_ste_list, 225 true /* Copy data */); 226 227 stats_tbl->ctrl.num_of_valid_entries--; 228} 229 230/* Free ste which is the head but NOT the only one in miss_list: 231 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0 232 */ 233static void 234dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher, 235 struct mlx5dr_ste *ste, 236 struct mlx5dr_ste *next_ste, 237 struct mlx5dr_ste_send_info *ste_info_head, 238 struct list_head *send_ste_list, 239 struct mlx5dr_ste_htbl *stats_tbl) 240 241{ 242 struct mlx5dr_ste_htbl *next_miss_htbl; 243 u8 
hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Copy all 64 hw_ste bytes */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}

/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Unlink ste: prev inherits ste's miss address */
	miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
	ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);

	/* Only the control area of prev changed - send just that part */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  mlx5dr_ste_get_hw_ste(prev_ste),
						  ste_info, send_ste_list,
						  true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}

/* Free @ste, fixing up its miss list and pushing the needed STE
 * updates to HW. The owning htbl's refcount is dropped unless the
 * head-replacement path already released a table.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}

/* Compare only the tag areas of two full-sized HW STEs */
bool mlx5dr_ste_equal_tag(void *src, void *dst)
{
	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;

	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}

void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
	u32 num_entries =
		mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);

	ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
}

void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	/* Optional per-format hook; older formats don't need it */
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}

/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
	u8 tmp_hw_ste[DR_STE_SIZE] = {0};

	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);

	/* Use temp ste because dr_ste_always_miss_addr/hit_htbl
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
	 */
	memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
				       connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
					connect_info->miss_icm_addr);
	memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
}

/* Build the pattern STE for @htbl and write the whole table to HW */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}

/* Allocate the next hash table in the rule chain, write it to HW and
 * connect @ste's hit path to it. No-op when @ste is the last in the
 * rule. Returns 0 on success or a negative errno.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr =
			mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}

/* Allocate a hash table backed by an ICM chunk of @chunk_size entries
 * and initialize all its STEs and miss lists. Returns NULL on failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	u32 num_entries;
	int i;

	htbl = mlx5dr_icm_pool_alloc_htbl(pool);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->refcount = 0;
	htbl->pointing_ste = NULL;
	htbl->ctrl.num_of_valid_entries = 0;
	htbl->ctrl.num_of_collisions = 0;
	num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);

	for (i = 0; i < num_entries; i++) {
		struct mlx5dr_ste *ste = &chunk->ste_arr[i];

		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&chunk->miss_list[i]);
	}

	return htbl;

out_free_htbl:
	mlx5dr_icm_pool_free_htbl(pool, htbl);
	return NULL;
}

/* Free @htbl and its ICM chunk; fails with -EBUSY while referenced */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	struct mlx5dr_icm_pool *pool = htbl->chunk->buddy_mem->pool;

	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	mlx5dr_icm_pool_free_htbl(pool, htbl);

	return 0;
}

void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set,
ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}

void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}

/* Map a SW modify-header field id to its HW field descriptor.
 * Returns NULL when the id is out of range or has no HW mapping
 * (an all-zero start/end entry).
 */
const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
{
	const struct mlx5dr_ste_action_modify_field *hw_field;

	if (sw_field >= ste_ctx->modify_field_arr_sz)
		return NULL;

	hw_field = &ste_ctx->modify_field_arr[sw_field];
	if (!hw_field->end && !hw_field->start)
		return NULL;

	return hw_field;
}

void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}

void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}

void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}

int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}

/* Allocate an ICM chunk for a modify-header action, compute its HW
 * index and post the rewrite data to the device.
 */
static int
dr_ste_alloc_modify_hdr_chunk(struct mlx5dr_action *action)
{
	struct mlx5dr_domain *dmn = action->rewrite->dmn;
	u32 chunk_size;
	int ret;

	chunk_size = ilog2(roundup_pow_of_two(action->rewrite->num_of_actions));

	/* HW modify action index granularity is at least 64B */
	chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);

	action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
							chunk_size);
	if (!action->rewrite->chunk)
		return -ENOMEM;

	action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(action->rewrite->chunk) -
				  dmn->info.caps.hdr_modify_icm_addr) /
				 DR_ACTION_CACHE_LINE_SIZE;

	ret = mlx5dr_send_postsend_action(action->rewrite->dmn, action);
	if (ret)
		goto free_chunk;

	return 0;

free_chunk:
	mlx5dr_icm_free_chunk(action->rewrite->chunk);
	return -ENOMEM;
}

static void dr_ste_free_modify_hdr_chunk(struct mlx5dr_action *action)
{
	mlx5dr_icm_free_chunk(action->rewrite->chunk);
}

/* Dispatch to the format-specific allocator when the domain supports
 * pattern/argument based modify-header, else use the legacy chunk path.
 */
int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action)
{
	struct mlx5dr_domain *dmn = action->rewrite->dmn;

	if (mlx5dr_domain_is_support_ptrn_arg(dmn))
		return dmn->ste_ctx->alloc_modify_hdr_chunk(action);

	return dr_ste_alloc_modify_hdr_chunk(action);
}

void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action)
{
	struct mlx5dr_domain *dmn = action->rewrite->dmn;

	if (mlx5dr_domain_is_support_ptrn_arg(dmn))
		return dmn->ste_ctx->dealloc_modify_hdr_chunk(action);

	return dr_ste_free_modify_hdr_chunk(action);
}

/* Reject mask combinations HW cannot match: partial ip_version, or
 * src/dst IP matching without a full ethertype/ip_version mask.
 */
static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
				       struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version) {
		if (spec->ip_version != 0xf) {
			mlx5dr_err(dmn,
				   "Partial ip_version mask with src/dst IP is not supported\n");
			return -EINVAL;
		}
	} else if (spec->ethertype != 0xffff &&
		   (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
		mlx5dr_err(dmn,
			   "Partial/no ethertype mask with src/dst IP is not supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Validate matcher mask before building STEs; values (not masks) are
 * always accepted.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value)
		return 0;

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
		return -EINVAL;

	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
		return -EINVAL;

	return 0;
}

/* Run every builder of @nic_matcher over @value, emitting one STE per
 * builder into @ste_arr and chaining them together.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}

/* Read a field from a fte_match_set buffer; when @clear is set, also
 * zero the field at the source.
 */
#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})

/* memcpy that optionally zeroes the source afterwards */
#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})

/* Copy the misc parameters from a device-layout mask buffer into @spec */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_tlv_option_0_exist =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}

/* Copy the L2-L4 match parameters from a device-layout mask buffer */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	/* IPv6 addresses (also carrying IPv4) are copied as raw big-endian
	 * words and converted to host order below.
	 */
	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}

/* Copy the misc2 (MPLS, metadata registers) match parameters */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}

/* Copy the misc3 (TCP seq/ack, tunnels, ICMP, GTP-U) match parameters */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}

/* Copy the misc4 (programmable sample fields) match parameters */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}

/* Copy the misc5 (MACsec tag, tunnel header) match parameters */
static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
	spec->macsec_tag_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
	spec->macsec_tag_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
	spec->macsec_tag_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
	spec->macsec_tag_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
	spec->tunnel_header_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
	spec->tunnel_header_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
	spec->tunnel_header_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
	spec->tunnel_header_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}

/* Copy the user-provided mask buffer into @set_param, section by
 * section per @match_criteria. A section that is only partially
 * present in the (possibly short) buffer is staged through a
 * zero-padded temporary before copying.
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask,
			   bool clr)
{
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data, mask->match_sz);
			buff = tail_param;
		} else {
			buff = mask->match_buf;
		}
		dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
	}
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff =
tail_param; 1052 } else { 1053 buff = data + param_location; 1054 } 1055 dr_ste_copy_mask_misc(buff, &set_param->misc, clr); 1056 } 1057 param_location += sizeof(struct mlx5dr_match_misc); 1058 1059 if (match_criteria & DR_MATCHER_CRITERIA_INNER) { 1060 if (mask->match_sz < param_location + 1061 sizeof(struct mlx5dr_match_spec)) { 1062 memcpy(tail_param, data + param_location, 1063 mask->match_sz - param_location); 1064 buff = tail_param; 1065 } else { 1066 buff = data + param_location; 1067 } 1068 dr_ste_copy_mask_spec(buff, &set_param->inner, clr); 1069 } 1070 param_location += sizeof(struct mlx5dr_match_spec); 1071 1072 if (match_criteria & DR_MATCHER_CRITERIA_MISC2) { 1073 if (mask->match_sz < param_location + 1074 sizeof(struct mlx5dr_match_misc2)) { 1075 memcpy(tail_param, data + param_location, 1076 mask->match_sz - param_location); 1077 buff = tail_param; 1078 } else { 1079 buff = data + param_location; 1080 } 1081 dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr); 1082 } 1083 1084 param_location += sizeof(struct mlx5dr_match_misc2); 1085 1086 if (match_criteria & DR_MATCHER_CRITERIA_MISC3) { 1087 if (mask->match_sz < param_location + 1088 sizeof(struct mlx5dr_match_misc3)) { 1089 memcpy(tail_param, data + param_location, 1090 mask->match_sz - param_location); 1091 buff = tail_param; 1092 } else { 1093 buff = data + param_location; 1094 } 1095 dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr); 1096 } 1097 1098 param_location += sizeof(struct mlx5dr_match_misc3); 1099 1100 if (match_criteria & DR_MATCHER_CRITERIA_MISC4) { 1101 if (mask->match_sz < param_location + 1102 sizeof(struct mlx5dr_match_misc4)) { 1103 memcpy(tail_param, data + param_location, 1104 mask->match_sz - param_location); 1105 buff = tail_param; 1106 } else { 1107 buff = data + param_location; 1108 } 1109 dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr); 1110 } 1111 1112 param_location += sizeof(struct mlx5dr_match_misc4); 1113 1114 if (match_criteria & DR_MATCHER_CRITERIA_MISC5) 
{ 1115 if (mask->match_sz < param_location + 1116 sizeof(struct mlx5dr_match_misc5)) { 1117 memcpy(tail_param, data + param_location, 1118 mask->match_sz - param_location); 1119 buff = tail_param; 1120 } else { 1121 buff = data + param_location; 1122 } 1123 dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr); 1124 } 1125} 1126 1127void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx, 1128 struct mlx5dr_ste_build *sb, 1129 struct mlx5dr_match_param *mask, 1130 bool inner, bool rx) 1131{ 1132 sb->rx = rx; 1133 sb->inner = inner; 1134 ste_ctx->build_eth_l2_src_dst_init(sb, mask); 1135} 1136 1137void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx, 1138 struct mlx5dr_ste_build *sb, 1139 struct mlx5dr_match_param *mask, 1140 bool inner, bool rx) 1141{ 1142 sb->rx = rx; 1143 sb->inner = inner; 1144 ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask); 1145} 1146 1147void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx, 1148 struct mlx5dr_ste_build *sb, 1149 struct mlx5dr_match_param *mask, 1150 bool inner, bool rx) 1151{ 1152 sb->rx = rx; 1153 sb->inner = inner; 1154 ste_ctx->build_eth_l3_ipv6_src_init(sb, mask); 1155} 1156 1157void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx, 1158 struct mlx5dr_ste_build *sb, 1159 struct mlx5dr_match_param *mask, 1160 bool inner, bool rx) 1161{ 1162 sb->rx = rx; 1163 sb->inner = inner; 1164 ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask); 1165} 1166 1167void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx, 1168 struct mlx5dr_ste_build *sb, 1169 struct mlx5dr_match_param *mask, 1170 bool inner, bool rx) 1171{ 1172 sb->rx = rx; 1173 sb->inner = inner; 1174 ste_ctx->build_eth_l2_src_init(sb, mask); 1175} 1176 1177void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx, 1178 struct mlx5dr_ste_build *sb, 1179 struct mlx5dr_match_param *mask, 1180 bool inner, bool rx) 1181{ 1182 sb->rx = rx; 1183 sb->inner = inner; 1184 ste_ctx->build_eth_l2_dst_init(sb, 
mask); 1185} 1186 1187void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx, 1188 struct mlx5dr_ste_build *sb, 1189 struct mlx5dr_match_param *mask, bool inner, bool rx) 1190{ 1191 sb->rx = rx; 1192 sb->inner = inner; 1193 ste_ctx->build_eth_l2_tnl_init(sb, mask); 1194} 1195 1196void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx, 1197 struct mlx5dr_ste_build *sb, 1198 struct mlx5dr_match_param *mask, 1199 bool inner, bool rx) 1200{ 1201 sb->rx = rx; 1202 sb->inner = inner; 1203 ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask); 1204} 1205 1206void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx, 1207 struct mlx5dr_ste_build *sb, 1208 struct mlx5dr_match_param *mask, 1209 bool inner, bool rx) 1210{ 1211 sb->rx = rx; 1212 sb->inner = inner; 1213 ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask); 1214} 1215 1216static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value, 1217 struct mlx5dr_ste_build *sb, 1218 u8 *tag) 1219{ 1220 return 0; 1221} 1222 1223void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx) 1224{ 1225 sb->rx = rx; 1226 sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE; 1227 sb->byte_mask = 0; 1228 sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag; 1229} 1230 1231void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx, 1232 struct mlx5dr_ste_build *sb, 1233 struct mlx5dr_match_param *mask, 1234 bool inner, bool rx) 1235{ 1236 sb->rx = rx; 1237 sb->inner = inner; 1238 ste_ctx->build_mpls_init(sb, mask); 1239} 1240 1241void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx, 1242 struct mlx5dr_ste_build *sb, 1243 struct mlx5dr_match_param *mask, 1244 bool inner, bool rx) 1245{ 1246 sb->rx = rx; 1247 sb->inner = inner; 1248 ste_ctx->build_tnl_gre_init(sb, mask); 1249} 1250 1251void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx, 1252 struct mlx5dr_ste_build *sb, 1253 struct mlx5dr_match_param *mask, 1254 struct mlx5dr_cmd_caps *caps, 1255 bool inner, 
bool rx) 1256{ 1257 sb->rx = rx; 1258 sb->inner = inner; 1259 sb->caps = caps; 1260 return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask); 1261} 1262 1263void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx, 1264 struct mlx5dr_ste_build *sb, 1265 struct mlx5dr_match_param *mask, 1266 struct mlx5dr_cmd_caps *caps, 1267 bool inner, bool rx) 1268{ 1269 sb->rx = rx; 1270 sb->inner = inner; 1271 sb->caps = caps; 1272 return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask); 1273} 1274 1275void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, 1276 struct mlx5dr_ste_build *sb, 1277 struct mlx5dr_match_param *mask, 1278 struct mlx5dr_cmd_caps *caps, 1279 bool inner, bool rx) 1280{ 1281 sb->rx = rx; 1282 sb->inner = inner; 1283 sb->caps = caps; 1284 ste_ctx->build_icmp_init(sb, mask); 1285} 1286 1287void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, 1288 struct mlx5dr_ste_build *sb, 1289 struct mlx5dr_match_param *mask, 1290 bool inner, bool rx) 1291{ 1292 sb->rx = rx; 1293 sb->inner = inner; 1294 ste_ctx->build_general_purpose_init(sb, mask); 1295} 1296 1297void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx, 1298 struct mlx5dr_ste_build *sb, 1299 struct mlx5dr_match_param *mask, 1300 bool inner, bool rx) 1301{ 1302 sb->rx = rx; 1303 sb->inner = inner; 1304 ste_ctx->build_eth_l4_misc_init(sb, mask); 1305} 1306 1307void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx, 1308 struct mlx5dr_ste_build *sb, 1309 struct mlx5dr_match_param *mask, 1310 bool inner, bool rx) 1311{ 1312 sb->rx = rx; 1313 sb->inner = inner; 1314 ste_ctx->build_tnl_vxlan_gpe_init(sb, mask); 1315} 1316 1317void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx, 1318 struct mlx5dr_ste_build *sb, 1319 struct mlx5dr_match_param *mask, 1320 bool inner, bool rx) 1321{ 1322 sb->rx = rx; 1323 sb->inner = inner; 1324 ste_ctx->build_tnl_geneve_init(sb, mask); 1325} 1326 1327void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx 
*ste_ctx, 1328 struct mlx5dr_ste_build *sb, 1329 struct mlx5dr_match_param *mask, 1330 struct mlx5dr_cmd_caps *caps, 1331 bool inner, bool rx) 1332{ 1333 sb->rx = rx; 1334 sb->caps = caps; 1335 sb->inner = inner; 1336 ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask); 1337} 1338 1339void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx, 1340 struct mlx5dr_ste_build *sb, 1341 struct mlx5dr_match_param *mask, 1342 struct mlx5dr_cmd_caps *caps, 1343 bool inner, bool rx) 1344{ 1345 if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init) 1346 return; 1347 1348 sb->rx = rx; 1349 sb->caps = caps; 1350 sb->inner = inner; 1351 ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask); 1352} 1353 1354void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx, 1355 struct mlx5dr_ste_build *sb, 1356 struct mlx5dr_match_param *mask, 1357 bool inner, bool rx) 1358{ 1359 sb->rx = rx; 1360 sb->inner = inner; 1361 ste_ctx->build_tnl_gtpu_init(sb, mask); 1362} 1363 1364void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx, 1365 struct mlx5dr_ste_build *sb, 1366 struct mlx5dr_match_param *mask, 1367 struct mlx5dr_cmd_caps *caps, 1368 bool inner, bool rx) 1369{ 1370 sb->rx = rx; 1371 sb->caps = caps; 1372 sb->inner = inner; 1373 ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask); 1374} 1375 1376void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, 1377 struct mlx5dr_ste_build *sb, 1378 struct mlx5dr_match_param *mask, 1379 struct mlx5dr_cmd_caps *caps, 1380 bool inner, bool rx) 1381{ 1382 sb->rx = rx; 1383 sb->caps = caps; 1384 sb->inner = inner; 1385 ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask); 1386} 1387 1388void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx, 1389 struct mlx5dr_ste_build *sb, 1390 struct mlx5dr_match_param *mask, 1391 bool inner, bool rx) 1392{ 1393 sb->rx = rx; 1394 sb->inner = inner; 1395 ste_ctx->build_register_0_init(sb, mask); 1396} 1397 1398void 
mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx, 1399 struct mlx5dr_ste_build *sb, 1400 struct mlx5dr_match_param *mask, 1401 bool inner, bool rx) 1402{ 1403 sb->rx = rx; 1404 sb->inner = inner; 1405 ste_ctx->build_register_1_init(sb, mask); 1406} 1407 1408void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx, 1409 struct mlx5dr_ste_build *sb, 1410 struct mlx5dr_match_param *mask, 1411 struct mlx5dr_domain *dmn, 1412 bool inner, bool rx) 1413{ 1414 /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */ 1415 sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id; 1416 1417 sb->rx = rx; 1418 sb->dmn = dmn; 1419 sb->inner = inner; 1420 ste_ctx->build_src_gvmi_qpn_init(sb, mask); 1421} 1422 1423void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx, 1424 struct mlx5dr_ste_build *sb, 1425 struct mlx5dr_match_param *mask, 1426 bool inner, bool rx) 1427{ 1428 sb->rx = rx; 1429 sb->inner = inner; 1430 ste_ctx->build_flex_parser_0_init(sb, mask); 1431} 1432 1433void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, 1434 struct mlx5dr_ste_build *sb, 1435 struct mlx5dr_match_param *mask, 1436 bool inner, bool rx) 1437{ 1438 sb->rx = rx; 1439 sb->inner = inner; 1440 ste_ctx->build_flex_parser_1_init(sb, mask); 1441} 1442 1443void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx, 1444 struct mlx5dr_ste_build *sb, 1445 struct mlx5dr_match_param *mask, 1446 bool inner, bool rx) 1447{ 1448 sb->rx = rx; 1449 sb->inner = inner; 1450 ste_ctx->build_tnl_header_0_1_init(sb, mask); 1451} 1452 1453struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version) 1454{ 1455 if (version == MLX5_STEERING_FORMAT_CONNECTX_5) 1456 return mlx5dr_ste_get_ctx_v0(); 1457 else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX) 1458 return mlx5dr_ste_get_ctx_v1(); 1459 else if (version == MLX5_STEERING_FORMAT_CONNECTX_7) 1460 return mlx5dr_ste_get_ctx_v2(); 1461 1462 return NULL; 1463} 1464