/* mlx4_mcg.c revision 279584 */
1/* 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 
32 */ 33 34#include <linux/string.h> 35#include <linux/etherdevice.h> 36 37#include <linux/mlx4/cmd.h> 38#include <linux/module.h> 39#include <linux/printk.h> 40 41#include "mlx4.h" 42 43int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 44{ 45 return 1 << dev->oper_log_mgm_entry_size; 46} 47 48int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 49{ 50 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 51} 52 53static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, 54 struct mlx4_cmd_mailbox *mailbox, 55 u32 size, 56 u64 *reg_id) 57{ 58 u64 imm; 59 int err = 0; 60 61 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, 62 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 63 MLX4_CMD_NATIVE); 64 if (err) 65 return err; 66 *reg_id = imm; 67 68 return err; 69} 70 71static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) 72{ 73 int err = 0; 74 75 err = mlx4_cmd(dev, regid, 0, 0, 76 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 77 MLX4_CMD_NATIVE); 78 79 return err; 80} 81 82static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 83 struct mlx4_cmd_mailbox *mailbox) 84{ 85 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 86 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 87} 88 89static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 90 struct mlx4_cmd_mailbox *mailbox) 91{ 92 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 93 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 94} 95 96static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, 97 struct mlx4_cmd_mailbox *mailbox) 98{ 99 u32 in_mod; 100 101 in_mod = (u32) port << 16 | steer << 1; 102 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 103 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, 104 MLX4_CMD_NATIVE); 105} 106 107static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 108 u16 *hash, u8 op_mod) 109{ 110 u64 imm; 111 int err; 112 113 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 114 MLX4_CMD_MGID_HASH, 
MLX4_CMD_TIME_CLASS_A, 115 MLX4_CMD_NATIVE); 116 117 if (!err) 118 *hash = imm; 119 120 return err; 121} 122 123static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, 124 enum mlx4_steer_type steer, 125 u32 qpn) 126{ 127 struct mlx4_steer *s_steer; 128 struct mlx4_promisc_qp *pqp; 129 130 if (port < 1 || port > dev->caps.num_ports) 131 return NULL; 132 133 s_steer = &mlx4_priv(dev)->steer[port - 1]; 134 135 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 136 if (pqp->qpn == qpn) 137 return pqp; 138 } 139 /* not found */ 140 return NULL; 141} 142 143/* 144 * Add new entry to steering data structure. 145 * All promisc QPs should be added as well 146 */ 147static int new_steering_entry(struct mlx4_dev *dev, u8 port, 148 enum mlx4_steer_type steer, 149 unsigned int index, u32 qpn) 150{ 151 struct mlx4_steer *s_steer; 152 struct mlx4_cmd_mailbox *mailbox; 153 struct mlx4_mgm *mgm; 154 u32 members_count; 155 struct mlx4_steer_index *new_entry; 156 struct mlx4_promisc_qp *pqp; 157 struct mlx4_promisc_qp *dqp = NULL; 158 u32 prot; 159 int err; 160 161 if (port < 1 || port > dev->caps.num_ports) 162 return -EINVAL; 163 164 s_steer = &mlx4_priv(dev)->steer[port - 1]; 165 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 166 if (!new_entry) 167 return -ENOMEM; 168 169 INIT_LIST_HEAD(&new_entry->duplicates); 170 new_entry->index = index; 171 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 172 173 /* If the given qpn is also a promisc qp, 174 * it should be inserted to duplicates list 175 */ 176 pqp = get_promisc_qp(dev, port, steer, qpn); 177 if (pqp) { 178 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 179 if (!dqp) { 180 err = -ENOMEM; 181 goto out_alloc; 182 } 183 dqp->qpn = qpn; 184 list_add_tail(&dqp->list, &new_entry->duplicates); 185 } 186 187 /* if no promisc qps for this vep, we are done */ 188 if (list_empty(&s_steer->promisc_qps[steer])) 189 return 0; 190 191 /* now need to add all the promisc qps to the new 192 * 
steering entry, as they should also receive the packets 193 * destined to this address */ 194 mailbox = mlx4_alloc_cmd_mailbox(dev); 195 if (IS_ERR(mailbox)) { 196 err = -ENOMEM; 197 goto out_alloc; 198 } 199 mgm = mailbox->buf; 200 201 err = mlx4_READ_ENTRY(dev, index, mailbox); 202 if (err) 203 goto out_mailbox; 204 205 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 206 prot = be32_to_cpu(mgm->members_count) >> 30; 207 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 208 /* don't add already existing qpn */ 209 if (pqp->qpn == qpn) 210 continue; 211 if (members_count == dev->caps.num_qp_per_mgm) { 212 /* out of space */ 213 err = -ENOMEM; 214 goto out_mailbox; 215 } 216 217 /* add the qpn */ 218 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 219 } 220 /* update the qps count and update the entry with all the promisc qps*/ 221 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 222 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 223 224out_mailbox: 225 mlx4_free_cmd_mailbox(dev, mailbox); 226 if (!err) 227 return 0; 228out_alloc: 229 if (dqp) { 230 list_del(&dqp->list); 231 kfree(dqp); 232 } 233 list_del(&new_entry->list); 234 kfree(new_entry); 235 return err; 236} 237 238/* update the data structures with existing steering entry */ 239static int existing_steering_entry(struct mlx4_dev *dev, u8 port, 240 enum mlx4_steer_type steer, 241 unsigned int index, u32 qpn) 242{ 243 struct mlx4_steer *s_steer; 244 struct mlx4_steer_index *tmp_entry, *entry = NULL; 245 struct mlx4_promisc_qp *pqp; 246 struct mlx4_promisc_qp *dqp; 247 248 if (port < 1 || port > dev->caps.num_ports) 249 return -EINVAL; 250 251 s_steer = &mlx4_priv(dev)->steer[port - 1]; 252 253 pqp = get_promisc_qp(dev, port, steer, qpn); 254 if (!pqp) 255 return 0; /* nothing to do */ 256 257 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 258 if (tmp_entry->index == index) { 259 entry = tmp_entry; 260 break; 261 } 262 } 263 if 
(unlikely(!entry)) { 264 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 265 return -EINVAL; 266 } 267 268 /* the given qpn is listed as a promisc qpn 269 * we need to add it as a duplicate to this entry 270 * for future references */ 271 list_for_each_entry(dqp, &entry->duplicates, list) { 272 if (qpn == dqp->qpn) 273 return 0; /* qp is already duplicated */ 274 } 275 276 /* add the qp as a duplicate on this index */ 277 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 278 if (!dqp) 279 return -ENOMEM; 280 dqp->qpn = qpn; 281 list_add_tail(&dqp->list, &entry->duplicates); 282 283 return 0; 284} 285 286/* Check whether a qpn is a duplicate on steering entry 287 * If so, it should not be removed from mgm */ 288static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, 289 enum mlx4_steer_type steer, 290 unsigned int index, u32 qpn) 291{ 292 struct mlx4_steer *s_steer; 293 struct mlx4_steer_index *tmp_entry, *entry = NULL; 294 struct mlx4_promisc_qp *dqp, *tmp_dqp; 295 296 if (port < 1 || port > dev->caps.num_ports) 297 return NULL; 298 299 s_steer = &mlx4_priv(dev)->steer[port - 1]; 300 301 /* if qp is not promisc, it cannot be duplicated */ 302 if (!get_promisc_qp(dev, port, steer, qpn)) 303 return false; 304 305 /* The qp is promisc qp so it is a duplicate on this index 306 * Find the index entry, and remove the duplicate */ 307 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 308 if (tmp_entry->index == index) { 309 entry = tmp_entry; 310 break; 311 } 312 } 313 if (unlikely(!entry)) { 314 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 315 return false; 316 } 317 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 318 if (dqp->qpn == qpn) { 319 list_del(&dqp->list); 320 kfree(dqp); 321 } 322 } 323 return true; 324} 325 326/* 327 * returns true if all the QPs != tqpn contained in this entry 328 * are Promisc QPs. return false otherwise. 
329 */ 330static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port, 331 enum mlx4_steer_type steer, 332 unsigned int index, u32 tqpn, u32 *members_count) 333{ 334 struct mlx4_steer *s_steer; 335 struct mlx4_cmd_mailbox *mailbox; 336 struct mlx4_mgm *mgm; 337 u32 m_count; 338 bool ret = false; 339 int i; 340 341 if (port < 1 || port > dev->caps.num_ports) 342 return false; 343 344 s_steer = &mlx4_priv(dev)->steer[port - 1]; 345 346 mailbox = mlx4_alloc_cmd_mailbox(dev); 347 if (IS_ERR(mailbox)) 348 return false; 349 mgm = mailbox->buf; 350 351 if (mlx4_READ_ENTRY(dev, index, mailbox)) 352 goto out; 353 m_count = be32_to_cpu(mgm->members_count) & 0xffffff; 354 if (members_count) 355 *members_count = m_count; 356 357 for (i = 0; i < m_count; i++) { 358 u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 359 if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { 360 /* the qp is not promisc, the entry can't be removed */ 361 goto out; 362 } 363 } 364 ret = true; 365out: 366 mlx4_free_cmd_mailbox(dev, mailbox); 367 return ret; 368} 369 370/* IF a steering entry contains only promisc QPs, it can be removed. 
*/ 371static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, 372 enum mlx4_steer_type steer, 373 unsigned int index, u32 tqpn) 374{ 375 struct mlx4_steer *s_steer; 376 struct mlx4_steer_index *entry = NULL, *tmp_entry; 377 u32 members_count; 378 bool ret = false; 379 380 if (port < 1 || port > dev->caps.num_ports) 381 return NULL; 382 383 s_steer = &mlx4_priv(dev)->steer[port - 1]; 384 385 if (!promisc_steering_entry(dev, port, steer, index, tqpn, &members_count)) 386 goto out; 387 388 /* All the qps currently registered for this entry are promiscuous, 389 * Checking for duplicates */ 390 ret = true; 391 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 392 if (entry->index == index) { 393 if (list_empty(&entry->duplicates) || members_count == 1) { 394 struct mlx4_promisc_qp *pqp, *tmp_pqp; 395 /* 396 * If there is only 1 entry in duplicates than 397 * this is the QP we want to delete, going over 398 * the list and deleting the entry. 399 */ 400 list_del(&entry->list); 401 list_for_each_entry_safe(pqp, tmp_pqp, 402 &entry->duplicates, 403 list) { 404 list_del(&pqp->list); 405 kfree(pqp); 406 } 407 kfree(entry); 408 } else { 409 /* This entry contains duplicates so it shouldn't be removed */ 410 ret = false; 411 goto out; 412 } 413 } 414 } 415 416out: 417 return ret; 418} 419 420static int add_promisc_qp(struct mlx4_dev *dev, u8 port, 421 enum mlx4_steer_type steer, u32 qpn) 422{ 423 struct mlx4_steer *s_steer; 424 struct mlx4_cmd_mailbox *mailbox; 425 struct mlx4_mgm *mgm; 426 struct mlx4_steer_index *entry; 427 struct mlx4_promisc_qp *pqp; 428 struct mlx4_promisc_qp *dqp; 429 u32 members_count; 430 u32 prot; 431 int i; 432 bool found; 433 int err; 434 struct mlx4_priv *priv = mlx4_priv(dev); 435 436 if (port < 1 || port > dev->caps.num_ports) 437 return -EINVAL; 438 439 s_steer = &mlx4_priv(dev)->steer[port - 1]; 440 441 mutex_lock(&priv->mcg_table.mutex); 442 443 if (get_promisc_qp(dev, port, steer, qpn)) { 444 err = 
0; /* Noting to do, already exists */ 445 goto out_mutex; 446 } 447 448 pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 449 if (!pqp) { 450 err = -ENOMEM; 451 goto out_mutex; 452 } 453 pqp->qpn = qpn; 454 455 mailbox = mlx4_alloc_cmd_mailbox(dev); 456 if (IS_ERR(mailbox)) { 457 err = -ENOMEM; 458 goto out_alloc; 459 } 460 mgm = mailbox->buf; 461 462 if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { 463 /* the promisc qp needs to be added for each one of the steering 464 * entries, if it already exists, needs to be added as a duplicate 465 * for this entry */ 466 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 467 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 468 if (err) 469 goto out_mailbox; 470 471 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 472 prot = be32_to_cpu(mgm->members_count) >> 30; 473 found = false; 474 for (i = 0; i < members_count; i++) { 475 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 476 /* Entry already exists, add to duplicates */ 477 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 478 if (!dqp) { 479 err = -ENOMEM; 480 goto out_mailbox; 481 } 482 dqp->qpn = qpn; 483 list_add_tail(&dqp->list, &entry->duplicates); 484 found = true; 485 } 486 } 487 if (!found) { 488 /* Need to add the qpn to mgm */ 489 if (members_count == dev->caps.num_qp_per_mgm) { 490 /* entry is full */ 491 err = -ENOMEM; 492 goto out_mailbox; 493 } 494 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); 495 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 496 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 497 if (err) 498 goto out_mailbox; 499 } 500 } 501 } 502 503 /* add the new qpn to list of promisc qps */ 504 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 505 /* now need to add all the promisc qps to default entry */ 506 memset(mgm, 0, sizeof *mgm); 507 members_count = 0; 508 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { 509 if (members_count == dev->caps.num_qp_per_mgm) { 510 /* 
entry is full */ 511 err = -ENOMEM; 512 goto out_list; 513 } 514 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 515 } 516 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 517 518 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 519 if (err) 520 goto out_list; 521 522 mlx4_free_cmd_mailbox(dev, mailbox); 523 mutex_unlock(&priv->mcg_table.mutex); 524 return 0; 525 526out_list: 527 list_del(&pqp->list); 528out_mailbox: 529 mlx4_free_cmd_mailbox(dev, mailbox); 530out_alloc: 531 kfree(pqp); 532out_mutex: 533 mutex_unlock(&priv->mcg_table.mutex); 534 return err; 535} 536 537static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, 538 enum mlx4_steer_type steer, u32 qpn) 539{ 540 struct mlx4_priv *priv = mlx4_priv(dev); 541 struct mlx4_steer *s_steer; 542 struct mlx4_cmd_mailbox *mailbox; 543 struct mlx4_mgm *mgm; 544 struct mlx4_steer_index *entry, *tmp_entry; 545 struct mlx4_promisc_qp *pqp; 546 struct mlx4_promisc_qp *dqp; 547 u32 members_count; 548 bool found; 549 bool back_to_list = false; 550 int i, loc = -1; 551 int err; 552 553 if (port < 1 || port > dev->caps.num_ports) 554 return -EINVAL; 555 556 s_steer = &mlx4_priv(dev)->steer[port - 1]; 557 mutex_lock(&priv->mcg_table.mutex); 558 559 pqp = get_promisc_qp(dev, port, steer, qpn); 560 if (unlikely(!pqp)) { 561 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 562 /* nothing to do */ 563 err = 0; 564 goto out_mutex; 565 } 566 567 /*remove from list of promisc qps */ 568 list_del(&pqp->list); 569 570 /* set the default entry not to include the removed one */ 571 mailbox = mlx4_alloc_cmd_mailbox(dev); 572 if (IS_ERR(mailbox)) { 573 err = -ENOMEM; 574 back_to_list = true; 575 goto out_list; 576 } 577 mgm = mailbox->buf; 578 memset(mgm, 0, sizeof *mgm); 579 members_count = 0; 580 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 581 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 582 mgm->members_count = cpu_to_be32(members_count | 
MLX4_PROT_ETH << 30); 583 584 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 585 if (err) 586 goto out_mailbox; 587 588 if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { 589 /* remove the qp from all the steering entries*/ 590 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 591 found = false; 592 list_for_each_entry(dqp, &entry->duplicates, list) { 593 if (dqp->qpn == qpn) { 594 found = true; 595 break; 596 } 597 } 598 if (found) { 599 /* a duplicate, no need to change the mgm, 600 * only update the duplicates list */ 601 list_del(&dqp->list); 602 kfree(dqp); 603 } else { 604 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 605 if (err) 606 goto out_mailbox; 607 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 608 if (!members_count) { 609 mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0." 610 " deleting entry...\n", qpn, entry->index); 611 list_del(&entry->list); 612 kfree(entry); 613 continue; 614 } 615 616 for (i = 0; i < members_count; ++i) 617 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 618 loc = i; 619 break; 620 } 621 622 if (loc < 0) { 623 mlx4_err(dev, "QP %06x wasn't found in entry %d\n", 624 qpn, entry->index); 625 err = -EINVAL; 626 goto out_mailbox; 627 } 628 629 /* copy the last QP in this MGM over removed QP */ 630 mgm->qp[loc] = mgm->qp[members_count - 1]; 631 mgm->qp[members_count - 1] = 0; 632 mgm->members_count = cpu_to_be32(--members_count | 633 (MLX4_PROT_ETH << 30)); 634 635 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 636 if (err) 637 goto out_mailbox; 638 } 639 } 640 } 641 642out_mailbox: 643 mlx4_free_cmd_mailbox(dev, mailbox); 644out_list: 645 if (back_to_list) 646 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 647 else 648 kfree(pqp); 649out_mutex: 650 mutex_unlock(&priv->mcg_table.mutex); 651 return err; 652} 653 654/* 655 * Caller must hold MCG table semaphore. gid and mgm parameters must 656 * be properly aligned for command interface. 
657 * 658 * Returns 0 unless a firmware command error occurs. 659 * 660 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 661 * and *mgm holds MGM entry. 662 * 663 * if GID is found in AMGM, *index = index in AMGM, *prev = index of 664 * previous entry in hash chain and *mgm holds AMGM entry. 665 * 666 * If no AMGM exists for given gid, *index = -1, *prev = index of last 667 * entry in hash chain and *mgm holds end of hash chain. 668 */ 669static int find_entry(struct mlx4_dev *dev, u8 port, 670 u8 *gid, enum mlx4_protocol prot, 671 struct mlx4_cmd_mailbox *mgm_mailbox, 672 int *prev, int *index) 673{ 674 struct mlx4_cmd_mailbox *mailbox; 675 struct mlx4_mgm *mgm = mgm_mailbox->buf; 676 u8 *mgid; 677 int err; 678 u16 hash; 679 u8 op_mod = (prot == MLX4_PROT_ETH) ? 680 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; 681 682 mailbox = mlx4_alloc_cmd_mailbox(dev); 683 if (IS_ERR(mailbox)) 684 return -ENOMEM; 685 mgid = mailbox->buf; 686 687 memcpy(mgid, gid, 16); 688 689 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); 690 mlx4_free_cmd_mailbox(dev, mailbox); 691 if (err) 692 return err; 693 694 if (0) { 695 mlx4_dbg(dev, "Hash for "GID_PRINT_FMT" is %04x\n", 696 GID_PRINT_ARGS(gid), hash); 697 } 698 699 *index = hash; 700 *prev = -1; 701 702 do { 703 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 704 if (err) 705 return err; 706 707 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 708 if (*index != hash) { 709 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 710 err = -EINVAL; 711 } 712 return err; 713 } 714 715 if (!memcmp(mgm->gid, gid, 16) && 716 be32_to_cpu(mgm->members_count) >> 30 == prot) 717 return err; 718 719 *prev = *index; 720 *index = be32_to_cpu(mgm->next_gid_index) >> 6; 721 } while (*index); 722 723 *index = -1; 724 return err; 725} 726 727static const u8 __promisc_mode[] = { 728 [MLX4_FS_REGULAR] = 0x0, 729 [MLX4_FS_ALL_DEFAULT] = 0x1, 730 [MLX4_FS_MC_DEFAULT] = 0x3, 731 [MLX4_FS_UC_SNIFFER] = 0x4, 732 
[MLX4_FS_MC_SNIFFER] = 0x5, 733}; 734 735int map_sw_to_hw_steering_mode(struct mlx4_dev *dev, 736 enum mlx4_net_trans_promisc_mode flow_type) 737{ 738 if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { 739 mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); 740 return -EINVAL; 741 } 742 return __promisc_mode[flow_type]; 743} 744EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_mode); 745 746static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, 747 struct mlx4_net_trans_rule_hw_ctrl *hw) 748{ 749 u8 flags = 0; 750 751 flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; 752 flags |= ctrl->exclusive ? (1 << 2) : 0; 753 flags |= ctrl->allow_loopback ? (1 << 3) : 0; 754 755 hw->flags = flags; 756 hw->type = __promisc_mode[ctrl->promisc_mode]; 757 hw->prio = cpu_to_be16(ctrl->priority); 758 hw->port = ctrl->port; 759 hw->qpn = cpu_to_be32(ctrl->qpn); 760} 761 762const u16 __sw_id_hw[] = { 763 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, 764 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, 765 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, 766 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, 767 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, 768 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 769}; 770 771int map_sw_to_hw_steering_id(struct mlx4_dev *dev, 772 enum mlx4_net_trans_rule_id id) 773{ 774 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 775 mlx4_err(dev, "Invalid network rule id. 
id = %d\n", id); 776 return -EINVAL; 777 } 778 return __sw_id_hw[id]; 779} 780EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_id); 781 782static const int __rule_hw_sz[] = { 783 [MLX4_NET_TRANS_RULE_ID_ETH] = 784 sizeof(struct mlx4_net_trans_rule_hw_eth), 785 [MLX4_NET_TRANS_RULE_ID_IB] = 786 sizeof(struct mlx4_net_trans_rule_hw_ib), 787 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, 788 [MLX4_NET_TRANS_RULE_ID_IPV4] = 789 sizeof(struct mlx4_net_trans_rule_hw_ipv4), 790 [MLX4_NET_TRANS_RULE_ID_TCP] = 791 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 792 [MLX4_NET_TRANS_RULE_ID_UDP] = 793 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) 794}; 795 796int hw_rule_sz(struct mlx4_dev *dev, 797 enum mlx4_net_trans_rule_id id) 798{ 799 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 800 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 801 return -EINVAL; 802 } 803 804 return __rule_hw_sz[id]; 805} 806EXPORT_SYMBOL_GPL(hw_rule_sz); 807 808static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, 809 struct _rule_hw *rule_hw) 810{ 811 if (hw_rule_sz(dev, spec->id) < 0) 812 return -EINVAL; 813 memset(rule_hw, 0, hw_rule_sz(dev, spec->id)); 814 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); 815 rule_hw->size = hw_rule_sz(dev, spec->id) >> 2; 816 817 switch (spec->id) { 818 case MLX4_NET_TRANS_RULE_ID_ETH: 819 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); 820 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, 821 ETH_ALEN); 822 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); 823 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, 824 ETH_ALEN); 825 if (spec->eth.ether_type_enable) { 826 rule_hw->eth.ether_type_enable = 1; 827 rule_hw->eth.ether_type = spec->eth.ether_type; 828 } 829 rule_hw->eth.vlan_tag = spec->eth.vlan_id; 830 rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; 831 break; 832 833 case MLX4_NET_TRANS_RULE_ID_IB: 834 rule_hw->ib.l3_qpn = spec->ib.l3_qpn; 835 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; 836 
memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); 837 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); 838 break; 839 840 case MLX4_NET_TRANS_RULE_ID_IPV6: 841 return -EOPNOTSUPP; 842 843 case MLX4_NET_TRANS_RULE_ID_IPV4: 844 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; 845 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; 846 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; 847 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; 848 break; 849 850 case MLX4_NET_TRANS_RULE_ID_TCP: 851 case MLX4_NET_TRANS_RULE_ID_UDP: 852 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; 853 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; 854 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; 855 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; 856 break; 857 858 default: 859 return -EINVAL; 860 } 861 862 return __rule_hw_sz[spec->id]; 863} 864 865static void mlx4_err_rule(struct mlx4_dev *dev, char *str, 866 struct mlx4_net_trans_rule *rule) 867{ 868#define BUF_SIZE 256 869 struct mlx4_spec_list *cur; 870 char buf[BUF_SIZE]; 871 int len = 0; 872 873 mlx4_err(dev, "%s", str); 874 len += snprintf(buf + len, BUF_SIZE - len, 875 "port = %d prio = 0x%x qp = 0x%x ", 876 rule->port, rule->priority, rule->qpn); 877 878 list_for_each_entry(cur, &rule->list, list) { 879 switch (cur->id) { 880 case MLX4_NET_TRANS_RULE_ID_ETH: 881 len += snprintf(buf + len, BUF_SIZE - len, 882 "dmac = %pM ", &cur->eth.dst_mac); 883 if (cur->eth.ether_type) 884 len += snprintf(buf + len, BUF_SIZE - len, 885 "ethertype = 0x%x ", 886 be16_to_cpu(cur->eth.ether_type)); 887 if (cur->eth.vlan_id) 888 len += snprintf(buf + len, BUF_SIZE - len, 889 "vlan-id = %d ", 890 be16_to_cpu(cur->eth.vlan_id)); 891 break; 892 893 case MLX4_NET_TRANS_RULE_ID_IPV4: 894 if (cur->ipv4.src_ip) 895 len += snprintf(buf + len, BUF_SIZE - len, 896 "src-ip = %pI4 ", 897 &cur->ipv4.src_ip); 898 if (cur->ipv4.dst_ip) 899 len += snprintf(buf + len, BUF_SIZE - len, 900 "dst-ip = %pI4 ", 901 &cur->ipv4.dst_ip); 902 
break; 903 904 case MLX4_NET_TRANS_RULE_ID_TCP: 905 case MLX4_NET_TRANS_RULE_ID_UDP: 906 if (cur->tcp_udp.src_port) 907 len += snprintf(buf + len, BUF_SIZE - len, 908 "src-port = %d ", 909 be16_to_cpu(cur->tcp_udp.src_port)); 910 if (cur->tcp_udp.dst_port) 911 len += snprintf(buf + len, BUF_SIZE - len, 912 "dst-port = %d ", 913 be16_to_cpu(cur->tcp_udp.dst_port)); 914 break; 915 916 case MLX4_NET_TRANS_RULE_ID_IB: 917 len += snprintf(buf + len, BUF_SIZE - len, 918 "dst-gid = "GID_PRINT_FMT"\n", 919 GID_PRINT_ARGS(cur->ib.dst_gid)); 920 len += snprintf(buf + len, BUF_SIZE - len, 921 "dst-gid-mask = "GID_PRINT_FMT"\n", 922 GID_PRINT_ARGS(cur->ib.dst_gid_msk)); 923 break; 924 925 case MLX4_NET_TRANS_RULE_ID_IPV6: 926 break; 927 928 default: 929 break; 930 } 931 } 932 len += snprintf(buf + len, BUF_SIZE - len, "\n"); 933 mlx4_err(dev, "%s", buf); 934 935 if (len >= BUF_SIZE) 936 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 937} 938 939int mlx4_flow_attach(struct mlx4_dev *dev, 940 struct mlx4_net_trans_rule *rule, u64 *reg_id) 941{ 942 struct mlx4_cmd_mailbox *mailbox; 943 struct mlx4_spec_list *cur; 944 u32 size = 0; 945 int ret; 946 947 mailbox = mlx4_alloc_cmd_mailbox(dev); 948 if (IS_ERR(mailbox)) 949 return PTR_ERR(mailbox); 950 951 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); 952 trans_rule_ctrl_to_hw(rule, mailbox->buf); 953 954 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 955 956 list_for_each_entry(cur, &rule->list, list) { 957 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 958 if (ret < 0) { 959 mlx4_free_cmd_mailbox(dev, mailbox); 960 return -EINVAL; 961 } 962 size += ret; 963 } 964 965 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 966 if (ret == -ENOMEM) 967 mlx4_err_rule(dev, 968 "mcg table is full. 
Fail to register network rule.\n", 969 rule); 970 else if (ret) 971 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 972 973 mlx4_free_cmd_mailbox(dev, mailbox); 974 975 return ret; 976} 977EXPORT_SYMBOL_GPL(mlx4_flow_attach); 978 979int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) 980{ 981 int err; 982 983 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); 984 if (err) 985 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", 986 (unsigned long long)reg_id); 987 return err; 988} 989EXPORT_SYMBOL_GPL(mlx4_flow_detach); 990 991int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn) 992{ 993 int err; 994 u64 in_param; 995 996 in_param = ((u64) min_range_qpn) << 32; 997 in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; 998 999 err = mlx4_cmd(dev, in_param, 0, 0, 1000 MLX4_FLOW_STEERING_IB_UC_QP_RANGE, 1001 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1002 1003 return err; 1004} 1005EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); 1006 1007int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1008 int block_mcast_loopback, enum mlx4_protocol prot, 1009 enum mlx4_steer_type steer) 1010{ 1011 struct mlx4_priv *priv = mlx4_priv(dev); 1012 struct mlx4_cmd_mailbox *mailbox; 1013 struct mlx4_mgm *mgm; 1014 u32 members_count; 1015 int index, prev; 1016 int link = 0; 1017 int i; 1018 int err; 1019 u8 port = gid[5]; 1020 u8 new_entry = 0; 1021 1022 mailbox = mlx4_alloc_cmd_mailbox(dev); 1023 if (IS_ERR(mailbox)) 1024 return PTR_ERR(mailbox); 1025 mgm = mailbox->buf; 1026 1027 mutex_lock(&priv->mcg_table.mutex); 1028 err = find_entry(dev, port, gid, prot, 1029 mailbox, &prev, &index); 1030 if (err) 1031 goto out; 1032 1033 if (index != -1) { 1034 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 1035 new_entry = 1; 1036 memcpy(mgm->gid, gid, 16); 1037 } 1038 } else { 1039 link = 1; 1040 1041 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); 1042 if (index == -1) { 1043 
mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		/* AMGM (hash-overflow) entries sit right after the
		 * num_mgms hash-chain heads; bias the freshly allocated
		 * bitmap index accordingly (mirrors the
		 * "index - dev->caps.num_mgms" in bitmap_free below). */
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	/* low 24 bits of members_count hold the member count; the top
	 * bits carry the protocol (see "prot << 30" below) */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	/* attaching a QP that is already a member is a silent success */
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
					       (!!mlx4_blck_lb << MGM_BLCK_LB_BIT));

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	/* if !link, still add the new entry. */
	if (!link)
		goto skip_link;

	/* link the new AMGM entry into the hash chain after <prev> */
	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	/* next_gid_index stores the successor index shifted left by 6
	 * (the same <<6 / >>6 encoding used throughout this file) */
	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

skip_link:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}

out:
	/* on failure, return a freshly allocated AMGM slot to the bitmap;
	 * an index below num_mgms would be a chain head, not an AMGM slot */
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * mlx4_qp_detach_common() - detach a QP from the multicast group <gid>.
 * @dev:   mlx4 device
 * @qp:    QP being detached
 * @gid:   group GID; byte 5 carries the port number
 * @prot:  protocol tag kept in the top bits of members_count
 * @steer: steering table (UC/MC) used for promisc-mode bookkeeping
 *
 * Removes the QP from the MGM member list (the last member is copied
 * over the removed slot).  If the member list becomes empty, the entry
 * itself is unlinked from the GID hash chain and, when it was an AMGM
 * (overflow) entry, its index is returned to the bitmap.
 *
 * Returns 0 on success or a negative errno (-EINVAL when the GID or the
 * QP is not found).  Serialized by priv->mcg_table.mutex.
 */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc = -1;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	/* locate the entry and its predecessor in the hash chain */
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID "GID_PRINT_FMT" not found\n",
			 GID_PRINT_ARGS(gid));
		err = -EINVAL;
		goto out;
	}

	/*
	 * If this QP is also a promisc QP, it should not be removed as
	 * long as at least one non-promisc QP is still attached to this
	 * MCG (check_duplicate_entry / promisc_steering_entry decide).
	 */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
	    !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			loc = i;
			break;
		}

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last QP in this MGM over removed QP */
	mgm->qp[loc] = mgm->qp[members_count - 1];
	mgm->qp[members_count - 1] = 0;
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	/* members remain (or the steering entry must stay): just rewrite */
	if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Removing the chain head: pull the first AMGM entry (if
		 * any) into the MGM slot, otherwise clear the GID. */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
		}
	} else {
		/* Removing an AMGM entry: bypass it by pointing <prev>
		 * at our successor, then free our slot. */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * mlx4_QP_ATTACH() - issue the wrapped QP_ATTACH firmware command, used
 * when running multi-function (mlx4_is_mfunc).  The GID goes in the
 * mailbox; prot is folded into the upper bits of the qpn dword (<<28)
 * and bit 31 requests loopback blocking on attach.  <attach> selects
 * attach (1) vs. detach (0) via the command's op_modifier.
 */
static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn,
attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * mlx4_trans_to_dmfs_attach() - translate an attach request into a
 * device-managed flow-steering (DMFS) rule and register it.
 * @dev:      mlx4 device
 * @qp:       destination QP for the rule
 * @gid:      group GID; for Ethernet, bytes 10..15 carry the MAC
 * @port:     physical port the rule applies to
 * @block_mcast_loopback: when set, disallow loopback delivery
 * @prot:     MLX4_PROT_ETH or MLX4_PROT_IB_IPV6 (others -> -EINVAL)
 * @reg_id:   out: registration id for later mlx4_flow_detach()
 *
 * Returns the result of mlx4_flow_attach(), or -EINVAL for an
 * unsupported protocol.
 */
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	/* MAC match mask: 48 MAC bits left-aligned in a big-endian u64 */
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	rule.allow_loopback = !block_mcast_loopback;
	rule.port = port;
	rule.qpn = qp->qpn;
	INIT_LIST_HEAD(&rule.list);

	switch (prot) {
	case MLX4_PROT_ETH:
		/* match on the destination MAC embedded in the GID */
		spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
		memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		break;

	case MLX4_PROT_IB_IPV6:
		/* match on the full 128-bit destination GID */
		spec.id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec.ib.dst_gid, gid, 16);
		memset(&spec.ib.dst_gid_msk, 0xff, 16);
		break;
	default:
		return -EINVAL;
	}
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}

/*
 * mlx4_multicast_attach() - attach a QP to a multicast group, dispatching
 * on the device's steering mode (A0 / B0 / device-managed).  For B0,
 * Ethernet GIDs get the steer type encoded into gid[7] before the
 * firmware command.  reg_id is only produced in device-managed mode.
 */
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	enum mlx4_steer_type steer;
	/* a valid unicast MAC in gid[10..15] selects the UC steer table */
	steer = (is_valid_ether_addr(&gid[10])) ?
MLX4_UC_STEER : MLX4_MC_STEER; 1311 1312 switch (dev->caps.steering_mode) { 1313 case MLX4_STEERING_MODE_A0: 1314 if (prot == MLX4_PROT_ETH) 1315 return 0; 1316 1317 case MLX4_STEERING_MODE_B0: 1318 if (prot == MLX4_PROT_ETH) 1319 gid[7] |= (steer << 1); 1320 1321 if (mlx4_is_mfunc(dev)) 1322 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1323 block_mcast_loopback, prot); 1324 return mlx4_qp_attach_common(dev, qp, gid, 1325 block_mcast_loopback, prot, 1326 MLX4_MC_STEER); 1327 1328 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1329 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 1330 block_mcast_loopback, 1331 prot, reg_id); 1332 default: 1333 return -EINVAL; 1334 } 1335} 1336EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1337 1338int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1339 enum mlx4_protocol prot, u64 reg_id) 1340{ 1341 enum mlx4_steer_type steer; 1342 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; 1343 1344 switch (dev->caps.steering_mode) { 1345 case MLX4_STEERING_MODE_A0: 1346 if (prot == MLX4_PROT_ETH) 1347 return 0; 1348 1349 case MLX4_STEERING_MODE_B0: 1350 if (prot == MLX4_PROT_ETH) 1351 gid[7] |= (steer << 1); 1352 1353 if (mlx4_is_mfunc(dev)) 1354 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1355 1356 return mlx4_qp_detach_common(dev, qp, gid, prot, 1357 MLX4_MC_STEER); 1358 1359 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1360 return mlx4_flow_detach(dev, reg_id); 1361 1362 default: 1363 return -EINVAL; 1364 } 1365} 1366EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1367 1368int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1369 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1370{ 1371 struct mlx4_net_trans_rule rule; 1372 u64 *regid_p; 1373 1374 switch (mode) { 1375 case MLX4_FS_ALL_DEFAULT: 1376 regid_p = &dev->regid_promisc_array[port]; 1377 break; 1378 case MLX4_FS_MC_DEFAULT: 1379 regid_p = &dev->regid_allmulti_array[port]; 1380 break; 1381 default: 1382 return -1; 1383 } 1384 1385 if 
(*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_err(dev, "going promisc on %x\n", port);

	/* on success mlx4_flow_attach stores the reg id in *regid_p */
	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);

/*
 * mlx4_flow_steer_promisc_remove() - tear down the promisc/allmulti
 * DMFS rule previously installed by mlx4_flow_steer_promisc_add().
 * Returns -1 if no such rule is registered for <port>, otherwise the
 * result of mlx4_flow_detach(); the cached reg id is cleared only on
 * successful detach.
 */
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);

/*
 * mlx4_unicast_attach() - attach a QP for unicast steering.  For
 * Ethernet the UC steer type is encoded into gid[7]; multi-function
 * devices go through the wrapped QP_ATTACH command.
 */
int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

/*
 * mlx4_unicast_detach() - inverse of mlx4_unicast_attach().
 */
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

/*
 * mlx4_PROMISC_wrapper() - PF-side handler for the wrapped PROMISC
 * command issued by slaves (see mlx4_PROMISC below for the encoding).
 * inbox/outbox are unused.
 */
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;	/* QPN in the low dword */
	u8 port = vhcr->in_param >> 62;			/* port in bits 63:62 */
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc for VFs */
	if ((slave != dev->caps.function) && (steer == MLX4_UC_STEER))
		return 0;

	/* op_modifier selects add (non-zero) vs. remove */
	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

/*
 * mlx4_PROMISC() - issue the wrapped PROMISC command: QPN in the low
 * bits and port in bits 63:62 of in_param, steer type as in_modifier,
 * add/remove as op_modifier — mirroring mlx4_PROMISC_wrapper() above.
 */
static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

/*
 * Multicast-promiscuous add: wrapped command for multi-function devices,
 * direct steering-table update otherwise.
 */
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

/* Multicast-promiscuous remove: inverse of mlx4_multicast_promisc_add. */
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

/* Unicast-promiscuous add (PF only in mfunc; see mlx4_PROMISC_wrapper). */
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

/* Unicast-promiscuous remove: inverse of mlx4_unicast_promisc_add. */
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

/*
 * mlx4_init_mcg_table() - set up the AMGM bitmap and mutex protecting
 * the MGM hash table.  A no-op in device-managed (DMFS) mode, where the
 * firmware owns the MCG table.
 */
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when fw managed the mcg table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;
	/* bitmap tracks the num_amgms overflow entries; mask is
	 * num_amgms - 1 (presumably a power of two — TODO confirm) */
	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

/*
 * mlx4_cleanup_mcg_table() - release the AMGM bitmap allocated by
 * mlx4_init_mcg_table(); nothing to do in device-managed mode.
 */
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}