mlx5_main.c revision 359540
/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c 359540 2020-04-01 22:42:42Z kib $
 */

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <dev/mlx5/mpfs.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_lib/mlx5.h>
#include "mlx5_core.h"
#include "fs_core.h"
#ifdef PCI_IOV
#include <sys/nv.h>
#include <dev/pci/pci_iov.h>
#include <sys/iov_schema.h>
#endif

static const char mlx5_version[] = "Mellanox Core driver "
	DRIVER_VERSION " (" DRIVER_RELDATE ")";
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
MODULE_VERSION(mlx5, 1);

SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 hardware controls");

int mlx5_core_debug_mask;
SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
    &mlx5_core_debug_mask, 0,
    "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
    &mlx5_prof_sel, 0,
    "profile selector. Valid range 0 - 2");

static int mlx5_fast_unload_enabled = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
    &mlx5_fast_unload_enabled, 0,
    "Set to enable fast unload. Clear to disable.");
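
/*
 * Illustrative note (not part of the original source): the knobs above are
 * declared CTLFLAG_RWTUN (and hw.mlx5.auto_fw_update further below is
 * fetched as a tunable), so they can typically be preset from loader.conf
 * before the module attaches, e.g.:
 *
 *	hw.mlx5.prof_sel=2
 *	hw.mlx5.fast_unload_enabled=1
 *	hw.mlx5.debug_mask=1
 */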

#define NUMA_NO_NODE	-1

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface	*intf;
	void			*context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profiles[] = {
	[0] = {
		.mask = 0,
	},
	[1] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp = 12,
	},
	[2] = {
		.mask = MLX5_PROF_MASK_QP_SIZE |
			MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp = 17,
		.mr_cache[0] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[1] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[2] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[3] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[4] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[5] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[6] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[7] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[8] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[9] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[10] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[11] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[12] = {
			.size = 64,
			.limit = 32
		},
		.mr_cache[13] = {
			.size = 32,
			.limit = 16
		},
		.mr_cache[14] = {
			.size = 16,
			.limit = 8
		},
	},
	[3] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp = 17,
	},
};

static int set_dma_caps(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
    u16 *p_power, u8 *p_status)
{
	u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};
	int err;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
	    MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);

	*p_status = MLX5_GET(mpein_reg, out, pwr_status);
	*p_power = MLX5_GET(mpein_reg, out, pci_power);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		mlx5_core_err(dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int limit = dev->msix_eqvec;
	int nvec = MLX5_EQ_VEC_COMP_BASE;
	int i;

	if (limit > 0)
		nvec += limit;
	else
		nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

	if (nvec > num_eqs)
		nvec = num_eqs;
	if (nvec > 256)
		nvec = 256;	/* limit of firmware API */
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
	    MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};


#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
    enum mlx5_cap_type cap_type, enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
		    "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
		    cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
		    "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
		    cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	    MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
	    mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), 128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
	    to_fw_pkey_sz(dev, 128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
		    prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
	    MLX5_CAP_ATOMIC(dev, supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
	    MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
	    MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);

	kfree(set_ctx);
	return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
	    &he_out, sizeof(he_out),
	    MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out,
	    sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");
			return 0;
		}

		mlx5_core_err(dev, "failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out,
		    sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}


int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
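
/*
 * Usage sketch (illustrative, not from this file): a consumer that owns a
 * completion queue typically maps its chosen completion vector to an EQ
 * number before creating the CQ, roughly:
 *
 *	int eqn, irqn;
 *
 *	if (mlx5_vector2eqn(mdev, vector, &eqn, &irqn) == 0) {
 *		// bind the CQ to that EQ, e.g. via the cqc "c_eqn" field,
 *		// then create the CQ through the command interface
 *	}
 */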

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
			    eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);

		err = mlx5_create_map_eq(dev, eq,
		    i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
		    &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
    u32 warn_time_mili)
{
	int warn = jiffies + msecs_to_jiffies(warn_time_mili);
	int end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	MPASS(max_wait_mili > warn_time_mili);

	while (fw_initializing(dev) == 1) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev,
			    "Waiting for FW initialization, timeout abort in %u s\n",
			    (unsigned int)(jiffies_to_msecs(end - warn) / 1000));
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}

	if (err != 0)
		mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n",
		    ioread32be(&dev->iseg->initializing));

	return err;
}

static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	CURVNET_SET_QUIET(vnet0);
	dev_ctx->context = intf->add(dev);
	CURVNET_RESTORE();

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

int
mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
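
/*
 * Illustrative note (not from this file): protocol drivers (e.g. mlx5en,
 * mlx5ib) hook into the core through struct mlx5_interface.  A minimal
 * consumer sketch, assuming hypothetical callbacks my_add()/my_remove():
 *
 *	static struct mlx5_interface my_intf = {
 *		.add    = my_add,	// returns a per-device context
 *		.remove = my_remove,
 *	};
 *
 *	mlx5_register_interface(&my_intf);
 *	...
 *	mlx5_unregister_interface(&my_intf);
 */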

void
mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_auto_fw_update;
SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &mlx5_auto_fw_update, 0,
    "Allow automatic firmware update on driver start");
static int
mlx5_firmware_update(struct mlx5_core_dev *dev)
{
	const struct firmware *fw;
	int err;

	TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
	if (!mlx5_auto_fw_update)
		return (0);
	fw = firmware_get("mlx5fw_mfa");
	if (fw) {
		err = mlx5_firmware_flash(dev, fw);
		firmware_put(fw, FIRMWARE_UNLOAD);
	} else
		return (-ENOENT);

	return err;
}

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	device_t bsddev;
	int err;

	pdev = dev->pdev;
	bsddev = pdev->dev.bsddev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
err_dbg:
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, eswitch_flow_table))
		pci_iov_detach(dev->pdev->dev.bsddev);
#endif
	iounmap(dev->iseg);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	int err;

	err = mlx5_vsc_find_cap(dev);
	if (err)
		mlx5_core_err(dev, "Unable to find vendor specific capabilities\n");

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto out;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	mlx5_init_reserved_gids(dev);
	mlx5_fpga_init(dev);

	return 0;

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_fpga_cleanup(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}

static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
    bool boot)
{
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}

	mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load removing any previous indication of internal error,
	 * device is up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI,
	    FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		dev_err(&dev->pdev->dev,
		    "Firmware over %d MS in pre-initializing state, aborting\n",
		    FW_PRE_INIT_TIMEOUT_MILI);
		goto out_err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev,
		    "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev,
		    "Firmware over %d MS in initializing state, aborting\n",
		    FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
"set_hca_ctrl failed\n"); 1059 goto reclaim_boot_pages; 1060 } 1061 1062 err = handle_hca_cap(dev); 1063 if (err) { 1064 mlx5_core_err(dev, "handle_hca_cap failed\n"); 1065 goto reclaim_boot_pages; 1066 } 1067 1068 err = handle_hca_cap_atomic(dev); 1069 if (err) { 1070 mlx5_core_err(dev, "handle_hca_cap_atomic failed\n"); 1071 goto reclaim_boot_pages; 1072 } 1073 1074 err = mlx5_satisfy_startup_pages(dev, 0); 1075 if (err) { 1076 mlx5_core_err(dev, "failed to allocate init pages\n"); 1077 goto reclaim_boot_pages; 1078 } 1079 1080 err = mlx5_cmd_init_hca(dev); 1081 if (err) { 1082 mlx5_core_err(dev, "init hca failed\n"); 1083 goto reclaim_boot_pages; 1084 } 1085 1086 mlx5_start_health_poll(dev); 1087 1088 if (boot && mlx5_init_once(dev, priv)) { 1089 mlx5_core_err(dev, "sw objs init failed\n"); 1090 goto err_stop_poll; 1091 } 1092 1093 err = mlx5_enable_msix(dev); 1094 if (err) { 1095 mlx5_core_err(dev, "enable msix failed\n"); 1096 goto err_cleanup_once; 1097 } 1098 1099 err = mlx5_alloc_uuars(dev, &priv->uuari); 1100 if (err) { 1101 mlx5_core_err(dev, "Failed allocating uar, aborting\n"); 1102 goto err_disable_msix; 1103 } 1104 1105 err = mlx5_start_eqs(dev); 1106 if (err) { 1107 mlx5_core_err(dev, "Failed to start pages and async EQs\n"); 1108 goto err_free_uar; 1109 } 1110 1111 err = alloc_comp_eqs(dev); 1112 if (err) { 1113 mlx5_core_err(dev, "Failed to alloc completion EQs\n"); 1114 goto err_stop_eqs; 1115 } 1116 1117 if (map_bf_area(dev)) 1118 mlx5_core_err(dev, "Failed to map blue flame area\n"); 1119 1120 err = mlx5_init_fs(dev); 1121 if (err) { 1122 mlx5_core_err(dev, "flow steering init %d\n", err); 1123 goto err_free_comp_eqs; 1124 } 1125 1126 err = mlx5_mpfs_init(dev); 1127 if (err) { 1128 mlx5_core_err(dev, "mpfs init failed %d\n", err); 1129 goto err_fs; 1130 } 1131 1132 err = mlx5_fpga_device_start(dev); 1133 if (err) { 1134 mlx5_core_err(dev, "fpga device start failed %d\n", err); 1135 goto err_mpfs; 1136 } 1137 1138 err = mlx5_register_device(dev); 1139 if (err) { 1140 mlx5_core_err(dev, "mlx5_register_device failed %d\n", err); 1141 goto err_fpga; 1142 } 1143 1144 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1145 1146out: 1147 mutex_unlock(&dev->intf_state_mutex); 1148 return 0; 1149 1150err_fpga: 1151 mlx5_fpga_device_stop(dev); 1152 1153err_mpfs: 1154 mlx5_mpfs_destroy(dev); 1155 1156err_fs: 1157 mlx5_cleanup_fs(dev); 1158 1159err_free_comp_eqs: 1160 free_comp_eqs(dev); 1161 unmap_bf_area(dev); 1162 1163err_stop_eqs: 1164 mlx5_stop_eqs(dev); 1165 1166err_free_uar: 1167 mlx5_free_uuars(dev, &priv->uuari); 1168 1169err_disable_msix: 1170 mlx5_disable_msix(dev); 1171 1172err_cleanup_once: 1173 if (boot) 1174 mlx5_cleanup_once(dev); 1175 1176err_stop_poll: 1177 mlx5_stop_health_poll(dev, boot); 1178 if (mlx5_cmd_teardown_hca(dev)) { 1179 mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); 1180 goto out_err; 1181 } 1182 1183reclaim_boot_pages: 1184 mlx5_reclaim_startup_pages(dev); 1185 1186err_pagealloc_stop: 1187 mlx5_pagealloc_stop(dev); 1188 1189err_disable_hca: 1190 mlx5_core_disable_hca(dev); 1191 1192err_cmd_cleanup: 1193 mlx5_cmd_cleanup(dev); 1194 1195out_err: 1196 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1197 mutex_unlock(&dev->intf_state_mutex); 1198 1199 return err; 1200} 1201 1202static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, 1203 bool cleanup) 1204{ 1205 int err = 0; 1206 1207 if (cleanup) 1208 mlx5_drain_health_recovery(dev); 1209 1210 mutex_lock(&dev->intf_state_mutex); 1211 if 
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	mlx5_unregister_device(dev);

	mlx5_fpga_device_stop(dev);
	mlx5_mpfs_destroy(dev);
	mlx5_cleanup_fs(dev);
	unmap_bf_area(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, cleanup);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
    unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
	    enum mlx5_dev_event event,
	    void *data);
};

#define MLX5_STATS_DESC(a, b, c, d, e, ...) d, e,

#define MLX5_PORT_MODULE_ERROR_STATS(m)				\
m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \
m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \
m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck(I2C or data shorted)") \
m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \
m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \
m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \
m(+1, u64, high_temp, "high_temp", "Module High Temperature") \
m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted")

static const char *mlx5_pme_err_desc[] = {
	MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC)
};

static int init_one(struct pci_dev *pdev,
    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	device_t bsddev = pdev->dev.bsddev;
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
#endif
	int i, err;
	struct sysctl_oid *pme_sysctl_node;
	struct sysctl_oid *pme_err_sysctl_node;
	struct sysctl_oid *cap_sysctl_node;
	struct sysctl_oid *current_cap_sysctl_node;
	struct sysctl_oid *max_cap_sysctl_node;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	priv = &dev->priv;
	if (id)
		priv->pci_dev_data = id->driver_data;

	if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
		device_printf(bsddev,
		    "WARN: selected profile out of range, selecting default (%d)\n",
		    MLX5_DEFAULT_PROF);
		mlx5_prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[mlx5_prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	/* Set desc */
	device_set_desc(bsddev, mlx5_version);

	sysctl_ctx_init(&dev->sysctl_ctx);
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
	    "Maximum number of MSIX event queue vectors, if set");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
	    "0:Invalid 1:Sufficient 2:Insufficient");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
	    "Current power value in Watts");

	pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "pme_stats", CTLFLAG_RD, NULL,
	    "Port module event statistics");
	if (pme_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node),
	    OID_AUTO, "errors", CTLFLAG_RD, NULL,
	    "Port module event error statistics");
	if (pme_err_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED],
	    0, "Number of times module plugged");
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED],
	    0, "Number of times module unplugged");
	for (i = 0; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) {
		SYSCTL_ADD_U64(&dev->sysctl_ctx,
		    SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO,
		    mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE,
		    &dev->priv.pme_stats.error_counters[i],
		    0, mlx5_pme_err_desc[2 * i + 1]);
	}

	cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "hardware capabilities raw bitstrings");
	if (cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (current_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (max_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ATOMIC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ATOMIC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ROCE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ROCE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ESWITCH],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_SNAPSHOT],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", "");

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
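
	/*
	 * Illustrative note (not from this file): the nodes registered above
	 * hang off the device's sysctl tree, so the raw capability
	 * bitstrings and module-event counters can usually be inspected
	 * with something like:
	 *
	 *	sysctl -x dev.mlx5_core.0.caps.current.general
	 *	sysctl dev.mlx5_core.0.pme_stats
	 */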
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
		goto clean_health;
	}

	mlx5_fwdump_prep(dev);

	mlx5_firmware_update(dev);

#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		err = pci_iov_attach(bsddev, pf_schema, vf_schema);
		if (err != 0) {
			device_printf(bsddev,
			    "Failed to initialize SR-IOV support, error %d\n",
			    err);
		}
	}
#endif

	pci_save_state(bsddev);
	return 0;

clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	mtx_destroy(&dev->dump_lock);
clean_sysctl_ctx:
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv, true)) {
		mlx5_core_err(dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_fwdump_clean(dev);
	mlx5_pci_close(dev, priv);
	mtx_destroy(&dev->dump_lock);
	pci_set_drvdata(pdev, NULL);
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
    pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	mlx5_core_info(dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);

	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
	    PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev,
		    "mlx5_pci_enable_device failed with error code: %d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
	pci_restore_state(pdev->dev.bsddev);
	pci_save_state(pdev->dev.bsddev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
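
/*
 * Illustrative note (not from this file): on a reported PCI error the
 * handlers below are expected to run roughly as
 *   error_detected() -> slot_reset() -> resume(),
 * i.e. unload the device, re-enable the PCI function, then wait for vital
 * signs and reload.  mlx5_recover_device() drives the same sequence by hand.
 */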

/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non zero value which is different than 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			mlx5_core_warn(dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			mlx5_core_info(dev,
			    "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		mlx5_core_warn(dev, "could not read device ID\n");

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			mlx5_core_info(dev,
			    "Counter value 0x%x after %d iterations\n", count, i);
			break;
		}
		msleep(50);
	}

	if (i == niter)
		mlx5_core_warn(dev, "could not read health counter\n");
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	wait_vital(pdev);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		mlx5_core_err(dev,
		    "mlx5_load_one failed with error code: %d\n", err);
	else
		mlx5_core_info(dev, "device recovered\n");
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};

#ifdef PCI_IOV
static int
mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	return (0);
}

static void
mlx5_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;
}

static int
mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;
	int error;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	error = -mlx5_core_enable_hca(core_dev, vfnum + 1);
	if (error != 0) {
		mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n",
		    vfnum, error);
	}
	return (error);
}
#endif

static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown, force_teardown;
	int err;

	if (!mlx5_fast_unload_enabled) {
		mlx5_core_dbg(dev, "fast unload is disabled by user\n");
		return -EOPNOTSUPP;
	}

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
support=%d\n", fast_teardown); 1799 1800 if (!fast_teardown && !force_teardown) 1801 return -EOPNOTSUPP; 1802 1803 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1804 mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); 1805 return -EAGAIN; 1806 } 1807 1808 /* Panic tear down fw command will stop the PCI bus communication 1809 * with the HCA, so the health polll is no longer needed. 1810 */ 1811 mlx5_drain_health_wq(dev); 1812 mlx5_stop_health_poll(dev, false); 1813 1814 err = mlx5_cmd_fast_teardown_hca(dev); 1815 if (!err) 1816 goto done; 1817 1818 err = mlx5_cmd_force_teardown_hca(dev); 1819 if (!err) 1820 goto done; 1821 1822 mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err); 1823 mlx5_start_health_poll(dev); 1824 return err; 1825done: 1826 mlx5_enter_error_state(dev, true); 1827 return 0; 1828} 1829 1830static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev) 1831{ 1832 int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; 1833 int x; 1834 1835 mdev->priv.disable_irqs = 1; 1836 1837 /* wait for all IRQ handlers to finish processing */ 1838 for (x = 0; x != nvec; x++) 1839 synchronize_irq(mdev->priv.msix_arr[x].vector); 1840} 1841 1842static void shutdown_one(struct pci_dev *pdev) 1843{ 1844 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1845 struct mlx5_priv *priv = &dev->priv; 1846 int err; 1847 1848 /* enter polling mode */ 1849 mlx5_cmd_use_polling(dev); 1850 1851 set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state); 1852 1853 /* disable all interrupts */ 1854 mlx5_shutdown_disable_interrupts(dev); 1855 1856 err = mlx5_try_fast_unload(dev); 1857 if (err) 1858 mlx5_unload_one(dev, priv, false); 1859 mlx5_pci_disable_device(dev); 1860} 1861 1862static const struct pci_device_id mlx5_core_pci_table[] = { 1863 { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ 1864 { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ 1865 { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ 1866 { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ 1867 { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ 1868 { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ 1869 { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */ 1870 { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */ 1871 { PCI_VDEVICE(MELLANOX, 4121) }, 1872 { PCI_VDEVICE(MELLANOX, 4122) }, 1873 { PCI_VDEVICE(MELLANOX, 4123) }, 1874 { PCI_VDEVICE(MELLANOX, 4124) }, 1875 { PCI_VDEVICE(MELLANOX, 4125) }, 1876 { PCI_VDEVICE(MELLANOX, 4126) }, 1877 { PCI_VDEVICE(MELLANOX, 4127) }, 1878 { PCI_VDEVICE(MELLANOX, 4128) }, 1879 { PCI_VDEVICE(MELLANOX, 4129) }, 1880 { PCI_VDEVICE(MELLANOX, 4130) }, 1881 { PCI_VDEVICE(MELLANOX, 4131) }, 1882 { PCI_VDEVICE(MELLANOX, 4132) }, 1883 { PCI_VDEVICE(MELLANOX, 4133) }, 1884 { PCI_VDEVICE(MELLANOX, 4134) }, 1885 { PCI_VDEVICE(MELLANOX, 4135) }, 1886 { PCI_VDEVICE(MELLANOX, 4136) }, 1887 { PCI_VDEVICE(MELLANOX, 4137) }, 1888 { PCI_VDEVICE(MELLANOX, 4138) }, 1889 { PCI_VDEVICE(MELLANOX, 4139) }, 1890 { PCI_VDEVICE(MELLANOX, 4140) }, 1891 { PCI_VDEVICE(MELLANOX, 4141) }, 1892 { PCI_VDEVICE(MELLANOX, 4142) }, 1893 { PCI_VDEVICE(MELLANOX, 4143) }, 1894 { PCI_VDEVICE(MELLANOX, 4144) }, 1895 { 0, } 1896}; 1897 1898MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); 1899 1900void mlx5_disable_device(struct mlx5_core_dev *dev) 1901{ 1902 mlx5_pci_err_detected(dev->pdev, 0); 1903} 1904 1905void mlx5_recover_device(struct mlx5_core_dev *dev) 1906{ 1907 mlx5_pci_disable_device(dev); 1908 if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) 
		mlx5_pci_resume(dev->pdev);
}

struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.shutdown	= shutdown_one,
	.probe		= init_one,
	.remove		= remove_one,
	.err_handler	= &mlx5_err_handler,
#ifdef PCI_IOV
	.bsd_iov_init	= mlx5_iov_init,
	.bsd_iov_uninit	= mlx5_iov_uninit,
	.bsd_iov_add_vf	= mlx5_iov_add_vf,
#endif
};

static int __init init(void)
{
	int err;

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_ctl_init();
	if (err)
		goto err_ctl;

	return 0;

err_ctl:
	pci_unregister_driver(&mlx5_core_driver);

err_debug:
	return err;
}

static void __exit cleanup(void)
{
	mlx5_ctl_fini();
	pci_unregister_driver(&mlx5_core_driver);
}

module_init(init);
module_exit(cleanup);