mlx5_main.c revision 341934
/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c 341934 2018-12-12 12:22:40Z hselasky $
 */

#define	LINUXKPI_PARAM_PREFIX mlx5_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#include "fs_core.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
#endif
MODULE_VERSION(mlx5, 1);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define	MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
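
/*
 * Note: with LINUXKPI_PARAM_PREFIX set to "mlx5_", the module parameters
 * above surface as linuxkpi-backed tunables/sysctls (presumably under the
 * compat.linuxkpi tree, e.g. compat.linuxkpi.mlx5_debug_mask); the exact
 * OID path depends on the linuxkpi version in use.
 */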
SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 HW controls");

#define	NUMA_NO_NODE	-1

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profiles[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 17,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
	},
	[3] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 17,
	},
};

static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
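
/*
 * Size the MSI-X vector table: MLX5_EQ_VEC_COMP_BASE control vectors
 * (pages, command and async event EQs) plus one completion vector per
 * core and port, or the msix_eqvec tunable when set, clamped to the
 * number of EQs the firmware supports (log_max_eq).
 */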
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int limit = dev->msix_eqvec;
	int nvec = MLX5_EQ_VEC_COMP_BASE;
	int i;

	if (limit > 0)
		nvec += limit;
	else
		nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);

	priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};


#define	CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}
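
/*
 * Query a capability type twice: once for the currently configured
 * values (HCA_CAP_OPMOD_GET_CUR) and once for the firmware maxima
 * (HCA_CAP_OPMOD_GET_MAX).  The op_mod of QUERY_HCA_CAP carries the
 * capability type in its upper bits and the cur/max selector in bit 0.
 */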
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);

	kfree(set_ctx);
	return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
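
/*
 * ENABLE_HCA/DISABLE_HCA bracket the rest of the bring-up: the HCA is
 * enabled before any boot pages are handed to firmware and disabled
 * again only after the last page has been reclaimed.
 */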
static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		printf("mlx5_core: ERR: ""failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
		if (err) {
			printf("mlx5_core: ERR: ""failed to set ISSI=1 err(%d)\n", err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}


int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == eq_ix) {
			int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;

			snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
				 "%s-%d", name, eq_ix);

			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
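
/*
 * Create one completion EQ per MSI-X completion vector, each with
 * MLX5_COMP_EQ_SIZE entries and bound to vector i + MLX5_EQ_VEC_COMP_BASE.
 * On failure, all EQs created so far are destroyed via free_comp_eqs().
 */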
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	u64 end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}

static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	CURVNET_SET_QUIET(vnet0);
	dev_ctx->context = intf->add(dev);
	CURVNET_RESTORE();

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}
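
/*
 * Device/interface matching is a rendezvous: a newly registered
 * interface is attached to every known device, and a newly registered
 * device is attached to every known interface, both under intf_mutex,
 * so load order between the core and protocol modules does not matter.
 */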
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
err_dbg:
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}
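
/*
 * One-time software state (vendor-specific capability lookup, HCA and
 * board-id queries, EQ/CQ/QP/SRQ/MR tables) is set up on first load
 * only; mlx5_load_one()/mlx5_unload_one() take boot/cleanup flags so
 * error recovery can reset the device without recreating these tables.
 */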
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	err = mlx5_vsc_find_cap(dev);
	if (err)
		dev_err(&pdev->dev, "Unable to find vendor specific capabilities\n");

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto out;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	return 0;

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}
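
/*
 * Bring-up sequence; the error labels at the bottom unwind it in
 * reverse: command interface, firmware-init wait, ENABLE_HCA, ISSI
 * negotiation, boot/init pages and capability setup, INIT_HCA, health
 * polling, MSI-X, UARs, EQs, flow steering, and finally registration
 * with the protocol interfaces.
 */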
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
			 __func__);
		goto out;
	}

	device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load removing any previous indication of internal error,
	 * device is up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	if (boot) {
		err = mlx5_init_once(dev, priv);
		if (err) {
			dev_err(&pdev->dev, "sw objs init failed\n");
			goto err_stop_poll;
		}
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
		goto err_cleanup_once;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
		goto err_disable_msix;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "flow steering init %d\n", err);
		goto err_free_comp_eqs;
	}

	err = mlx5_register_device(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
		goto err_fs;
	}

	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

out:
	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_fs:
	mlx5_cleanup_fs(dev);

err_free_comp_eqs:
	free_comp_eqs(dev);
	unmap_bf_area(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_disable_msix:
	mlx5_disable_msix(dev);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev, boot);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}

static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	mlx5_unregister_device(dev);

	mlx5_cleanup_fs(dev);
	unmap_bf_area(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, cleanup);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
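
/*
 * Dispatch an asynchronous device event to every registered interface
 * that supplied an event callback.  The context list is walked under an
 * irqsave spinlock since events may be raised from interrupt context.
 */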
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	device_t bsddev = pdev->dev.bsddev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	priv = &dev->priv;
	if (id)
		priv->pci_dev_data = id->driver_data;

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
		device_printf(bsddev, "WARN: selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	sysctl_ctx_init(&dev->sysctl_ctx);
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
	    "Maximum number of MSIX event queue vectors, if set");

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		device_printf(bsddev, "ERR: mlx5_pci_init failed %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		device_printf(bsddev, "ERR: mlx5_health_init failed %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		device_printf(bsddev, "ERR: mlx5_load_one failed %d\n", err);
		goto clean_health;
	}

	mlx5_fwdump_prep(dev);

	pci_save_state(bsddev);
	return 0;

clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv, true)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_fwdump_clean(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
	pci_set_drvdata(pdev, NULL);
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);

	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
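
/*
 * PCI error recovery: err_detected above unloads the driver state,
 * slot_reset below re-enables the device and restores config space, and
 * resume reloads the driver once wait_vital() sees the device respond.
 */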
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
			, __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
	pci_restore_state(pdev->dev.bsddev);
	pci_save_state(pdev->dev.bsddev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non-zero value which is different than 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			dev_warn(&pdev->dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
			break;
		}
		msleep(50);
	}

	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read health counter\n", __func__, __LINE__);
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	wait_vital(pdev);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
			, __func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
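
/*
 * Fast unload asks the firmware to tear down the HCA in a single
 * command instead of unwinding every object; it is attempted only when
 * the force_teardown capability is present and the device is not
 * already in internal error state.
 */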
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	int err;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	err = mlx5_cmd_force_teardown_hca(dev);
	if (err) {
		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
		return err;
	}

	mlx5_enter_error_state(dev, true);

	return 0;
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}

struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.shutdown	= shutdown_one,
	.probe		= init_one,
	.remove		= remove_one,
	.err_handler	= &mlx5_err_handler
};

static int __init init(void)
{
	int err;

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_fwdump_init();
	if (err)
		goto err_fwdump;

	return 0;

err_fwdump:
	pci_unregister_driver(&mlx5_core_driver);

err_debug:
	return err;
}

static void __exit cleanup(void)
{
	mlx5_fwdump_fini();
	pci_unregister_driver(&mlx5_core_driver);
}

module_init(init);
module_exit(cleanup);